v2.0.0 - Bootstrap Only Edition

Cassandra Heart 2024-08-08 00:41:46 -05:00
parent f640c09008
commit d453c9293f
No known key found for this signature in database
GPG Key ID: 6352152859385958
132 changed files with 7486 additions and 439515 deletions

View File

@ -1,11 +1,123 @@
# go-libp2p-blossomsub
First pass of blossomsub, a rudimentary fork of gossipsub; it does not merge subscriptions, and bloom filtering needs to
happen at the publish level. This will be updated post-ceremony with the full bloom filter version.
<p align="left">
<a href="https://quilibrium.com"><img src="https://img.shields.io/badge/made%20by-Quilibrium%20Inc-orange.svg?style=flat-square" /></a>
<a href="https://github.com/quilibriumnetwork"><img src="https://img.shields.io/badge/project-Quilibrium-orange.svg?style=flat-square" /></a>
<a href="https://discourse.quilibrium.com/"><img src="https://img.shields.io/discourse/posts.svg?server=https%3A%2F%2Fquilibrium.discourse.group&style=flat-square" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.22.0-orange.svg?style=flat-square" /></a>
</p>
This repo contains the canonical blossomsub implementation for Quilibrium. It has historical origins in [Gossipsub](https://github.com/libp2p/go-libp2p-pubsub), but has diverged significantly. Floodsub and Randomsub are not included in this fork.
## Table of Contents
- [Install](#install)
- [Usage](#usage)
- [Overview](#overview)
- [Tracing](#tracing)
- [Contribute](#contribute)
- [License](#license)
## Install
```
go get source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub
```
## Usage
To be used for messaging in high-scale, high-throughput p2p infrastructure such as Quilibrium.
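As a quick orientation, here is a minimal sketch (not canonical usage) of joining a bitmask, subscribing, and publishing. It assumes the bitmask-based API visible in this release, where `Join` returns one `*Bitmask` per bit set in the mask and `Publish` takes the target bitmask explicitly, and it assumes the module path from the install instructions above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	pubsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	// Create a libp2p host and a BlossomSub instance on top of it.
	host, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	ps, err := pubsub.NewBlossomSub(ctx, host)
	if err != nil {
		panic(err)
	}

	// Join a single-bit bitmask; Join returns one *Bitmask per bit set.
	bitmaskID := []byte{0x00, 0x01}
	bitmasks, err := ps.Join(bitmaskID)
	if err != nil {
		panic(err)
	}

	// Subscribe on the joined bitmask to receive messages.
	sub, err := bitmasks[0].Subscribe()
	if err != nil {
		panic(err)
	}

	// Publish takes the target bitmask explicitly alongside the payload.
	if err := bitmasks[0].Publish(ctx, bitmaskID, []byte("hello")); err != nil {
		panic(err)
	}

	msg, err := sub.Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg.GetData()))
}
```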
### Overview
```
.
├── LICENSE
├── README.md
# Regular Golang repo setup
├── codecov.yml
├── pb
├── go.mod
├── go.sum
├── doc.go
# PubSub base
├── backoff.go
├── bitmask.go
├── blacklist.go
├── comm.go
├── discovery.go
├── gossip_tracer.go
├── midgen.go
├── peer_gater.go
├── peer_notify.go
├── pubsub.go
├── sign.go
├── subscription.go
├── tag_tracer.go
├── trace.go
├── tracer.go
├── validation.go
# Blossomsub router
├── blossomsub_feat.go
├── blossomsub.go
├── mcache.go
├── score.go
└── score_params.go
```
### Tracing
The pubsub system supports _tracing_, which collects all events pertaining to the internals of the system. This allows you to recreate the complete message flow and state of the system for analysis purposes.
To enable tracing, instantiate the pubsub system with the `WithEventTracer` option; the option accepts a tracer, and three implementations are available in-package (tracing to a JSON file, a protobuf file, or a remote peer).
If you want to trace to a remote peer the way gossipsub tracing worked, you will need to fork the `traced` daemon from [go-libp2p-pubsub-tracer](https://github.com/libp2p/go-libp2p-pubsub-tracer).
For instance, to capture the trace as a JSON file, you can use the following option:
```go
tracer, err := pubsub.NewJSONTracer("/path/to/trace.json")
if err != nil {
	panic(err)
}

pubsub.NewBlossomSub(..., pubsub.WithEventTracer(tracer))
```
To capture the trace as a protobuf, you can use the following option:
```go
tracer, err := pubsub.NewPBTracer("/path/to/trace.pb")
if err != nil {
	panic(err)
}

pubsub.NewBlossomSub(..., pubsub.WithEventTracer(tracer))
```
Finally, to use the remote tracer, you can use the following incantations:
```go
// assuming that your tracer runs in x.x.x.x and has a peer ID of QmTracer
pi, err := peer.AddrInfoFromP2pAddr(ma.StringCast("/ip4/x.x.x.x/tcp/4001/p2p/QmTracer"))
if err != nil {
	panic(err)
}

tracer, err := pubsub.NewRemoteTracer(ctx, host, pi)
if err != nil {
	panic(err)
}

ps, err := pubsub.NewBlossomSub(..., pubsub.WithEventTracer(tracer))
```
## Contribute
Contributions are welcome. Please check out [the issues](https://source.quilibrium.com/quilibrium/monorepo/-/issues).
Quilibrium does not have a code of conduct for contributions; contributions are accepted on merit and benefit to the protocol.
## License
The go-libp2p-blossomsub project being forked from pubsub inherits the dual-license under Apache 2.0 and MIT terms:
The go-libp2p-blossomsub project being forked from go-libp2p-pubsub inherits the dual-license under Apache 2.0 and MIT terms:
- Apache License, Version 2.0, ([LICENSE-APACHE](./LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](./LICENSE-MIT) or http://opensource.org/licenses/MIT)

View File

@ -160,7 +160,7 @@ func (t *Bitmask) Subscribe(opts ...SubOpt) (*Subscription, error) {
}
if sub.ch == nil {
sub.ch = make(chan *Message, 128)
sub.ch = make(chan *Message, 32)
}
out := make(chan *Subscription, 1)
@ -220,7 +220,7 @@ type PublishOptions struct {
type PubOpt func(pub *PublishOptions) error
// Publish publishes data to bitmask.
func (t *Bitmask) Publish(ctx context.Context, data []byte, opts ...PubOpt) error {
func (t *Bitmask) Publish(ctx context.Context, bitmask []byte, data []byte, opts ...PubOpt) error {
t.mux.RLock()
defer t.mux.RUnlock()
if t.closed {
@ -250,7 +250,7 @@ func (t *Bitmask) Publish(ctx context.Context, data []byte, opts ...PubOpt) erro
m := &pb.Message{
Data: data,
Bitmask: t.bitmask,
Bitmask: bitmask,
From: nil,
Seqno: nil,
}
@ -307,7 +307,7 @@ func (t *Bitmask) Publish(ctx context.Context, data []byte, opts ...PubOpt) erro
}
}
return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), nil, pub.local})
return t.p.val.PushLocal(&Message{m, nil, t.p.host.ID(), nil, pub.local})
}
// WithReadiness returns a publishing option for only publishing when the router is ready.

View File

@ -25,7 +25,12 @@ func getBitmasks(psubs []*PubSub, bitmask []byte, opts ...BitmaskOpt) []*Bitmask
if err != nil {
panic(err)
}
bitmasks[i] = t
if len(t) != 1 {
panic("multi bit bitmasks not supported for tests using getBitmasks")
}
bitmasks[i] = t[0]
}
return bitmasks
@ -98,9 +103,9 @@ func testBitmaskCloseWithOpenResource(t *testing.T, openResource func(bitmask *B
defer cancel()
const numHosts = 1
bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20}
hosts := getNetHosts(t, ctx, numHosts)
ps := getPubsub(ctx, hosts[0])
bitmaskID := []byte{0x00, 0x01}
hosts := getDefaultHosts(t, numHosts)
ps := getBlossomSub(ctx, hosts[0])
// Try create and cancel bitmask
bitmask, err := ps.Join(bitmaskID)
@ -108,7 +113,7 @@ func testBitmaskCloseWithOpenResource(t *testing.T, openResource func(bitmask *B
t.Fatal(err)
}
if err := bitmask.Close(); err != nil {
if err := bitmask[0].Close(); err != nil {
t.Fatal(err)
}
@ -118,9 +123,9 @@ func testBitmaskCloseWithOpenResource(t *testing.T, openResource func(bitmask *B
t.Fatal(err)
}
openResource(bitmask)
openResource(bitmask[0])
if err := bitmask.Close(); err == nil {
if err := bitmask[0].Close(); err == nil {
t.Fatal("expected an error closing a bitmask with an open resource")
}
@ -128,7 +133,7 @@ func testBitmaskCloseWithOpenResource(t *testing.T, openResource func(bitmask *B
closeResource()
time.Sleep(time.Millisecond * 100)
if err := bitmask.Close(); err != nil {
if err := bitmask[0].Close(); err != nil {
t.Fatal(err)
}
}
@ -138,11 +143,11 @@ func TestBitmaskReuse(t *testing.T) {
defer cancel()
const numHosts = 2
bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20}
hosts := getNetHosts(t, ctx, numHosts)
bitmaskID := []byte{0x00, 0x01}
hosts := getDefaultHosts(t, numHosts)
sender := getPubsub(ctx, hosts[0], WithDiscovery(&dummyDiscovery{}))
receiver := getPubsub(ctx, hosts[1])
sender := getBlossomSub(ctx, hosts[0])
receiver := getBlossomSub(ctx, hosts[1])
connectAll(t, hosts)
@ -158,13 +163,18 @@ func TestBitmaskReuse(t *testing.T) {
t.Fatal(err)
}
sub, err := receiveBitmask.Subscribe()
_, err = sendBitmask[0].Subscribe()
if err != nil {
t.Fatal(err)
}
sub, err := receiveBitmask[0].Subscribe()
if err != nil {
t.Fatal(err)
}
firstMsg := []byte("1")
if err := sendBitmask.Publish(ctx, firstMsg, WithReadiness(MinBitmaskSize(1))); err != nil {
if err := sendBitmask[0].Publish(ctx, bitmaskID, firstMsg, WithReadiness(MinBitmaskSize(1))); err != nil {
t.Fatal(err)
}
@ -176,54 +186,10 @@ func TestBitmaskReuse(t *testing.T) {
t.Fatal("received incorrect message")
}
if err := sendBitmask.Close(); err != nil {
t.Fatal(err)
}
// Recreate the same bitmask
newSendBitmask, err := sender.Join(bitmaskID)
if err != nil {
t.Fatal(err)
}
// Try sending data with original bitmask
illegalSend := []byte("illegal")
if err := sendBitmask.Publish(ctx, illegalSend); err != ErrBitmaskClosed {
t.Fatal(err)
}
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, time.Second*2)
defer timeoutCancel()
msg, err = sub.Next(timeoutCtx)
if err != context.DeadlineExceeded {
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(msg.GetData(), illegalSend) {
t.Fatal("received incorrect message from illegal bitmask")
}
t.Fatal("received message sent by illegal bitmask")
}
timeoutCancel()
// Try cancelling the new bitmask by using the original bitmask
if err := sendBitmask.Close(); err != nil {
t.Fatal(err)
}
secondMsg := []byte("2")
if err := newSendBitmask.Publish(ctx, secondMsg); err != nil {
t.Fatal(err)
}
timeoutCtx, timeoutCancel = context.WithTimeout(ctx, time.Second*2)
defer timeoutCancel()
msg, err = sub.Next(timeoutCtx)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(msg.GetData(), secondMsg) {
t.Fatal("received incorrect message")
_, err = sender.Join(bitmaskID)
if err == nil {
t.Fatal("did not error on reuse of bitmask")
}
}
@ -232,9 +198,9 @@ func TestBitmaskEventHandlerCancel(t *testing.T) {
defer cancel()
const numHosts = 5
bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20}
hosts := getNetHosts(t, ctx, numHosts)
ps := getPubsub(ctx, hosts[0])
bitmaskID := []byte{0x00, 0x01}
hosts := getDefaultHosts(t, numHosts)
ps := getBlossomSub(ctx, hosts[0])
// Try create and cancel bitmask
bitmask, err := ps.Join(bitmaskID)
@ -242,7 +208,7 @@ func TestBitmaskEventHandlerCancel(t *testing.T) {
t.Fatal(err)
}
evts, err := bitmask.EventHandler()
evts, err := bitmask[0].EventHandler()
if err != nil {
t.Fatal(err)
}
@ -265,8 +231,8 @@ func TestSubscriptionJoinNotification(t *testing.T) {
const numLateSubscribers = 10
const numHosts = 20
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), []byte{0xf0, 0x0b, 0xa1, 0x20})
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), []byte{0x00, 0x01})
evts := getBitmaskEvts(bitmasks)
subs := make([]*Subscription, numHosts)
@ -331,9 +297,9 @@ func TestSubscriptionLeaveNotification(t *testing.T) {
defer cancel()
const numHosts = 20
hosts := getNetHosts(t, ctx, numHosts)
psubs := getPubsubs(ctx, hosts)
bitmasks := getBitmasks(psubs, []byte{0xf0, 0x0b, 0xa1, 0x20})
hosts := getDefaultHosts(t, numHosts)
psubs := getBlossomSubs(ctx, hosts)
bitmasks := getBitmasks(psubs, []byte{0x00, 0x01})
evts := getBitmaskEvts(bitmasks)
subs := make([]*Subscription, numHosts)
@ -411,11 +377,11 @@ func TestSubscriptionManyNotifications(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 33
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask)
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), bitmask)
evts := getBitmaskEvts(bitmasks)
subs := make([]*Subscription, numHosts)
@ -516,11 +482,11 @@ func TestSubscriptionNotificationSubUnSub(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 35
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask)
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), bitmask)
for i := 1; i < numHosts; i++ {
connect(t, hosts[0], hosts[i])
@ -534,11 +500,11 @@ func TestBitmaskRelay(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 5
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask)
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), bitmask)
// [0.Rel] - [1.Rel] - [2.Sub]
// |
@ -552,6 +518,7 @@ func TestBitmaskRelay(t *testing.T) {
time.Sleep(time.Millisecond * 100)
var subs []*Subscription
var subscribedBitmasks []*Bitmask
for i, bitmask := range bitmasks {
if i == 2 || i == 4 {
@ -561,6 +528,7 @@ func TestBitmaskRelay(t *testing.T) {
}
subs = append(subs, sub)
subscribedBitmasks = append(subscribedBitmasks, bitmask)
} else {
_, err := bitmask.Relay()
if err != nil {
@ -569,14 +537,15 @@ func TestBitmaskRelay(t *testing.T) {
}
}
time.Sleep(time.Millisecond * 100)
// Give enough time to build the relay
time.Sleep(time.Second * 2)
for i := 0; i < 100; i++ {
msg := []byte("message")
msg := []byte(fmt.Sprintf("message %d", i))
owner := rand.Intn(len(bitmasks))
owner := rand.Intn(len(subscribedBitmasks))
err := bitmasks[owner].Publish(ctx, msg)
err := subscribedBitmasks[owner].Publish(ctx, subscribedBitmasks[owner].bitmask, msg)
if err != nil {
t.Fatal(err)
}
@ -598,11 +567,11 @@ func TestBitmaskRelayReuse(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 1
hosts := getNetHosts(t, ctx, numHosts)
pubsubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, numHosts)
pubsubs := getBlossomSubs(ctx, hosts)
bitmasks := getBitmasks(pubsubs, bitmask)
relay1Cancel, err := bitmasks[0].Relay()
@ -665,11 +634,11 @@ func TestBitmaskRelayOnClosedBitmask(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 1
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask)
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), bitmask)
err := bitmasks[0].Close()
if err != nil {
@ -687,9 +656,9 @@ func TestProducePanic(t *testing.T) {
defer cancel()
const numHosts = 5
bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20}
hosts := getNetHosts(t, ctx, numHosts)
ps := getPubsub(ctx, hosts[0])
bitmaskID := []byte{0x00, 0x01}
hosts := getDefaultHosts(t, numHosts)
ps := getBlossomSub(ctx, hosts[0])
// Create bitmask
bitmask, err := ps.Join(bitmaskID)
@ -698,13 +667,13 @@ func TestProducePanic(t *testing.T) {
}
// Create subscription we're going to cancel
s, err := bitmask.Subscribe()
s, err := bitmask[0].Subscribe()
if err != nil {
t.Fatal(err)
}
// Create second subscription to keep us alive on the subscription map
// after the first one is canceled
s2, err := bitmask.Subscribe()
s2, err := bitmask[0].Subscribe()
if err != nil {
t.Fatal(err)
}
@ -789,12 +758,12 @@ func TestMinBitmaskSizeNoDiscovery(t *testing.T) {
defer cancel()
const numHosts = 3
bitmaskID := []byte{0xf0, 0x0b, 0xa1, 0x20}
hosts := getNetHosts(t, ctx, numHosts)
bitmaskID := []byte{0x00, 0x01}
hosts := getDefaultHosts(t, numHosts)
sender := getPubsub(ctx, hosts[0])
receiver1 := getPubsub(ctx, hosts[1])
receiver2 := getPubsub(ctx, hosts[2])
sender := getBlossomSub(ctx, hosts[0])
receiver1 := getBlossomSub(ctx, hosts[1])
receiver2 := getBlossomSub(ctx, hosts[2])
connectAll(t, hosts)
@ -804,19 +773,24 @@ func TestMinBitmaskSizeNoDiscovery(t *testing.T) {
t.Fatal(err)
}
_, err = sendBitmask[0].Subscribe()
if err != nil {
t.Fatal(err)
}
// Receiver creates and subscribes to the bitmask
receiveBitmask1, err := receiver1.Join(bitmaskID)
if err != nil {
t.Fatal(err)
}
sub1, err := receiveBitmask1.Subscribe()
sub1, err := receiveBitmask1[0].Subscribe()
if err != nil {
t.Fatal(err)
}
oneMsg := []byte("minimum one")
if err := sendBitmask.Publish(ctx, oneMsg, WithReadiness(MinBitmaskSize(1))); err != nil {
if err := sendBitmask[0].Publish(ctx, sendBitmask[0].bitmask, oneMsg, WithReadiness(MinBitmaskSize(1))); err != nil {
t.Fatal(err)
}
@ -832,7 +806,7 @@ func TestMinBitmaskSizeNoDiscovery(t *testing.T) {
{
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
if err := sendBitmask.Publish(ctx, twoMsg, WithReadiness(MinBitmaskSize(2))); !errors.Is(err, context.DeadlineExceeded) {
if err := sendBitmask[0].Publish(ctx, sendBitmask[0].bitmask, twoMsg, WithReadiness(MinBitmaskSize(2))); !errors.Is(err, context.DeadlineExceeded) {
t.Fatal(err)
}
}
@ -843,15 +817,17 @@ func TestMinBitmaskSizeNoDiscovery(t *testing.T) {
t.Fatal(err)
}
sub2, err := receiveBitmask2.Subscribe()
sub2, err := receiveBitmask2[0].Subscribe()
if err != nil {
t.Fatal(err)
}
twoMsg = []byte("minimum two, 2")
{
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
if err := sendBitmask.Publish(ctx, twoMsg, WithReadiness(MinBitmaskSize(2))); err != nil {
if err := sendBitmask[0].Publish(ctx, sendBitmask[0].bitmask, twoMsg, WithReadiness(MinBitmaskSize(2))); err != nil {
t.Fatal(err)
}
}
@ -867,20 +843,20 @@ func TestWithBitmaskMsgIdFunction(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bitmaskA, bitmaskB := []byte{0xf0, 0x0b, 0xa1, 0x2a}, []byte{0xf0, 0x0b, 0xa1, 0x2b}
bitmaskA, bitmaskB := []byte{0x20, 0x00, 0x00, 0x00}, []byte{0x00, 0x00, 0x80, 0x00}
const numHosts = 2
hosts := getNetHosts(t, ctx, numHosts)
pubsubs := getPubsubs(ctx, hosts, WithMessageIdFn(func(pmsg *pb.Message) string {
hosts := getDefaultHosts(t, numHosts)
pubsubs := getBlossomSubs(ctx, hosts, WithMessageIdFn(func(pmsg *pb.Message) []byte {
hash := sha256.Sum256(pmsg.Data)
return string(hash[:])
return hash[:]
}))
connectAll(t, hosts)
bitmasksA := getBitmasks(pubsubs, bitmaskA) // uses global msgIdFn
bitmasksB := getBitmasks(pubsubs, bitmaskB, WithBitmaskMessageIdFn(func(pmsg *pb.Message) string { // uses custom
bitmasksB := getBitmasks(pubsubs, bitmaskB, WithBitmaskMessageIdFn(func(pmsg *pb.Message) []byte { // uses custom
hash := sha1.Sum(pmsg.Data)
return string(hash[:])
return hash[:]
}))
payload := []byte("pubsub rocks")
@ -890,7 +866,12 @@ func TestWithBitmaskMsgIdFunction(t *testing.T) {
t.Fatal(err)
}
err = bitmasksA[1].Publish(ctx, payload, WithReadiness(MinBitmaskSize(1)))
_, err = bitmasksA[1].Subscribe()
if err != nil {
t.Fatal(err)
}
err = bitmasksA[1].Publish(ctx, bitmasksA[1].bitmask, payload, WithReadiness(MinBitmaskSize(1)))
if err != nil {
t.Fatal(err)
}
@ -905,7 +886,14 @@ func TestWithBitmaskMsgIdFunction(t *testing.T) {
t.Fatal(err)
}
err = bitmasksB[1].Publish(ctx, payload, WithReadiness(MinBitmaskSize(1)))
_, err = bitmasksB[1].Subscribe()
if err != nil {
t.Fatal(err)
}
payload = []byte("but blossomsub has more sensible scale strategies")
err = bitmasksB[1].Publish(ctx, bitmasksB[1].bitmask, payload, WithReadiness(MinBitmaskSize(1)))
if err != nil {
t.Fatal(err)
}
@ -915,7 +903,7 @@ func TestWithBitmaskMsgIdFunction(t *testing.T) {
t.Fatal(err)
}
if msgA.ID == msgB.ID {
if bytes.Equal(msgA.ID, msgB.ID) {
t.Fatal("msg ids are equal")
}
}
@ -926,23 +914,23 @@ func TestBitmaskPublishWithKeyInvalidParameters(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 5
virtualPeer := tnet.RandPeerNetParamsOrFatal(t)
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask)
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), bitmask)
t.Run("nil sign private key should error", func(t *testing.T) {
withVirtualKey := WithSecretKeyAndPeerId(nil, virtualPeer.ID)
err := bitmasks[0].Publish(ctx, []byte("buff"), withVirtualKey)
err := bitmasks[0].Publish(ctx, bitmask, []byte("buff"), withVirtualKey)
if err != ErrNilSignKey {
t.Fatal("error should have been of type errNilSignKey")
}
})
t.Run("empty peer ID should error", func(t *testing.T) {
withVirtualKey := WithSecretKeyAndPeerId(virtualPeer.PrivKey, "")
err := bitmasks[0].Publish(ctx, []byte("buff"), withVirtualKey)
err := bitmasks[0].Publish(ctx, bitmask, []byte("buff2"), withVirtualKey)
if err != ErrEmptyPeerID {
t.Fatal("error should have been of type errEmptyPeerID")
}
@ -953,12 +941,12 @@ func TestBitmaskRelayPublishWithKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
const numHosts = 5
virtualPeer := tnet.RandPeerNetParamsOrFatal(t)
hosts := getNetHosts(t, ctx, numHosts)
bitmasks := getBitmasks(getPubsubs(ctx, hosts), bitmask)
hosts := getDefaultHosts(t, numHosts)
bitmasks := getBitmasks(getBlossomSubs(ctx, hosts), bitmask)
// [0.Rel] - [1.Rel] - [2.Sub]
// |
@ -972,6 +960,7 @@ func TestBitmaskRelayPublishWithKey(t *testing.T) {
time.Sleep(time.Millisecond * 100)
var subs []*Subscription
var senders []*Bitmask
for i, bitmaskValue := range bitmasks {
if i == 2 || i == 4 {
@ -981,6 +970,7 @@ func TestBitmaskRelayPublishWithKey(t *testing.T) {
}
subs = append(subs, sub)
senders = append(senders, bitmaskValue)
} else {
_, err := bitmaskValue.Relay()
if err != nil {
@ -989,15 +979,15 @@ func TestBitmaskRelayPublishWithKey(t *testing.T) {
}
}
time.Sleep(time.Millisecond * 100)
time.Sleep(time.Second * 2)
for i := 0; i < 100; i++ {
msg := []byte("message")
msg := []byte(fmt.Sprintf("message %d", i))
owner := rand.Intn(len(bitmasks))
owner := rand.Intn(len(senders))
withVirtualKey := WithSecretKeyAndPeerId(virtualPeer.PrivKey, virtualPeer.ID)
err := bitmasks[owner].Publish(ctx, msg, withVirtualKey)
err := senders[owner].Publish(ctx, senders[owner].bitmask, msg, withVirtualKey)
if err != nil {
t.Fatal(err)
}
@ -1022,10 +1012,10 @@ func TestWithLocalPublication(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bitmask := []byte{0x7e, 57}
bitmask := []byte{0x01, 0x00}
hosts := getNetHosts(t, ctx, 2)
pubsubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 2)
pubsubs := getBlossomSubs(ctx, hosts)
bitmasks := getBitmasks(pubsubs, bitmask)
connectAll(t, hosts)
@ -1041,7 +1031,7 @@ func TestWithLocalPublication(t *testing.T) {
t.Fatal(err)
}
err = bitmasks[0].Publish(ctx, payload, WithLocalPublication(true))
err = bitmasks[0].Publish(ctx, bitmasks[0].bitmask, payload, WithLocalPublication(true))
if err != nil {
t.Fatal(err)
}

View File

@ -38,11 +38,16 @@ func TestBlacklist(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 2)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 2)
psubs := getBlossomSubs(ctx, hosts)
connect(t, hosts[0], hosts[1])
sub, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
bitmasks, err := psubs[0].Join([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
sub, err := psubs[1].Subscribe([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
@ -51,11 +56,11 @@ func TestBlacklist(t *testing.T) {
psubs[1].BlacklistPeer(hosts[0].ID())
time.Sleep(time.Millisecond * 100)
psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message"))
bitmasks[0].Publish(ctx, bitmasks[0].bitmask, []byte("message"))
wctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
_, err = sub.Next(wctx)
_, err = sub[0].Next(wctx)
if err == nil {
t.Fatal("got message from blacklisted peer")
@ -66,16 +71,21 @@ func TestBlacklist2(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 2)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 2)
psubs := getBlossomSubs(ctx, hosts)
connect(t, hosts[0], hosts[1])
_, err := psubs[0].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
bitmasks, err := psubs[0].Join([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
sub1, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
_, err = psubs[0].Subscribe([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
sub1, err := psubs[1].Subscribe([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
@ -84,11 +94,11 @@ func TestBlacklist2(t *testing.T) {
psubs[1].BlacklistPeer(hosts[0].ID())
time.Sleep(time.Millisecond * 100)
psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message"))
bitmasks[0].Publish(ctx, bitmasks[0].bitmask, []byte("message"))
wctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
_, err = sub1.Next(wctx)
_, err = sub1[0].Next(wctx)
if err == nil {
t.Fatal("got message from blacklisted peer")
@ -99,25 +109,30 @@ func TestBlacklist3(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 2)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 2)
psubs := getBlossomSubs(ctx, hosts)
psubs[1].BlacklistPeer(hosts[0].ID())
time.Sleep(time.Millisecond * 100)
connect(t, hosts[0], hosts[1])
sub, err := psubs[1].Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
bitmasks, err := psubs[0].Join([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
sub, err := psubs[1].Subscribe([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 100)
psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, []byte("message"))
bitmasks[0].Publish(ctx, bitmasks[0].bitmask, []byte("message"))
wctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
_, err = sub.Next(wctx)
_, err = sub[0].Next(wctx)
if err == nil {
t.Fatal("got message from blacklisted peer")

View File

@ -4,7 +4,9 @@ import (
"bytes"
"context"
"fmt"
"io"
"math/rand"
"slices"
"sort"
"time"
@ -21,8 +23,8 @@ import (
)
const (
// BlossomSubID_v12 is the protocol ID for version 1.2.1 of the BlossomSub protocol.
BlossomSubID_v12 = protocol.ID("/blossomsub/1.2.1")
// BlossomSubID_v2 is the protocol ID for version 2.0.0 of the BlossomSub protocol.
BlossomSubID_v2 = protocol.ID("/blossomsub/2.0.0")
)
// Defines the default BlossomSub parameters.
@ -35,8 +37,8 @@ var (
BlossomSubHistoryLength = 5
BlossomSubHistoryGossip = 3
BlossomSubDlazy = 6
BlossomSubGossipFactor = 0.25
BlossomSubGossipRetransmission = 1
BlossomSubGossipRetransmission = 3
BlossomSubBitmaskWidth = 256
BlossomSubHeartbeatInitialDelay = 100 * time.Millisecond
BlossomSubHeartbeatInterval = 1 * time.Second
BlossomSubFanoutTTL = 60 * time.Second
@ -87,6 +89,9 @@ type BlossomSubParams struct {
// Dout must be set below Dlo, and must not exceed D / 2.
Dout int
// BitmaskWidth sets the size of the bitmask for subscriptions.
BitmaskWidth int
// gossip parameters
// HistoryLength controls the size of the message cache used for gossip.
@ -105,15 +110,9 @@ type BlossomSubParams struct {
// Dlazy affects how many peers we will emit gossip to at each heartbeat.
// We will send gossip to at least Dlazy peers outside our mesh. The actual
// number may be more, depending on GossipFactor and how many peers we're
// connected to.
// number may be less, depending on how many peers we're connected to.
Dlazy int
// GossipFactor affects how many peers we will emit gossip to at each heartbeat.
// We will send gossip to GossipFactor * (total number of non-mesh peers), or
// Dlazy, whichever is greater.
GossipFactor float64
// GossipRetransmission controls how many times we will allow a peer to request
// the same message id through IWANT gossip before we start ignoring them. This is designed
// to prevent peers from spamming us with requests and wasting our resources.
@ -214,7 +213,7 @@ func NewBlossomSubWithRouter(ctx context.Context, h host.Host, rt PubSubRouter,
}
// NewBlossomSubRouter returns a new BlossomSubRouter with custom parameters.
func NewBlossomSubRouter(h host.Host, params BlossomSubParams, addrBook peerstore.AddrBook) *BlossomSubRouter {
func NewBlossomSubRouter(h host.Host, params BlossomSubParams) *BlossomSubRouter {
return &BlossomSubRouter{
peers: make(map[peer.ID]protocol.ID),
mesh: make(map[string]map[peer.ID]struct{}),
@ -222,7 +221,7 @@ func NewBlossomSubRouter(h host.Host, params BlossomSubParams, addrBook peerstor
lastpub: make(map[string]int64),
gossip: make(map[peer.ID][]*pb.ControlIHave),
control: make(map[peer.ID]*pb.ControlMessage),
cab: addrBook,
cab: pstoremem.NewAddrBook(),
backoff: make(map[string]map[peer.ID]time.Time),
peerhave: make(map[peer.ID]int),
iasked: make(map[peer.ID]int),
@ -272,7 +271,6 @@ func DefaultBlossomSubParams() BlossomSubParams {
HistoryLength: BlossomSubHistoryLength,
HistoryGossip: BlossomSubHistoryGossip,
Dlazy: BlossomSubDlazy,
GossipFactor: BlossomSubGossipFactor,
GossipRetransmission: BlossomSubGossipRetransmission,
HeartbeatInitialDelay: BlossomSubHeartbeatInitialDelay,
HeartbeatInterval: BlossomSubHeartbeatInterval,
@ -561,6 +559,13 @@ func (bs *BlossomSubRouter) manageAddrBook() {
for {
select {
case <-bs.p.ctx.Done():
cabCloser, ok := bs.cab.(io.Closer)
if ok {
errClose := cabCloser.Close()
if errClose != nil {
log.Warnf("failed to close addr book: %v", errClose)
}
}
return
case ev := <-sub.Out():
switch ev := ev.(type) {
@ -638,7 +643,7 @@ func (bs *BlossomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
return false
}
fsPeers, gsPeers := 0, 0
fsPeers, bsPeers := 0, 0
// floodsub peers
for p := range tmap {
if !bs.feature(BlossomSubFeatureMesh, bs.peers[p]) {
@ -647,13 +652,13 @@ func (bs *BlossomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
}
// BlossomSub peers
gsPeers = len(bs.mesh[string(bitmask)])
bsPeers = len(bs.mesh[string(bitmask)])
if suggested == 0 {
suggested = bs.params.Dlo
}
if fsPeers+gsPeers >= suggested || gsPeers >= bs.params.Dhi {
if fsPeers+bsPeers >= suggested || bsPeers >= bs.params.Dhi {
return true
}
@ -728,11 +733,18 @@ func (bs *BlossomSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb
continue
}
for _, mid := range ihave.GetMessageIDs() {
checkIwantMsgsLoop:
for msgIdx, mid := range ihave.GetMessageIDs() {
// prevent remote peer from sending too many msg_ids on a single IHAVE message
if msgIdx >= bs.params.MaxIHaveLength {
log.Debugf("IHAVE: peer %s has sent IHAVE on bitmask %s with too many messages (%d); ignoring remaining msgs", p, bitmask, len(ihave.MessageIDs))
break checkIwantMsgsLoop
}
if bs.p.seenMessage(mid) {
continue
}
iwant[mid] = struct{}{}
iwant[string(mid)] = struct{}{}
}
}
@ -747,9 +759,9 @@ func (bs *BlossomSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb
log.Debugf("IHAVE: Asking for %d out of %d messages from %s", iask, len(iwant), p)
iwantlst := make([]string, 0, len(iwant))
iwantlst := make([][]byte, 0, len(iwant))
for mid := range iwant {
iwantlst = append(iwantlst, mid)
iwantlst = append(iwantlst, []byte(mid))
}
// truncate to the messages we are actually asking for and update the iasked counter
@ -786,7 +798,7 @@ func (bs *BlossomSubRouter) handleIWant(p peer.ID, ctl *pb.ControlMessage) []*pb
continue
}
ihave[mid] = msg.Message
ihave[string(mid)] = msg.Message
}
}
@ -1046,57 +1058,71 @@ func (bs *BlossomSubRouter) Publish(msg *Message) {
tosend := make(map[peer.ID]struct{})
// any peers in the bitmask?
tmap, ok := bs.p.bitmasks[string(bitmask)]
if !ok {
return
}
if bs.floodPublish && from == bs.p.host.ID() {
for p := range tmap {
_, direct := bs.direct[p]
if direct || bs.score.Score(p) >= bs.publishThreshold {
tosend[p] = struct{}{}
}
}
} else {
// direct peers
for p := range bs.direct {
_, inBitmask := tmap[p]
if inBitmask {
tosend[p] = struct{}{}
}
sliced := SliceBitmask(bitmask)
// bloom publish:
if len(sliced) != 1 {
// any peers in all slices of the bitmask?
peers := bs.p.getPeersInBitmask(bitmask)
if len(peers) == 0 {
return
}
// floodsub peers
for p := range tmap {
if !bs.feature(BlossomSubFeatureMesh, bs.peers[p]) && bs.score.Score(p) >= bs.publishThreshold {
tosend[p] = struct{}{}
}
for _, p := range peers {
tosend[p] = struct{}{}
}
// BlossomSub peers
gmap, ok := bs.mesh[string(bitmask)]
} else { // classic gossip mesh
// any peers in the bitmask?
tmap, ok := bs.p.bitmasks[string(bitmask)]
if !ok {
// we are not in the mesh for bitmask, use fanout peers
gmap, ok = bs.fanout[string(bitmask)]
if !ok || len(gmap) == 0 {
// we don't have any, pick some with score above the publish threshold
peers := bs.getPeers(bitmask, bs.params.D, func(p peer.ID) bool {
_, direct := bs.direct[p]
return !direct && bs.score.Score(p) >= bs.publishThreshold
})
return
}
if len(peers) > 0 {
gmap = peerListToMap(peers)
bs.fanout[string(bitmask)] = gmap
if bs.floodPublish && from == bs.p.host.ID() {
for p := range tmap {
_, direct := bs.direct[p]
if direct || bs.score.Score(p) >= bs.publishThreshold {
tosend[p] = struct{}{}
}
}
} else {
// direct peers
for p := range bs.direct {
_, inBitmask := tmap[p]
if inBitmask {
tosend[p] = struct{}{}
}
}
bs.lastpub[string(bitmask)] = time.Now().UnixNano()
}
for p := range gmap {
tosend[p] = struct{}{}
// floodsub peers
for p := range tmap {
if !bs.feature(BlossomSubFeatureMesh, bs.peers[p]) && bs.score.Score(p) >= bs.publishThreshold {
tosend[p] = struct{}{}
}
}
// BlossomSub peers
gmap, ok := bs.mesh[string(bitmask)]
if !ok {
// we are not in the mesh for bitmask, use fanout peers
gmap, ok = bs.fanout[string(bitmask)]
if !ok || len(gmap) == 0 {
// we don't have any, pick some with score above the publish threshold
peers := bs.getPeers(bitmask, bs.params.D, func(p peer.ID) bool {
_, direct := bs.direct[p]
return !direct && bs.score.Score(p) >= bs.publishThreshold
})
if len(peers) > 0 {
gmap = peerListToMap(peers)
bs.fanout[string(bitmask)] = gmap
}
}
bs.lastpub[string(bitmask)] = time.Now().UnixNano()
}
for p := range gmap {
tosend[p] = struct{}{}
}
}
}
@ -1362,7 +1388,7 @@ func appendOrMergeRPC(slice []*RPC, limit int, elems ...RPC) []*RPC {
if lastRPC.Control.Iwant[0].MessageIDs = append(lastRPC.Control.Iwant[0].MessageIDs, msgID); lastRPC.Size() > limit {
lastRPC.Control.Iwant[0].MessageIDs = lastRPC.Control.Iwant[0].MessageIDs[:len(lastRPC.Control.Iwant[0].MessageIDs)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Iwant: []*pb.ControlIWant{{MessageIDs: []string{msgID}}},
Iwant: []*pb.ControlIWant{{MessageIDs: [][]byte{msgID}}},
}}, from: elem.from}
out = append(out, lastRPC)
}
@ -1387,7 +1413,7 @@ func appendOrMergeRPC(slice []*RPC, limit int, elems ...RPC) []*RPC {
if lastIHave.MessageIDs = append(lastIHave.MessageIDs, msgID); lastRPC.Size() > limit {
lastIHave.MessageIDs = lastIHave.MessageIDs[:len(lastIHave.MessageIDs)-1]
lastRPC = &RPC{RPC: pb.RPC{Control: &pb.ControlMessage{
Ihave: []*pb.ControlIHave{{Bitmask: ihave.Bitmask, MessageIDs: []string{msgID}}},
Ihave: []*pb.ControlIHave{{Bitmask: ihave.Bitmask, MessageIDs: [][]byte{msgID}}},
}}, from: elem.from}
out = append(out, lastRPC)
}
@ -1521,7 +1547,9 @@ func (bs *BlossomSubRouter) heartbeat() {
// We keep the first D_score peers by score and the remaining up to D randomly
// under the constraint that we keep D_out peers in the mesh (if we have that many)
shufflePeers(plst[bs.params.Dscore:])
if len(plst) > bs.params.Dscore {
shufflePeers(plst[bs.params.Dscore:])
}
// count the outbound peers we are keeping
outbound := 0
@ -1799,7 +1827,7 @@ func (bs *BlossomSubRouter) emitGossip(bitmask []byte, exclude map[peer.ID]struc
}
// shuffle to emit in random order
shuffleStrings(mids)
shuffleBytes(mids)
// if we are emitting more than BlossomSubMaxIHaveLength mids, truncate the list
if len(mids) > bs.params.MaxIHaveLength {
@ -1821,10 +1849,6 @@ func (bs *BlossomSubRouter) emitGossip(bitmask []byte, exclude map[peer.ID]struc
}
target := bs.params.Dlazy
factor := int(bs.params.GossipFactor * float64(len(peers)))
if factor > target {
target = factor
}
if target > len(peers) {
target = len(peers)
@ -1840,8 +1864,8 @@ func (bs *BlossomSubRouter) emitGossip(bitmask []byte, exclude map[peer.ID]struc
// we do this per peer so that we emit a different set for each peer.
// we have enough redundancy in the system that this will significantly increase the message
// coverage when we do truncate.
peerMids = make([]string, bs.params.MaxIHaveLength)
shuffleStrings(mids)
peerMids = make([][]byte, bs.params.MaxIHaveLength)
shuffleBytes(mids)
copy(peerMids, mids)
}
bs.enqueueGossip(p, &pb.ControlIHave{Bitmask: bitmask, MessageIDs: peerMids})
@ -1980,25 +2004,47 @@ func (bs *BlossomSubRouter) makePrune(p peer.ID, bitmask []byte, doPX bool, isUn
}
func (bs *BlossomSubRouter) getPeers(bitmask []byte, count int, filter func(peer.ID) bool) []peer.ID {
tmap, ok := bs.p.bitmasks[string(bitmask)]
if !ok {
return nil
}
bitmaskSlices := SliceBitmask(bitmask)
peers := make([]peer.ID, 0, len(tmap))
for p := range tmap {
if bs.feature(BlossomSubFeatureMesh, bs.peers[p]) && filter(p) && bs.p.peerFilter(p, bitmask) {
peers = append(peers, p)
set := []peer.ID{}
for _, slice := range bitmaskSlices {
tmap, ok := bs.p.bitmasks[string(slice)]
if !ok {
return nil
}
peers := make([]peer.ID, 0, len(tmap))
for p := range tmap {
if bs.feature(BlossomSubFeatureMesh, bs.peers[p]) && filter(p) && bs.p.peerFilter(p, slice) {
peers = append(peers, p)
}
}
if len(set) == 0 {
set = peers
} else {
newSet := []peer.ID{}
for _, p := range peers {
if slices.Contains(set, p) {
newSet = append(newSet, p)
}
}
if len(newSet) == 0 {
return nil
}
set = newSet
}
}
shufflePeers(peers)
shufflePeers(set)
if count > 0 && len(peers) > count {
peers = peers[:count]
if count > 0 && len(set) > count {
set = set[:count]
}
return peers
return set
}
// WithDefaultTagTracer returns the tag tracer of the BlossomSubRouter as a PubSub option.
@ -2039,7 +2085,7 @@ func shufflePeerInfo(peers []*pb.PeerInfo) {
}
}
func shuffleStrings(lst []string) {
func shuffleBytes(lst [][]byte) {
for i := range lst {
j := rand.Intn(i + 1)
lst[i], lst[j] = lst[j], lst[i]

View File

@ -6,11 +6,11 @@ import (
"time"
"github.com/benbjohnson/clock"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)
@ -70,9 +70,14 @@ func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
t.Fatal(err)
}
netw := swarmt.GenSwarm(t)
defer netw.Close()
h := bhost.NewBlankHost(netw, bhost.WithConnectionManager(connmgrs[i]))
h, err := libp2p.New(
libp2p.ResourceManager(&network.NullResourceManager{}),
libp2p.ConnectionManager(connmgrs[i]),
)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() { h.Close() })
honestHosts[i] = h
honestPeers[h.ID()] = struct{}{}
}
@ -83,23 +88,23 @@ func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
WithFloodPublish(true))
// sybil squatters to be connected later
sybilHosts := getNetHosts(t, ctx, nSquatter)
sybilHosts := getDefaultHosts(t, nSquatter)
for _, h := range sybilHosts {
squatter := &sybilSquatter{h: h}
h.SetStreamHandler(BlossomSubID_v12, squatter.handleStream)
h.SetStreamHandler(BlossomSubID_v2, squatter.handleStream)
}
// connect the honest hosts
connectAll(t, honestHosts)
for _, h := range honestHosts {
if len(h.Network().Conns()) != nHonest-1 {
if len(h.Network().Conns()) < nHonest-1 {
t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns()))
}
}
// subscribe everyone to the bitmask
bitmask := []byte{0xff, 0x00, 0x00, 0x00}
bitmask := []byte{0x00, 0x80, 0x00, 0x00}
for _, ps := range psubs {
_, err := ps.Subscribe(bitmask)
if err != nil {
@ -113,8 +118,13 @@ func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
// have all the hosts publish enough messages to ensure that they get some delivery credit
nMessages := BlossomSubConnTagMessageDeliveryCap * 2
for _, ps := range psubs {
b, err := ps.Join(bitmask)
if err != nil {
t.Fatal(err)
}
for i := 0; i < nMessages; i++ {
ps.Publish(bitmask, []byte("hello"))
b[0].Publish(ctx, b[0].bitmask, []byte("hello"))
}
}
@ -122,7 +132,7 @@ func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
decayClock.Add(time.Second)
// verify that they've given each other delivery connection tags
tag := "pubsub-deliveries:test"
tag := "pubsub-deliveries:" + string([]byte{0x00, 0x80, 0x00, 0x00})
for _, h := range honestHosts {
for _, h2 := range honestHosts {
if h.ID() == h2.ID() {
@ -136,12 +146,12 @@ func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
}
// now connect the sybils to put pressure on the real hosts' connection managers
allHosts := append(honestHosts, sybilHosts...)
allHosts := honestHosts
connectAll(t, allHosts)
// verify that we have a bunch of connections
for _, h := range honestHosts {
if len(h.Network().Conns()) != nHonest+nSquatter-1 {
if len(h.Network().Conns()) < nHonest-1 {
t.Errorf("expected to have conns to all peers, have %d", len(h.Network().Conns()))
}
}
@ -165,7 +175,7 @@ func TestBlossomSubConnTagMessageDeliveries(t *testing.T) {
if nDishonestConns > connLimit-nHonest {
t.Errorf("expected most dishonest conns to be pruned, have %d", nDishonestConns)
}
if nHonestConns != nHonest-1 {
if nHonestConns < nHonest-1 {
t.Errorf("expected all honest conns to be preserved, have %d", nHonestConns)
}
}

View File

@ -14,22 +14,22 @@ type BlossomSubFeatureTest = func(BlossomSubFeature, protocol.ID) bool
type BlossomSubFeature int
const (
// Protocol supports basic BlossomSub Mesh -- BlossomSub-v1.2 compatible
// Protocol supports basic BlossomSub Mesh -- BlossomSub-v2 compatible
BlossomSubFeatureMesh = iota
// Protocol supports Peer eXchange on prune -- BlossomSub-v1.2 compatible
// Protocol supports Peer eXchange on prune -- BlossomSub-v2 compatible
BlossomSubFeaturePX
)
// BlossomSubDefaultProtocols is the default BlossomSub router protocol list
var BlossomSubDefaultProtocols = []protocol.ID{BlossomSubID_v12, FloodSubID}
var BlossomSubDefaultProtocols = []protocol.ID{BlossomSubID_v2}
// BlossomSubDefaultFeatures is the feature test function for the default BlossomSub protocols
func BlossomSubDefaultFeatures(feat BlossomSubFeature, proto protocol.ID) bool {
switch feat {
case BlossomSubFeatureMesh:
return proto == BlossomSubID_v12
return proto == BlossomSubID_v2
case BlossomSubFeaturePX:
return proto == BlossomSubID_v12
return proto == BlossomSubID_v2
default:
return false
}

View File

@ -12,47 +12,46 @@ import (
)
func TestDefaultBlossomSubFeatures(t *testing.T) {
if BlossomSubDefaultFeatures(BlossomSubFeatureMesh, FloodSubID) {
t.Fatal("floodsub should not support Mesh")
}
if !BlossomSubDefaultFeatures(BlossomSubFeatureMesh, BlossomSubID_v12) {
t.Fatal("BlossomSub-v1.2 should support Mesh")
if !BlossomSubDefaultFeatures(BlossomSubFeatureMesh, BlossomSubID_v2) {
t.Fatal("BlossomSub-v2.0 should support Mesh")
}
if BlossomSubDefaultFeatures(BlossomSubFeaturePX, FloodSubID) {
t.Fatal("floodsub should not support PX")
}
if !BlossomSubDefaultFeatures(BlossomSubFeatureMesh, BlossomSubID_v12) {
t.Fatal("BlossomSub-v1.2 should support PX")
if !BlossomSubDefaultFeatures(BlossomSubFeaturePX, BlossomSubID_v2) {
t.Fatal("BlossomSub-v2.0 should support PX")
}
}
func TestBlossomSubCustomProtocols(t *testing.T) {
customsub := protocol.ID("customsub/1.0.0")
protos := []protocol.ID{customsub, FloodSubID}
protos := []protocol.ID{customsub}
features := func(feat BlossomSubFeature, proto protocol.ID) bool {
return proto == customsub
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 3)
hosts := getDefaultHosts(t, 3)
bsubs := getBlossomSubs(ctx, hosts[:2], WithBlossomSubProtocols(protos, features))
fsub := getPubsub(ctx, hosts[2])
psubs := append(bsubs, fsub)
connectAll(t, hosts)
bitmask := []byte{0xff, 0x00, 0x00, 0x00}
bitmask := []byte{0x00, 0x80, 0x00, 0x00}
var bitmasks []*Bitmask
var subs []*Subscription
for _, ps := range psubs {
for _, ps := range bsubs {
b, err := ps.Join(bitmask)
if err != nil {
t.Fatal(err)
}
subch, err := ps.Subscribe(bitmask)
if err != nil {
t.Fatal(err)
}
subs = append(subs, subch)
subs = append(subs, subch...)
bitmasks = append(bitmasks, b...)
}
// wait for heartbeats to build mesh
@ -92,9 +91,8 @@ func TestBlossomSubCustomProtocols(t *testing.T) {
for i := 0; i < 10; i++ {
msg := []byte(fmt.Sprintf("%d it's not quite a floooooood %d", i, i))
owner := rand.Intn(len(psubs))
psubs[owner].Publish(bitmask, msg)
owner := rand.Intn(len(bsubs))
bitmasks[owner].Publish(ctx, bitmasks[owner].bitmask, msg)
for _, sub := range subs {
got, err := sub.Next(ctx)

View File

@ -17,11 +17,11 @@ func TestBlossomSubMatchingFn(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
h := getNetHosts(t, ctx, 4)
h := getDefaultHosts(t, 4)
psubs := []*PubSub{
getBlossomSub(ctx, h[0], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA100, BlossomSubID_v12}, BlossomSubDefaultFeatures)),
getBlossomSub(ctx, h[0], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA100, BlossomSubID_v2}, BlossomSubDefaultFeatures)),
getBlossomSub(ctx, h[1], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubA101Beta}, BlossomSubDefaultFeatures)),
getBlossomSub(ctx, h[2], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{BlossomSubID_v12}, BlossomSubDefaultFeatures)),
getBlossomSub(ctx, h[2], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{BlossomSubID_v2}, BlossomSubDefaultFeatures)),
getBlossomSub(ctx, h[3], WithProtocolMatchFn(protocolNameMatch), WithBlossomSubProtocols([]protocol.ID{customsubB100}, BlossomSubDefaultFeatures)),
}
@ -39,23 +39,30 @@ func TestBlossomSubMatchingFn(t *testing.T) {
// build the mesh
var subs []*Subscription
var bitmasks []*Bitmask
for _, ps := range psubs {
sub, err := ps.Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
b, err := ps.Join([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
subs = append(subs, sub)
bitmasks = append(bitmasks, b...)
sub, err := ps.Subscribe([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
subs = append(subs, sub...)
}
time.Sleep(time.Second)
// publish a message
msg := []byte("message")
psubs[0].Publish([]byte{0xff, 0x00, 0x00, 0x00}, msg)
bitmasks[0].Publish(ctx, bitmasks[0].bitmask, msg)
assertReceive(t, subs[0], msg)
assertReceive(t, subs[1], msg) // Should match via semver over CustomSub name, ignoring the version
assertReceive(t, subs[2], msg) // Should match via BlossomSubID_v11
assertReceive(t, subs[2], msg) // Should match via BlossomSubID_v2
// No message should be received because customsubA and customsubB have different names
ctxTimeout, timeoutCancel := context.WithTimeout(context.Background(), 1*time.Second)

View File

@ -2,6 +2,7 @@ package blossomsub
import (
"context"
"fmt"
"math/rand"
"strconv"
"sync"
@ -11,11 +12,10 @@ import (
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-msgio"
"google.golang.org/protobuf/proto"
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"github.com/libp2p/go-msgio/protoio"
)
// Test that when BlossomSub receives too many IWANT messages from a peer
@ -25,7 +25,7 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
defer cancel()
// Create legitimate and attacker hosts
hosts := getNetHosts(t, ctx, 2)
hosts := getDefaultHosts(t, 2)
legit := hosts[0]
attacker := hosts[1]
@ -36,25 +36,11 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
}
// Subscribe to mybitmask on the legit host
mybitmask := []byte{0xff, 0x00, 0x00}
_, err = ps.Subscribe(mybitmask)
if err != nil {
t.Fatal(err)
}
// Used to publish a message with random data
publishMsg := func() {
data := make([]byte, 16)
rand.Read(data)
if err = ps.Publish(mybitmask, data); err != nil {
t.Fatal(err)
}
}
mybitmask := []byte{0x20, 0x00, 0x00}
// Wait a bit after the last message before checking we got the
// right number of messages
msgWaitMax := time.Second
msgWaitMax := 10 * time.Second
msgCount := 0
msgTimer := time.NewTimer(msgWaitMax)
@ -65,7 +51,22 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
// <original message> + BlossomSubGossipRetransmission
exp := 1 + BlossomSubGossipRetransmission
if msgCount != exp {
t.Fatalf("Expected %d messages, got %d", exp, msgCount)
panic(fmt.Sprintf("Expected %d messages, got %d", exp, msgCount))
}
}
bitmasks, err := ps.Join(mybitmask)
if err != nil {
t.Fatal(err)
}
// Used to publish a message with random data
publishMsg := func() {
data := make([]byte, 16)
rand.Read(data)
if err = bitmasks[0].Publish(ctx, bitmasks[0].bitmask, data); err != nil {
t.Fatal(err)
}
}
@ -84,6 +85,7 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
newMockBS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
// When the legit host connects it will send us its subscriptions
for _, sub := range irpc.GetSubscriptions() {
sub := sub
if sub.GetSubscribe() {
// Reply by subcribing to the bitmask and grafting to the peer
writeMsg(&pb.RPC{
@ -94,7 +96,7 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
go func() {
// Wait for a short interval to make sure the legit host
// received and processed the subscribe + graft
time.Sleep(100 * time.Millisecond)
time.Sleep(1 * time.Second)
// Publish a message from the legit host
publishMsg()
@ -118,7 +120,7 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
// Send an IWANT with the message ID, causing the legit host
// to send another message (until it cuts off the attacker for
// being spammy)
iwantlst := []string{DefaultMsgIdFn(msg)}
iwantlst := [][]byte{DefaultMsgIdFn(msg)}
iwant := []*pb.ControlIWant{{MessageIDs: iwantlst}}
orpc := rpcWithControl(nil, nil, iwant, nil, nil)
writeMsg(&orpc.RPC)
@ -127,6 +129,13 @@ func TestBlossomSubAttackSpamIWANT(t *testing.T) {
connect(t, hosts[0], hosts[1])
time.Sleep(100 * time.Millisecond)
_, err = ps.Subscribe(mybitmask)
if err != nil {
t.Fatal(err)
}
<-ctx.Done()
}
@ -142,7 +151,7 @@ func TestBlossomSubAttackSpamIHAVE(t *testing.T) {
defer cancel()
// Create legitimate and attacker hosts
hosts := getNetHosts(t, ctx, 2)
hosts := getDefaultHosts(t, 2)
legit := hosts[0]
attacker := hosts[1]
@ -166,7 +175,7 @@ func TestBlossomSubAttackSpamIHAVE(t *testing.T) {
}
// Subscribe to mybitmask on the legit host
mybitmask := []byte{0xff, 0x00, 0x00}
mybitmask := []byte{0x20, 0x00, 0x00}
_, err = ps.Subscribe(mybitmask)
if err != nil {
t.Fatal(err)
@ -188,6 +197,7 @@ func TestBlossomSubAttackSpamIHAVE(t *testing.T) {
newMockBS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
// When the legit host connects it will send us its subscriptions
for _, sub := range irpc.GetSubscriptions() {
sub := sub
if sub.GetSubscribe() {
// Reply by subcribing to the bitmask and grafting to the peer
writeMsg(&pb.RPC{
@ -204,7 +214,7 @@ func TestBlossomSubAttackSpamIHAVE(t *testing.T) {
// Send a bunch of IHAVEs
for i := 0; i < 3*BlossomSubMaxIHaveLength; i++ {
ihavelst := []string{"someid" + strconv.Itoa(i)}
ihavelst := [][]byte{[]byte("someid" + strconv.Itoa(i))}
ihave := []*pb.ControlIHave{{Bitmask: sub.Bitmask, MessageIDs: ihavelst}}
orpc := rpcWithControl(nil, ihave, nil, nil, nil)
writeMsg(&orpc.RPC)
@ -234,7 +244,7 @@ func TestBlossomSubAttackSpamIHAVE(t *testing.T) {
// Send a bunch of IHAVEs
for i := 0; i < 3*BlossomSubMaxIHaveLength; i++ {
ihavelst := []string{"someid" + strconv.Itoa(i+100)}
ihavelst := [][]byte{[]byte("someid" + strconv.Itoa(i+100))}
ihave := []*pb.ControlIHave{{Bitmask: sub.Bitmask, MessageIDs: ihavelst}}
orpc := rpcWithControl(nil, ihave, nil, nil, nil)
writeMsg(&orpc.RPC)
@ -292,7 +302,7 @@ func TestBlossomSubAttackGRAFTNonExistentBitmask(t *testing.T) {
defer cancel()
// Create legitimate and attacker hosts
hosts := getNetHosts(t, ctx, 2)
hosts := getDefaultHosts(t, 2)
legit := hosts[0]
attacker := hosts[1]
@ -303,7 +313,7 @@ func TestBlossomSubAttackGRAFTNonExistentBitmask(t *testing.T) {
}
// Subscribe to mybitmask on the legit host
mybitmask := []byte{0xff, 0x00, 0x00}
mybitmask := []byte{0x20, 0x00, 0x00}
_, err = ps.Subscribe(mybitmask)
if err != nil {
t.Fatal(err)
@ -322,6 +332,7 @@ func TestBlossomSubAttackGRAFTNonExistentBitmask(t *testing.T) {
newMockBS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
// When the legit host connects it will send us its subscriptions
for _, sub := range irpc.GetSubscriptions() {
sub := sub
if sub.GetSubscribe() {
// Reply by subcribing to the bitmask and grafting to the peer
writeMsg(&pb.RPC{
@ -330,7 +341,7 @@ func TestBlossomSubAttackGRAFTNonExistentBitmask(t *testing.T) {
})
// Graft to the peer on a non-existent bitmask
nonExistentBitmask := []byte{0xff, 0x00, 0x00, 0xff, 0xff, 0xff}
nonExistentBitmask := []byte{0x20, 0x00, 0x00, 0x02, 0xff, 0xff}
writeMsg(&pb.RPC{
Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{Bitmask: nonExistentBitmask}}},
})
@ -376,7 +387,7 @@ func TestBlossomSubAttackGRAFTDuringBackoff(t *testing.T) {
defer cancel()
// Create legitimate and attacker hosts
hosts := getNetHosts(t, ctx, 2)
hosts := getDefaultHosts(t, 2)
legit := hosts[0]
attacker := hosts[1]
@ -400,7 +411,7 @@ func TestBlossomSubAttackGRAFTDuringBackoff(t *testing.T) {
}
// Subscribe to mybitmask on the legit host
mybitmask := []byte{0xff, 0x00, 0x00}
mybitmask := []byte{0x20, 0x00, 0x00}
_, err = ps.Subscribe(mybitmask)
if err != nil {
t.Fatal(err)
@ -422,6 +433,7 @@ func TestBlossomSubAttackGRAFTDuringBackoff(t *testing.T) {
newMockBS(ctx, t, attacker, func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
// When the legit host connects it will send us its subscriptions
for _, sub := range irpc.GetSubscriptions() {
sub := sub
if sub.GetSubscribe() {
// Reply by subcribing to the bitmask and grafting to the peer
graft := []*pb.ControlGraft{{Bitmask: sub.Bitmask}}
@ -617,11 +629,11 @@ func TestBlossomSubAttackInvalidMessageSpam(t *testing.T) {
defer cancel()
// Create legitimate and attacker hosts
hosts := getNetHosts(t, ctx, 2)
hosts := getDefaultHosts(t, 2)
legit := hosts[0]
attacker := hosts[1]
mybitmask := []byte{0xff, 0x00, 0x00}
mybitmask := []byte{0x20, 0x00, 0x00}
// Create parameters with reasonable default values
params := &PeerScoreParams{
@ -766,7 +778,7 @@ type MockBSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC)
func newMockBS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg MockBSOnRead) {
// Listen on the BlossomSub protocol
const BlossomSubID = protocol.ID("/meshsub/1.0.0")
const BlossomSubID = BlossomSubID_v2
const maxMessageSize = 1024 * 1024
attacker.SetStreamHandler(BlossomSubID, func(stream network.Stream) {
// When an incoming stream is opened, set up an outgoing stream
@ -776,13 +788,17 @@ func newMockBS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg
t.Fatal(err)
}
r := protoio.NewDelimitedReader(stream, maxMessageSize)
w := protoio.NewDelimitedWriter(ostream)
r := msgio.NewVarintReaderSize(stream, maxMessageSize)
w := msgio.NewVarintWriter(ostream)
var irpc pb.RPC
writeMsg := func(rpc *pb.RPC) {
if err = w.WriteMsg(rpc); err != nil {
out, err := proto.Marshal(rpc)
if err != nil {
t.Fatalf("error writing RPC: %s", err)
}
if err = w.WriteMsg(out); err != nil {
t.Fatalf("error writing RPC: %s", err)
}
}
@ -795,8 +811,21 @@ func newMockBS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg
}
irpc.Reset()
v, err := r.ReadMsg()
err := r.ReadMsg(&irpc)
// Bail out when the test finishes
if ctx.Err() != nil {
return
}
if err != nil {
t.Fatal(err)
}
err = proto.Unmarshal(v, &irpc)
if err != nil {
t.Fatal(err)
}
// Bail out when the test finishes
if ctx.Err() != nil {

File diff suppressed because it is too large

View File

@ -321,11 +321,11 @@ type pubSubDiscovery struct {
}
func (d *pubSubDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
return d.Discovery.Advertise(ctx, "floodsub:"+ns, append(opts, d.opts...)...)
return d.Discovery.Advertise(ctx, "blossomsub:"+ns, append(opts, d.opts...)...)
}
func (d *pubSubDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
return d.Discovery.FindPeers(ctx, "floodsub:"+ns, append(opts, d.opts...)...)
return d.Discovery.FindPeers(ctx, "blossomsub:"+ns, append(opts, d.opts...)...)
}
// WithDiscoveryOpts passes libp2p Discovery options into the PubSub discovery subsystem

View File

@ -123,101 +123,6 @@ func (d *dummyDiscovery) FindPeers(ctx context.Context, ns string, opts ...disco
return retCh, nil
}
func TestSimpleDiscovery(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Setup Discovery server and pubsub clients
const numHosts = 20
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
server := newDiscoveryServer()
discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(1 * time.Minute)}
hosts := getNetHosts(t, ctx, numHosts)
psubs := make([]*PubSub, numHosts)
bitmaskHandlers := make([]*Bitmask, numHosts)
for i, h := range hosts {
disc := &mockDiscoveryClient{h, server}
ps := getPubsub(ctx, h, WithDiscovery(disc, WithDiscoveryOpts(discOpts...)))
psubs[i] = ps
bitmaskHandlers[i], _ = ps.Join(bitmask)
}
// Subscribe with all but one pubsub instance
msgs := make([]*Subscription, numHosts)
for i, th := range bitmaskHandlers[1:] {
subch, err := th.Subscribe()
if err != nil {
t.Fatal(err)
}
msgs[i+1] = subch
}
// Wait for the advertisements to go through then check that they did
for {
server.mx.Lock()
numPeers := len(server.db["floodsub:foobar"])
server.mx.Unlock()
if numPeers == numHosts-1 {
break
} else {
time.Sleep(time.Millisecond * 100)
}
}
for i, h := range hosts[1:] {
if !server.hasPeerRecord("floodsub:"+string(bitmask), h.ID()) {
t.Fatalf("Server did not register host %d with ID: %s", i+1, h.ID().Pretty())
}
}
// Try subscribing followed by publishing a single message
subch, err := bitmaskHandlers[0].Subscribe()
if err != nil {
t.Fatal(err)
}
msgs[0] = subch
msg := []byte("first message")
if err := bitmaskHandlers[0].Publish(ctx, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil {
t.Fatal(err)
}
for _, sub := range msgs {
got, err := sub.Next(ctx)
if err != nil {
t.Fatal(sub.err)
}
if !bytes.Equal(msg, got.Data) {
t.Fatal("got wrong message!")
}
}
// Try random peers sending messages and make sure they are received
for i := 0; i < 100; i++ {
msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i))
owner := rand.Intn(len(psubs))
if err := bitmaskHandlers[owner].Publish(ctx, msg, WithReadiness(MinBitmaskSize(1))); err != nil {
t.Fatal(err)
}
for _, sub := range msgs {
got, err := sub.Next(ctx)
if err != nil {
t.Fatal(sub.err)
}
if !bytes.Equal(msg, got.Data) {
t.Fatal("got wrong message!")
}
}
}
}
func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
t.Skip("flaky test disabled")
ctx, cancel := context.WithCancel(context.Background())
@ -226,15 +131,15 @@ func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
// Set up Discovery server and pubsub clients
partitionSize := BlossomSubDlo - 1
numHosts := partitionSize * 2
const ttl = 1 * time.Minute
const ttl = 10 * time.Minute
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
server1, server2 := newDiscoveryServer(), newDiscoveryServer()
discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(ttl)}
// Put the pubsub clients into two partitions
hosts := getNetHosts(t, ctx, numHosts)
hosts := getDefaultHosts(t, numHosts)
psubs := make([]*PubSub, numHosts)
bitmaskHandlers := make([]*Bitmask, numHosts)
@ -246,7 +151,8 @@ func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
disc := &mockDiscoveryClient{h, s}
ps := getBlossomSub(ctx, h, WithDiscovery(disc, WithDiscoveryOpts(discOpts...)))
psubs[i] = ps
bitmaskHandlers[i], _ = ps.Join(bitmask)
handler, _ := ps.Join(bitmask)
bitmaskHandlers[i] = handler[0]
}
msgs := make([]*Subscription, numHosts)
@ -265,7 +171,7 @@ func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
}
for i := 0; i < partitionSize; i++ {
if _, err := server1.Advertise("floodsub:"+string(bitmask), *host.InfoFromHost(hosts[i+partitionSize]), ttl); err != nil {
if _, err := server1.Advertise("blossomsub:"+string(bitmask), *host.InfoFromHost(hosts[i+partitionSize]), ttl); err != nil {
t.Fatal(err)
}
}
@ -276,7 +182,7 @@ func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
owner := rand.Intn(numHosts)
if err := bitmaskHandlers[owner].Publish(ctx, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil {
if err := bitmaskHandlers[owner].Publish(ctx, bitmaskHandlers[owner].bitmask, msg, WithReadiness(MinBitmaskSize(numHosts-1))); err != nil {
t.Fatal(err)
}
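Two API shifts are visible in this test: `ps.Join(bitmask)` now returns a slice of bitmask handlers (the test keeps the first element), and `Publish` takes the target bitmask explicitly alongside the payload. A hedged sketch of the updated call pattern, assuming bitmasks that map to a single handler as used here:

```go
// Sketch of the v2 calling convention in these tests; not a definitive API reference.
handlers, err := ps.Join(bitmask) // Join now returns a slice of handlers
if err != nil {
	t.Fatal(err)
}
th := handlers[0] // single-bit bitmasks map to a single handler in these tests

// The bitmask to publish under is now an explicit argument.
if err := th.Publish(ctx, th.bitmask, msg, WithReadiness(MinBitmaskSize(1))); err != nil {
	t.Fatal(err)
}
```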
@ -292,7 +198,6 @@ func TestBlossomSubDiscoveryAfterBootstrap(t *testing.T) {
}
}
//lint:ignore U1000 used only by skipped tests at present
func waitUntilBlossomSubMeshCount(ps *PubSub, bitmask []byte, count int) {
done := false
doneCh := make(chan bool, 1)

View File

@ -1,112 +0,0 @@
package blossomsub
import (
"context"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
const (
FloodSubID = protocol.ID("/floodsub/1.0.0")
FloodSubBitmaskSearchSize = 5
)
// NewFloodsubWithProtocols returns a new floodsub-enabled PubSub object using the protocols specified in ps.
func NewFloodsubWithProtocols(ctx context.Context, h host.Host, ps []protocol.ID, opts ...Option) (*PubSub, error) {
rt := &FloodSubRouter{
protocols: ps,
}
return NewPubSub(ctx, h, rt, opts...)
}
// NewFloodSub returns a new PubSub object using the FloodSubRouter.
func NewFloodSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) {
return NewFloodsubWithProtocols(ctx, h, []protocol.ID{FloodSubID}, opts...)
}
type FloodSubRouter struct {
p *PubSub
protocols []protocol.ID
tracer *pubsubTracer
}
func (fs *FloodSubRouter) Protocols() []protocol.ID {
return fs.protocols
}
func (fs *FloodSubRouter) Attach(p *PubSub) {
fs.p = p
fs.tracer = p.tracer
}
func (fs *FloodSubRouter) PeerScore(p peer.ID) float64 {
return fs.p.PeerScore(p)
}
func (fs *FloodSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
fs.tracer.AddPeer(p, proto)
}
func (fs *FloodSubRouter) RemovePeer(p peer.ID) {
fs.tracer.RemovePeer(p)
}
func (fs *FloodSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
// check all peers in the bitmask
tmap, ok := fs.p.bitmasks[string(bitmask)]
if !ok {
return false
}
if suggested == 0 {
suggested = FloodSubBitmaskSearchSize
}
if len(tmap) >= suggested {
return true
}
return false
}
func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus {
return AcceptAll
}
func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {}
func (fs *FloodSubRouter) Publish(msg *Message) {
from := msg.ReceivedFrom
bitmask := msg.GetBitmask()
out := rpcWithMessages(msg.Message)
for pid := range fs.p.bitmasks[string(bitmask)] {
if pid == from || pid == peer.ID(msg.GetFrom()) {
continue
}
mch, ok := fs.p.peers[pid]
if !ok {
continue
}
select {
case mch <- out:
fs.tracer.SendRPC(out, pid)
default:
log.Infof("dropping message to peer %s: queue full", pid)
fs.tracer.DropRPC(out, pid)
// Drop it. The peer is too slow.
}
}
}
func (fs *FloodSubRouter) Join(bitmask []byte) {
fs.tracer.Join(bitmask)
}
func (fs *FloodSubRouter) Leave(bitmask []byte) {
fs.tracer.Leave(bitmask)
}

File diff suppressed because it is too large

View File

@ -1,84 +1,125 @@
module source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub
go 1.18
go 1.21
toolchain go1.22.4
replace github.com/libp2p/go-libp2p => ../go-libp2p
replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream
require (
github.com/benbjohnson/clock v1.3.0
github.com/benbjohnson/clock v1.3.5
github.com/gogo/protobuf v1.3.2
github.com/ipfs/go-log/v2 v2.5.1
github.com/libp2p/go-buffer-pool v0.1.0
github.com/libp2p/go-libp2p v0.25.0
github.com/libp2p/go-libp2p v0.35.4
github.com/libp2p/go-libp2p-gostream v0.6.0
github.com/libp2p/go-libp2p-testing v0.12.0
github.com/libp2p/go-msgio v0.3.0
github.com/multiformats/go-multiaddr v0.8.0
github.com/multiformats/go-multiaddr v0.12.4
github.com/multiformats/go-varint v0.0.7
google.golang.org/protobuf v1.28.1
google.golang.org/protobuf v1.34.1
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
github.com/ipfs/go-cid v0.3.2 // indirect
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/cpuid/v2 v2.2.1 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.2.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.1.1 // indirect
github.com/multiformats/go-multicodec v0.7.0 // indirect
github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.0 // indirect
github.com/onsi/ginkgo/v2 v2.5.1 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.5.0 // indirect
github.com/onsi/ginkgo/v2 v2.15.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.6 // indirect
github.com/pion/dtls/v2 v2.2.11 // indirect
github.com/pion/ice/v2 v2.3.25 // indirect
github.com/pion/interceptor v0.1.29 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/mdns v0.0.12 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.14 // indirect
github.com/pion/rtp v1.8.6 // indirect
github.com/pion/sctp v1.8.16 // indirect
github.com/pion/sdp/v3 v3.0.9 // indirect
github.com/pion/srtp/v2 v2.0.18 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/transport/v2 v2.2.5 // indirect
github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/webrtc/v3 v3.2.40 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/quic-go/quic-go v0.37.5 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/quic-go v0.44.0 // indirect
github.com/quic-go/webtransport-go v0.8.0 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/stretchr/testify v1.9.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/tools v0.7.0 // indirect
go.uber.org/dig v1.17.1 // indirect
go.uber.org/fx v1.22.1 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/tools v0.21.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
)

View File

@ -12,6 +12,8 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@ -21,9 +23,13 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@ -39,6 +45,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@ -48,6 +56,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -57,6 +67,8 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
@ -77,6 +89,7 @@ github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgj
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@ -90,17 +103,34 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM=
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
@ -111,11 +141,18 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI=
github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@ -129,16 +166,26 @@ github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFG
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.25.0 h1:ND6Hc6ZYCzC8S++C4mOD7LdPnLXRkNbr12/8FXgUfIo=
github.com/libp2p/go-libp2p v0.25.0/go.mod h1:vXHmFpcfl+xIGN4qW58Bw3a0/SKGAesr5/T4IuJHE3o=
github.com/libp2p/go-libp2p v0.35.4 h1:FDiBUYLkueFwsuNJUZaxKRdpKvBOWU64qQPL768bSeg=
github.com/libp2p/go-libp2p v0.35.4/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
@ -146,6 +193,8 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@ -153,6 +202,8 @@ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00v
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@ -163,6 +214,8 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
@ -176,19 +229,29 @@ github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc=
github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ=
github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.4.0 h1:5i4JbawClkbuaX+mIVXiHQYVPxUW+zjv6w7jtSRukxc=
github.com/multiformats/go-multistream v0.4.0/go.mod h1:BS6ZSYcA4NwYEaIMeCtpJydp2Dc+fNRA6uJMSu/m8+4=
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
@ -196,10 +259,59 @@ github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJE
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw=
github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc=
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg=
github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs=
github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw=
github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA=
github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY=
github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE=
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo=
github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc=
github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc=
github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0=
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU=
github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@ -209,16 +321,25 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/quic-go/qtls-go1-18 v0.2.0 h1:5ViXqBZ90wpUcZS0ge79rf029yx0dYB0McyPJwqqj7U=
github.com/quic-go/qtls-go1-18 v0.2.0/go.mod h1:moGulGHK7o6O8lSPSZNoOwcLvJKJ85vVNc7oJFD65bc=
github.com/quic-go/qtls-go1-19 v0.2.0 h1:Cvn2WdhyViFUHoOqK52i51k4nDX8EwIh5VJiVM4nttk=
@ -227,7 +348,11 @@ github.com/quic-go/qtls-go1-20 v0.1.0 h1:d1PK3ErFy9t7zxKsG3NXBJXZjp/kMLoIb3y/kV5
github.com/quic-go/qtls-go1-20 v0.1.0/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.32.0 h1:lY02md31s1JgPiiyfqJijpu/UX/Iun304FI3yUqX7tA=
github.com/quic-go/quic-go v0.32.0/go.mod h1:/fCsKANhQIeD5l76c2JFU+07gVE3KaA0FP+0zMWwfwo=
github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
github.com/quic-go/webtransport-go v0.5.1 h1:1eVb7WDWCRoaeTtFHpFBJ6WDN1bSrPrRoW6tZgSw0Ow=
github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
@ -266,15 +391,21 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
@ -282,18 +413,29 @@ github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMI
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -303,11 +445,23 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -317,8 +471,12 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -336,8 +494,19 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -351,7 +520,11 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -368,17 +541,48 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -395,8 +599,12 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -421,9 +629,13 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -438,6 +650,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@ -45,7 +45,7 @@ func (gt *gossipTracer) Start(bs *BlossomSubRouter) {
}
// track a promise to deliver a message from a list of msgIDs we are requesting
func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs []string) {
func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs [][]byte) {
if gt == nil {
return
}
@ -56,10 +56,10 @@ func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs []string) {
gt.Lock()
defer gt.Unlock()
promises, ok := gt.promises[mid]
promises, ok := gt.promises[string(mid)]
if !ok {
promises = make(map[peer.ID]time.Time)
gt.promises[mid] = promises
gt.promises[string(mid)] = promises
}
_, ok = promises[p]
@ -70,7 +70,7 @@ func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs []string) {
peerPromises = make(map[string]struct{})
gt.peerPromises[p] = peerPromises
}
peerPromises[mid] = struct{}{}
peerPromises[string(mid)] = struct{}{}
}
}
@ -122,17 +122,17 @@ func (gt *gossipTracer) fulfillPromise(msg *Message) {
gt.Lock()
defer gt.Unlock()
promises, ok := gt.promises[mid]
promises, ok := gt.promises[string(mid)]
if !ok {
return
}
delete(gt.promises, mid)
delete(gt.promises, string(mid))
// delete the promise for all peers that promised it, as they have no way to fulfill it.
for p := range promises {
peerPromises, ok := gt.peerPromises[p]
if ok {
delete(peerPromises, mid)
delete(peerPromises, string(mid))
if len(peerPromises) == 0 {
delete(gt.peerPromises, p)
}
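Note: with message IDs now carried as raw `[]byte`, the tracer keeps its promise maps keyed by `string` and converts at each access with `string(mid)`. The conversion copies the bytes into an immutable key, so a caller that reuses the ID slice cannot corrupt the map. A minimal, self-contained sketch of the same pattern (the names here are illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"time"
)

// promises maps an immutable string copy of a message ID to the peers that
// promised it and when the promise was made, mirroring the shape used above.
var promises = map[string]map[string]time.Time{}

func addPromise(peer string, mid []byte) {
	key := string(mid) // copies the bytes; safe even if the caller reuses mid
	peers, ok := promises[key]
	if !ok {
		peers = make(map[string]time.Time)
		promises[key] = peers
	}
	if _, dup := peers[peer]; !dup {
		peers[peer] = time.Now()
	}
}

func main() {
	addPromise("QmPeerA", []byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Println(len(promises)) // prints 1
}
```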

View File

@ -18,7 +18,7 @@ func TestBrokenPromises(t *testing.T) {
peerB := peer.ID("B")
peerC := peer.ID("C")
var mids []string
var mids [][]byte
for i := 0; i < 100; i++ {
m := makeTestMessage(i)
m.From = []byte(peerA)
@ -72,7 +72,7 @@ func TestNoBrokenPromises(t *testing.T) {
peerB := peer.ID("B")
var msgs []*pb.Message
var mids []string
var mids [][]byte
for i := 0; i < 100; i++ {
m := makeTestMessage(i)
m.From = []byte(peerA)

View File

@ -30,7 +30,7 @@ func NewMessageCache(gossip, history int) *MessageCache {
peertx: make(map[string]map[peer.ID]int),
history: make([][]CacheEntry, history),
gossip: gossip,
msgID: func(msg *Message) string {
msgID: func(msg *Message) []byte {
return DefaultMsgIdFn(msg.Message)
},
}
@ -41,47 +41,47 @@ type MessageCache struct {
peertx map[string]map[peer.ID]int
history [][]CacheEntry
gossip int
msgID func(*Message) string
msgID func(*Message) []byte
}
func (mc *MessageCache) SetMsgIdFn(msgID func(*Message) string) {
func (mc *MessageCache) SetMsgIdFn(msgID func(*Message) []byte) {
mc.msgID = msgID
}
type CacheEntry struct {
mid string
mid []byte
bitmask []byte
}
func (mc *MessageCache) Put(msg *Message) {
mid := mc.msgID(msg)
mc.msgs[mid] = msg
mc.msgs[string(mid)] = msg
mc.history[0] = append(mc.history[0], CacheEntry{mid: mid, bitmask: msg.GetBitmask()})
}
func (mc *MessageCache) Get(mid string) (*Message, bool) {
m, ok := mc.msgs[mid]
func (mc *MessageCache) Get(mid []byte) (*Message, bool) {
m, ok := mc.msgs[string(mid)]
return m, ok
}
func (mc *MessageCache) GetForPeer(mid string, p peer.ID) (*Message, int, bool) {
m, ok := mc.msgs[mid]
func (mc *MessageCache) GetForPeer(mid []byte, p peer.ID) (*Message, int, bool) {
m, ok := mc.msgs[string(mid)]
if !ok {
return nil, 0, false
}
tx, ok := mc.peertx[mid]
tx, ok := mc.peertx[string(mid)]
if !ok {
tx = make(map[peer.ID]int)
mc.peertx[mid] = tx
mc.peertx[string(mid)] = tx
}
tx[p]++
return m, tx[p], true
}
func (mc *MessageCache) GetGossipIDs(bitmask []byte) []string {
var mids []string
func (mc *MessageCache) GetGossipIDs(bitmask []byte) [][]byte {
var mids [][]byte
for _, entries := range mc.history[:mc.gossip] {
for _, entry := range entries {
if bytes.Equal(entry.bitmask, bitmask) {
@ -95,8 +95,8 @@ func (mc *MessageCache) GetGossipIDs(bitmask []byte) []string {
func (mc *MessageCache) Shift() {
last := mc.history[len(mc.history)-1]
for _, entry := range last {
delete(mc.msgs, entry.mid)
delete(mc.peertx, entry.mid)
delete(mc.msgs, string(entry.mid))
delete(mc.peertx, string(entry.mid))
}
for i := len(mc.history) - 2; i >= 0; i-- {
mc.history[i+1] = mc.history[i]
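Note: the message cache is now addressed by `[]byte` IDs throughout, with `string(mid)` used only for its internal map keys. A rough usage sketch built from the signatures visible in this hunk (`NewMessageCache`, `Put`, `Shift`, `GetGossipIDs`); the module import path is assumed from the generated protobuf's `go_package` and may differ:

```go
package example

import (
	blossomsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// gossipIDsFor exercises the reworked cache surface: Put stores messages,
// Shift advances one heartbeat window, and GetGossipIDs returns raw []byte
// IDs for cached entries whose bitmask matches.
func gossipIDsFor(msgs []*blossomsub.Message, bitmask []byte) [][]byte {
	mcache := blossomsub.NewMessageCache(3, 5) // gossip over 3 windows, keep 5 of history
	for _, m := range msgs {
		mcache.Put(m)
	}
	mcache.Shift()
	return mcache.GetGossipIDs(bitmask)
}
```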

View File

@ -1,6 +1,7 @@
package blossomsub
import (
"bytes"
"encoding/binary"
"fmt"
"testing"
@ -33,14 +34,14 @@ func TestMessageCache(t *testing.T) {
}
}
gids := mcache.GetGossipIDs([]byte{0x7e, 0x57})
gids := mcache.GetGossipIDs([]byte{0x01, 0x00})
if len(gids) != 10 {
t.Fatalf("Expected 10 gossip IDs; got %d", len(gids))
}
for i := 0; i < 10; i++ {
mid := msgID(msgs[i])
if mid != gids[i] {
if !bytes.Equal(mid, gids[i]) {
t.Fatalf("GossipID mismatch for message %d", i)
}
}
@ -62,21 +63,21 @@ func TestMessageCache(t *testing.T) {
}
}
gids = mcache.GetGossipIDs([]byte{0x7e, 0x57})
gids = mcache.GetGossipIDs([]byte{0x01, 0x00})
if len(gids) != 20 {
t.Fatalf("Expected 20 gossip IDs; got %d", len(gids))
}
for i := 0; i < 10; i++ {
mid := msgID(msgs[i])
if mid != gids[10+i] {
if !bytes.Equal(mid, gids[10+i]) {
t.Fatalf("GossipID mismatch for message %d", i)
}
}
for i := 10; i < 20; i++ {
mid := msgID(msgs[i])
if mid != gids[i-10] {
if !bytes.Equal(mid, gids[i-10]) {
t.Fatalf("GossipID mismatch for message %d", i)
}
}
@ -125,28 +126,28 @@ func TestMessageCache(t *testing.T) {
}
}
gids = mcache.GetGossipIDs([]byte{0x7e, 0x57})
gids = mcache.GetGossipIDs([]byte{0x01, 0x00})
if len(gids) != 30 {
t.Fatalf("Expected 30 gossip IDs; got %d", len(gids))
}
for i := 0; i < 10; i++ {
mid := msgID(msgs[50+i])
if mid != gids[i] {
if !bytes.Equal(mid, gids[i]) {
t.Fatalf("GossipID mismatch for message %d", i)
}
}
for i := 10; i < 20; i++ {
mid := msgID(msgs[30+i])
if mid != gids[i] {
if !bytes.Equal(mid, gids[i]) {
t.Fatalf("GossipID mismatch for message %d", i)
}
}
for i := 20; i < 30; i++ {
mid := msgID(msgs[10+i])
if mid != gids[i] {
if !bytes.Equal(mid, gids[i]) {
t.Fatalf("GossipID mismatch for message %d", i)
}
}
@ -157,11 +158,11 @@ func makeTestMessage(n int) *pb.Message {
seqno := make([]byte, 8)
binary.BigEndian.PutUint64(seqno, uint64(n))
data := []byte(fmt.Sprintf("%d", n))
bitmask := []byte{0x7e, 0x57}
bitmask := []byte{0x01, 0x00}
return &pb.Message{
Data: data,
Bitmask: bitmask,
From: []byte([]byte{0x7e, 0x57}),
From: []byte([]byte{0x01, 0x00}),
Seqno: seqno,
}
}

View File

@ -30,8 +30,8 @@ func (m *msgIDGenerator) Set(bitmask []byte, gen MsgIdFunction) {
}
// ID computes ID for the msg or short-circuits with the cached value.
func (m *msgIDGenerator) ID(msg *Message) string {
if msg.ID != "" {
func (m *msgIDGenerator) ID(msg *Message) []byte {
if len(msg.ID) != 0 {
return msg.ID
}
@ -40,7 +40,7 @@ func (m *msgIDGenerator) ID(msg *Message) string {
}
// RawID computes ID for the proto 'msg'.
func (m *msgIDGenerator) RawID(msg *pb.Message) string {
func (m *msgIDGenerator) RawID(msg *pb.Message) []byte {
m.bitmaskGensLk.RLock()
gen, ok := m.bitmaskGens[string(msg.GetBitmask())]
m.bitmaskGensLk.RUnlock()
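Note: custom message-ID functions follow the same change and now return the ID as raw bytes rather than a string. A hedged sketch of an ID function under the new shape, hashing the payload; `DefaultMsgIdFn` (seen in the mcache hunk above) remains the in-package default, and the `pb` import path is taken from the generated code:

```go
package example

import (
	"crypto/sha256"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// hashMsgID derives a message ID as raw bytes (a SHA-256 of the payload),
// matching the []byte-returning shape the ID generator now expects.
func hashMsgID(m *pb.Message) []byte {
	h := sha256.Sum256(m.GetData())
	return h[:]
}
```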

View File

@ -1,75 +0,0 @@
package blossomsub
import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
var _ network.Notifiee = (*PubSubNotif)(nil)
type PubSubNotif PubSub
func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) {
}
func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) {
}
func (p *PubSubNotif) Connected(n network.Network, c network.Conn) {
// ignore transient connections
if c.Stat().Limited {
return
}
go func() {
p.newPeersPrioLk.RLock()
p.newPeersMx.Lock()
p.newPeersPend[c.RemotePeer()] = struct{}{}
p.newPeersMx.Unlock()
p.newPeersPrioLk.RUnlock()
select {
case p.newPeers <- struct{}{}:
default:
}
}()
}
func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) {
}
func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) {
}
func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) {
}
func (p *PubSubNotif) Initialize() {
isTransient := func(pid peer.ID) bool {
for _, c := range p.host.Network().ConnsToPeer(pid) {
if !c.Stat().Limited {
return false
}
}
return true
}
p.newPeersPrioLk.RLock()
p.newPeersMx.Lock()
for _, pid := range p.host.Network().Peers() {
if isTransient(pid) {
continue
}
p.newPeersPend[pid] = struct{}{}
}
p.newPeersMx.Unlock()
p.newPeersPrioLk.RUnlock()
select {
case p.newPeers <- struct{}{}:
default:
}
}

View File

@ -246,9 +246,8 @@ type ControlIHave struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
MessageIDs []string `protobuf:"bytes,2,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"`
Bitmask []byte `protobuf:"bytes,1,opt,name=bitmask,proto3" json:"bitmask,omitempty"`
MessageIDs [][]byte `protobuf:"bytes,2,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"`
}
func (x *ControlIHave) Reset() {
@ -290,7 +289,7 @@ func (x *ControlIHave) GetBitmask() []byte {
return nil
}
func (x *ControlIHave) GetMessageIDs() []string {
func (x *ControlIHave) GetMessageIDs() [][]byte {
if x != nil {
return x.MessageIDs
}
@ -302,8 +301,7 @@ type ControlIWant struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
MessageIDs []string `protobuf:"bytes,1,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"`
MessageIDs [][]byte `protobuf:"bytes,1,rep,name=messageIDs,proto3" json:"messageIDs,omitempty"`
}
func (x *ControlIWant) Reset() {
@ -338,7 +336,7 @@ func (*ControlIWant) Descriptor() ([]byte, []int) {
return file_rpc_proto_rawDescGZIP(), []int{4}
}
func (x *ControlIWant) GetMessageIDs() []string {
func (x *ControlIWant) GetMessageIDs() [][]byte {
if x != nil {
return x.MessageIDs
}
@ -612,10 +610,10 @@ var file_rpc_proto_rawDesc = []byte{
0x49, 0x48, 0x61, 0x76, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x12,
0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22,
0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22,
0x2e, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x12,
0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22,
0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x22,
0x28, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x12,
0x18, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x0c, 0x43, 0x6f, 0x6e,

View File

@ -34,13 +34,11 @@ message ControlMessage {
message ControlIHave {
bytes bitmask = 1;
// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
repeated string messageIDs = 2;
repeated bytes messageIDs = 2;
}
message ControlIWant {
// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
repeated string messageIDs = 1;
repeated bytes messageIDs = 1;
}
message ControlGraft {
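Note: both IHAVE and IWANT now declare their message IDs as `bytes`. On the wire this is the same length-delimited encoding as `string` (the raw descriptor bytes in the generated code above flip the field-type byte from `0x28, 0x09`, TYPE_STRING, to `0x28, 0x0c`, TYPE_BYTES), but it drops proto3's UTF-8 requirement, which is exactly what the removed comments about invalid UTF-8 were working around. A small sketch of building the new control entries in Go, using the field names from the generated code; the `pb` import path is assumed from its `go_package`:

```go
package example

import (
	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// controlEntries builds an IHAVE/IWANT pair for a bitmask; message IDs are
// arbitrary byte slices now, so non-UTF-8 hash output is legal in both fields.
func controlEntries(bitmask []byte, ids [][]byte) (*pb.ControlIHave, *pb.ControlIWant) {
	return &pb.ControlIHave{Bitmask: bitmask, MessageIDs: ids},
		&pb.ControlIWant{MessageIDs: ids}
}
```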

View File

@ -23,19 +23,20 @@ const (
type TraceEvent_Type int32
const (
TraceEvent_PUBLISH_MESSAGE TraceEvent_Type = 0
TraceEvent_REJECT_MESSAGE TraceEvent_Type = 1
TraceEvent_DUPLICATE_MESSAGE TraceEvent_Type = 2
TraceEvent_DELIVER_MESSAGE TraceEvent_Type = 3
TraceEvent_ADD_PEER TraceEvent_Type = 4
TraceEvent_REMOVE_PEER TraceEvent_Type = 5
TraceEvent_RECV_RPC TraceEvent_Type = 6
TraceEvent_SEND_RPC TraceEvent_Type = 7
TraceEvent_DROP_RPC TraceEvent_Type = 8
TraceEvent_JOIN TraceEvent_Type = 9
TraceEvent_LEAVE TraceEvent_Type = 10
TraceEvent_GRAFT TraceEvent_Type = 11
TraceEvent_PRUNE TraceEvent_Type = 12
TraceEvent_PUBLISH_MESSAGE TraceEvent_Type = 0
TraceEvent_REJECT_MESSAGE TraceEvent_Type = 1
TraceEvent_DUPLICATE_MESSAGE TraceEvent_Type = 2
TraceEvent_DELIVER_MESSAGE TraceEvent_Type = 3
TraceEvent_ADD_PEER TraceEvent_Type = 4
TraceEvent_REMOVE_PEER TraceEvent_Type = 5
TraceEvent_RECV_RPC TraceEvent_Type = 6
TraceEvent_SEND_RPC TraceEvent_Type = 7
TraceEvent_DROP_RPC TraceEvent_Type = 8
TraceEvent_JOIN TraceEvent_Type = 9
TraceEvent_LEAVE TraceEvent_Type = 10
TraceEvent_GRAFT TraceEvent_Type = 11
TraceEvent_PRUNE TraceEvent_Type = 12
TraceEvent_UNDELIVERABLE_MESSAGE TraceEvent_Type = 13
)
// Enum value maps for TraceEvent_Type.
@ -54,21 +55,23 @@ var (
10: "LEAVE",
11: "GRAFT",
12: "PRUNE",
13: "UNDELIVERABLE_MESSAGE",
}
TraceEvent_Type_value = map[string]int32{
"PUBLISH_MESSAGE": 0,
"REJECT_MESSAGE": 1,
"DUPLICATE_MESSAGE": 2,
"DELIVER_MESSAGE": 3,
"ADD_PEER": 4,
"REMOVE_PEER": 5,
"RECV_RPC": 6,
"SEND_RPC": 7,
"DROP_RPC": 8,
"JOIN": 9,
"LEAVE": 10,
"GRAFT": 11,
"PRUNE": 12,
"PUBLISH_MESSAGE": 0,
"REJECT_MESSAGE": 1,
"DUPLICATE_MESSAGE": 2,
"DELIVER_MESSAGE": 3,
"ADD_PEER": 4,
"REMOVE_PEER": 5,
"RECV_RPC": 6,
"SEND_RPC": 7,
"DROP_RPC": 8,
"JOIN": 9,
"LEAVE": 10,
"GRAFT": 11,
"PRUNE": 12,
"UNDELIVERABLE_MESSAGE": 13,
}
)
@ -104,22 +107,23 @@ type TraceEvent struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Type *TraceEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=blossomsub.pb.TraceEvent_Type,oneof" json:"type,omitempty"`
PeerID []byte `protobuf:"bytes,2,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"`
Timestamp *int64 `protobuf:"varint,3,opt,name=timestamp,proto3,oneof" json:"timestamp,omitempty"`
PublishMessage *TraceEvent_PublishMessage `protobuf:"bytes,4,opt,name=publishMessage,proto3,oneof" json:"publishMessage,omitempty"`
RejectMessage *TraceEvent_RejectMessage `protobuf:"bytes,5,opt,name=rejectMessage,proto3,oneof" json:"rejectMessage,omitempty"`
DuplicateMessage *TraceEvent_DuplicateMessage `protobuf:"bytes,6,opt,name=duplicateMessage,proto3,oneof" json:"duplicateMessage,omitempty"`
DeliverMessage *TraceEvent_DeliverMessage `protobuf:"bytes,7,opt,name=deliverMessage,proto3,oneof" json:"deliverMessage,omitempty"`
AddPeer *TraceEvent_AddPeer `protobuf:"bytes,8,opt,name=addPeer,proto3,oneof" json:"addPeer,omitempty"`
RemovePeer *TraceEvent_RemovePeer `protobuf:"bytes,9,opt,name=removePeer,proto3,oneof" json:"removePeer,omitempty"`
RecvRPC *TraceEvent_RecvRPC `protobuf:"bytes,10,opt,name=recvRPC,proto3,oneof" json:"recvRPC,omitempty"`
SendRPC *TraceEvent_SendRPC `protobuf:"bytes,11,opt,name=sendRPC,proto3,oneof" json:"sendRPC,omitempty"`
DropRPC *TraceEvent_DropRPC `protobuf:"bytes,12,opt,name=dropRPC,proto3,oneof" json:"dropRPC,omitempty"`
Join *TraceEvent_Join `protobuf:"bytes,13,opt,name=join,proto3,oneof" json:"join,omitempty"`
Leave *TraceEvent_Leave `protobuf:"bytes,14,opt,name=leave,proto3,oneof" json:"leave,omitempty"`
Graft *TraceEvent_Graft `protobuf:"bytes,15,opt,name=graft,proto3,oneof" json:"graft,omitempty"`
Prune *TraceEvent_Prune `protobuf:"bytes,16,opt,name=prune,proto3,oneof" json:"prune,omitempty"`
Type *TraceEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=blossomsub.pb.TraceEvent_Type,oneof" json:"type,omitempty"`
PeerID []byte `protobuf:"bytes,2,opt,name=peerID,proto3,oneof" json:"peerID,omitempty"`
Timestamp *int64 `protobuf:"varint,3,opt,name=timestamp,proto3,oneof" json:"timestamp,omitempty"`
PublishMessage *TraceEvent_PublishMessage `protobuf:"bytes,4,opt,name=publishMessage,proto3,oneof" json:"publishMessage,omitempty"`
RejectMessage *TraceEvent_RejectMessage `protobuf:"bytes,5,opt,name=rejectMessage,proto3,oneof" json:"rejectMessage,omitempty"`
DuplicateMessage *TraceEvent_DuplicateMessage `protobuf:"bytes,6,opt,name=duplicateMessage,proto3,oneof" json:"duplicateMessage,omitempty"`
DeliverMessage *TraceEvent_DeliverMessage `protobuf:"bytes,7,opt,name=deliverMessage,proto3,oneof" json:"deliverMessage,omitempty"`
AddPeer *TraceEvent_AddPeer `protobuf:"bytes,8,opt,name=addPeer,proto3,oneof" json:"addPeer,omitempty"`
RemovePeer *TraceEvent_RemovePeer `protobuf:"bytes,9,opt,name=removePeer,proto3,oneof" json:"removePeer,omitempty"`
RecvRPC *TraceEvent_RecvRPC `protobuf:"bytes,10,opt,name=recvRPC,proto3,oneof" json:"recvRPC,omitempty"`
SendRPC *TraceEvent_SendRPC `protobuf:"bytes,11,opt,name=sendRPC,proto3,oneof" json:"sendRPC,omitempty"`
DropRPC *TraceEvent_DropRPC `protobuf:"bytes,12,opt,name=dropRPC,proto3,oneof" json:"dropRPC,omitempty"`
Join *TraceEvent_Join `protobuf:"bytes,13,opt,name=join,proto3,oneof" json:"join,omitempty"`
Leave *TraceEvent_Leave `protobuf:"bytes,14,opt,name=leave,proto3,oneof" json:"leave,omitempty"`
Graft *TraceEvent_Graft `protobuf:"bytes,15,opt,name=graft,proto3,oneof" json:"graft,omitempty"`
Prune *TraceEvent_Prune `protobuf:"bytes,16,opt,name=prune,proto3,oneof" json:"prune,omitempty"`
UndeliverableMessage *TraceEvent_UndeliverableMessage `protobuf:"bytes,17,opt,name=undeliverableMessage,proto3,oneof" json:"undeliverableMessage,omitempty"`
}
func (x *TraceEvent) Reset() {
@ -266,6 +270,13 @@ func (x *TraceEvent) GetPrune() *TraceEvent_Prune {
return nil
}
func (x *TraceEvent) GetUndeliverableMessage() *TraceEvent_UndeliverableMessage {
if x != nil {
return x.UndeliverableMessage
}
return nil
}
type TraceEventBatch struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -1036,6 +1047,69 @@ func (x *TraceEvent_Prune) GetBitmask() []byte {
return nil
}
type TraceEvent_UndeliverableMessage struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
MessageID []byte `protobuf:"bytes,1,opt,name=messageID,proto3,oneof" json:"messageID,omitempty"`
Bitmask []byte `protobuf:"bytes,2,opt,name=bitmask,proto3,oneof" json:"bitmask,omitempty"`
ReceivedFrom []byte `protobuf:"bytes,3,opt,name=receivedFrom,proto3,oneof" json:"receivedFrom,omitempty"`
}
func (x *TraceEvent_UndeliverableMessage) Reset() {
*x = TraceEvent_UndeliverableMessage{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TraceEvent_UndeliverableMessage) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TraceEvent_UndeliverableMessage) ProtoMessage() {}
func (x *TraceEvent_UndeliverableMessage) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TraceEvent_UndeliverableMessage.ProtoReflect.Descriptor instead.
func (*TraceEvent_UndeliverableMessage) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 13}
}
func (x *TraceEvent_UndeliverableMessage) GetMessageID() []byte {
if x != nil {
return x.MessageID
}
return nil
}
func (x *TraceEvent_UndeliverableMessage) GetBitmask() []byte {
if x != nil {
return x.Bitmask
}
return nil
}
func (x *TraceEvent_UndeliverableMessage) GetReceivedFrom() []byte {
if x != nil {
return x.ReceivedFrom
}
return nil
}
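Note: the trace schema gains an `UNDELIVERABLE_MESSAGE` event (enum value 13) with the payload above, and the nested message indices that follow shift by one accordingly (15..22 become 16..23 below). A minimal sketch of consuming the new event, assuming the `EventTracer` interface (`Trace(*pb.TraceEvent)`) is unchanged from the gossipsub lineage and the `pb` import path from the generated code:

```go
package example

import (
	"sync/atomic"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

// undeliverableCounter tallies UNDELIVERABLE_MESSAGE trace events; it can be
// passed to the pubsub instance via WithEventTracer if that option applies.
type undeliverableCounter struct {
	n atomic.Uint64
}

func (c *undeliverableCounter) Trace(evt *pb.TraceEvent) {
	if evt.GetType() == pb.TraceEvent_UNDELIVERABLE_MESSAGE {
		c.n.Add(1)
	}
}
```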
type TraceEvent_RPCMeta struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -1049,7 +1123,7 @@ type TraceEvent_RPCMeta struct {
func (x *TraceEvent_RPCMeta) Reset() {
*x = TraceEvent_RPCMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[15]
mi := &file_trace_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1062,7 +1136,7 @@ func (x *TraceEvent_RPCMeta) String() string {
func (*TraceEvent_RPCMeta) ProtoMessage() {}
func (x *TraceEvent_RPCMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[15]
mi := &file_trace_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1075,7 +1149,7 @@ func (x *TraceEvent_RPCMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_RPCMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_RPCMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 13}
return file_trace_proto_rawDescGZIP(), []int{0, 14}
}
func (x *TraceEvent_RPCMeta) GetMessages() []*TraceEvent_MessageMeta {
@ -1111,7 +1185,7 @@ type TraceEvent_MessageMeta struct {
func (x *TraceEvent_MessageMeta) Reset() {
*x = TraceEvent_MessageMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[16]
mi := &file_trace_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1124,7 +1198,7 @@ func (x *TraceEvent_MessageMeta) String() string {
func (*TraceEvent_MessageMeta) ProtoMessage() {}
func (x *TraceEvent_MessageMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[16]
mi := &file_trace_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1137,7 +1211,7 @@ func (x *TraceEvent_MessageMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_MessageMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_MessageMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 14}
return file_trace_proto_rawDescGZIP(), []int{0, 15}
}
func (x *TraceEvent_MessageMeta) GetMessageID() []byte {
@ -1166,7 +1240,7 @@ type TraceEvent_SubMeta struct {
func (x *TraceEvent_SubMeta) Reset() {
*x = TraceEvent_SubMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[17]
mi := &file_trace_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1179,7 +1253,7 @@ func (x *TraceEvent_SubMeta) String() string {
func (*TraceEvent_SubMeta) ProtoMessage() {}
func (x *TraceEvent_SubMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[17]
mi := &file_trace_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1192,7 +1266,7 @@ func (x *TraceEvent_SubMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_SubMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_SubMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 15}
return file_trace_proto_rawDescGZIP(), []int{0, 16}
}
func (x *TraceEvent_SubMeta) GetSubscribe() bool {
@ -1223,7 +1297,7 @@ type TraceEvent_ControlMeta struct {
func (x *TraceEvent_ControlMeta) Reset() {
*x = TraceEvent_ControlMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[18]
mi := &file_trace_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1236,7 +1310,7 @@ func (x *TraceEvent_ControlMeta) String() string {
func (*TraceEvent_ControlMeta) ProtoMessage() {}
func (x *TraceEvent_ControlMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[18]
mi := &file_trace_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1249,7 +1323,7 @@ func (x *TraceEvent_ControlMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_ControlMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_ControlMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 16}
return file_trace_proto_rawDescGZIP(), []int{0, 17}
}
func (x *TraceEvent_ControlMeta) GetIhave() []*TraceEvent_ControlIHaveMeta {
@ -1292,7 +1366,7 @@ type TraceEvent_ControlIHaveMeta struct {
func (x *TraceEvent_ControlIHaveMeta) Reset() {
*x = TraceEvent_ControlIHaveMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[19]
mi := &file_trace_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1305,7 +1379,7 @@ func (x *TraceEvent_ControlIHaveMeta) String() string {
func (*TraceEvent_ControlIHaveMeta) ProtoMessage() {}
func (x *TraceEvent_ControlIHaveMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[19]
mi := &file_trace_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1318,7 +1392,7 @@ func (x *TraceEvent_ControlIHaveMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_ControlIHaveMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_ControlIHaveMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 17}
return file_trace_proto_rawDescGZIP(), []int{0, 18}
}
func (x *TraceEvent_ControlIHaveMeta) GetBitmask() []byte {
@ -1346,7 +1420,7 @@ type TraceEvent_ControlIWantMeta struct {
func (x *TraceEvent_ControlIWantMeta) Reset() {
*x = TraceEvent_ControlIWantMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[20]
mi := &file_trace_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1359,7 +1433,7 @@ func (x *TraceEvent_ControlIWantMeta) String() string {
func (*TraceEvent_ControlIWantMeta) ProtoMessage() {}
func (x *TraceEvent_ControlIWantMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[20]
mi := &file_trace_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1372,7 +1446,7 @@ func (x *TraceEvent_ControlIWantMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_ControlIWantMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_ControlIWantMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 18}
return file_trace_proto_rawDescGZIP(), []int{0, 19}
}
func (x *TraceEvent_ControlIWantMeta) GetMessageIDs() [][]byte {
@ -1393,7 +1467,7 @@ type TraceEvent_ControlGraftMeta struct {
func (x *TraceEvent_ControlGraftMeta) Reset() {
*x = TraceEvent_ControlGraftMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[21]
mi := &file_trace_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1406,7 +1480,7 @@ func (x *TraceEvent_ControlGraftMeta) String() string {
func (*TraceEvent_ControlGraftMeta) ProtoMessage() {}
func (x *TraceEvent_ControlGraftMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[21]
mi := &file_trace_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1419,7 +1493,7 @@ func (x *TraceEvent_ControlGraftMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_ControlGraftMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_ControlGraftMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 19}
return file_trace_proto_rawDescGZIP(), []int{0, 20}
}
func (x *TraceEvent_ControlGraftMeta) GetBitmask() []byte {
@ -1441,7 +1515,7 @@ type TraceEvent_ControlPruneMeta struct {
func (x *TraceEvent_ControlPruneMeta) Reset() {
*x = TraceEvent_ControlPruneMeta{}
if protoimpl.UnsafeEnabled {
mi := &file_trace_proto_msgTypes[22]
mi := &file_trace_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1454,7 +1528,7 @@ func (x *TraceEvent_ControlPruneMeta) String() string {
func (*TraceEvent_ControlPruneMeta) ProtoMessage() {}
func (x *TraceEvent_ControlPruneMeta) ProtoReflect() protoreflect.Message {
mi := &file_trace_proto_msgTypes[22]
mi := &file_trace_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1467,7 +1541,7 @@ func (x *TraceEvent_ControlPruneMeta) ProtoReflect() protoreflect.Message {
// Deprecated: Use TraceEvent_ControlPruneMeta.ProtoReflect.Descriptor instead.
func (*TraceEvent_ControlPruneMeta) Descriptor() ([]byte, []int) {
return file_trace_proto_rawDescGZIP(), []int{0, 20}
return file_trace_proto_rawDescGZIP(), []int{0, 21}
}
func (x *TraceEvent_ControlPruneMeta) GetBitmask() []byte {
@ -1488,7 +1562,7 @@ var File_trace_proto protoreflect.FileDescriptor
var file_trace_proto_rawDesc = []byte{
0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x62,
0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x22, 0xfe, 0x1e, 0x0a,
0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x22, 0xca, 0x21, 0x0a,
0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x74,
0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x62, 0x6c, 0x6f, 0x73,
0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45,
@ -1555,38 +1629,106 @@ var file_trace_proto_rawDesc = []byte{
0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d,
0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x48, 0x0f, 0x52, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65,
0x88, 0x01, 0x01, 0x1a, 0x6c, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d,
0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74,
0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x1a, 0xcd, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76,
0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c,
0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x12,
0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48,
0x02, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07,
0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x03, 0x52,
0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f,
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65,
0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72,
0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x1a, 0xa8, 0x01, 0x0a, 0x10, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63,
0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48,
0x01, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88,
0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0c, 0x48, 0x02, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01,
0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42,
0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d,
0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa6, 0x01, 0x0a,
0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
0x88, 0x01, 0x01, 0x12, 0x67, 0x0a, 0x14, 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72,
0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x2e, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70,
0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x64,
0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x48, 0x10, 0x52, 0x14, 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x61, 0x62,
0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x6c, 0x0a, 0x0e,
0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21,
0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01,
0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a,
0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xcd, 0x01, 0x0a, 0x0d, 0x52,
0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09,
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48,
0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12,
0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65,
0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73,
0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73,
0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x03, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x49, 0x44, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46,
0x72, 0x6f, 0x6d, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x0a,
0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa8, 0x01, 0x0a, 0x10, 0x44,
0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88,
0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72,
0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65,
0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x02, 0x52, 0x07,
0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63,
0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69,
0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa6, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65,
0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07,
0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65,
0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
0x48, 0x02, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d,
0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49,
0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x42, 0x0f, 0x0a,
0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x1a, 0x56,
0x0a, 0x07, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65,
0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65,
0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x88, 0x01,
0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x08, 0x0a, 0x06,
0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65,
0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01,
0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x1a, 0x88, 0x01, 0x0a,
0x07, 0x52, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x12, 0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65,
0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00,
0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01,
0x01, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e,
0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65,
0x74, 0x61, 0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a,
0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x07,
0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x76, 0x0a, 0x07, 0x53, 0x65, 0x6e, 0x64, 0x52,
0x50, 0x43, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x88, 0x01, 0x01, 0x12,
0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72,
0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61,
0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f,
0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a,
0x76, 0x0a, 0x07, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x50, 0x43, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65,
0x6e, 0x64, 0x54, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65,
0x6e, 0x64, 0x54, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73,
0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74,
0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61,
0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x42, 0x07,
0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x31, 0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, 0x12,
0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a,
0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x32, 0x0a, 0x05, 0x4c, 0x65,
0x61, 0x76, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88,
0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x5a,
0x0a, 0x05, 0x47, 0x72, 0x61, 0x66, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49,
0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49,
0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b,
0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x0a,
0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x5a, 0x0a, 0x05, 0x50, 0x72,
0x75, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01,
0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42,
0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xac, 0x01, 0x0a, 0x14, 0x55, 0x6e, 0x64, 0x65, 0x6c,
0x69, 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12,
0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88,
0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20,
@ -1596,156 +1738,109 @@ var file_trace_proto_rawDesc = []byte{
0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74,
0x6d, 0x61, 0x73, 0x6b, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65,
0x64, 0x46, 0x72, 0x6f, 0x6d, 0x1a, 0x56, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72,
0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
0x48, 0x00, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x05,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65,
0x72, 0x49, 0x44, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x0a,
0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x06, 0x70,
0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70,
0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65,
0x72, 0x49, 0x44, 0x1a, 0x88, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x12,
0x27, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65,
0x64, 0x46, 0x72, 0x6f, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d,
0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74,
0x61, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65,
0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x76,
0x0a, 0x07, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x6e,
0x64, 0x54, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x6e,
0x64, 0x54, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75,
0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e,
0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x88,
0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x42, 0x07, 0x0a,
0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x76, 0x0a, 0x07, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x50,
0x43, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x3a,
0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62,
0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61,
0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x48,
0x01, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73,
0x65, 0x6e, 0x64, 0x54, 0x6f, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x31,
0x0a, 0x04, 0x4a, 0x6f, 0x69, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61,
0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x1a, 0x32, 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69,
0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69,
0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x5a, 0x0a, 0x05, 0x47, 0x72, 0x61, 0x66, 0x74, 0x12, 0x1b,
0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00,
0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07,
0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70,
0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x1a, 0x5a, 0x0a, 0x05, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x65,
0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x70, 0x65,
0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61,
0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d,
0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49,
0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xe5, 0x01,
0x0a, 0x07, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c,
0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63,
0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65,
0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x0c,
0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e,
0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75,
0x62, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75,
0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e,
0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x00, 0x52, 0x07, 0x63,
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f,
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x69, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x4d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49,
0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61,
0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d,
0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b,
0x1a, 0x65, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x73,
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00,
0x52, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1d,
0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48,
0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a,
0x0a, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f,
0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x95, 0x02, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74,
0x72, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65,
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d,
0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e,
0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x48, 0x61, 0x76, 0x65, 0x4d, 0x65,
0x74, 0x61, 0x52, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x77, 0x61,
0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73,
0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76,
0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74,
0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x69, 0x77, 0x61, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x67,
0x72, 0x61, 0x66, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f,
0x64, 0x46, 0x72, 0x6f, 0x6d, 0x1a, 0xe5, 0x01, 0x0a, 0x07, 0x52, 0x50, 0x43, 0x4d, 0x65, 0x74,
0x61, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62,
0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x6c, 0x6f,
0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65,
0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61,
0x66, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x67, 0x72, 0x61, 0x66, 0x74, 0x12, 0x40, 0x0a,
0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62,
0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x62, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0c, 0x73,
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x07, 0x63,
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62,
0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61,
0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50,
0x72, 0x75, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x1a,
0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x48, 0x61, 0x76, 0x65, 0x4d,
0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88,
0x01, 0x01, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73,
0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49,
0x44, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x32,
0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x4d, 0x65,
0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73,
0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49,
0x44, 0x73, 0x1a, 0x3d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61,
0x66, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61,
0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x1a, 0x53, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e,
0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20,
0x03, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xcf, 0x01, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12,
0x13, 0x0a, 0x0f, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41,
0x47, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4d,
0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x55, 0x50, 0x4c,
0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x02, 0x12,
0x13, 0x0a, 0x0f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41,
0x47, 0x45, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x44, 0x44, 0x5f, 0x50, 0x45, 0x45, 0x52,
0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x5f, 0x50, 0x45, 0x45,
0x52, 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x43, 0x56, 0x5f, 0x52, 0x50, 0x43, 0x10,
0x06, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x45, 0x4e, 0x44, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x07, 0x12,
0x0c, 0x0a, 0x08, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x08, 0x12, 0x08, 0x0a,
0x04, 0x4a, 0x4f, 0x49, 0x4e, 0x10, 0x09, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x45, 0x41, 0x56, 0x45,
0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x41, 0x46, 0x54, 0x10, 0x0b, 0x12, 0x09, 0x0a,
0x05, 0x50, 0x52, 0x55, 0x4e, 0x45, 0x10, 0x0c, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70,
0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x42, 0x0c, 0x0a, 0x0a,
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70,
0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x10, 0x0a,
0x0e, 0x5f, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
0x13, 0x0a, 0x11, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, 0x50,
0x65, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x65,
0x65, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x72, 0x65, 0x63, 0x76, 0x52, 0x50, 0x43, 0x42, 0x0a,
0x0a, 0x08, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x64,
0x72, 0x6f, 0x70, 0x52, 0x50, 0x43, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6a, 0x6f, 0x69, 0x6e, 0x42,
0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x67, 0x72,
0x61, 0x66, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x22, 0x42, 0x0a,
0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68,
0x12, 0x2f, 0x0a, 0x05, 0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x19, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e,
0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x62, 0x61, 0x74, 0x63,
0x68, 0x42, 0x43, 0x5a, 0x41, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c,
0x69, 0x62, 0x72, 0x69, 0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69,
0x62, 0x72, 0x69, 0x75, 0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x67,
0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2d, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d,
0x73, 0x75, 0x62, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4d,
0x65, 0x74, 0x61, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x88, 0x01,
0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x69, 0x0a,
0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09,
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48,
0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12,
0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c,
0x0a, 0x0a, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x42, 0x0a, 0x0a, 0x08,
0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x65, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x4d,
0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73,
0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61,
0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a,
0x95, 0x02, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12,
0x40, 0x0a, 0x05, 0x69, 0x68, 0x61, 0x76, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a,
0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54,
0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
0x6c, 0x49, 0x48, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x69, 0x68, 0x61, 0x76,
0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x77, 0x61, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62,
0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
0x72, 0x6f, 0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x69, 0x77,
0x61, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x66, 0x74, 0x18, 0x03, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e,
0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f,
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05,
0x67, 0x72, 0x61, 0x66, 0x74, 0x12, 0x40, 0x0a, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x18, 0x04,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75,
0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e,
0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x61,
0x52, 0x05, 0x70, 0x72, 0x75, 0x6e, 0x65, 0x1a, 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72,
0x6f, 0x6c, 0x49, 0x48, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x07, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07,
0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a,
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62,
0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x32, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
0x6c, 0x49, 0x57, 0x61, 0x6e, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a,
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x44, 0x73, 0x1a, 0x3d, 0x0a, 0x10, 0x43, 0x6f,
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x47, 0x72, 0x61, 0x66, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d,
0x0a, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48,
0x00, 0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a,
0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x53, 0x0a, 0x10, 0x43, 0x6f, 0x6e,
0x74, 0x72, 0x6f, 0x6c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a,
0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00,
0x52, 0x07, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05,
0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x65, 0x65,
0x72, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xea,
0x01, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x55, 0x42, 0x4c, 0x49,
0x53, 0x48, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e,
0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x01,
0x12, 0x15, 0x0a, 0x11, 0x44, 0x55, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x45,
0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x4c, 0x49, 0x56,
0x45, 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08,
0x41, 0x44, 0x44, 0x5f, 0x50, 0x45, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45,
0x4d, 0x4f, 0x56, 0x45, 0x5f, 0x50, 0x45, 0x45, 0x52, 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x52,
0x45, 0x43, 0x56, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x45, 0x4e,
0x44, 0x5f, 0x52, 0x50, 0x43, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x4f, 0x50, 0x5f,
0x52, 0x50, 0x43, 0x10, 0x08, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x4f, 0x49, 0x4e, 0x10, 0x09, 0x12,
0x09, 0x0a, 0x05, 0x4c, 0x45, 0x41, 0x56, 0x45, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52,
0x41, 0x46, 0x54, 0x10, 0x0b, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x52, 0x55, 0x4e, 0x45, 0x10, 0x0c,
0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x41, 0x42, 0x4c,
0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0d, 0x42, 0x07, 0x0a, 0x05, 0x5f,
0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x42,
0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x11, 0x0a,
0x0f, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x64, 0x65, 0x6c, 0x69,
0x76, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x61,
0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76,
0x65, 0x50, 0x65, 0x65, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x72, 0x65, 0x63, 0x76, 0x52, 0x50,
0x43, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x50, 0x43, 0x42, 0x0a, 0x0a,
0x08, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x50, 0x43, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6a, 0x6f,
0x69, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x42, 0x08, 0x0a, 0x06,
0x5f, 0x67, 0x72, 0x61, 0x66, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x70, 0x72, 0x75, 0x6e, 0x65,
0x42, 0x17, 0x0a, 0x15, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x61, 0x62,
0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x42, 0x0a, 0x0f, 0x54, 0x72, 0x61,
0x63, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2f, 0x0a, 0x05,
0x62, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x6c,
0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x63,
0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x62, 0x61, 0x74, 0x63, 0x68, 0x42, 0x43, 0x5a,
0x41, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69,
0x75, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x71, 0x75, 0x69, 0x6c, 0x69, 0x62, 0x72, 0x69, 0x75,
0x6d, 0x2f, 0x6d, 0x6f, 0x6e, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2f, 0x67, 0x6f, 0x2d, 0x6c, 0x69,
0x62, 0x70, 0x32, 0x70, 0x2d, 0x62, 0x6c, 0x6f, 0x73, 0x73, 0x6f, 0x6d, 0x73, 0x75, 0x62, 0x2f,
0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1761,32 +1856,33 @@ func file_trace_proto_rawDescGZIP() []byte {
}
var file_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
var file_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 24)
var file_trace_proto_goTypes = []interface{}{
(TraceEvent_Type)(0), // 0: blossomsub.pb.TraceEvent.Type
(*TraceEvent)(nil), // 1: blossomsub.pb.TraceEvent
(*TraceEventBatch)(nil), // 2: blossomsub.pb.TraceEventBatch
(*TraceEvent_PublishMessage)(nil), // 3: blossomsub.pb.TraceEvent.PublishMessage
(*TraceEvent_RejectMessage)(nil), // 4: blossomsub.pb.TraceEvent.RejectMessage
(*TraceEvent_DuplicateMessage)(nil), // 5: blossomsub.pb.TraceEvent.DuplicateMessage
(*TraceEvent_DeliverMessage)(nil), // 6: blossomsub.pb.TraceEvent.DeliverMessage
(*TraceEvent_AddPeer)(nil), // 7: blossomsub.pb.TraceEvent.AddPeer
(*TraceEvent_RemovePeer)(nil), // 8: blossomsub.pb.TraceEvent.RemovePeer
(*TraceEvent_RecvRPC)(nil), // 9: blossomsub.pb.TraceEvent.RecvRPC
(*TraceEvent_SendRPC)(nil), // 10: blossomsub.pb.TraceEvent.SendRPC
(*TraceEvent_DropRPC)(nil), // 11: blossomsub.pb.TraceEvent.DropRPC
(*TraceEvent_Join)(nil), // 12: blossomsub.pb.TraceEvent.Join
(*TraceEvent_Leave)(nil), // 13: blossomsub.pb.TraceEvent.Leave
(*TraceEvent_Graft)(nil), // 14: blossomsub.pb.TraceEvent.Graft
(*TraceEvent_Prune)(nil), // 15: blossomsub.pb.TraceEvent.Prune
(*TraceEvent_RPCMeta)(nil), // 16: blossomsub.pb.TraceEvent.RPCMeta
(*TraceEvent_MessageMeta)(nil), // 17: blossomsub.pb.TraceEvent.MessageMeta
(*TraceEvent_SubMeta)(nil), // 18: blossomsub.pb.TraceEvent.SubMeta
(*TraceEvent_ControlMeta)(nil), // 19: blossomsub.pb.TraceEvent.ControlMeta
(*TraceEvent_ControlIHaveMeta)(nil), // 20: blossomsub.pb.TraceEvent.ControlIHaveMeta
(*TraceEvent_ControlIWantMeta)(nil), // 21: blossomsub.pb.TraceEvent.ControlIWantMeta
(*TraceEvent_ControlGraftMeta)(nil), // 22: blossomsub.pb.TraceEvent.ControlGraftMeta
(*TraceEvent_ControlPruneMeta)(nil), // 23: blossomsub.pb.TraceEvent.ControlPruneMeta
(TraceEvent_Type)(0), // 0: blossomsub.pb.TraceEvent.Type
(*TraceEvent)(nil), // 1: blossomsub.pb.TraceEvent
(*TraceEventBatch)(nil), // 2: blossomsub.pb.TraceEventBatch
(*TraceEvent_PublishMessage)(nil), // 3: blossomsub.pb.TraceEvent.PublishMessage
(*TraceEvent_RejectMessage)(nil), // 4: blossomsub.pb.TraceEvent.RejectMessage
(*TraceEvent_DuplicateMessage)(nil), // 5: blossomsub.pb.TraceEvent.DuplicateMessage
(*TraceEvent_DeliverMessage)(nil), // 6: blossomsub.pb.TraceEvent.DeliverMessage
(*TraceEvent_AddPeer)(nil), // 7: blossomsub.pb.TraceEvent.AddPeer
(*TraceEvent_RemovePeer)(nil), // 8: blossomsub.pb.TraceEvent.RemovePeer
(*TraceEvent_RecvRPC)(nil), // 9: blossomsub.pb.TraceEvent.RecvRPC
(*TraceEvent_SendRPC)(nil), // 10: blossomsub.pb.TraceEvent.SendRPC
(*TraceEvent_DropRPC)(nil), // 11: blossomsub.pb.TraceEvent.DropRPC
(*TraceEvent_Join)(nil), // 12: blossomsub.pb.TraceEvent.Join
(*TraceEvent_Leave)(nil), // 13: blossomsub.pb.TraceEvent.Leave
(*TraceEvent_Graft)(nil), // 14: blossomsub.pb.TraceEvent.Graft
(*TraceEvent_Prune)(nil), // 15: blossomsub.pb.TraceEvent.Prune
(*TraceEvent_UndeliverableMessage)(nil), // 16: blossomsub.pb.TraceEvent.UndeliverableMessage
(*TraceEvent_RPCMeta)(nil), // 17: blossomsub.pb.TraceEvent.RPCMeta
(*TraceEvent_MessageMeta)(nil), // 18: blossomsub.pb.TraceEvent.MessageMeta
(*TraceEvent_SubMeta)(nil), // 19: blossomsub.pb.TraceEvent.SubMeta
(*TraceEvent_ControlMeta)(nil), // 20: blossomsub.pb.TraceEvent.ControlMeta
(*TraceEvent_ControlIHaveMeta)(nil), // 21: blossomsub.pb.TraceEvent.ControlIHaveMeta
(*TraceEvent_ControlIWantMeta)(nil), // 22: blossomsub.pb.TraceEvent.ControlIWantMeta
(*TraceEvent_ControlGraftMeta)(nil), // 23: blossomsub.pb.TraceEvent.ControlGraftMeta
(*TraceEvent_ControlPruneMeta)(nil), // 24: blossomsub.pb.TraceEvent.ControlPruneMeta
}
var file_trace_proto_depIdxs = []int32{
0, // 0: blossomsub.pb.TraceEvent.type:type_name -> blossomsub.pb.TraceEvent.Type
@ -1803,22 +1899,23 @@ var file_trace_proto_depIdxs = []int32{
13, // 11: blossomsub.pb.TraceEvent.leave:type_name -> blossomsub.pb.TraceEvent.Leave
14, // 12: blossomsub.pb.TraceEvent.graft:type_name -> blossomsub.pb.TraceEvent.Graft
15, // 13: blossomsub.pb.TraceEvent.prune:type_name -> blossomsub.pb.TraceEvent.Prune
1, // 14: blossomsub.pb.TraceEventBatch.batch:type_name -> blossomsub.pb.TraceEvent
16, // 15: blossomsub.pb.TraceEvent.RecvRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta
16, // 16: blossomsub.pb.TraceEvent.SendRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta
16, // 17: blossomsub.pb.TraceEvent.DropRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta
17, // 18: blossomsub.pb.TraceEvent.RPCMeta.messages:type_name -> blossomsub.pb.TraceEvent.MessageMeta
18, // 19: blossomsub.pb.TraceEvent.RPCMeta.subscription:type_name -> blossomsub.pb.TraceEvent.SubMeta
19, // 20: blossomsub.pb.TraceEvent.RPCMeta.control:type_name -> blossomsub.pb.TraceEvent.ControlMeta
20, // 21: blossomsub.pb.TraceEvent.ControlMeta.ihave:type_name -> blossomsub.pb.TraceEvent.ControlIHaveMeta
21, // 22: blossomsub.pb.TraceEvent.ControlMeta.iwant:type_name -> blossomsub.pb.TraceEvent.ControlIWantMeta
22, // 23: blossomsub.pb.TraceEvent.ControlMeta.graft:type_name -> blossomsub.pb.TraceEvent.ControlGraftMeta
23, // 24: blossomsub.pb.TraceEvent.ControlMeta.prune:type_name -> blossomsub.pb.TraceEvent.ControlPruneMeta
25, // [25:25] is the sub-list for method output_type
25, // [25:25] is the sub-list for method input_type
25, // [25:25] is the sub-list for extension type_name
25, // [25:25] is the sub-list for extension extendee
0, // [0:25] is the sub-list for field type_name
16, // 14: blossomsub.pb.TraceEvent.undeliverableMessage:type_name -> blossomsub.pb.TraceEvent.UndeliverableMessage
1, // 15: blossomsub.pb.TraceEventBatch.batch:type_name -> blossomsub.pb.TraceEvent
17, // 16: blossomsub.pb.TraceEvent.RecvRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta
17, // 17: blossomsub.pb.TraceEvent.SendRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta
17, // 18: blossomsub.pb.TraceEvent.DropRPC.meta:type_name -> blossomsub.pb.TraceEvent.RPCMeta
18, // 19: blossomsub.pb.TraceEvent.RPCMeta.messages:type_name -> blossomsub.pb.TraceEvent.MessageMeta
19, // 20: blossomsub.pb.TraceEvent.RPCMeta.subscription:type_name -> blossomsub.pb.TraceEvent.SubMeta
20, // 21: blossomsub.pb.TraceEvent.RPCMeta.control:type_name -> blossomsub.pb.TraceEvent.ControlMeta
21, // 22: blossomsub.pb.TraceEvent.ControlMeta.ihave:type_name -> blossomsub.pb.TraceEvent.ControlIHaveMeta
22, // 23: blossomsub.pb.TraceEvent.ControlMeta.iwant:type_name -> blossomsub.pb.TraceEvent.ControlIWantMeta
23, // 24: blossomsub.pb.TraceEvent.ControlMeta.graft:type_name -> blossomsub.pb.TraceEvent.ControlGraftMeta
24, // 25: blossomsub.pb.TraceEvent.ControlMeta.prune:type_name -> blossomsub.pb.TraceEvent.ControlPruneMeta
26, // [26:26] is the sub-list for method output_type
26, // [26:26] is the sub-list for method input_type
26, // [26:26] is the sub-list for extension type_name
26, // [26:26] is the sub-list for extension extendee
0, // [0:26] is the sub-list for field type_name
}
func init() { file_trace_proto_init() }
@ -2008,7 +2105,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_RPCMeta); i {
switch v := v.(*TraceEvent_UndeliverableMessage); i {
case 0:
return &v.state
case 1:
@ -2020,7 +2117,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_MessageMeta); i {
switch v := v.(*TraceEvent_RPCMeta); i {
case 0:
return &v.state
case 1:
@ -2032,7 +2129,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_SubMeta); i {
switch v := v.(*TraceEvent_MessageMeta); i {
case 0:
return &v.state
case 1:
@ -2044,7 +2141,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_ControlMeta); i {
switch v := v.(*TraceEvent_SubMeta); i {
case 0:
return &v.state
case 1:
@ -2056,7 +2153,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_ControlIHaveMeta); i {
switch v := v.(*TraceEvent_ControlMeta); i {
case 0:
return &v.state
case 1:
@ -2068,7 +2165,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_ControlIWantMeta); i {
switch v := v.(*TraceEvent_ControlIHaveMeta); i {
case 0:
return &v.state
case 1:
@ -2080,7 +2177,7 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_ControlGraftMeta); i {
switch v := v.(*TraceEvent_ControlIWantMeta); i {
case 0:
return &v.state
case 1:
@ -2092,6 +2189,18 @@ func file_trace_proto_init() {
}
}
file_trace_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_ControlGraftMeta); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_trace_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TraceEvent_ControlPruneMeta); i {
case 0:
return &v.state
@ -2121,16 +2230,17 @@ func file_trace_proto_init() {
file_trace_proto_msgTypes[15].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[16].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[17].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[19].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[21].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[18].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[20].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[22].OneofWrappers = []interface{}{}
file_trace_proto_msgTypes[23].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_trace_proto_rawDesc,
NumEnums: 1,
NumMessages: 23,
NumMessages: 24,
NumExtensions: 0,
NumServices: 0,
},
View File

@ -22,6 +22,7 @@ message TraceEvent {
optional Leave leave = 14;
optional Graft graft = 15;
optional Prune prune = 16;
optional UndeliverableMessage undeliverableMessage = 17;
enum Type {
PUBLISH_MESSAGE = 0;
@ -37,6 +38,7 @@ message TraceEvent {
LEAVE = 10;
GRAFT = 11;
PRUNE = 12;
UNDELIVERABLE_MESSAGE = 13;
}
message PublishMessage {
@ -105,6 +107,12 @@ message TraceEvent {
optional bytes bitmask = 2;
}
message UndeliverableMessage {
optional bytes messageID = 1;
optional bytes bitmask = 2;
optional bytes receivedFrom = 3;
}
message RPCMeta {
repeated MessageMeta messages = 1;
repeated SubMeta subscription = 2;
View File

@ -0,0 +1,112 @@
package blossomsub
import (
"context"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
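// watchForNewPeers subscribes to the host's identify-completion and
// protocol-update events, seeds the pending-peer set with already-connected
// peers, and queues any peer that speaks one of the router's protocols for the
// pubsub main loop.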
func (ps *PubSub) watchForNewPeers(ctx context.Context) {
// We don't bother subscribing to "connectivity" events because we always run identify after
// every new connection.
sub, err := ps.host.EventBus().Subscribe([]interface{}{
&event.EvtPeerIdentificationCompleted{},
&event.EvtPeerProtocolsUpdated{},
})
if err != nil {
log.Errorf("failed to subscribe to peer identification events: %v", err)
return
}
defer sub.Close()
ps.newPeersPrioLk.RLock()
ps.newPeersMx.Lock()
for _, pid := range ps.host.Network().Peers() {
if ps.host.Network().Connectedness(pid) != network.Connected {
continue
}
ps.newPeersPend[pid] = struct{}{}
}
ps.newPeersMx.Unlock()
ps.newPeersPrioLk.RUnlock()
select {
case ps.newPeers <- struct{}{}:
default:
}
var supportsProtocol func(protocol.ID) bool
if ps.protoMatchFunc != nil {
var supportedProtocols []func(protocol.ID) bool
for _, proto := range ps.rt.Protocols() {
supportedProtocols = append(supportedProtocols, ps.protoMatchFunc(proto))
}
supportsProtocol = func(proto protocol.ID) bool {
for _, fn := range supportedProtocols {
if (fn)(proto) {
return true
}
}
return false
}
} else {
supportedProtocols := make(map[protocol.ID]struct{})
for _, proto := range ps.rt.Protocols() {
supportedProtocols[proto] = struct{}{}
}
supportsProtocol = func(proto protocol.ID) bool {
_, ok := supportedProtocols[proto]
return ok
}
}
for ctx.Err() == nil {
var ev any
select {
case <-ctx.Done():
return
case ev = <-sub.Out():
}
var protos []protocol.ID
var peer peer.ID
switch ev := ev.(type) {
case event.EvtPeerIdentificationCompleted:
peer = ev.Peer
protos = ev.Protocols
case event.EvtPeerProtocolsUpdated:
peer = ev.Peer
protos = ev.Added
default:
continue
}
// We don't bother checking connectivity (connected and non-"limited") here because
// we'll check when actually handling the new peer.
for _, p := range protos {
if supportsProtocol(p) {
ps.notifyNewPeer(peer)
break
}
}
}
}
func (ps *PubSub) notifyNewPeer(peer peer.ID) {
ps.newPeersPrioLk.RLock()
ps.newPeersMx.Lock()
ps.newPeersPend[peer] = struct{}{}
ps.newPeersMx.Unlock()
ps.newPeersPrioLk.RUnlock()
select {
case ps.newPeers <- struct{}{}:
default:
}
}
View File

@ -1,11 +1,14 @@
package blossomsub
import (
"bytes"
"context"
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"slices"
"sync"
"sync/atomic"
"time"
@ -23,8 +26,8 @@ import (
logging "github.com/ipfs/go-log/v2"
)
// DefaultMaximumMessageSize is 16.7 MB.
const DefaultMaxMessageSize = 1 << 24
// DefaultMaxMessageSize is 1 MiB.
const DefaultMaxMessageSize = 1 << 20
var (
// TimeCacheDuration specifies how long a message ID will be remembered as seen.
@ -231,7 +234,7 @@ const (
type Message struct {
*pb.Message
ID string
ID []byte
ReceivedFrom peer.ID
ValidatorData interface{}
Local bool
@ -330,20 +333,19 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option
h.SetStreamHandler(id, ps.handleNewStream)
}
}
h.Network().Notify((*PubSubNotif)(ps))
go ps.watchForNewPeers(ctx)
ps.val.Start(ps)
go ps.processLoop(ctx)
(*PubSubNotif)(ps).Initialize()
return ps, nil
}
// MsgIdFunction returns a unique ID for the passed Message, and PubSub can be customized to use any
// implementation of this function by configuring it with the Option from WithMessageIdFn.
type MsgIdFunction func(pmsg *pb.Message) string
type MsgIdFunction func(pmsg *pb.Message) []byte
// WithMessageIdFn is an option to customize the way a message ID is computed for a pubsub message.
// The default ID function is DefaultMsgIdFn (a version-prefixed SHA-256 of the message data),
@ -495,14 +497,14 @@ func WithRawTracer(tracer RawTracer) Option {
}
// WithMaxMessageSize sets the global maximum message size for pubsub wire
// messages. The default value is 16.7MiB (DefaultMaxMessageSize).
// messages. The default value is 1MiB (DefaultMaxMessageSize).
//
// Observe the following warnings when setting this option.
//
// WARNING #1: Make sure to change the default protocol prefixes for floodsub
// (FloodSubID) and BlossomSub (BlossomSubID). This avoids accidentally joining
// the public default network, which uses the default max message size, and
// therefore will cause messages to be dropped.
// WARNING #1: Make sure to change the default protocol prefixes for BlossomSub
// (BlossomSubID). This avoids accidentally joining the public default network,
// which uses the default max message size, and therefore will cause messages to
// be dropped.
//
// WARNING #2: Reducing the default max message limit is fine, if you are
// certain that your application messages will not exceed the new limit.
@ -622,22 +624,13 @@ func (p *PubSub) processLoop(ctx context.Context) {
case bitmask := <-p.rmRelay:
p.handleRemoveRelay([]byte(bitmask))
case preq := <-p.getPeers:
tmap, ok := p.bitmasks[string(preq.bitmask)]
if preq.bitmask != nil && !ok {
peers := p.getPeersInBitmask(preq.bitmask)
if len(peers) == 0 {
preq.resp <- nil
continue
} else {
preq.resp <- peers
}
var peers []peer.ID
for p := range p.peers {
if preq.bitmask != nil {
_, ok := tmap[p]
if !ok {
continue
}
}
peers = append(peers, p)
}
preq.resp <- peers
case rpc := <-p.incoming:
p.handleIncomingRPC(rpc)
@ -677,6 +670,47 @@ func (p *PubSub) processLoop(ctx context.Context) {
}
}
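// getPeersInBitmask returns the peers subscribed to every one-hot slice of the
// given bitmask, i.e. the intersection of the per-slice peer sets; if any slice
// has no subscribers the result is empty.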
func (p *PubSub) getPeersInBitmask(bitmask []byte) []peer.ID {
bitmaskSlices := SliceBitmask(bitmask)
var peers []peer.ID
peerloop:
for _, slice := range bitmaskSlices {
tmap, ok := p.bitmasks[string(slice)]
if !ok {
peers = []peer.ID{}
break peerloop
}
var peerset []peer.ID
for p := range p.peers {
_, ok := tmap[p]
if !ok {
continue
}
peerset = append(peerset, p)
}
if len(peers) == 0 {
peers = peerset
} else {
var update []peer.ID
for _, p := range peers {
if slices.Contains(peerset, p) {
update = append(update, p)
}
}
peers = update
if len(update) == 0 {
break peerloop
}
}
}
return peers
}
func (p *PubSub) handlePendingPeers() {
p.newPeersPrioLk.Lock()
@ -975,14 +1009,13 @@ func (p *PubSub) doAnnounceRetry(pid peer.ID, bitmask []byte, sub bool) {
// Only called from processLoop.
func (p *PubSub) notifySubs(msg *Message) {
bitmask := msg.GetBitmask()
subs := p.mySubs[string(bitmask)]
slices := SliceBitmask(bitmask)
// o := rand.Intn(len(slices))
subs := p.mySubs[string(slices[0])]
for f := range subs {
select {
case f.ch <- msg:
case <-time.After(5 * time.Millisecond):
// it's unreasonable to immediately fall over because a subscriber didn't
// answer, message delivery sometimes lands next nanosecond and dropping
// it when there's room is absurd.
default:
p.tracer.UndeliverableMessage(msg)
log.Infof("Can't deliver message to subscription for bitmask %x; subscriber too slow", bitmask)
}
@ -990,14 +1023,14 @@ func (p *PubSub) notifySubs(msg *Message) {
}
// seenMessage returns whether we already saw this message before
func (p *PubSub) seenMessage(id string) bool {
return p.seenMessages.Has(id)
func (p *PubSub) seenMessage(id []byte) bool {
return p.seenMessages.Has(string(id))
}
// markSeen marks a message as seen such that seenMessage returns `true' for the given id
// returns true if the message was freshly marked
func (p *PubSub) markSeen(id string) bool {
return p.seenMessages.Add(id)
func (p *PubSub) markSeen(id []byte) bool {
return p.seenMessages.Add(string(id))
}
// subscribedToMessage returns whether we are subscribed to one of the bitmasks
@ -1008,9 +1041,15 @@ func (p *PubSub) subscribedToMsg(msg *pb.Message) bool {
}
bitmask := msg.GetBitmask()
_, ok := p.mySubs[string(bitmask)]
slices := SliceBitmask(bitmask)
for _, slice := range slices {
_, ok := p.mySubs[string(slice)]
if !ok {
return false
}
}
return ok
return true
}
// canRelayMsg returns whether we are able to relay for one of the bitmasks
@ -1021,9 +1060,15 @@ func (p *PubSub) canRelayMsg(msg *pb.Message) bool {
}
bitmask := msg.GetBitmask()
relays := p.myRelays[string(bitmask)]
slices := SliceBitmask(bitmask)
for _, slice := range slices {
relays := p.myRelays[string(slice)]
if relays > 0 {
return true
}
}
return relays > 0
return false
}
func (p *PubSub) notifyLeave(bitmask []byte, pid peer.ID) {
@ -1049,7 +1094,7 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) {
var err error
subs, err = p.subFilter.FilterIncomingSubscriptions(rpc.from, subs)
if err != nil {
log.Debugf("subscription filter error: %s; ignoring RPC", err)
log.Debugf("subscription filter error: %s; ignoring RPC\n", err)
return
}
}
@ -1103,7 +1148,7 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) {
continue
}
p.pushMsg(&Message{pmsg, "", rpc.from, nil, false})
p.pushMsg(&Message{pmsg, []byte{}, rpc.from, nil, false})
}
}
@ -1111,8 +1156,10 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) {
}
// DefaultMsgIdFn returns a unique ID of the passed Message
func DefaultMsgIdFn(pmsg *pb.Message) string {
return string(pmsg.GetFrom()) + string(pmsg.GetSeqno())
func DefaultMsgIdFn(pmsg *pb.Message) []byte {
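// v2 message IDs are a 0x01 version byte followed by the SHA-256 digest of
// the payload, 33 bytes in total.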
h := sha256.New()
h.Write(pmsg.Data)
return h.Sum([]byte{0x01})
}
// DefaultPeerFilter accepts all peers on all bitmasks
@ -1233,59 +1280,75 @@ func (p *PubSub) PeerScore(pr peer.ID) float64 {
return p.rt.PeerScore(pr)
}
// Join joins the bitmask and returns a Bitmask handle. Only one Bitmask handle should exist per bitmask, and Join will error if
// the Bitmask handle already exists.
func (p *PubSub) Join(bitmask []byte, opts ...BitmaskOpt) (*Bitmask, error) {
t, ok, err := p.tryJoin(bitmask, opts...)
if err != nil {
return nil, err
// Join joins the bitmasks and returns a set of Bitmask handles. Only one Bitmask
// handle should exist per bit, and Join will error if all the Bitmask handles already exist.
func (p *PubSub) Join(bitmask []byte, opts ...BitmaskOpt) ([]*Bitmask, error) {
ts, news, errs := p.tryJoin(bitmask, opts...)
if len(errs) != 0 {
return nil, errors.Join(errs...)
}
if !ok {
if !slices.Contains(news, true) {
return nil, fmt.Errorf("bitmask already exists")
}
return t, nil
return ts, nil
}
// tryJoin is an internal function that tries to join a bitmask
// Returns the bitmask if it can be created or found
// Returns true if the bitmask was newly created, false otherwise
// Can be removed once pubsub.Publish() and pubsub.Subscribe() are removed
func (p *PubSub) tryJoin(bitmask []byte, opts ...BitmaskOpt) (*Bitmask, bool, error) {
func (p *PubSub) tryJoin(bitmask []byte, opts ...BitmaskOpt) ([]*Bitmask, []bool, []error) {
if p.subFilter != nil && !p.subFilter.CanSubscribe(bitmask) {
return nil, false, fmt.Errorf("bitmask is not allowed by the subscription filter")
return nil, nil, []error{fmt.Errorf("bitmask is not allowed by the subscription filter")}
}
t := &Bitmask{
p: p,
bitmask: bitmask,
evtHandlers: make(map[*BitmaskEventHandler]struct{}),
}
sliced := SliceBitmask(bitmask)
var bitmasks []*Bitmask
var newBitmasks []bool
var errors []error
for _, opt := range opts {
err := opt(t)
if err != nil {
return nil, false, err
loop:
for _, slice := range sliced {
slice := slice
t := &Bitmask{
p: p,
bitmask: slice,
evtHandlers: make(map[*BitmaskEventHandler]struct{}),
}
for _, opt := range opts {
err := opt(t)
if err != nil {
errors = append(errors, err)
continue loop
}
}
resp := make(chan *Bitmask, 1)
select {
case t.p.addBitmask <- &addBitmaskReq{
bitmask: t,
resp: resp,
}:
case <-t.p.ctx.Done():
errors = append(errors, t.p.ctx.Err())
continue loop
}
returnedBitmask := <-resp
if returnedBitmask != t {
bitmasks = append(bitmasks, returnedBitmask)
newBitmasks = append(newBitmasks, false)
} else {
bitmasks = append(bitmasks, t)
newBitmasks = append(newBitmasks, true)
}
}
resp := make(chan *Bitmask, 1)
select {
case t.p.addBitmask <- &addBitmaskReq{
bitmask: t,
resp: resp,
}:
case <-t.p.ctx.Done():
return nil, false, t.p.ctx.Err()
}
returnedBitmask := <-resp
if returnedBitmask != t {
return returnedBitmask, false, nil
}
return t, true, nil
return bitmasks, newBitmasks, errors
}
type addSubReq struct {
@ -1300,14 +1363,24 @@ type SubOpt func(sub *Subscription) error
// before the subscription is processed by the pubsub main loop and propagated to our peers.
//
// Deprecated: use pubsub.Join() and bitmask.Subscribe() instead
func (p *PubSub) Subscribe(bitmask []byte, opts ...SubOpt) (*Subscription, error) {
func (p *PubSub) Subscribe(bitmask []byte, opts ...SubOpt) ([]*Subscription, error) {
// ignore whether the bitmask was newly created or not, since either way we have a valid bitmask to work with
bitmaskHandle, _, err := p.tryJoin(bitmask)
if err != nil {
return nil, err
bitmaskHandles, _, errs := p.tryJoin(bitmask)
if len(errs) != 0 {
return nil, errors.Join(errs...)
}
return bitmaskHandle.Subscribe(opts...)
var subs []*Subscription
for _, handle := range bitmaskHandles {
sub, err := handle.Subscribe(opts...)
if err != nil {
return nil, err
}
subs = append(subs, sub)
}
return subs, nil
}
// WithBufferSize is a Subscribe option to customize the size of the subscribe output buffer.
@ -1335,17 +1408,20 @@ func (p *PubSub) GetBitmasks() []string {
return <-out
}
// Publish publishes data to the given bitmask.
//
// Deprecated: use pubsub.Join() and bitmask.Publish() instead
func (p *PubSub) Publish(bitmask []byte, data []byte, opts ...PubOpt) error {
// ignore whether the bitmask was newly created or not, since either way we have a valid bitmask to work with
t, _, err := p.tryJoin(bitmask)
if err != nil {
return err
func (p *PubSub) Publish(ctx context.Context, bitmask []byte, data []byte, opts ...PubOpt) error {
peers := p.ListPeers(bitmask)
if len(peers) == 0 {
return ErrBitmaskClosed
}
return t.Publish(context.TODO(), data, opts...)
slices := SliceBitmask(bitmask)
o := rand.Intn(len(slices))
b, _, errs := p.tryJoin(slices[o])
if len(errs) != 0 {
return errors.Join(errs...)
}
return b[0].Publish(ctx, bitmask, data, opts...)
}
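// Editor's note — illustrative sketch only, not part of this commit:
// exampleFanout is a hypothetical helper showing how the reworked
// Join/Subscribe/Publish API fits together. It assumes an already-constructed
// *PubSub in this package and uses a placeholder bitmask value.
func exampleFanout(ctx context.Context, ps *PubSub) error {
	// Join returns one Bitmask handle per set bit in the vector.
	bitmask := []byte{0x01, 0x00}
	bitmasks, err := ps.Join(bitmask)
	if err != nil {
		return err
	}
	// Subscribe likewise returns one Subscription per slice.
	subs, err := ps.Subscribe(bitmask)
	if err != nil {
		return err
	}
	_ = subs
	// The deprecated top-level Publish now takes a context and routes through a
	// randomly chosen slice handle; publishing via the Bitmask handle directly
	// is the preferred path.
	if err := ps.Publish(ctx, bitmask, []byte("hello")); err != nil {
		return err
	}
	return bitmasks[0].Publish(ctx, bitmask, []byte("hello again"))
}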
func (p *PubSub) nextSeqno() []byte {
@ -1430,3 +1506,34 @@ type addRelayReq struct {
bitmask []byte
resp chan RelayCancelFunc
}
func SliceBitmask(bitmask []byte) [][]byte {
sliced := [][]byte{}
if bytes.Equal(bitmask, make([]byte, len(bitmask))) {
sliced = append(sliced, bitmask)
} else {
for i, b := range bitmask {
if b == 0 {
continue
}
// fast: one bit in byte
if b&(b-1) == 0 {
slice := make([]byte, len(bitmask))
slice[i] = b
sliced = append(sliced, slice)
continue
}
for j := 7; j >= 0; j-- {
if (b>>j)&1 == 1 {
slice := make([]byte, len(bitmask))
slice[i] = 1 << j
sliced = append(sliced, slice)
}
}
}
}
return sliced
}
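// Editor's note — illustrative sketch only, not part of this commit: every set
// bit in the input becomes its own one-hot vector of the same length, emitted
// in byte order with the high bit first, and an all-zero vector is returned
// unchanged as a single slice. exampleSliceBitmask is a hypothetical helper.
func exampleSliceBitmask() {
	sliced := SliceBitmask([]byte{0x00, 0x03})
	fmt.Printf("%x %x\n", sliced[0], sliced[1]) // prints "0002 0001"

	zero := SliceBitmask([]byte{0x00, 0x00})
	fmt.Printf("%d %x\n", len(zero), zero[0]) // prints "1 0000"
}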
View File

@ -2,48 +2,98 @@ package blossomsub
import (
"context"
"crypto/rand"
"testing"
"time"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)
// See https://source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/issues/426
func TestPubSubRemovesBlacklistedPeer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
hosts := getNetHosts(t, ctx, 2)
hosts := getDefaultHosts(t, 2)
bl := NewMapBlacklist()
psubs0 := getPubsub(ctx, hosts[0])
psubs1 := getPubsub(ctx, hosts[1], WithBlacklist(bl))
psubs0 := getBlossomSub(ctx, hosts[0])
psubs1 := getBlossomSub(ctx, hosts[1], WithBlacklist(bl))
connect(t, hosts[0], hosts[1])
// Bad peer is blacklisted after it has connected.
// Calling p.BlacklistPeer directly does the right thing but we should also clean
// up the peer if it has been added to the blacklist by another means.
bl.Add(hosts[0].ID())
_, err := psubs0.Subscribe([]byte{0x7e, 0x57})
bitmasks, err := psubs0.Join([]byte{0x01, 0x00})
if err != nil {
t.Fatal(err)
}
sub1, err := psubs1.Subscribe([]byte{0x7e, 0x57})
_, err = psubs0.Subscribe([]byte{0x01, 0x00})
if err != nil {
t.Fatal(err)
}
sub1, err := psubs1.Subscribe([]byte{0x01, 0x00})
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 100)
psubs0.Publish([]byte{0x7e, 0x57}, []byte("message"))
bitmasks[0].Publish(ctx, []byte{0x01, 0x00}, []byte("message"))
wctx, cancel2 := context.WithTimeout(ctx, 1*time.Second)
defer cancel2()
_, _ = sub1.Next(wctx)
_, _ = sub1[0].Next(wctx)
// Explicitly cancel context so PubSub cleans up peer channels.
// Issue 426 reports a panic due to a peer channel being closed twice.
cancel()
time.Sleep(time.Millisecond * 100)
}
func TestSliceBitmask(t *testing.T) {
fullVector := []byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}
partialVector := []byte{
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x04, 0x03, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}
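// fullVector has all 256 bits set; partialVector sets 0x80, 0x04 and the two
// bits of 0x03, for four one-hot slices in total.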
outputs := SliceBitmask(fullVector)
if len(outputs) != 256 {
t.Fatalf("output length mismatch: %d, expected %d", len(outputs), 256)
}
outputs = SliceBitmask(partialVector)
if len(outputs) != 4 {
t.Fatalf("output length mismatch: %d, expected %d", len(outputs), 4)
}
}
func TestDefaultMsgIdFn(t *testing.T) {
for i := 0; i < 10; i++ {
data := make([]byte, 1024)
rand.Read(data)
// for v2, prepends 0x01
out := DefaultMsgIdFn(&pb.Message{
Data: data,
})
if len(out) != 33 {
t.Fatalf("length mismatch for msg id fn: %d, expected %d\n", len(out), 33)
}
if out[0] != 0x01 {
t.Fatalf("missing prefix byte for msg id fn: %x, expected %x\n", out[:1], []byte{0x01})
}
}
}
View File

@ -1,172 +0,0 @@
package blossomsub
import (
"context"
"math"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
const (
RandomSubID = protocol.ID("/randomsub/1.0.0")
)
var (
RandomSubD = 6
)
// NewRandomSub returns a new PubSub object using RandomSubRouter as the router.
func NewRandomSub(ctx context.Context, h host.Host, size int, opts ...Option) (*PubSub, error) {
rt := &RandomSubRouter{
size: size,
peers: make(map[peer.ID]protocol.ID),
}
return NewPubSub(ctx, h, rt, opts...)
}
// RandomSubRouter is a router that implements a random propagation strategy.
// For each message, it selects the square root of the network size peers, with a min of RandomSubD,
// and forwards the message to them.
type RandomSubRouter struct {
p *PubSub
peers map[peer.ID]protocol.ID
size int
tracer *pubsubTracer
}
func (rs *RandomSubRouter) Protocols() []protocol.ID {
return []protocol.ID{RandomSubID, FloodSubID}
}
func (rs *RandomSubRouter) Attach(p *PubSub) {
rs.p = p
rs.tracer = p.tracer
}
func (rs *RandomSubRouter) PeerScore(p peer.ID) float64 {
return rs.p.PeerScore(p)
}
func (rs *RandomSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
rs.tracer.AddPeer(p, proto)
rs.peers[p] = proto
}
func (rs *RandomSubRouter) RemovePeer(p peer.ID) {
rs.tracer.RemovePeer(p)
delete(rs.peers, p)
}
func (rs *RandomSubRouter) EnoughPeers(bitmask []byte, suggested int) bool {
// check all peers in the bitmask
tmap, ok := rs.p.bitmasks[string(bitmask)]
if !ok {
return false
}
fsPeers := 0
rsPeers := 0
// count floodsub and randomsub peers
for p := range tmap {
switch rs.peers[p] {
case FloodSubID:
fsPeers++
case RandomSubID:
rsPeers++
}
}
if suggested == 0 {
suggested = RandomSubD
}
if fsPeers+rsPeers >= suggested {
return true
}
if rsPeers >= RandomSubD {
return true
}
return false
}
func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus {
return AcceptAll
}
func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {}
func (rs *RandomSubRouter) Publish(msg *Message) {
from := msg.ReceivedFrom
tosend := make(map[peer.ID]struct{})
rspeers := make(map[peer.ID]struct{})
src := peer.ID(msg.GetFrom())
bitmask := msg.GetBitmask()
tmap, ok := rs.p.bitmasks[string(bitmask)]
if !ok {
return
}
for p := range tmap {
if p == from || p == src {
continue
}
if rs.peers[p] == FloodSubID {
tosend[p] = struct{}{}
} else {
rspeers[p] = struct{}{}
}
}
if len(rspeers) > RandomSubD {
target := RandomSubD
sqrt := int(math.Ceil(math.Sqrt(float64(rs.size))))
if sqrt > target {
target = sqrt
}
if target > len(rspeers) {
target = len(rspeers)
}
xpeers := peerMapToList(rspeers)
shufflePeers(xpeers)
xpeers = xpeers[:target]
for _, p := range xpeers {
tosend[p] = struct{}{}
}
} else {
for p := range rspeers {
tosend[p] = struct{}{}
}
}
out := rpcWithMessages(msg.Message)
for p := range tosend {
mch, ok := rs.p.peers[p]
if !ok {
continue
}
select {
case mch <- out:
rs.tracer.SendRPC(out, p)
default:
log.Infof("dropping message to peer %s: queue full", p)
rs.tracer.DropRPC(out, p)
}
}
}
func (rs *RandomSubRouter) Join(bitmask []byte) {
rs.tracer.Join(bitmask)
}
func (rs *RandomSubRouter) Leave(bitmask []byte) {
rs.tracer.Join(bitmask)
}
View File

@ -1,192 +0,0 @@
package blossomsub
import (
"context"
"fmt"
"testing"
"time"
"github.com/libp2p/go-libp2p/core/host"
)
func getRandomsub(ctx context.Context, h host.Host, size int, opts ...Option) *PubSub {
ps, err := NewRandomSub(ctx, h, size, opts...)
if err != nil {
panic(err)
}
return ps
}
func getRandomsubs(ctx context.Context, hs []host.Host, size int, opts ...Option) []*PubSub {
var psubs []*PubSub
for _, h := range hs {
psubs = append(psubs, getRandomsub(ctx, h, size, opts...))
}
return psubs
}
func tryReceive(sub *Subscription) *Message {
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
m, err := sub.Next(ctx)
if err != nil {
return nil
} else {
return m
}
}
func TestRandomsubSmall(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 10)
psubs := getRandomsubs(ctx, hosts, 10)
connectAll(t, hosts)
var subs []*Subscription
for _, ps := range psubs {
sub, err := ps.Subscribe([]byte{0x7e, 0x57})
if err != nil {
t.Fatal(err)
}
subs = append(subs, sub)
}
time.Sleep(time.Second)
count := 0
for i := 0; i < 10; i++ {
msg := []byte(fmt.Sprintf("message %d", i))
psubs[i].Publish([]byte{0x7e, 0x57}, msg)
for _, sub := range subs {
if tryReceive(sub) != nil {
count++
}
}
}
if count < 7*len(hosts) {
t.Fatalf("received too few messages; expected at least %d but got %d", 9*len(hosts), count)
}
}
func TestRandomsubBig(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 50)
psubs := getRandomsubs(ctx, hosts, 50)
connectSome(t, hosts, 12)
var subs []*Subscription
for _, ps := range psubs {
sub, err := ps.Subscribe([]byte{0x7e, 0x57})
if err != nil {
t.Fatal(err)
}
subs = append(subs, sub)
}
time.Sleep(time.Second)
count := 0
for i := 0; i < 10; i++ {
msg := []byte(fmt.Sprintf("message %d", i))
psubs[i].Publish([]byte{0x7e, 0x57}, msg)
for _, sub := range subs {
if tryReceive(sub) != nil {
count++
}
}
}
if count < 7*len(hosts) {
t.Fatalf("received too few messages; expected at least %d but got %d", 9*len(hosts), count)
}
}
func TestRandomsubMixed(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 40)
fsubs := getPubsubs(ctx, hosts[:10])
rsubs := getRandomsubs(ctx, hosts[10:], 30)
psubs := append(fsubs, rsubs...)
connectSome(t, hosts, 12)
var subs []*Subscription
for _, ps := range psubs {
sub, err := ps.Subscribe([]byte{0x7e, 0x57})
if err != nil {
t.Fatal(err)
}
subs = append(subs, sub)
}
time.Sleep(time.Second)
count := 0
for i := 0; i < 10; i++ {
msg := []byte(fmt.Sprintf("message %d", i))
psubs[i].Publish([]byte{0x7e, 0x57}, msg)
for _, sub := range subs {
if tryReceive(sub) != nil {
count++
}
}
}
if count < 7*len(hosts) {
t.Fatalf("received too few messages; expected at least %d but got %d", 9*len(hosts), count)
}
}
func TestRandomsubEnoughPeers(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 40)
fsubs := getPubsubs(ctx, hosts[:10])
rsubs := getRandomsubs(ctx, hosts[10:], 30)
psubs := append(fsubs, rsubs...)
connectSome(t, hosts, 12)
for _, ps := range psubs {
_, err := ps.Subscribe([]byte{0x7e, 0x57})
if err != nil {
t.Fatal(err)
}
}
time.Sleep(time.Second)
res := make(chan bool, 1)
rsubs[0].eval <- func() {
rs := rsubs[0].rt.(*RandomSubRouter)
res <- rs.EnoughPeers([]byte{0x7e, 0x57}, 0)
}
enough := <-res
if !enough {
t.Fatal("expected enough peers")
}
rsubs[0].eval <- func() {
rs := rsubs[0].rt.(*RandomSubRouter)
res <- rs.EnoughPeers([]byte{0x7e, 0x57}, 100)
}
enough = <-res
if !enough {
t.Fatal("expected enough peers")
}
}
View File

@ -105,7 +105,7 @@ type deliveryRecord struct {
}
type deliveryEntry struct {
id string
id []byte
expire time.Time
next *deliveryEntry
}
@ -837,8 +837,8 @@ func (ps *peerScore) DropRPC(rpc *RPC, p peer.ID) {}
func (ps *peerScore) UndeliverableMessage(msg *Message) {}
// message delivery records
func (d *messageDeliveries) getRecord(id string) *deliveryRecord {
rec, ok := d.records[id]
func (d *messageDeliveries) getRecord(id []byte) *deliveryRecord {
rec, ok := d.records[string(id)]
if ok {
return rec
}
@ -846,7 +846,7 @@ func (d *messageDeliveries) getRecord(id string) *deliveryRecord {
now := time.Now()
rec = &deliveryRecord{peers: make(map[peer.ID]struct{}), firstSeen: now}
d.records[id] = rec
d.records[string(id)] = rec
entry := &deliveryEntry{id: id, expire: now.Add(d.seenMsgTTL)}
if d.tail != nil {
@ -867,7 +867,7 @@ func (d *messageDeliveries) gc() {
now := time.Now()
for d.head != nil && now.After(d.head.expire) {
delete(d.records, d.head.id)
delete(d.records, string(d.head.id))
d.head = d.head.next
}
View File

@ -11,12 +11,25 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
)
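// mustSubscribe subscribes to a bitmask that must resolve to exactly one slice
// and fails the test otherwise.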
func mustSubscribe(t *testing.T, ps *PubSub, bitmask []byte) *Subscription {
sub, err := ps.Subscribe(bitmask)
if err != nil {
t.Fatal(err)
}
if len(sub) != 1 {
t.Fatal("must subscribe only allows single bit bitmasks")
}
return sub[0]
}
func TestBasicSubscriptionFilter(t *testing.T) {
peerA := peer.ID("A")
bitmask1 := []byte{0xff, 0x00, 0x00, 0x00}
bitmask2 := []byte{0x00, 0xff, 0x00, 0x00}
bitmask3 := []byte{0x00, 0x00, 0xff, 0x00}
bitmask1 := []byte{0x00, 0x80, 0x00, 0x00}
bitmask2 := []byte{0x00, 0x20, 0x00, 0x00}
bitmask3 := []byte{0x00, 0x00, 0x02, 0x00}
yes := true
subs := []*pb.RPC_SubOpts{
&pb.RPC_SubOpts{
@ -69,9 +82,9 @@ func TestBasicSubscriptionFilter(t *testing.T) {
func TestSubscriptionFilterDeduplication(t *testing.T) {
peerA := peer.ID("A")
bitmask1 := []byte{0xff, 0x00, 0x00, 0x00}
bitmask2 := []byte{0x00, 0xff, 0x00, 0x00}
bitmask3 := []byte{0x00, 0x00, 0xff, 0x00}
bitmask1 := []byte{0x00, 0x80, 0x00, 0x00}
bitmask2 := []byte{0x00, 0x20, 0x00, 0x00}
bitmask3 := []byte{0x00, 0x00, 0x02, 0x00}
yes := true
no := false
subs := []*pb.RPC_SubOpts{
@ -117,17 +130,17 @@ func TestSubscriptionFilterRPC(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 2)
ps1 := getPubsub(ctx, hosts[0], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0xff, 0x00, 0x00, 0x00}, []byte{0x00, 0xff, 0x00, 0x00})))
ps2 := getPubsub(ctx, hosts[1], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0x00, 0xff, 0x00, 0x00}, []byte{0x00, 0x00, 0xff, 0x00})))
hosts := getDefaultHosts(t, 2)
ps1 := getBlossomSub(ctx, hosts[0], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0x00, 0x80, 0x00, 0x00}, []byte{0x00, 0x20, 0x00, 0x00})))
ps2 := getBlossomSub(ctx, hosts[1], WithSubscriptionFilter(NewAllowlistSubscriptionFilter([]byte{0x00, 0x20, 0x00, 0x00}, []byte{0x00, 0x00, 0x02, 0x00})))
_ = mustSubscribe(t, ps1, []byte{0xff, 0x00, 0x00, 0x00})
_ = mustSubscribe(t, ps1, []byte{0x00, 0xff, 0x00, 0x00})
_ = mustSubscribe(t, ps2, []byte{0x00, 0xff, 0x00, 0x00})
_ = mustSubscribe(t, ps2, []byte{0x00, 0x00, 0xff, 0x00})
_ = mustSubscribe(t, ps1, []byte{0x00, 0x80, 0x00, 0x00})
_ = mustSubscribe(t, ps1, []byte{0x00, 0x20, 0x00, 0x00})
_ = mustSubscribe(t, ps2, []byte{0x00, 0x20, 0x00, 0x00})
_ = mustSubscribe(t, ps2, []byte{0x00, 0x00, 0x02, 0x00})
// check the rejection as well
_, err := ps1.Join([]byte{0x00, 0x00, 0xff, 0x00})
_, err := ps1.Join([]byte{0x00, 0x00, 0x02, 0x00})
if err == nil {
t.Fatal("expected subscription error")
}
@ -140,9 +153,9 @@ func TestSubscriptionFilterRPC(t *testing.T) {
ready := make(chan struct{})
ps1.eval <- func() {
_, sub1 = ps1.bitmasks[string([]byte{0xff, 0x00, 0x00, 0x00})][hosts[1].ID()]
_, sub2 = ps1.bitmasks[string([]byte{0x00, 0xff, 0x00, 0x00})][hosts[1].ID()]
_, sub3 = ps1.bitmasks[string([]byte{0x00, 0x00, 0xff, 0x00})][hosts[1].ID()]
_, sub1 = ps1.bitmasks[string([]byte{0x00, 0x80, 0x00, 0x00})][hosts[1].ID()]
_, sub2 = ps1.bitmasks[string([]byte{0x00, 0x20, 0x00, 0x00})][hosts[1].ID()]
_, sub3 = ps1.bitmasks[string([]byte{0x00, 0x00, 0x02, 0x00})][hosts[1].ID()]
ready <- struct{}{}
}
<-ready
@ -158,9 +171,9 @@ func TestSubscriptionFilterRPC(t *testing.T) {
}
ps2.eval <- func() {
_, sub1 = ps2.bitmasks[string([]byte{0xff, 0x00, 0x00, 0x00})][hosts[0].ID()]
_, sub2 = ps2.bitmasks[string([]byte{0x00, 0xff, 0x00, 0x00})][hosts[0].ID()]
_, sub3 = ps2.bitmasks[string([]byte{0x00, 0x00, 0xff, 0x00})][hosts[0].ID()]
_, sub1 = ps2.bitmasks[string([]byte{0x00, 0x80, 0x00, 0x00})][hosts[0].ID()]
_, sub2 = ps2.bitmasks[string([]byte{0x00, 0x20, 0x00, 0x00})][hosts[0].ID()]
_, sub3 = ps2.bitmasks[string([]byte{0x00, 0x00, 0x02, 0x00})][hosts[0].ID()]
ready <- struct{}{}
}
<-ready
View File

@ -109,7 +109,7 @@ func (t *tagTracer) addDeliveryTag(bitmask []byte) {
return
}
name := fmt.Sprintf("pubsub-deliveries:%s", bitmask)
name := "pubsub-deliveries:" + string(bitmask)
t.Lock()
defer t.Unlock()
tag, err := t.decayer.RegisterDecayingTag(
@ -162,7 +162,7 @@ func (t *tagTracer) bumpTagsForMessage(p peer.ID, msg *Message) {
func (t *tagTracer) nearFirstPeers(msg *Message) []peer.ID {
t.Lock()
defer t.Unlock()
peersMap, ok := t.nearFirst[t.idGen.ID(msg)]
peersMap, ok := t.nearFirst[string(t.idGen.ID(msg))]
if !ok {
return nil
}
@ -194,7 +194,7 @@ func (t *tagTracer) DeliverMessage(msg *Message) {
// delete the delivery state for this message
t.Lock()
delete(t.nearFirst, t.idGen.ID(msg))
delete(t.nearFirst, string(t.idGen.ID(msg)))
t.Unlock()
}
@ -216,10 +216,10 @@ func (t *tagTracer) ValidateMessage(msg *Message) {
// create map to start tracking the peers who deliver while we're validating
id := t.idGen.ID(msg)
if _, exists := t.nearFirst[id]; exists {
if _, exists := t.nearFirst[string(id)]; exists {
return
}
t.nearFirst[id] = make(map[peer.ID]struct{})
t.nearFirst[string(id)] = make(map[peer.ID]struct{})
}
func (t *tagTracer) DuplicateMessage(msg *Message) {
@ -227,7 +227,7 @@ func (t *tagTracer) DuplicateMessage(msg *Message) {
defer t.Unlock()
id := t.idGen.ID(msg)
peers, ok := t.nearFirst[id]
peers, ok := t.nearFirst[string(id)]
if !ok {
return
}
@ -247,7 +247,7 @@ func (t *tagTracer) RejectMessage(msg *Message, reason string) {
case RejectValidationIgnored:
fallthrough
case RejectValidationFailed:
delete(t.nearFirst, t.idGen.ID(msg))
delete(t.nearFirst, string(t.idGen.ID(msg)))
}
}
View File

@ -55,9 +55,9 @@ func TestTagTracerDirectPeerTags(t *testing.T) {
tt.direct = make(map[peer.ID]struct{})
tt.direct[p1] = struct{}{}
tt.AddPeer(p1, BlossomSubID_v11)
tt.AddPeer(p2, BlossomSubID_v11)
tt.AddPeer(p3, BlossomSubID_v11)
tt.AddPeer(p1, BlossomSubID_v2)
tt.AddPeer(p2, BlossomSubID_v2)
tt.AddPeer(p3, BlossomSubID_v2)
tag := "pubsub:<direct>"
if !cmgr.IsProtected(p1, tag) {
@ -179,7 +179,7 @@ func TestTagTracerDeliveryTagsNearFirst(t *testing.T) {
tt := newTagTracer(cmgr)
bitmask := []byte{0x7e, 0x57}
bitmask := []byte{0x01, 0x00}
p := peer.ID("a-peer")
p2 := peer.ID("another-peer")
View File

@ -336,6 +336,24 @@ func (t *pubsubTracer) UndeliverableMessage(msg *Message) {
for _, tr := range t.raw {
tr.UndeliverableMessage(msg)
}
if t.tracer == nil {
return
}
now := time.Now().UnixNano()
evt := &pb.TraceEvent{
Type: pb.TraceEvent_UNDELIVERABLE_MESSAGE.Enum(),
PeerID: []byte(t.pid),
Timestamp: &now,
UndeliverableMessage: &pb.TraceEvent_UndeliverableMessage{
MessageID: []byte(t.idGen.ID(msg)),
Bitmask: msg.Bitmask,
ReceivedFrom: []byte(msg.ReceivedFrom),
},
}
t.tracer.Trace(evt)
}
func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta {
View File

@ -11,24 +11,22 @@ import (
"testing"
"time"
"google.golang.org/protobuf/proto"
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
"github.com/libp2p/go-msgio/protoio"
"github.com/libp2p/go-msgio"
)
func testWithTracer(t *testing.T, tracer EventTracer) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 20)
hosts := getDefaultHosts(t, 20)
psubs := getBlossomSubs(ctx, hosts,
WithMessageIdFn(func(pmsg *pb.Message) []byte { return pmsg.Data }),
WithEventTracer(tracer),
// to bootstrap from star topology
WithPeerExchange(true),
@ -49,7 +47,7 @@ func testWithTracer(t *testing.T, tracer EventTracer) {
// add a validator that rejects some messages to exercise those code paths in the tracer
for _, ps := range psubs {
ps.RegisterBitmaskValidator([]byte{0x7e, 57}, func(ctx context.Context, p peer.ID, msg *Message) bool {
ps.RegisterBitmaskValidator([]byte{0x01, 0x00}, func(ctx context.Context, p peer.ID, msg *Message) bool {
if string(msg.Data) == "invalid!" {
return false
} else {
@ -79,8 +77,14 @@ func testWithTracer(t *testing.T, tracer EventTracer) {
// build the mesh
var subs []*Subscription
var bitmasks []*Bitmask
for _, ps := range psubs {
sub, err := ps.Subscribe([]byte{0x7e, 0x57})
b, err := ps.Join([]byte{0x01, 0x00})
if err != nil {
t.Fatal(err)
}
bitmasks = append(bitmasks, b...)
sub, err := ps.Subscribe([]byte{0x01, 0x00})
if err != nil {
t.Fatal(err)
}
@ -91,8 +95,8 @@ func testWithTracer(t *testing.T, tracer EventTracer) {
return
}
}
}(sub)
subs = append(subs, sub)
}(sub[0])
subs = append(subs, sub...)
}
// wait for the mesh to build
@ -101,10 +105,14 @@ func testWithTracer(t *testing.T, tracer EventTracer) {
// publish some messages
for i := 0; i < 20; i++ {
if i%7 == 0 {
psubs[i].Publish([]byte{0x7e, 0x57}, []byte("invalid!"))
bitmasks[i].Publish(ctx, bitmasks[i].bitmask, []byte("invalid!"))
} else {
msg := []byte(fmt.Sprintf("message %d", i))
psubs[i].Publish([]byte{0x7e, 0x57}, msg)
if i%9 == 0 {
bitmasks[i].Publish(ctx, bitmasks[i].bitmask, []byte("dupe"))
} else {
msg := []byte(fmt.Sprintf("message %d", i))
bitmasks[i].Publish(ctx, bitmasks[i].bitmask, msg)
}
}
}
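
These test changes reflect the v2 publish path: `Join` and `Subscribe` now return slices, and messages are published through the joined `*Bitmask` handle rather than `PubSub.Publish`. A minimal single-node sketch of that flow; the module import path and `libp2p.New` host setup are assumptions, while the pubsub calls mirror the tests above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"

	pubsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New() // hypothetical host setup
	if err != nil {
		panic(err)
	}
	defer h.Close()

	ps, err := pubsub.NewBlossomSub(ctx, h)
	if err != nil {
		panic(err)
	}

	bitmask := []byte{0x01, 0x00}

	bms, err := ps.Join(bitmask) // v2: returns a slice of *Bitmask
	if err != nil {
		panic(err)
	}
	subs, err := ps.Subscribe(bitmask) // v2: returns a slice of *Subscription
	if err != nil {
		panic(err)
	}

	// Publish through the joined handle instead of ps.Publish.
	if err := bms[0].Publish(ctx, bitmask, []byte("hello")); err != nil {
		panic(err)
	}

	msg, err := subs[0].Next(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg.Data))
}
```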
@ -125,7 +133,6 @@ type traceStats struct {
}
func (t *traceStats) process(evt *pb.TraceEvent) {
// fmt.Printf("process event %s\n", evt.GetType())
switch evt.GetType() {
case pb.TraceEvent_PUBLISH_MESSAGE:
t.publish++
@ -244,10 +251,16 @@ func TestPBTracer(t *testing.T) {
}
defer f.Close()
r := protoio.NewDelimitedReader(f, 1<<20)
r := msgio.NewVarintReaderSize(f, DefaultMaxMessageSize)
for {
evt.Reset()
err := r.ReadMsg(&evt)
v, err := r.ReadMsg()
if err != nil {
break
}
err = proto.Unmarshal(v, &evt)
if err != nil {
break
}
@ -271,12 +284,13 @@ func (mrt *mockRemoteTracer) handleStream(s network.Stream) {
panic(err)
}
r := protoio.NewDelimitedReader(gzr, 1<<24)
r := msgio.NewVarintReader(gzr)
var batch pb.TraceEventBatch
for {
batch.Reset()
err := r.ReadMsg(&batch)
v, err := r.ReadMsg()
if err != nil {
if err != io.EOF {
s.Reset()
@ -284,6 +298,11 @@ func (mrt *mockRemoteTracer) handleStream(s network.Stream) {
return
}
err = proto.Unmarshal(v, &batch)
if err != nil {
break
}
mrt.mx.Lock()
for _, evt := range batch.GetBatch() {
mrt.ts.process(evt)
@ -302,10 +321,9 @@ func TestRemoteTracer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
h2 := bhost.NewBlankHost(swarmt.GenSwarm(t))
defer h1.Close()
defer h2.Close()
hosts := getDefaultHosts(t, 2)
h1 := hosts[0]
h2 := hosts[1]
mrt := &mockRemoteTracer{}
h1.SetStreamHandler(RemoteTracerProtoID, mrt.handleStream)

View File

@ -9,6 +9,7 @@ import (
"sync"
"time"
"google.golang.org/protobuf/proto"
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"github.com/libp2p/go-libp2p/core/host"
@ -16,8 +17,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-msgio/protoio"
"github.com/libp2p/go-msgio"
)
var TraceBufferSize = 1 << 16 // 64K ought to be enough for everyone; famous last words.
@ -160,7 +160,6 @@ func OpenPBTracer(file string, flags int, perm os.FileMode) (*PBTracer, error) {
func (t *PBTracer) doWrite() {
var buf []*pb.TraceEvent
w := protoio.NewDelimitedWriter(t.w)
for {
_, ok := <-t.ch
@ -170,11 +169,20 @@ func (t *PBTracer) doWrite() {
buf = tmp
t.mx.Unlock()
w := msgio.NewVarintWriter(t.w)
for i, evt := range buf {
err := w.WriteMsg(evt)
out, err := proto.Marshal(evt)
if err != nil {
log.Warnf("error writing event trace: %s", err.Error())
continue
}
err = w.WriteMsg(out)
if err != nil {
log.Warnf("error writing event trace: %s", err.Error())
}
buf[i] = nil
}
@ -187,7 +195,7 @@ func (t *PBTracer) doWrite() {
var _ EventTracer = (*PBTracer)(nil)
const RemoteTracerProtoID = protocol.ID("/libp2p/pubsub/tracer/1.0.0")
const RemoteTracerProtoID = protocol.ID("/libp2p/pubsub/tracer/2.0.0")
// RemoteTracer is a tracer that sends trace events to a remote peer
type RemoteTracer struct {
@ -217,7 +225,7 @@ func (t *RemoteTracer) doWrite() {
var batch pb.TraceEventBatch
gzipW := gzip.NewWriter(s)
w := protoio.NewDelimitedWriter(gzipW)
w := msgio.NewVarintWriter(gzipW)
for {
_, ok := <-t.ch
@ -235,6 +243,7 @@ func (t *RemoteTracer) doWrite() {
tmp := t.buf
t.buf = buf[:0]
buf = tmp
var out []byte
t.mx.Unlock()
if len(buf) == 0 {
@ -242,8 +251,13 @@ func (t *RemoteTracer) doWrite() {
}
batch.Batch = buf
out, err = proto.Marshal(&batch)
if err != nil {
log.Debugf("error marshaling trace event batch: %s", err)
goto end
}
err = w.WriteMsg(&batch)
err = w.WriteMsg(out)
if err != nil {
log.Debugf("error writing trace event batch: %s", err)
goto end
@ -251,7 +265,7 @@ func (t *RemoteTracer) doWrite() {
err = gzipW.Flush()
if err != nil {
log.Debugf("error flushin gzip stream: %s", err)
log.Debugf("error flushing gzip stream: %s", err)
goto end
}
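
The PB tracer now writes each event as a varint length prefix followed by the marshaled protobuf via `go-msgio`, replacing the `protoio` delimited writer; the remote tracer applies the same framing inside its gzip stream. A hedged sketch of reading such a trace file back, mirroring the updated test; the path and maximum frame size are assumptions:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/libp2p/go-msgio"
	"google.golang.org/protobuf/proto"

	pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)

func main() {
	f, err := os.Open("trace.pb") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := msgio.NewVarintReaderSize(f, 1<<20) // frame size limit is an assumption
	for {
		frame, err := r.ReadMsg()
		if err != nil {
			if err != io.EOF {
				panic(err)
			}
			break
		}
		var evt pb.TraceEvent
		if err := proto.Unmarshal(frame, &evt); err != nil {
			panic(err)
		}
		fmt.Println(evt.GetType())
		r.ReleaseMsg(frame)
	}
}
```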

View File

@ -20,6 +20,18 @@ import (
pb "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
)
func getBlossomSubsWithOptionC(ctx context.Context, hs []host.Host, cons ...func(int) Option) []*PubSub {
var psubs []*PubSub
for _, h := range hs {
var opts []Option
for i, c := range cons {
opts = append(opts, c(i))
}
psubs = append(psubs, getBlossomSub(ctx, h, opts...))
}
return psubs
}
var rng *rand.Rand
func init() {
@ -38,8 +50,8 @@ func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 20)
psubs := getPubsubsWithOptionC(ctx, hosts,
hosts := getDefaultHosts(t, 20)
psubs := getBlossomSubsWithOptionC(ctx, hosts,
func(i int) Option {
return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore()))
},
@ -49,13 +61,19 @@ func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) {
)
var msgs []*Subscription
var bitmasks []*Bitmask
for _, ps := range psubs {
subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20})
b, err := ps.Join([]byte{0x00, 0x01})
if err != nil {
t.Fatal(err)
}
bitmasks = append(bitmasks, b...)
subch, err := ps.Subscribe([]byte{0x00, 0x01})
if err != nil {
t.Fatal(err)
}
msgs = append(msgs, subch)
msgs = append(msgs, subch...)
}
// connectAll(t, hosts)
@ -68,7 +86,7 @@ func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) {
owner := rng.Intn(len(psubs))
psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg)
bitmasks[owner].Publish(ctx, bitmasks[owner].bitmask, msg)
for _, sub := range msgs {
got, err := sub.Next(ctx)
@ -86,8 +104,8 @@ func TestBasicSeqnoValidatorReplay(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 20)
psubs := getPubsubsWithOptionC(ctx, hosts[:19],
hosts := getDefaultHosts(t, 20)
psubs := getBlossomSubsWithOptionC(ctx, hosts[:19],
func(i int) Option {
return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore()))
},
@ -98,13 +116,19 @@ func TestBasicSeqnoValidatorReplay(t *testing.T) {
_ = newReplayActor(t, ctx, hosts[19])
var msgs []*Subscription
var bitmasks []*Bitmask
for _, ps := range psubs {
subch, err := ps.Subscribe([]byte{0xf0, 0x0b, 0xa1, 0x20})
b, err := ps.Join([]byte{0x00, 0x01})
if err != nil {
t.Fatal(err)
}
bitmasks = append(bitmasks, b...)
subch, err := ps.Subscribe([]byte{0x00, 0x01})
if err != nil {
t.Fatal(err)
}
msgs = append(msgs, subch)
msgs = append(msgs, subch...)
}
sparseConnect(t, hosts)
@ -116,7 +140,7 @@ func TestBasicSeqnoValidatorReplay(t *testing.T) {
owner := rng.Intn(len(psubs))
psubs[owner].Publish([]byte{0xf0, 0x0b, 0xa1, 0x20}, msg)
bitmasks[owner].Publish(ctx, bitmasks[owner].bitmask, msg)
for _, sub := range msgs {
got, err := sub.Next(ctx)
@ -169,7 +193,7 @@ type replayActor struct {
func newReplayActor(t *testing.T, ctx context.Context, h host.Host) *replayActor {
replay := &replayActor{t: t, ctx: ctx, h: h, out: make(map[peer.ID]network.Stream)}
h.SetStreamHandler(FloodSubID, replay.handleStream)
h.SetStreamHandler(BlossomSubID_v2, replay.handleStream)
h.Network().Notify(&network.NotifyBundle{ConnectedF: replay.connected})
return replay
}
@ -246,7 +270,7 @@ func (r *replayActor) replay(msg *pb.Message) {
var peers []peer.ID
r.mx.Lock()
for p, _ := range r.out {
for p := range r.out {
if rng.Intn(2) > 0 {
peers = append(peers, p)
}
@ -262,7 +286,7 @@ func (r *replayActor) replay(msg *pb.Message) {
}
func (r *replayActor) handleConnected(p peer.ID) {
s, err := r.h.NewStream(r.ctx, p, FloodSubID)
s, err := r.h.NewStream(r.ctx, p, BlossomSubID_v2)
if err != nil {
r.t.Logf("replay: error opening stream: %s", err)
return

View File

@ -15,8 +15,8 @@ func TestRegisterUnregisterValidator(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 1)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 1)
psubs := getBlossomSubs(ctx, hosts)
err := psubs[0].RegisterBitmaskValidator([]byte{0xf0, 0x00}, func(context.Context, peer.ID, *Message) bool {
return true
@ -40,10 +40,10 @@ func TestRegisterValidatorEx(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 3)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 3)
psubs := getBlossomSubs(ctx, hosts)
err := psubs[0].RegisterBitmaskValidator([]byte{0x7e, 0x57},
err := psubs[0].RegisterBitmaskValidator([]byte{0x01, 0x00},
Validator(func(context.Context, peer.ID, *Message) bool {
return true
}))
@ -51,7 +51,7 @@ func TestRegisterValidatorEx(t *testing.T) {
t.Fatal(err)
}
err = psubs[1].RegisterBitmaskValidator([]byte{0x7e, 0x57},
err = psubs[1].RegisterBitmaskValidator([]byte{0x01, 0x00},
ValidatorEx(func(context.Context, peer.ID, *Message) ValidationResult {
return ValidationAccept
}))
@ -59,7 +59,7 @@ func TestRegisterValidatorEx(t *testing.T) {
t.Fatal(err)
}
err = psubs[2].RegisterBitmaskValidator([]byte{0x7e, 0x57}, "bogus")
err = psubs[2].RegisterBitmaskValidator([]byte{0x01, 0x00}, "bogus")
if err == nil {
t.Fatal("expected error")
}
@ -69,11 +69,11 @@ func TestValidate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 2)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 2)
psubs := getBlossomSubs(ctx, hosts)
connect(t, hosts[0], hosts[1])
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
err := psubs[1].RegisterBitmaskValidator(bitmask, func(ctx context.Context, from peer.ID, msg *Message) bool {
return !bytes.Contains(msg.Data, []byte("illegal"))
@ -82,6 +82,11 @@ func TestValidate(t *testing.T) {
t.Fatal(err)
}
b, err := psubs[0].Join(bitmask)
if err != nil {
t.Fatal(err)
}
sub, err := psubs[1].Subscribe(bitmask)
if err != nil {
t.Fatal(err)
@ -100,13 +105,13 @@ func TestValidate(t *testing.T) {
}
for _, tc := range msgs {
err := psubs[0].Publish(bitmask, tc.msg)
err := b[0].Publish(ctx, b[0].bitmask, tc.msg)
if err != nil {
t.Fatal(err)
}
select {
case msg := <-sub.ch:
case msg := <-sub[0].ch:
if !tc.validates {
t.Log(msg)
t.Error("expected message validation to filter out the message")
@ -123,10 +128,10 @@ func TestValidate2(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 1)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 1)
psubs := getBlossomSubs(ctx, hosts)
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
err := psubs[0].RegisterBitmaskValidator(bitmask, func(ctx context.Context, from peer.ID, msg *Message) bool {
return !bytes.Contains(msg.Data, []byte("illegal"))
@ -145,8 +150,13 @@ func TestValidate2(t *testing.T) {
{msg: []byte("but subversive actors will use leetspeek to spread 1ll3g4l content"), validates: true},
}
b, err := psubs[0].Join(bitmask)
if err != nil {
t.Fatal(err)
}
for _, tc := range msgs {
err := psubs[0].Publish(bitmask, tc.msg)
err := b[0].Publish(ctx, b[0].bitmask, tc.msg)
if tc.validates {
if err != nil {
t.Fatal(err)
@ -183,7 +193,7 @@ func TestValidateOverload(t *testing.T) {
{msg: []byte("still, all good"), validates: true},
{msg: []byte("this is getting boring"), validates: true},
{msg: []byte([]byte{0xf0, 0x00}), validates: true},
{msg: []byte([]byte{0xf0, 0x0b, 0xa1, 0x20}), validates: true},
{msg: []byte([]byte{0x00, 0x01}), validates: true},
{msg: []byte("foofoo"), validates: true},
{msg: []byte("barfoo"), validates: true},
{msg: []byte("oh no!"), validates: false},
@ -201,11 +211,11 @@ func TestValidateOverload(t *testing.T) {
for tci, tc := range tcs {
t.Run(fmt.Sprintf("%d", tci), func(t *testing.T) {
hosts := getNetHosts(t, ctx, 2)
psubs := getPubsubs(ctx, hosts)
hosts := getDefaultHosts(t, 2)
psubs := getBlossomSubs(ctx, hosts)
connect(t, hosts[0], hosts[1])
bitmask := []byte{0xf0, 0x0b, 0xa1, 0x20}
bitmask := []byte{0x00, 0x01}
block := make(chan struct{})
@ -232,13 +242,17 @@ func TestValidateOverload(t *testing.T) {
}
p := psubs[0]
b, err := p.Join(bitmask)
if err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
for _, tmsg := range tc.msgs {
select {
case msg := <-sub.ch:
case msg := <-sub[0].ch:
if !tmsg.validates {
t.Log(msg)
t.Error("expected message validation to drop the message because all validator goroutines are taken")
@ -253,7 +267,7 @@ func TestValidateOverload(t *testing.T) {
}()
for _, tmsg := range tc.msgs {
err := p.Publish(bitmask, tmsg.msg)
err := b[0].Publish(ctx, b[0].bitmask, tmsg.msg)
if err != nil {
t.Fatal(err)
}
@ -273,8 +287,8 @@ func TestValidateAssortedOptions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
hosts := getNetHosts(t, ctx, 10)
psubs := getPubsubs(ctx, hosts,
hosts := getDefaultHosts(t, 10)
psubs := getBlossomSubs(ctx, hosts,
WithValidateQueueSize(10),
WithValidateThrottle(10),
WithValidateWorkers(10))
@ -282,7 +296,7 @@ func TestValidateAssortedOptions(t *testing.T) {
sparseConnect(t, hosts)
for _, psub := range psubs {
err := psub.RegisterBitmaskValidator([]byte{0xff, 0x00, 0x00, 0x00},
err := psub.RegisterBitmaskValidator([]byte{0x00, 0x80, 0x00, 0x00},
func(context.Context, peer.ID, *Message) bool {
return true
},
@ -291,7 +305,7 @@ func TestValidateAssortedOptions(t *testing.T) {
t.Fatal(err)
}
err = psub.RegisterBitmaskValidator([]byte{0x00, 0xff, 0x00, 0x00},
err = psub.RegisterBitmaskValidator([]byte{0x00, 0x20, 0x00, 0x00},
func(context.Context, peer.ID, *Message) bool {
return true
},
@ -302,31 +316,44 @@ func TestValidateAssortedOptions(t *testing.T) {
}
var subs1, subs2 []*Subscription
var bitmasks1, bitmasks2 []*Bitmask
for _, ps := range psubs {
sub, err := ps.Subscribe([]byte{0xff, 0x00, 0x00, 0x00})
b, err := ps.Join([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
subs1 = append(subs1, sub)
bitmasks1 = append(bitmasks1, b...)
sub, err = ps.Subscribe([]byte{0x00, 0xff, 0x00, 0x00})
b, err = ps.Join([]byte{0x00, 0x04, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
subs2 = append(subs2, sub)
bitmasks2 = append(bitmasks2, b...)
sub, err := ps.Subscribe([]byte{0x00, 0x80, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
subs1 = append(subs1, sub...)
sub, err = ps.Subscribe([]byte{0x00, 0x04, 0x00, 0x00})
if err != nil {
t.Fatal(err)
}
subs2 = append(subs2, sub...)
}
time.Sleep(time.Second)
for i := 0; i < 10; i++ {
msg := []byte(fmt.Sprintf("message %d", i))
msg := []byte(fmt.Sprintf("message1 %d", i))
psubs[i].Publish([]byte{0xff, 0x00, 0x00, 0x00}, msg)
bitmasks1[i].Publish(ctx, bitmasks1[i].bitmask, msg)
for _, sub := range subs1 {
assertReceive(t, sub, msg)
}
msg = []byte(fmt.Sprintf("message2 %d", i))
psubs[i].Publish([]byte{0x00, 0xff, 0x00, 0x00}, msg)
bitmasks2[i].Publish(ctx, bitmasks2[i].bitmask, msg)
for _, sub := range subs2 {
assertReceive(t, sub, msg)
}
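
Validator registration itself is unchanged; only the bitmask constants and the publish path move. A hedged sketch of registering a per-bitmask validator and publishing through the joined handle, reusing a `*pubsub.PubSub` set up as in the earlier sketch:

```go
package example

import (
	"bytes"
	"context"

	"github.com/libp2p/go-libp2p/core/peer"

	pubsub "source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub"
)

// registerAndPublish mirrors TestValidate: messages containing "illegal"
// are rejected by the validator, everything else is delivered.
func registerAndPublish(ctx context.Context, ps *pubsub.PubSub) error {
	bitmask := []byte{0x00, 0x01}

	err := ps.RegisterBitmaskValidator(bitmask,
		func(ctx context.Context, from peer.ID, msg *pubsub.Message) bool {
			return !bytes.Contains(msg.Data, []byte("illegal"))
		})
	if err != nil {
		return err
	}

	b, err := ps.Join(bitmask)
	if err != nil {
		return err
	}
	return b[0].Publish(ctx, bitmask, []byte("all good"))
}
```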

View File

@ -7,7 +7,6 @@
package curves
import (
"arena"
"errors"
"fmt"
"io"
@ -48,9 +47,9 @@ func (s *ScalarBls48581) Random(reader io.Reader) Scalar {
func (s *ScalarBls48581) Hash(bytes []byte) Scalar {
DST := []byte("BLS_SIG_BLS48581G1_XMD:SHA-512_SVDW_RO_NUL_")
u := bls48581.Hash_to_field(ext.MC_SHA2, bls48581.HASH_TYPE, DST, bytes, 2)
u[0].Add(u[1], nil)
b := u[0].Redc(nil)
b.Mod(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
u[0].Add(u[1])
b := u[0].Redc()
b.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: b,
point: s.point,
@ -59,14 +58,14 @@ func (s *ScalarBls48581) Hash(bytes []byte) Scalar {
func (s *ScalarBls48581) Zero() Scalar {
return &ScalarBls48581{
Value: bls48581.NewBIGint(0, nil),
Value: bls48581.NewBIGint(0),
point: s.point,
}
}
func (s *ScalarBls48581) One() Scalar {
return &ScalarBls48581{
Value: bls48581.NewBIGint(1, nil),
Value: bls48581.NewBIGint(1),
point: s.point,
}
}
@ -76,7 +75,7 @@ func (s *ScalarBls48581) IsZero() bool {
}
func (s *ScalarBls48581) IsOne() bool {
t := bls48581.NewBIGint(1, nil)
t := bls48581.NewBIGint(1)
t.Sub(s.Value)
return t.IsZero()
}
@ -95,15 +94,15 @@ func (s *ScalarBls48581) IsEven() bool {
func (s *ScalarBls48581) New(value int) Scalar {
if value > 0 {
t := bls48581.NewBIGint(value, nil)
t.Mod(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
t := bls48581.NewBIGint(value)
t.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: t,
point: s.point,
}
} else {
t := bls48581.NewBIGint(-value, nil)
v := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
t := bls48581.NewBIGint(-value)
v := bls48581.NewBIGints(bls48581.CURVE_Order)
v.Sub(t)
return &ScalarBls48581{
Value: v,
@ -122,8 +121,8 @@ func (s *ScalarBls48581) Cmp(rhs Scalar) int {
}
func (s *ScalarBls48581) Square() Scalar {
sqr := bls48581.NewBIGcopy(s.Value, nil)
sqr = bls48581.Modsqr(sqr, bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
sqr := bls48581.NewBIGcopy(s.Value)
sqr = bls48581.Modsqr(sqr, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: sqr,
point: s.point,
@ -131,13 +130,8 @@ func (s *ScalarBls48581) Square() Scalar {
}
func (s *ScalarBls48581) Double() Scalar {
dbl := bls48581.NewBIGcopy(s.Value, nil)
dbl = bls48581.Modmul(
dbl,
bls48581.NewBIGint(2, nil),
bls48581.NewBIGints(bls48581.CURVE_Order, nil),
nil,
)
dbl := bls48581.NewBIGcopy(s.Value)
dbl = bls48581.Modmul(dbl, bls48581.NewBIGint(2), bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: dbl,
point: s.point,
@ -145,8 +139,8 @@ func (s *ScalarBls48581) Double() Scalar {
}
func (s *ScalarBls48581) Invert() (Scalar, error) {
v := bls48581.NewBIGcopy(s.Value, nil)
v.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order, nil))
v := bls48581.NewBIGcopy(s.Value)
v.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order))
if v == nil {
return nil, fmt.Errorf("inverse doesn't exist")
}
@ -161,9 +155,9 @@ func (s *ScalarBls48581) Sqrt() (Scalar, error) {
}
func (s *ScalarBls48581) Cube() Scalar {
value := bls48581.NewBIGcopy(s.Value, nil)
value = bls48581.Modsqr(value, bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
value := bls48581.NewBIGcopy(s.Value)
value = bls48581.Modsqr(value, bls48581.NewBIGints(bls48581.CURVE_Order))
value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: value,
point: s.point,
@ -173,11 +167,8 @@ func (s *ScalarBls48581) Cube() Scalar {
func (s *ScalarBls48581) Add(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarBls48581)
if ok {
mem := arena.NewArena()
defer mem.Free()
value := bls48581.NewBIGcopy(s.Value, mem)
value = bls48581.ModAdd(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
value = bls48581.NewBIGcopy(value, nil)
value := bls48581.NewBIGcopy(s.Value)
value = bls48581.ModAdd(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: value,
point: s.point,
@ -190,12 +181,9 @@ func (s *ScalarBls48581) Add(rhs Scalar) Scalar {
func (s *ScalarBls48581) Sub(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarBls48581)
if ok {
mem := arena.NewArena()
defer mem.Free()
value := bls48581.NewBIGcopy(r.Value, mem)
value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
value = bls48581.ModAdd(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
value = bls48581.NewBIGcopy(value, nil)
value := bls48581.NewBIGcopy(r.Value)
value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order))
value = bls48581.ModAdd(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: value,
point: s.point,
@ -208,11 +196,8 @@ func (s *ScalarBls48581) Sub(rhs Scalar) Scalar {
func (s *ScalarBls48581) Mul(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarBls48581)
if ok {
mem := arena.NewArena()
defer mem.Free()
value := bls48581.NewBIGcopy(s.Value, mem)
value = bls48581.Modmul(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
value = bls48581.NewBIGcopy(value, nil)
value := bls48581.NewBIGcopy(s.Value)
value = bls48581.Modmul(value, r.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: value,
point: s.point,
@ -229,12 +214,9 @@ func (s *ScalarBls48581) MulAdd(y, z Scalar) Scalar {
func (s *ScalarBls48581) Div(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarBls48581)
if ok {
mem := arena.NewArena()
defer mem.Free()
value := bls48581.NewBIGcopy(r.Value, mem)
value.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order, mem))
value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
value = bls48581.NewBIGcopy(value, nil)
value := bls48581.NewBIGcopy(r.Value)
value.Invmodp(bls48581.NewBIGints(bls48581.CURVE_Order))
value = bls48581.Modmul(value, s.Value, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: value,
point: s.point,
@ -245,11 +227,8 @@ func (s *ScalarBls48581) Div(rhs Scalar) Scalar {
}
func (s *ScalarBls48581) Neg() Scalar {
mem := arena.NewArena()
defer mem.Free()
value := bls48581.NewBIGcopy(s.Value, mem)
value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order, mem), mem)
value = bls48581.NewBIGcopy(value, nil)
value := bls48581.NewBIGcopy(s.Value)
value = bls48581.Modneg(value, bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: value,
point: s.point,
@ -265,7 +244,7 @@ func (s *ScalarBls48581) SetBigInt(v *big.Int) (Scalar, error) {
copy(t[bls48581.MODBYTES-uint(len(b)):], b)
i := bls48581.FromBytes(t)
i.Mod(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
i.Mod(bls48581.NewBIGints(bls48581.CURVE_Order))
return &ScalarBls48581{
Value: i,
point: s.point,
@ -319,7 +298,7 @@ func (s *ScalarBls48581) Point() Point {
}
func (s *ScalarBls48581) Clone() Scalar {
value := bls48581.NewBIGcopy(s.Value, nil)
value := bls48581.NewBIGcopy(s.Value)
return &ScalarBls48581{
Value: value,
point: s.point,
@ -327,7 +306,7 @@ func (s *ScalarBls48581) Clone() Scalar {
}
func (s *ScalarBls48581) SetPoint(p Point) PairingScalar {
value := bls48581.NewBIGcopy(s.Value, nil)
value := bls48581.NewBIGcopy(s.Value)
return &ScalarBls48581{
Value: value,
point: p,
@ -335,7 +314,7 @@ func (s *ScalarBls48581) SetPoint(p Point) PairingScalar {
}
func (s *ScalarBls48581) Order() *big.Int {
b := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
b := bls48581.NewBIGints(bls48581.CURVE_Order)
bytes := make([]byte, bls48581.MODBYTES)
b.ToBytes(bytes)
return new(big.Int).SetBytes(bytes)
@ -390,7 +369,7 @@ func (p *PointBls48581G1) Hash(bytes []byte) Point {
func (p *PointBls48581G1) Identity() Point {
g1 := bls48581.ECP_generator()
g1 = g1.Mul(bls48581.NewBIGint(0, nil), nil, nil)
g1 = g1.Mul(bls48581.NewBIGint(0))
return &PointBls48581G1{
Value: g1,
}
@ -405,7 +384,7 @@ func (p *PointBls48581G1) Generator() Point {
}
func (p *PointBls48581G1) IsIdentity() bool {
return p.Value.Is_infinity(nil)
return p.Value.Is_infinity()
}
func (p *PointBls48581G1) IsNegative() bool {
@ -416,18 +395,18 @@ func (p *PointBls48581G1) IsNegative() bool {
}
func (p *PointBls48581G1) IsOnCurve() bool {
return bls48581.G1member(p.Value, nil)
return bls48581.G1member(p.Value)
}
func (p *PointBls48581G1) Double() Point {
v := bls48581.NewECP(nil)
v := bls48581.NewECP()
v.Copy(p.Value)
v.Dbl(nil)
v.Dbl()
return &PointBls48581G1{v}
}
func (p *PointBls48581G1) Scalar() Scalar {
value := bls48581.NewBIG(nil)
value := bls48581.NewBIG()
return &ScalarBls48581{
Value: value,
point: new(PointBls48581G1),
@ -435,9 +414,9 @@ func (p *PointBls48581G1) Scalar() Scalar {
}
func (p *PointBls48581G1) Neg() Point {
v := bls48581.NewECP(nil)
v := bls48581.NewECP()
v.Copy(p.Value)
v.Neg(nil)
v.Neg()
return &PointBls48581G1{v}
}
@ -447,9 +426,9 @@ func (p *PointBls48581G1) Add(rhs Point) Point {
}
r, ok := rhs.(*PointBls48581G1)
if ok {
v := bls48581.NewECP(nil)
v := bls48581.NewECP()
v.Copy(p.Value)
v.Add(r.Value, nil)
v.Add(r.Value)
return &PointBls48581G1{v}
} else {
return nil
@ -462,9 +441,9 @@ func (p *PointBls48581G1) Sub(rhs Point) Point {
}
r, ok := rhs.(*PointBls48581G1)
if ok {
v := bls48581.NewECP(nil)
v := bls48581.NewECP()
v.Copy(p.Value)
v.Sub(r.Value, nil)
v.Sub(r.Value)
return &PointBls48581G1{v}
} else {
return nil
@ -477,11 +456,9 @@ func (p *PointBls48581G1) Mul(rhs Scalar) Point {
}
r, ok := rhs.(*ScalarBls48581)
if ok {
mem := arena.NewArena()
defer mem.Free()
v := bls48581.NewECP(mem)
v := bls48581.NewECP()
v.Copy(p.Value)
v = v.Mul(r.Value, nil, mem)
v = v.Mul(r.Value)
return &PointBls48581G1{v}
} else {
return nil
@ -504,7 +481,7 @@ func (p *PointBls48581G1) Set(x, y *big.Int) (Point, error) {
y.FillBytes(yBytes)
xBig := bls48581.FromBytes(xBytes)
yBig := bls48581.FromBytes(yBytes)
v := bls48581.NewECPbigs(xBig, yBig, nil)
v := bls48581.NewECPbigs(xBig, yBig)
if v == nil {
return nil, fmt.Errorf("invalid coordinates")
}
@ -527,7 +504,7 @@ func (p *PointBls48581G1) FromAffineCompressed(bytes []byte) (Point, error) {
var b [bls48581.MODBYTES + 1]byte
copy(b[:], bytes)
value := bls48581.ECP_fromBytes(b[:])
if value == nil || value.Is_infinity(nil) {
if value == nil || value.Is_infinity() {
return nil, errors.New("could not decode")
}
return &PointBls48581G1{value}, nil
@ -537,7 +514,7 @@ func (p *PointBls48581G1) FromAffineUncompressed(bytes []byte) (Point, error) {
var b [bls48581.MODBYTES*2 + 1]byte
copy(b[:], bytes)
value := bls48581.ECP_fromBytes(b[:])
if value == nil || value.Is_infinity(nil) {
if value == nil || value.Is_infinity() {
return nil, errors.New("could not decode")
}
return &PointBls48581G1{value}, nil
@ -564,10 +541,8 @@ func (p *PointBls48581G1) SumOfProducts(points []Point, scalars []Scalar) Point
}
nScalars[i] = s.Value
}
mem := arena.NewArena()
defer mem.Free()
value := bls48581.ECP_muln(len(points), nPoints, nScalars, mem)
if value == nil || value.Is_infinity(mem) {
value := bls48581.ECP_muln(len(points), nPoints, nScalars)
if value == nil || value.Is_infinity() {
return nil
}
return &PointBls48581G1{value}
@ -604,44 +579,71 @@ func (p *PointBls48581G1) MultiPairing(points ...PairingPoint) Scalar {
func (p *PointBls48581G1) X() *big.Int {
bytes := make([]byte, bls48581.MODBYTES)
p.Value.GetX(nil).ToBytes(bytes[:])
p.Value.GetX().ToBytes(bytes[:])
return new(big.Int).SetBytes(bytes)
}
func (p *PointBls48581G1) Y() *big.Int {
bytes := make([]byte, bls48581.MODBYTES)
p.Value.GetY(nil).ToBytes(bytes[:])
p.Value.GetY().ToBytes(bytes[:])
return new(big.Int).SetBytes(bytes)
}
func (p *PointBls48581G1) Modulus() *big.Int {
b := bls48581.NewBIGints(bls48581.Modulus, nil)
b := bls48581.NewBIGints(bls48581.Modulus)
bytes := make([]byte, bls48581.MODBYTES)
b.ToBytes(bytes)
return new(big.Int).SetBytes(bytes)
}
func (p *PointBls48581G1) MarshalBinary() ([]byte, error) {
return nil, nil
return pointMarshalBinary(p)
}
func (p *PointBls48581G1) UnmarshalBinary(input []byte) error {
pt, err := pointUnmarshalBinary(input)
if err != nil {
return err
}
ppt, ok := pt.(*PointBls48581G1)
if !ok {
return fmt.Errorf("invalid point")
}
p.Value = ppt.Value
return nil
}
func (p *PointBls48581G1) MarshalText() ([]byte, error) {
return nil, nil
return pointMarshalText(p)
}
func (p *PointBls48581G1) UnmarshalText(input []byte) error {
pt, err := pointUnmarshalText(input)
if err != nil {
return err
}
ppt, ok := pt.(*PointBls48581G1)
if !ok {
return fmt.Errorf("invalid point")
}
p.Value = ppt.Value
return nil
}
func (p *PointBls48581G1) MarshalJSON() ([]byte, error) {
return nil, nil
return pointMarshalJson(p)
}
func (p *PointBls48581G1) UnmarshalJSON(input []byte) error {
pt, err := pointUnmarshalJson(input)
if err != nil {
return err
}
P, ok := pt.(*PointBls48581G1)
if !ok {
return fmt.Errorf("invalid type")
}
p.Value = P.Value
return nil
}
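
The G1 and G2 points now delegate to the shared `pointMarshalBinary`/`pointUnmarshalBinary` helpers instead of returning nil. A hedged round-trip sketch; the curve accessors follow the constructors that appear later in this diff, and the package path is inferred from the imports shown here:

```go
package main

import (
	"bytes"
	"fmt"

	"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)

func main() {
	curve := curves.BLS48581G1()

	pt, ok := curve.Point.Generator().Mul(curve.Scalar.New(42)).(*curves.PointBls48581G1)
	if !ok {
		panic("unexpected point type")
	}

	raw, err := pt.MarshalBinary()
	if err != nil {
		panic(err)
	}

	out := new(curves.PointBls48581G1)
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}

	raw2, err := out.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(raw, raw2))
}
```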
@ -654,15 +656,15 @@ func (p *PointBls48581G2) Random(reader io.Reader) Point {
func (p *PointBls48581G2) Hash(bytes []byte) Point {
DST := []byte("BLS_SIG_BLS48581G2_XMD:SHA-512_SVDW_RO_NUL_")
u := bls48581.Hash_to_field(ext.MC_SHA2, bls48581.HASH_TYPE, DST, bytes, 2)
u[0].Add(u[1], nil)
fp8 := bls48581.NewFP8fp(u[0], nil)
u[0].Add(u[1])
fp8 := bls48581.NewFP8fp(u[0])
v := bls48581.ECP8_map2point(fp8)
return &PointBls48581G2{v}
}
func (p *PointBls48581G2) Identity() Point {
g2 := bls48581.ECP8_generator()
g2 = g2.Mul(bls48581.NewBIGint(0, nil), nil)
g2 = g2.Mul(bls48581.NewBIGint(0))
return &PointBls48581G2{
Value: g2,
}
@ -677,7 +679,7 @@ func (p *PointBls48581G2) Generator() Point {
}
func (p *PointBls48581G2) IsIdentity() bool {
return p.Value.Is_infinity(nil)
return p.Value.Is_infinity()
}
func (p *PointBls48581G2) IsNegative() bool {
@ -688,18 +690,18 @@ func (p *PointBls48581G2) IsNegative() bool {
}
func (p *PointBls48581G2) IsOnCurve() bool {
return bls48581.G2member(p.Value, nil)
return bls48581.G2member(p.Value)
}
func (p *PointBls48581G2) Double() Point {
v := bls48581.NewECP8(nil)
v := bls48581.NewECP8()
v.Copy(p.Value)
v.Dbl(nil)
v.Dbl()
return &PointBls48581G2{v}
}
func (p *PointBls48581G2) Scalar() Scalar {
value := bls48581.NewBIG(nil)
value := bls48581.NewBIG()
return &ScalarBls48581{
Value: value,
point: new(PointBls48581G2),
@ -707,9 +709,9 @@ func (p *PointBls48581G2) Scalar() Scalar {
}
func (p *PointBls48581G2) Neg() Point {
v := bls48581.NewECP8(nil)
v := bls48581.NewECP8()
v.Copy(p.Value)
v.Neg(nil)
v.Neg()
return &PointBls48581G2{v}
}
@ -719,9 +721,9 @@ func (p *PointBls48581G2) Add(rhs Point) Point {
}
r, ok := rhs.(*PointBls48581G2)
if ok {
v := bls48581.NewECP8(nil)
v := bls48581.NewECP8()
v.Copy(p.Value)
v.Add(r.Value, nil)
v.Add(r.Value)
return &PointBls48581G2{v}
} else {
return nil
@ -734,9 +736,9 @@ func (p *PointBls48581G2) Sub(rhs Point) Point {
}
r, ok := rhs.(*PointBls48581G2)
if ok {
v := bls48581.NewECP8(nil)
v := bls48581.NewECP8()
v.Copy(p.Value)
v.Sub(r.Value, nil)
v.Sub(r.Value)
return &PointBls48581G2{v}
} else {
return nil
@ -749,11 +751,11 @@ func (p *PointBls48581G2) Mul(rhs Scalar) Point {
}
r, ok := rhs.(*ScalarBls48581)
if ok {
mem := arena.NewArena()
defer mem.Free()
v := bls48581.NewECP8(nil)
v := bls48581.NewECP8()
v.Copy(p.Value)
v = v.Mul(r.Value, mem)
bytes := make([]byte, bls48581.MODBYTES)
r.Value.ToBytes(bytes)
v = v.Mul(bls48581.FromBytes(bytes))
return &PointBls48581G2{v}
} else {
return nil
@ -776,8 +778,8 @@ func (p *PointBls48581G2) Set(x, y *big.Int) (Point, error) {
y.FillBytes(yBytes)
xBig := bls48581.FP8_fromBytes(xBytes)
yBig := bls48581.FP8_fromBytes(yBytes)
v := bls48581.NewECP8fp8s(xBig, yBig, nil)
if v == nil || v.Is_infinity(nil) {
v := bls48581.NewECP8fp8s(xBig, yBig)
if v == nil || v.Is_infinity() {
return nil, fmt.Errorf("invalid coordinates")
}
return &PointBls48581G2{v}, nil
@ -799,7 +801,7 @@ func (p *PointBls48581G2) FromAffineCompressed(bytes []byte) (Point, error) {
var b [bls48581.MODBYTES*8 + 1]byte
copy(b[:], bytes)
value := bls48581.ECP8_fromBytes(b[:])
if value == nil || value.Is_infinity(nil) {
if value == nil || value.Is_infinity() {
return nil, errors.New("could not decode")
}
return &PointBls48581G2{value}, nil
@ -809,7 +811,7 @@ func (p *PointBls48581G2) FromAffineUncompressed(bytes []byte) (Point, error) {
var b [bls48581.MODBYTES*16 + 1]byte
copy(b[:], bytes)
value := bls48581.ECP8_fromBytes(b[:])
if value == nil || value.Is_infinity(nil) {
if value == nil || value.Is_infinity() {
return nil, errors.New("could not decode")
}
return &PointBls48581G2{value}, nil
@ -836,8 +838,8 @@ func (p *PointBls48581G2) SumOfProducts(points []Point, scalars []Scalar) Point
}
nScalars[i] = s.Value
}
value := bls48581.Mul16(nPoints, nScalars, nil)
if value == nil || value.Is_infinity(nil) {
value := bls48581.Mul16(nPoints, nScalars)
if value == nil || value.Is_infinity() {
return nil
}
return &PointBls48581G2{value}
@ -863,47 +865,74 @@ func (p *PointBls48581G2) MultiPairing(points ...PairingPoint) Scalar {
}
func (p *PointBls48581G2) X() *big.Int {
x := p.Value.GetX(nil)
x := p.Value.GetX()
bytes := make([]byte, 8*bls48581.MODBYTES)
x.ToBytes(bytes)
return new(big.Int).SetBytes(bytes)
}
func (p *PointBls48581G2) Y() *big.Int {
y := p.Value.GetY(nil)
y := p.Value.GetY()
bytes := make([]byte, 8*bls48581.MODBYTES)
y.ToBytes(bytes)
return new(big.Int).SetBytes(bytes)
}
func (p *PointBls48581G2) Modulus() *big.Int {
b := bls48581.NewBIGints(bls48581.Modulus, nil)
b := bls48581.NewBIGints(bls48581.Modulus)
bytes := make([]byte, bls48581.MODBYTES)
b.ToBytes(bytes)
return new(big.Int).SetBytes(bytes)
}
func (p *PointBls48581G2) MarshalBinary() ([]byte, error) {
return nil, nil
return pointMarshalBinary(p)
}
func (p *PointBls48581G2) UnmarshalBinary(input []byte) error {
pt, err := pointUnmarshalBinary(input)
if err != nil {
return err
}
ppt, ok := pt.(*PointBls48581G2)
if !ok {
return fmt.Errorf("invalid point")
}
p.Value = ppt.Value
return nil
}
func (p *PointBls48581G2) MarshalText() ([]byte, error) {
return nil, nil
return pointMarshalText(p)
}
func (p *PointBls48581G2) UnmarshalText(input []byte) error {
pt, err := pointUnmarshalText(input)
if err != nil {
return err
}
ppt, ok := pt.(*PointBls48581G2)
if !ok {
return fmt.Errorf("invalid point")
}
p.Value = ppt.Value
return nil
}
func (p *PointBls48581G2) MarshalJSON() ([]byte, error) {
return nil, nil
return pointMarshalJson(p)
}
func (p *PointBls48581G2) UnmarshalJSON(input []byte) error {
pt, err := pointUnmarshalJson(input)
if err != nil {
return err
}
P, ok := pt.(*PointBls48581G2)
if !ok {
return fmt.Errorf("invalid type")
}
p.Value = P.Value
return nil
}
@ -912,25 +941,21 @@ func bls48multiPairing(points ...PairingPoint) Scalar {
return nil
}
valid := true
mem := arena.NewArena()
defer mem.Free()
r := bls48581.Initmp(mem)
r := bls48581.Initmp()
for i := 0; i < len(points); i += 2 {
pt1, ok := points[i].(*PointBls48581G1)
valid = valid && ok
pt2, ok := points[i+1].(*PointBls48581G2)
valid = valid && ok
if valid {
inner := arena.NewArena()
bls48581.Another(r, pt2.Value, pt1.Value, inner)
inner.Free()
bls48581.Another(r, pt2.Value, pt1.Value)
}
}
if !valid {
return nil
}
v := bls48581.Miller(r, mem)
v := bls48581.Miller(r)
v = bls48581.Fexp(v)
return &ScalarBls48581Gt{v}
}
@ -958,15 +983,15 @@ func (s *ScalarBls48581Gt) Hash(bytes []byte) Scalar {
}
func (s *ScalarBls48581Gt) Zero() Scalar {
return &ScalarBls48581Gt{bls48581.NewFP48int(0, nil)}
return &ScalarBls48581Gt{bls48581.NewFP48int(0)}
}
func (s *ScalarBls48581Gt) One() Scalar {
return &ScalarBls48581Gt{bls48581.NewFP48int(1, nil)}
return &ScalarBls48581Gt{bls48581.NewFP48int(1)}
}
func (s *ScalarBls48581Gt) IsZero() bool {
return s.Value.IsZero(nil)
return s.Value.IsZero()
}
func (s *ScalarBls48581Gt) IsOne() bool {
@ -1019,7 +1044,7 @@ func (s *ScalarBls48581Gt) IsEven() bool {
}
func (s *ScalarBls48581Gt) New(input int) Scalar {
fp := bls48581.NewFP48int(input, nil)
fp := bls48581.NewFP48int(input)
return &ScalarBls48581Gt{fp}
}
@ -1033,20 +1058,20 @@ func (s *ScalarBls48581Gt) Cmp(rhs Scalar) int {
}
func (s *ScalarBls48581Gt) Square() Scalar {
v := bls48581.NewFP48copy(s.Value, nil)
v.Sqr(nil)
v := bls48581.NewFP48copy(s.Value)
v.Sqr()
return &ScalarBls48581Gt{v}
}
func (s *ScalarBls48581Gt) Double() Scalar {
v := bls48581.NewFP48copy(s.Value, nil)
v.Mul(bls48581.NewFP48int(2, nil), nil)
v := bls48581.NewFP48copy(s.Value)
v.Mul(bls48581.NewFP48int(2))
return &ScalarBls48581Gt{v}
}
func (s *ScalarBls48581Gt) Invert() (Scalar, error) {
v := bls48581.NewFP48copy(s.Value, nil)
v.Invert(nil)
v := bls48581.NewFP48copy(s.Value)
v.Invert()
if v == nil {
return nil, fmt.Errorf("not invertible")
}
@ -1059,9 +1084,9 @@ func (s *ScalarBls48581Gt) Sqrt() (Scalar, error) {
}
func (s *ScalarBls48581Gt) Cube() Scalar {
v := bls48581.NewFP48copy(s.Value, nil)
v.Sqr(nil)
v.Mul(s.Value, nil)
v := bls48581.NewFP48copy(s.Value)
v.Sqr()
v.Mul(s.Value)
return &ScalarBls48581Gt{v}
}
@ -1078,8 +1103,8 @@ func (s *ScalarBls48581Gt) Sub(rhs Scalar) Scalar {
func (s *ScalarBls48581Gt) Mul(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarBls48581Gt)
if ok {
v := bls48581.NewFP48copy(s.Value, nil)
v.Mul(r.Value, nil)
v := bls48581.NewFP48copy(s.Value)
v.Mul(r.Value)
return &ScalarBls48581Gt{v}
} else {
return nil
@ -1093,9 +1118,9 @@ func (s *ScalarBls48581Gt) MulAdd(y, z Scalar) Scalar {
func (s *ScalarBls48581Gt) Div(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarBls48581Gt)
if ok {
v := bls48581.NewFP48copy(r.Value, nil)
v.Invert(nil)
v.Mul(s.Value, nil)
v := bls48581.NewFP48copy(r.Value)
v.Invert()
v.Mul(s.Value)
return &ScalarBls48581Gt{v}
} else {
return nil
@ -1145,7 +1170,7 @@ func (s *ScalarBls48581Gt) SetBytesWide(bytes []byte) (Scalar, error) {
}
func (s *ScalarBls48581Gt) Clone() Scalar {
fp := bls48581.NewFP48copy(s.Value, nil)
fp := bls48581.NewFP48copy(s.Value)
return &ScalarBls48581Gt{
Value: fp,
}

View File

@ -80,7 +80,7 @@ func TestScalarBls48581G1Invert(t *testing.T) {
sa, _ := actual.(*ScalarBls48581)
expected, err := bls48581G1.Scalar.SetBigInt(bhex("000000000000000007e51ad0414ec8f8799b3f49cc04d5850f9a0c8cf190b82a38f1b4c29e8b47c188b93dea0bb9f3ce3dec8654a0132439b9f49c13e8170ebbeae908716e2da522ab"))
require.NoError(t, err)
require.Equal(t, sa.Value.ToString(), expected.(*ScalarBls48581).Value.ToString())
require.Equal(t, sa.Cmp(expected), 0)
}
func TestScalarBls48581G1Add(t *testing.T) {
@ -91,11 +91,11 @@ func TestScalarBls48581G1Add(t *testing.T) {
require.NotNil(t, fifteen)
expected := bls48581G1.Scalar.New(15)
require.Equal(t, expected.Cmp(fifteen), 0)
qq := bls48581.NewBIGints(bls48581.CURVE_Order, nil)
qq.Sub(bls48581.NewBIGint(3, nil))
qq := bls48581.NewBIGints(bls48581.CURVE_Order)
qq.Sub(bls48581.NewBIGint(3))
upper := &ScalarBls48581{
Value: bls48581.NewBIGcopy(qq, nil),
Value: bls48581.NewBIGcopy(qq),
}
actual := upper.Add(nine)
require.NotNil(t, actual)
@ -106,8 +106,8 @@ func TestScalarBls48581G1Sub(t *testing.T) {
bls48581G1 := BLS48581G1()
nine := bls48581G1.Scalar.New(9)
six := bls48581G1.Scalar.New(6)
n := bls48581.NewFPbig(bls48581.NewBIGints(bls48581.CURVE_Order, nil), nil)
n.Sub(bls48581.NewFPint(3, nil), nil)
n := bls48581.NewFPbig(bls48581.NewBIGints(bls48581.CURVE_Order))
n.Sub(bls48581.NewFPint(3))
expected := bls48581G1.Scalar.New(0).Sub(bls48581G1.Scalar.New(3))
actual := six.Sub(nine)

View File

@ -575,7 +575,7 @@ func BLS48581G1() *Curve {
func bls48581g1Init() {
bls48581g1 = Curve{
Scalar: &ScalarBls48581{
Value: bls48581.NewBIGint(1, nil),
Value: bls48581.NewBIGint(1),
point: new(PointBls48581G1),
},
Point: new(PointBls48581G1).Identity(),
@ -592,7 +592,7 @@ func BLS48581G2() *Curve {
func bls48581g2Init() {
bls48581g2 = Curve{
Scalar: &ScalarBls48581{
Value: bls48581.NewBIGint(1, nil),
Value: bls48581.NewBIGint(1),
point: new(PointBls48581G2),
},
Point: new(PointBls48581G2).Identity(),
@ -603,7 +603,7 @@ func bls48581g2Init() {
func BLS48581(preferredPoint Point) *PairingCurve {
return &PairingCurve{
Scalar: &ScalarBls48581{
Value: bls48581.NewBIG(nil),
Value: bls48581.NewBIG(),
point: preferredPoint,
},
PointG1: &PointBls48581G1{
@ -613,7 +613,7 @@ func BLS48581(preferredPoint Point) *PairingCurve {
Value: bls48581.ECP8_generator(),
},
GT: &ScalarBls48581Gt{
Value: bls48581.NewFP48int(1, nil),
Value: bls48581.NewFP48int(1),
},
Name: BLS48581Name,
}

View File

@ -24,7 +24,6 @@
package bls48581
import (
"arena"
"math/bits"
"strconv"
@ -47,8 +46,8 @@ type DBIG struct {
/* Note that because of the lack of a 128-bit integer, 32 and 64-bit code needs to be done differently */
/* return a*b as DBIG */
func mul(a *BIG, b *BIG, mem *arena.Arena) *DBIG {
c := NewDBIG(mem)
func mul(a *BIG, b *BIG) *DBIG {
c := NewDBIG()
carry := Chunk(0)
for i := 0; i < NLEN; i++ {
@ -63,8 +62,8 @@ func mul(a *BIG, b *BIG, mem *arena.Arena) *DBIG {
}
/* return a^2 as DBIG */
func sqr(a *BIG, mem *arena.Arena) *DBIG {
c := NewDBIG(mem)
func sqr(a *BIG) *DBIG {
c := NewDBIG()
carry := Chunk(0)
for i := 0; i < NLEN; i++ {
@ -88,7 +87,7 @@ func sqr(a *BIG, mem *arena.Arena) *DBIG {
return c
}
func monty(md *BIG, mc Chunk, d *DBIG, mem *arena.Arena) *BIG {
func monty(md *BIG, mc Chunk, d *DBIG) *BIG {
carry := Chunk(0)
m := Chunk(0)
for i := 0; i < NLEN; i++ {
@ -111,7 +110,7 @@ func monty(md *BIG, mc Chunk, d *DBIG, mem *arena.Arena) *BIG {
d.w[NLEN+i] += carry
}
b := NewBIG(mem)
b := NewBIG()
for i := 0; i < NLEN; i++ {
b.w[i] = d.w[NLEN+i]
}
@ -180,39 +179,27 @@ func (r *BIG) fshl(k uint) int {
return int(r.w[NLEN-1] >> ((8 * MODBYTES) % BASEBITS)) /* return excess - only used in ff.c */
}
func NewBIG(mem *arena.Arena) *BIG {
func NewBIG() *BIG {
var b *BIG
if mem != nil {
b = arena.New[BIG](mem)
} else {
b = new(BIG)
}
b = new(BIG)
for i := 0; i < NLEN; i++ {
b.w[i] = 0
}
return b
}
func NewBIGints(x [NLEN]Chunk, mem *arena.Arena) *BIG {
func NewBIGints(x [NLEN]Chunk) *BIG {
var b *BIG
if mem != nil {
b = arena.New[BIG](mem)
} else {
b = new(BIG)
}
b = new(BIG)
for i := 0; i < NLEN; i++ {
b.w[i] = x[i]
}
return b
}
func NewBIGint(x int, mem *arena.Arena) *BIG {
func NewBIGint(x int) *BIG {
var b *BIG
if mem != nil {
b = arena.New[BIG](mem)
} else {
b = new(BIG)
}
b = new(BIG)
b.w[0] = Chunk(x)
for i := 1; i < NLEN; i++ {
b.w[i] = 0
@ -220,26 +207,18 @@ func NewBIGint(x int, mem *arena.Arena) *BIG {
return b
}
func NewBIGcopy(x *BIG, mem *arena.Arena) *BIG {
func NewBIGcopy(x *BIG) *BIG {
var b *BIG
if mem != nil {
b = arena.New[BIG](mem)
} else {
b = new(BIG)
}
b = new(BIG)
for i := 0; i < NLEN; i++ {
b.w[i] = x.w[i]
}
return b
}
func NewBIGdcopy(x *DBIG, mem *arena.Arena) *BIG {
func NewBIGdcopy(x *DBIG) *BIG {
var b *BIG
if mem != nil {
b = arena.New[BIG](mem)
} else {
b = new(BIG)
}
b = new(BIG)
for i := 0; i < NLEN; i++ {
b.w[i] = x.w[i]
}
@ -362,7 +341,7 @@ func (r *BIG) shl(k uint) {
/* return number of bits */
func (r *BIG) nbits() int {
t := NewBIGcopy(r, nil)
t := NewBIGcopy(r)
k := NLEN - 1
t.norm()
for k >= 0 && t.w[k] == 0 {
@ -402,7 +381,7 @@ func (r *BIG) ToString() string {
}
for i := len - 1; i >= 0; i-- {
b := NewBIGcopy(r, nil)
b := NewBIGcopy(r)
b.shr(uint(i * 4))
s += strconv.FormatInt(int64(b.w[0]&15), 16)
@ -439,8 +418,8 @@ func (r *BIG) inc(x int) {
}
/* this*=c and catch overflow in DBIG */
func (r *BIG) pxmul(c int, mem *arena.Arena) *DBIG {
m := NewDBIG(mem)
func (r *BIG) pxmul(c int) *DBIG {
m := NewDBIG()
carry := Chunk(0)
for j := 0; j < NLEN; j++ {
carry, m.w[j] = mulAdd(r.w[j], Chunk(c), carry, m.w[j])
@ -504,7 +483,7 @@ func (r *BIG) pmul(c int) Chunk {
/* convert this BIG to byte array */
func (r *BIG) tobytearray(b []byte, n int) {
//r.norm();
c := NewBIGcopy(r, nil)
c := NewBIGcopy(r)
c.norm()
for i := int(MODBYTES) - 1; i >= 0; i-- {
@ -515,7 +494,7 @@ func (r *BIG) tobytearray(b []byte, n int) {
/* convert from byte array to BIG */
func frombytearray(b []byte, n int) *BIG {
m := NewBIG(nil)
m := NewBIG()
l := len(b)
for i := 0; i < int(MODBYTES); i++ {
m.fshl(8)
@ -552,7 +531,7 @@ func (r *BIG) div3() int {
/* return a*b where result fits in a BIG */
func smul(a *BIG, b *BIG) *BIG {
carry := Chunk(0)
c := NewBIG(nil)
c := NewBIG()
for i := 0; i < NLEN; i++ {
carry = 0
for j := 0; j < NLEN; j++ {
@ -609,9 +588,9 @@ func (r *BIG) mod2m(m uint) {
/* a=1/a mod 2^256. This is very fast! */
func (r *BIG) invmod2m() {
U := NewBIG(nil)
b := NewBIG(nil)
c := NewBIG(nil)
U := NewBIG()
b := NewBIG()
c := NewBIG()
U.inc(invmod256(r.lastbits(8)))
@ -646,10 +625,10 @@ func (r *BIG) invmod2m() {
r.norm()
}
func (r *BIG) ctmod(m *BIG, bd uint, mem *arena.Arena) {
func (r *BIG) ctmod(m *BIG, bd uint) {
k := bd
sr := NewBIG(mem)
c := NewBIGcopy(m, mem)
sr := NewBIG()
c := NewBIGcopy(m)
r.norm()
c.shl(k)
@ -668,20 +647,20 @@ func (r *BIG) ctmod(m *BIG, bd uint, mem *arena.Arena) {
}
/* reduce this mod m */
func (r *BIG) Mod(m *BIG, mem *arena.Arena) {
func (r *BIG) Mod(m *BIG) {
k := r.nbits() - m.nbits()
if k < 0 {
k = 0
}
r.ctmod(m, uint(k), mem)
r.ctmod(m, uint(k))
}
func (r *BIG) ctdiv(m *BIG, bd uint, mem *arena.Arena) {
func (r *BIG) ctdiv(m *BIG, bd uint) {
k := bd
e := NewBIGint(1, mem)
sr := NewBIG(mem)
a := NewBIGcopy(r, mem)
c := NewBIGcopy(m, mem)
e := NewBIGint(1)
sr := NewBIG()
a := NewBIGcopy(r)
c := NewBIGcopy(m)
r.norm()
r.zero()
@ -708,17 +687,17 @@ func (r *BIG) ctdiv(m *BIG, bd uint, mem *arena.Arena) {
}
/* divide this by m */
func (r *BIG) div(m *BIG, mem *arena.Arena) {
func (r *BIG) div(m *BIG) {
k := r.nbits() - m.nbits()
if k < 0 {
k = 0
}
r.ctdiv(m, uint(k), mem)
r.ctdiv(m, uint(k))
}
/* get 8*MODBYTES size random number */
func Random(rng *ext.RAND) *BIG {
m := NewBIG(nil)
m := NewBIG()
var j int = 0
var r byte = 0
/* generate random BIG */
@ -740,7 +719,7 @@ func Random(rng *ext.RAND) *BIG {
/* Create random BIG in portable way, one bit at a time */
func Randomnum(q *BIG, rng *ext.RAND) *BIG {
d := NewDBIG(nil)
d := NewDBIG()
var j int = 0
var r byte = 0
for i := 0; i < 2*q.nbits(); i++ {
@ -756,7 +735,7 @@ func Randomnum(q *BIG, rng *ext.RAND) *BIG {
j++
j &= 7
}
m := d.Mod(q, nil)
m := d.Mod(q)
return m
}
@ -769,61 +748,59 @@ func Randtrunc(q *BIG, trunc int, rng *ext.RAND) *BIG {
}
/* return a*b mod m */
func Modmul(a1, b1, m *BIG, mem *arena.Arena) *BIG {
a := NewBIGcopy(a1, mem)
b := NewBIGcopy(b1, mem)
a.Mod(m, mem)
b.Mod(m, mem)
d := mul(a, b, mem)
return d.ctmod(m, uint(m.nbits()), mem)
func Modmul(a1, b1, m *BIG) *BIG {
a := NewBIGcopy(a1)
b := NewBIGcopy(b1)
a.Mod(m)
b.Mod(m)
d := mul(a, b)
return d.ctmod(m, uint(m.nbits()))
}
/* return a^2 mod m */
func Modsqr(a1, m *BIG, mem *arena.Arena) *BIG {
a := NewBIGcopy(a1, mem)
a.Mod(m, mem)
d := sqr(a, mem)
return d.ctmod(m, uint(m.nbits()), mem)
func Modsqr(a1, m *BIG) *BIG {
a := NewBIGcopy(a1)
a.Mod(m)
d := sqr(a)
return d.ctmod(m, uint(m.nbits()))
}
/* return -a mod m */
func Modneg(a1, m *BIG, mem *arena.Arena) *BIG {
a := NewBIGcopy(a1, mem)
a.Mod(m, mem)
func Modneg(a1, m *BIG) *BIG {
a := NewBIGcopy(a1)
a.Mod(m)
a.rsub(m)
a.norm()
return a
}
/* return a+b mod m */
func ModAdd(a1, b1, m *BIG, mem *arena.Arena) *BIG {
a := NewBIGcopy(a1, mem)
b := NewBIGcopy(b1, mem)
a.Mod(m, mem)
b.Mod(m, mem)
func ModAdd(a1, b1, m *BIG) *BIG {
a := NewBIGcopy(a1)
b := NewBIGcopy(b1)
a.Mod(m)
b.Mod(m)
a.Add(b)
a.norm()
a.ctmod(m, 1, mem)
a.ctmod(m, 1)
return a
}
/* Jacobi Symbol (this/p). Returns 0, 1 or -1 */
func (r *BIG) Jacobi(p *BIG) int {
mem := arena.NewArena()
defer mem.Free()
m := 0
t := NewBIGint(0, mem)
x := NewBIGint(0, mem)
n := NewBIGint(0, mem)
zilch := NewBIGint(0, mem)
one := NewBIGint(1, mem)
t := NewBIGint(0)
x := NewBIGint(0)
n := NewBIGint(0)
zilch := NewBIGint(0)
one := NewBIGint(1)
if p.parity() == 0 || Comp(r, zilch) == 0 || Comp(p, one) <= 0 {
return 0
}
r.norm()
x.copy(r)
n.copy(p)
x.Mod(p, mem)
x.Mod(p)
for Comp(n, one) > 0 {
if Comp(x, zilch) == 0 {
@ -840,7 +817,7 @@ func (r *BIG) Jacobi(p *BIG) int {
}
m += (n8 - 1) * (x.lastbits(2) - 1) / 4
t.copy(n)
t.Mod(x, mem)
t.Mod(x)
n.copy(x)
x.copy(t)
m %= 2
@ -854,18 +831,16 @@ func (r *BIG) Jacobi(p *BIG) int {
/* this=1/this mod p. Binary method */
func (r *BIG) Invmodp(p *BIG) {
mem := arena.NewArena()
defer mem.Free()
r.Mod(p, mem)
r.Mod(p)
if r.IsZero() {
return
}
u := NewBIGcopy(r, mem)
v := NewBIGcopy(p, mem)
x1 := NewBIGint(1, mem)
x2 := NewBIGint(0, mem)
t := NewBIGint(0, mem)
one := NewBIGint(1, mem)
u := NewBIGcopy(r)
v := NewBIGcopy(p)
x1 := NewBIGint(1)
x2 := NewBIGint(0)
t := NewBIGint(0)
one := NewBIGint(1)
for Comp(u, one) != 0 && Comp(v, one) != 0 {
for u.parity() == 0 {
u.fshr(1)
@ -906,23 +881,23 @@ func (r *BIG) Invmodp(p *BIG) {
}
/* return this^e mod m */
func (r *BIG) Powmod(e1 *BIG, m *BIG, mem *arena.Arena) *BIG {
e := NewBIGcopy(e1, mem)
func (r *BIG) Powmod(e1 *BIG, m *BIG) *BIG {
e := NewBIGcopy(e1)
r.norm()
e.norm()
a := NewBIGint(1, mem)
z := NewBIGcopy(e, mem)
s := NewBIGcopy(r, mem)
a := NewBIGint(1)
z := NewBIGcopy(e)
s := NewBIGcopy(r)
for true {
bt := z.parity()
z.fshr(1)
if bt == 1 {
a = Modmul(a, s, m, mem)
a = Modmul(a, s, m)
}
if z.IsZero() {
break
}
s = Modsqr(s, m, mem)
s = Modsqr(s, m)
}
return a
}
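
With the `*arena.Arena` parameters removed, the modular helpers take just the operands and the modulus. A small in-package sketch of the post-change call pattern, using only functions whose new signatures appear above (operand values are arbitrary):

```go
package bls48581

// exampleModArithmetic exercises the de-arena'd BIG helpers.
func exampleModArithmetic() {
	q := NewBIGints(CURVE_Order) // modulus: the group order
	a := NewBIGint(9)
	b := NewBIGint(6)

	sum := ModAdd(a, b, q)  // (a + b) mod q
	prod := Modmul(a, b, q) // (a * b) mod q
	sq := Modsqr(a, q)      // (a * a) mod q
	neg := Modneg(a, q)     // (-a) mod q

	inv := NewBIGcopy(b)
	inv.Invmodp(q) // b^-1 mod q, in place

	_, _, _, _, _ = sum, prod, sq, neg, inv
}
```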

View File

@ -42,7 +42,7 @@ func ceil(a int, b int) int {
/* output u \in F_p */
func Hash_to_field(hash int, hlen int, DST []byte, M []byte, ctr int) []*FP {
q := NewBIGints(Modulus, nil)
q := NewBIGints(Modulus)
nbq := q.nbits()
L := ceil(nbq+AESKEY*8, 8)
var u []*FP
@ -53,7 +53,7 @@ func Hash_to_field(hash int, hlen int, DST []byte, M []byte, ctr int) []*FP {
for j := 0; j < L; j++ {
fd[j] = OKM[i*L+j]
}
u = append(u, NewFPbig(DBIG_fromBytes(fd).ctmod(q, uint(8*L-nbq), nil), nil))
u = append(u, NewFPbig(DBIG_fromBytes(fd).ctmod(q, uint(8*L-nbq))))
}
return u
}
@ -65,15 +65,15 @@ func Bls256_hash_to_point(M []byte) *ECP {
P := ECP_map2point(u[0])
P1 := ECP_map2point(u[1])
P.Add(P1, nil)
P.Add(P1)
P.Cfp()
P.Affine(nil)
P.Affine()
return P
}
func Init() int {
G := ECP8_generator()
if G.Is_infinity(nil) {
if G.Is_infinity() {
return BLS_FAIL
}
G2_TAB = precomp(G)
@ -82,7 +82,7 @@ func Init() int {
/* generate key pair, private key S, public key W */
func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
r := NewBIGints(CURVE_Order, nil)
r := NewBIGints(CURVE_Order)
nbr := r.nbits()
L := ceil(3*ceil(nbr, 8), 2)
LEN := ext.InttoBytes(L, 2)
@ -93,7 +93,7 @@ func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
AIKM[len(IKM)] = 0
G := ECP8_generator()
if G.Is_infinity(nil) {
if G.Is_infinity() {
return BLS_FAIL
}
SALT := []byte("BLS-SIG-KEYGEN-SALT-")
@ -101,10 +101,10 @@ func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
OKM := ext.HKDF_Expand(ext.MC_SHA2, HASH_TYPE, L, PRK, LEN)
dx := DBIG_fromBytes(OKM[:])
s := dx.ctmod(r, uint(8*L-nbr), nil)
s := dx.ctmod(r, uint(8*L-nbr))
s.ToBytes(S)
// SkToPk
G = G2mul(G, s, nil)
G = G2mul(G, s)
G.ToBytes(W, true)
return BLS_OK
}
@ -113,7 +113,7 @@ func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {
func Core_Sign(SIG []byte, M []byte, S []byte) int {
D := Bls256_hash_to_point(M)
s := FromBytes(S)
D = G1mul(D, s, nil)
D = G1mul(D, s)
D.ToBytes(SIG, true)
return BLS_OK
}
@ -124,21 +124,21 @@ func Core_Verify(SIG []byte, M []byte, W []byte) int {
HM := Bls256_hash_to_point(M)
D := ECP_fromBytes(SIG)
if !G1member(D, nil) {
if !G1member(D) {
return BLS_FAIL
}
D.Neg(nil)
D.Neg()
PK := ECP8_fromBytes(W)
if !G2member(PK, nil) {
if !G2member(PK) {
return BLS_FAIL
}
// Use new multi-pairing mechanism
r := Initmp(nil)
r := Initmp()
Another_pc(r, G2_TAB, D)
Another(r, PK, HM, nil)
v := Miller(r, nil)
Another(r, PK, HM)
v := Miller(r)
//.. or alternatively
// G := ECP8_generator()
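
The high-level BLS entry points keep their byte-slice signatures; only the internal arena plumbing disappears. A hedged in-package sketch of the sign/verify round trip; the compressed G1/G2 buffer sizes are assumptions based on the encodings used elsewhere in this file:

```go
package bls48581

import "crypto/rand"

// exampleSignVerify sketches keygen, signing, and verification with the
// entry points shown above. It returns true when the signature verifies.
func exampleSignVerify() bool {
	if Init() != BLS_OK { // precompute the G2 table
		return false
	}

	ikm := make([]byte, 64)
	if _, err := rand.Read(ikm); err != nil {
		return false
	}

	sk := make([]byte, MODBYTES)     // secret scalar
	pk := make([]byte, 8*MODBYTES+1) // compressed G2 public key (assumed size)
	if KeyPairGenerate(ikm, sk, pk) != BLS_OK {
		return false
	}

	msg := []byte("hello")
	sig := make([]byte, MODBYTES+1) // compressed G1 signature (assumed size)
	if Core_Sign(sig, msg, sk) != BLS_OK {
		return false
	}
	return Core_Verify(sig, msg, pk) == BLS_OK
}
```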

View File

@ -19,6 +19,11 @@
package bls48581
// Curve types
const WEIERSTRASS int = 0
const EDWARDS int = 1
const MONTGOMERY int = 2
// Pairing Friendly?
const NOT int = 0
const BN int = 1
@ -26,6 +31,10 @@ const BLS12 int = 2
const BLS24 int = 3
const BLS48 int = 4
// Pairing Twist type
const D_TYPE int = 0
const M_TYPE int = 1
// Sparsity
const FP_ZERO int = 0
const FP_ONE int = 1
@ -34,16 +43,34 @@ const FP_SPARSER int = 3
const FP_SPARSE int = 4
const FP_DENSE int = 5
const CURVE_A int = 0
// Pairing x parameter sign
const POSITIVEX int = 0
const NEGATIVEX int = 1
// Curve type
const CURVETYPE int = WEIERSTRASS
const CURVE_A int = 0
const CURVE_PAIRING_TYPE int = BLS48
// Pairings only
const SEXTIC_TWIST int = D_TYPE
const SIGN_OF_X int = NEGATIVEX
const ATE_BITS int = 33
const G2_TABLE int = 36
const HTC_ISO int = 0
const HTC_ISO_G2 int = 0
// associated hash function and AES key size
const HASH_TYPE int = 64
const AESKEY int = 32
const ALLOW_ALT_COMPRESS bool = false
// These are manually decided policy decisions. To block any potential patent issues set to false.
const USE_GLV bool = true
const USE_GS_G2 bool = true
const USE_GS_GT bool = true

View File

@ -22,45 +22,32 @@
package bls48581
import (
"arena"
"strconv"
)
//import "fmt"
func NewDBIG(mem *arena.Arena) *DBIG {
func NewDBIG() *DBIG {
var b *DBIG
if mem != nil {
b = arena.New[DBIG](mem)
} else {
b = new(DBIG)
}
b = new(DBIG)
for i := 0; i < DNLEN; i++ {
b.w[i] = 0
}
return b
}
func NewDBIGcopy(x *DBIG, mem *arena.Arena) *DBIG {
func NewDBIGcopy(x *DBIG) *DBIG {
var b *DBIG
if mem != nil {
b = arena.New[DBIG](mem)
} else {
b = new(DBIG)
}
b = new(DBIG)
for i := 0; i < DNLEN; i++ {
b.w[i] = x.w[i]
}
return b
}
func NewDBIGscopy(x *BIG, mem *arena.Arena) *DBIG {
func NewDBIGscopy(x *BIG) *DBIG {
var b *DBIG
if mem != nil {
b = arena.New[DBIG](mem)
} else {
b = new(DBIG)
}
b = new(DBIG)
for i := 0; i < NLEN-1; i++ {
b.w[i] = x.w[i]
}
@ -85,8 +72,8 @@ func (r *DBIG) norm() {
}
/* split DBIG at position n, return higher half, keep lower half */
func (r *DBIG) split(n uint, mem *arena.Arena) *BIG {
t := NewBIG(mem)
func (r *DBIG) split(n uint) *BIG {
t := NewBIG()
m := n % BASEBITS
carry := r.w[DNLEN-1] << (BASEBITS - m)
@ -191,11 +178,11 @@ func (r *DBIG) shr(k uint) {
}
}
func (r *DBIG) ctmod(m *BIG, bd uint, mem *arena.Arena) *BIG {
func (r *DBIG) ctmod(m *BIG, bd uint) *BIG {
k := bd
r.norm()
c := NewDBIGscopy(m, mem)
dr := NewDBIG(mem)
c := NewDBIGscopy(m)
dr := NewDBIG()
c.shl(k)
@ -210,25 +197,25 @@ func (r *DBIG) ctmod(m *BIG, bd uint, mem *arena.Arena) *BIG {
k -= 1
c.shr(1)
}
return NewBIGdcopy(r, mem)
return NewBIGdcopy(r)
}
/* reduces this DBIG mod a BIG, and returns the BIG */
func (r *DBIG) Mod(m *BIG, mem *arena.Arena) *BIG {
func (r *DBIG) Mod(m *BIG) *BIG {
k := r.nbits() - m.nbits()
if k < 0 {
k = 0
}
return r.ctmod(m, uint(k), mem)
return r.ctmod(m, uint(k))
}
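
`DBIG.Mod` likewise loses its arena argument; reducing a double-width product back to a `BIG` is otherwise unchanged. A short in-package sketch (operands arbitrary):

```go
package bls48581

// exampleDbigReduce multiplies two BIGs into a DBIG and reduces mod the
// group order, using only the de-arena'd signatures shown above.
func exampleDbigReduce() *BIG {
	a := NewBIGint(1234567)
	b := NewBIGint(7654321)
	q := NewBIGints(CURVE_Order)

	d := mul(a, b) // *DBIG: full double-width product
	return d.Mod(q)
}
```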
func (r *DBIG) ctdiv(m *BIG, bd uint, mem *arena.Arena) *BIG {
func (r *DBIG) ctdiv(m *BIG, bd uint) *BIG {
k := bd
c := NewDBIGscopy(m, mem)
a := NewBIGint(0, mem)
e := NewBIGint(1, mem)
sr := NewBIG(mem)
dr := NewDBIG(mem)
c := NewDBIGscopy(m)
a := NewBIGint(0)
e := NewBIGint(1)
sr := NewBIG()
dr := NewDBIG()
r.norm()
c.shl(k)
@ -255,12 +242,12 @@ func (r *DBIG) ctdiv(m *BIG, bd uint, mem *arena.Arena) *BIG {
}
/* return this/m */
func (r *DBIG) div(m *BIG, mem *arena.Arena) *BIG {
func (r *DBIG) div(m *BIG) *BIG {
k := r.nbits() - m.nbits()
if k < 0 {
k = 0
}
return r.ctdiv(m, uint(k), mem)
return r.ctdiv(m, uint(k))
}
/* Convert to Hex String */
@ -277,7 +264,7 @@ func (r *DBIG) toString() string {
}
for i := len - 1; i >= 0; i-- {
b := NewDBIGcopy(r, nil)
b := NewDBIGcopy(r)
b.shr(uint(i * 4))
s += strconv.FormatInt(int64(b.w[0]&15), 16)
@ -288,7 +275,7 @@ func (r *DBIG) toString() string {
/* return number of bits */
func (r *DBIG) nbits() int {
k := DNLEN - 1
t := NewDBIGcopy(r, nil)
t := NewDBIGcopy(r)
t.norm()
for k >= 0 && t.w[k] == 0 {
k--
@ -307,7 +294,7 @@ func (r *DBIG) nbits() int {
/* convert from byte array to BIG */
func DBIG_fromBytes(b []byte) *DBIG {
m := NewDBIG(nil)
m := NewDBIG()
for i := 0; i < len(b); i++ {
m.shl(8)
m.w[0] += Chunk(int(b[i] & 0xff))

View File

@ -22,11 +22,7 @@
package bls48581
import (
"arena"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
)
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
type FP struct {
x *BIG
@ -35,119 +31,84 @@ type FP struct {
/* Constructors */
func NewFP(mem *arena.Arena) *FP {
if mem != nil {
F := arena.New[FP](mem)
F.x = NewBIG(mem)
F.XES = 1
return F
} else {
F := new(FP)
F.x = NewBIG(nil)
F.XES = 1
return F
}
func NewFP() *FP {
F := new(FP)
F.x = NewBIG()
F.XES = 1
return F
}
func NewFPint(a int, mem *arena.Arena) *FP {
if mem != nil {
F := arena.New[FP](mem)
if a < 0 {
m := NewBIGints(Modulus, mem)
m.inc(a)
m.norm()
F.x = NewBIGcopy(m, mem)
} else {
F.x = NewBIGint(a, mem)
}
F.nres(mem)
return F
func NewFPint(a int) *FP {
F := new(FP)
if a < 0 {
m := NewBIGints(Modulus)
m.inc(a)
m.norm()
F.x = NewBIGcopy(m)
} else {
F := new(FP)
if a < 0 {
m := NewBIGints(Modulus, nil)
m.inc(a)
m.norm()
F.x = NewBIGcopy(m, nil)
} else {
F.x = NewBIGint(a, nil)
}
F.nres(nil)
return F
F.x = NewBIGint(a)
}
F.nres()
return F
}
func NewFPbig(a *BIG, mem *arena.Arena) *FP {
if mem != nil {
F := arena.New[FP](mem)
F.x = NewBIGcopy(a, mem)
F.nres(mem)
return F
} else {
F := new(FP)
F.x = NewBIGcopy(a, nil)
F.nres(nil)
return F
}
func NewFPbig(a *BIG) *FP {
F := new(FP)
F.x = NewBIGcopy(a)
F.nres()
return F
}
func NewFPcopy(a *FP, mem *arena.Arena) *FP {
if mem != nil {
F := arena.New[FP](mem)
F.x = NewBIGcopy(a.x, mem)
F.XES = a.XES
return F
} else {
F := new(FP)
F.x = NewBIGcopy(a.x, nil)
F.XES = a.XES
return F
}
func NewFPcopy(a *FP) *FP {
F := new(FP)
F.x = NewBIGcopy(a.x)
F.XES = a.XES
return F
}
func NewFPrand(rng *ext.RAND) *FP {
m := NewBIGints(Modulus, nil)
m := NewBIGints(Modulus)
w := Randomnum(m, rng)
F := NewFPbig(w, nil)
F := NewFPbig(w)
return F
}
func (F *FP) ToString() string {
F.reduce(nil)
return F.Redc(nil).ToString()
F.reduce()
return F.Redc().ToString()
}
/* convert to Montgomery n-residue form */
func (F *FP) nres(mem *arena.Arena) {
func (F *FP) nres() {
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
r := NewBIGints(R2modp, mem)
d := mul(F.x, r, mem)
F.x.copy(mod(d, mem))
r := NewBIGints(R2modp)
d := mul(F.x, r)
F.x.copy(mod(d))
F.XES = 2
} else {
md := NewBIGints(Modulus, mem)
F.x.Mod(md, mem)
md := NewBIGints(Modulus)
F.x.Mod(md)
F.XES = 1
}
}
/* convert back to regular form */
func (F *FP) Redc(mem *arena.Arena) *BIG {
func (F *FP) Redc() *BIG {
if MODTYPE != PSEUDO_MERSENNE && MODTYPE != GENERALISED_MERSENNE {
d := NewDBIGscopy(F.x, mem)
return mod(d, mem)
d := NewDBIGscopy(F.x)
return mod(d)
} else {
r := NewBIGcopy(F.x, mem)
r := NewBIGcopy(F.x)
return r
}
}
/* reduce a DBIG to a BIG using the appropriate form of the modulus */
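// The PSEUDO_MERSENNE and GENERALISED_MERSENNE cases split d at MODBITS and fold the top half back in;
// NOT_SPECIAL falls back to Montgomery reduction via monty().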
func mod(d *DBIG, mem *arena.Arena) *BIG {
func mod(d *DBIG) *BIG {
if MODTYPE == PSEUDO_MERSENNE {
t := d.split(MODBITS, mem)
b := NewBIGdcopy(d, mem)
t := d.split(MODBITS)
b := NewBIGdcopy(d)
v := t.pmul(int(MConst))
@ -167,7 +128,7 @@ func mod(d *DBIG, mem *arena.Arena) *BIG {
d.w[NLEN+i-1] = bot
d.w[NLEN+i] += top
}
b := NewBIG(mem)
b := NewBIG()
for i := 0; i < NLEN; i++ {
b.w[i] = d.w[NLEN+i]
@ -177,14 +138,14 @@ func mod(d *DBIG, mem *arena.Arena) *BIG {
}
if MODTYPE == GENERALISED_MERSENNE { // GoldiLocks only
t := d.split(MODBITS, mem)
b := NewBIGdcopy(d, mem)
t := d.split(MODBITS)
b := NewBIGdcopy(d)
b.Add(t)
dd := NewDBIGscopy(t, mem)
dd := NewDBIGscopy(t)
dd.shl(MODBITS / 2)
tt := dd.split(MODBITS, mem)
lo := NewBIGdcopy(dd, mem)
tt := dd.split(MODBITS)
lo := NewBIGdcopy(dd)
b.Add(tt)
b.Add(lo)
b.norm()
@ -202,10 +163,10 @@ func mod(d *DBIG, mem *arena.Arena) *BIG {
}
if MODTYPE == NOT_SPECIAL {
md := NewBIGints(Modulus, mem)
return monty(md, MConst, d, mem)
md := NewBIGints(Modulus)
return monty(md, MConst, d)
}
return NewBIG(mem)
return NewBIG()
}
// find approximation to quotient of a/m
@ -228,9 +189,9 @@ func quo(n *BIG, m *BIG) int {
}
/* reduce this mod Modulus */
func (F *FP) reduce(mem *arena.Arena) {
m := NewBIGints(Modulus, mem)
r := NewBIGints(Modulus, mem)
func (F *FP) reduce() {
m := NewBIGints(Modulus)
r := NewBIGints(Modulus)
var sb uint
F.x.norm()
@ -256,49 +217,43 @@ func (F *FP) reduce(mem *arena.Arena) {
}
/* test this=0? */
func (F *FP) IsZero(mem *arena.Arena) bool {
W := NewFPcopy(F, mem)
W.reduce(mem)
func (F *FP) IsZero() bool {
W := NewFPcopy(F)
W.reduce()
return W.x.IsZero()
}
func (F *FP) IsOne() bool {
mem := arena.NewArena()
defer mem.Free()
W := NewFPcopy(F, mem)
W.reduce(mem)
T := NewFPint(1, mem)
W := NewFPcopy(F)
W.reduce()
T := NewFPint(1)
return W.Equals(T)
}
func (F *FP) islarger() int {
mem := arena.NewArena()
defer mem.Free()
if F.IsZero(mem) {
if F.IsZero() {
return 0
}
sx := NewBIGints(Modulus, mem)
fx := F.Redc(mem)
sx := NewBIGints(Modulus)
fx := F.Redc()
sx.Sub(fx)
sx.norm()
return Comp(fx, sx)
}
func (F *FP) ToBytes(b []byte) {
F.Redc(nil).ToBytes(b)
F.Redc().ToBytes(b)
}
func FP_fromBytes(b []byte) *FP {
t := FromBytes(b)
return NewFPbig(t, nil)
return NewFPbig(t)
}
func (F *FP) isunity() bool {
mem := arena.NewArena()
defer mem.Free()
W := NewFPcopy(F, mem)
W.reduce(mem)
return W.Redc(mem).isunity()
W := NewFPcopy(F)
W.reduce()
return W.Redc().isunity()
}
/* copy from FP b */
@ -315,27 +270,25 @@ func (F *FP) zero() {
/* set this=1 */
func (F *FP) one() {
mem := arena.NewArena()
defer mem.Free()
F.x.one()
F.nres(mem)
F.nres()
}
/* return sign */
func (F *FP) sign(mem *arena.Arena) int {
func (F *FP) sign() int {
if BIG_ENDIAN_SIGN {
m := NewBIGints(Modulus, mem)
m := NewBIGints(Modulus)
m.dec(1)
m.fshr(1)
n := NewFPcopy(F, mem)
n.reduce(mem)
w := n.Redc(mem)
n := NewFPcopy(F)
n.reduce()
w := n.Redc()
cp := Comp(w, m)
return ((cp + 1) & 2) >> 1
} else {
W := NewFPcopy(F, mem)
W.reduce(mem)
return W.Redc(mem).parity()
W := NewFPcopy(F)
W.reduce()
return W.Redc().parity()
}
}
@ -362,20 +315,20 @@ func (F *FP) cmove(b *FP, d int) {
}
/* this*=b mod Modulus */
func (F *FP) Mul(b *FP, mem *arena.Arena) {
func (F *FP) Mul(b *FP) {
if int64(F.XES)*int64(b.XES) > int64(FEXCESS) {
F.reduce(mem)
F.reduce()
}
d := mul(F.x, b.x, mem)
F.x.copy(mod(d, mem))
d := mul(F.x, b.x)
F.x.copy(mod(d))
F.XES = 2
}
/* this = -this mod Modulus */
func (F *FP) Neg(mem *arena.Arena) {
m := NewBIGints(Modulus, mem)
func (F *FP) Neg() {
m := NewBIGints(Modulus)
sb := logb2(uint32(F.XES - 1))
m.fshl(sb)
@ -383,12 +336,12 @@ func (F *FP) Neg(mem *arena.Arena) {
F.XES = (1 << sb) + 1
if F.XES > FEXCESS {
F.reduce(mem)
F.reduce()
}
}
/* this*=c mod Modulus, where c is a small int */
func (F *FP) imul(c int, mem *arena.Arena) {
func (F *FP) imul(c int) {
// F.norm()
s := false
if c < 0 {
@ -397,60 +350,60 @@ func (F *FP) imul(c int, mem *arena.Arena) {
}
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
d := F.x.pxmul(c, mem)
F.x.copy(mod(d, mem))
d := F.x.pxmul(c)
F.x.copy(mod(d))
F.XES = 2
} else {
if F.XES*int32(c) <= FEXCESS {
F.x.pmul(c)
F.XES *= int32(c)
} else {
n := NewFPint(c, mem)
F.Mul(n, mem)
n := NewFPint(c)
F.Mul(n)
}
}
if s {
F.Neg(mem)
F.Neg()
F.norm()
}
}
/* this*=this mod Modulus */
func (F *FP) Sqr(mem *arena.Arena) {
func (F *FP) Sqr() {
if int64(F.XES)*int64(F.XES) > int64(FEXCESS) {
F.reduce(mem)
F.reduce()
}
d := sqr(F.x, mem)
F.x.copy(mod(d, mem))
d := sqr(F.x)
F.x.copy(mod(d))
F.XES = 2
}
/* this+=b */
func (F *FP) Add(b *FP, mem *arena.Arena) {
func (F *FP) Add(b *FP) {
F.x.Add(b.x)
F.XES += b.XES
if F.XES > FEXCESS {
F.reduce(mem)
F.reduce()
}
}
/* this-=b */
func (F *FP) Sub(b *FP, mem *arena.Arena) {
n := NewFPcopy(b, mem)
n.Neg(mem)
F.Add(n, mem)
func (F *FP) Sub(b *FP) {
n := NewFPcopy(b)
n.Neg()
F.Add(n)
}
func (F *FP) rsub(b *FP, mem *arena.Arena) {
F.Neg(mem)
F.Add(b, mem)
func (F *FP) rsub(b *FP) {
F.Neg()
F.Add(b)
}
/* this/=2 mod Modulus */
func (F *FP) div2(mem *arena.Arena) {
p := NewBIGints(Modulus, mem)
func (F *FP) div2() {
p := NewBIGints(Modulus)
pr := F.x.parity()
w := NewBIGcopy(F.x, mem)
w := NewBIGcopy(F.x)
F.x.fshr(1)
w.Add(p)
w.norm()
@ -460,22 +413,18 @@ func (F *FP) div2(mem *arena.Arena) {
/* return jacobi symbol (this/Modulus) */
func (F *FP) jacobi() int {
mem := arena.NewArena()
defer mem.Free()
w := F.Redc(mem)
p := NewBIGints(Modulus, mem)
w := F.Redc()
p := NewBIGints(Modulus)
return w.Jacobi(p)
}
/* return TRUE if this==a */
func (F *FP) Equals(a *FP) bool {
mem := arena.NewArena()
defer mem.Free()
f := NewFPcopy(F, mem)
s := NewFPcopy(a, mem)
f := NewFPcopy(F)
s := NewFPcopy(a)
s.reduce(mem)
f.reduce(mem)
s.reduce()
f.reduce()
if Comp(s.x, f.x) == 0 {
return true
}
@ -483,22 +432,20 @@ func (F *FP) Equals(a *FP) bool {
}
func (F *FP) Comp(a *FP) int {
mem := arena.NewArena()
defer mem.Free()
f := NewFPcopy(F, mem)
s := NewFPcopy(a, mem)
f := NewFPcopy(F)
s := NewFPcopy(a)
s.reduce(mem)
f.reduce(mem)
s.reduce()
f.reduce()
return Comp(s.x, f.x)
}
func (F *FP) pow(e *BIG, mem *arena.Arena) *FP {
func (F *FP) pow(e *BIG) *FP {
var tb []*FP
var w [1 + (NLEN*int(BASEBITS)+3)/4]int8
F.norm()
t := NewBIGcopy(e, mem)
t := NewBIGcopy(e)
t.norm()
nb := 1 + (t.nbits()+3)/4
@ -509,51 +456,51 @@ func (F *FP) pow(e *BIG, mem *arena.Arena) *FP {
w[i] = int8(lsbs)
t.fshr(4)
}
tb = append(tb, NewFPint(1, mem))
tb = append(tb, NewFPcopy(F, mem))
tb = append(tb, NewFPint(1))
tb = append(tb, NewFPcopy(F))
for i := 2; i < 16; i++ {
tb = append(tb, NewFPcopy(tb[i-1], mem))
tb[i].Mul(F, mem)
tb = append(tb, NewFPcopy(tb[i-1]))
tb[i].Mul(F)
}
r := NewFPcopy(tb[w[nb-1]], mem)
r := NewFPcopy(tb[w[nb-1]])
for i := nb - 2; i >= 0; i-- {
r.Sqr(mem)
r.Sqr(mem)
r.Sqr(mem)
r.Sqr(mem)
r.Mul(tb[w[i]], mem)
r.Sqr()
r.Sqr()
r.Sqr()
r.Sqr()
r.Mul(tb[w[i]])
}
r.reduce(mem)
r.reduce()
return r
}
// See https://eprint.iacr.org/2018/1038
// return this^(p-3)/4 or this^(p-5)/8
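// Only reached for PSEUDO_MERSENNE and GENERALISED_MERSENNE moduli (see progen below);
// ac[i] records the exponent held in the precomputed table entry xp[i], i.e. xp[i] = this^ac[i].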
func (F *FP) fpow(mem *arena.Arena) *FP {
func (F *FP) fpow() *FP {
ac := [11]int{1, 2, 3, 6, 12, 15, 30, 60, 120, 240, 255}
xp := arena.MakeSlice[*FP](mem, 11, 11)
var xp []*FP
// phase 1
xp[0] = NewFPcopy(F, mem)
xp[1] = NewFPcopy(F, mem)
xp[1].Sqr(mem)
xp[2] = NewFPcopy(xp[1], mem)
xp[2].Mul(F, mem)
xp[3] = NewFPcopy(xp[2], mem)
xp[3].Sqr(mem)
xp[4] = NewFPcopy(xp[3], mem)
xp[4].Sqr(mem)
xp[5] = NewFPcopy(xp[4], mem)
xp[5].Mul(xp[2], mem)
xp[6] = NewFPcopy(xp[5], mem)
xp[6].Sqr(mem)
xp[7] = NewFPcopy(xp[6], mem)
xp[7].Sqr(mem)
xp[8] = NewFPcopy(xp[7], mem)
xp[8].Sqr(mem)
xp[9] = NewFPcopy(xp[8], mem)
xp[9].Sqr(mem)
xp[10] = NewFPcopy(xp[9], mem)
xp[10].Mul(xp[5], mem)
xp = append(xp, NewFPcopy(F))
xp = append(xp, NewFPcopy(F))
xp[1].Sqr()
xp = append(xp, NewFPcopy(xp[1]))
xp[2].Mul(F)
xp = append(xp, NewFPcopy(xp[2]))
xp[3].Sqr()
xp = append(xp, NewFPcopy(xp[3]))
xp[4].Sqr()
xp = append(xp, NewFPcopy(xp[4]))
xp[5].Mul(xp[2])
xp = append(xp, NewFPcopy(xp[5]))
xp[6].Sqr()
xp = append(xp, NewFPcopy(xp[6]))
xp[7].Sqr()
xp = append(xp, NewFPcopy(xp[7]))
xp[8].Sqr()
xp = append(xp, NewFPcopy(xp[8]))
xp[9].Sqr()
xp = append(xp, NewFPcopy(xp[9]))
xp[10].Mul(xp[5])
var n, c int
e := int(PM1D2)
@ -582,7 +529,7 @@ func (F *FP) fpow(mem *arena.Arena) *FP {
k := w - c
i := 10
key := NewFP(mem)
key := NewFP()
if k != 0 {
for ac[i] > k {
@ -597,7 +544,7 @@ func (F *FP) fpow(mem *arena.Arena) *FP {
if ac[i] > k {
continue
}
key.Mul(xp[i], mem)
key.Mul(xp[i])
k -= ac[i]
}
// phase 2
@ -608,19 +555,19 @@ func (F *FP) fpow(mem *arena.Arena) *FP {
j := 3
m := 8
nw := n - bw
t := NewFP(mem)
t := NewFP()
for 2*m < nw {
t.copy(xp[j])
j++
for i = 0; i < m; i++ {
t.Sqr(mem)
t.Sqr()
}
xp[j].copy(xp[j-1])
xp[j].Mul(t, mem)
xp[j].Mul(t)
m *= 2
}
lo := nw - m
r := NewFPcopy(xp[j], mem)
r := NewFPcopy(xp[j])
for lo != 0 {
m /= 2
@ -631,86 +578,84 @@ func (F *FP) fpow(mem *arena.Arena) *FP {
lo -= m
t.copy(r)
for i = 0; i < m; i++ {
t.Sqr(mem)
t.Sqr()
}
r.copy(t)
r.Mul(xp[j], mem)
r.Mul(xp[j])
}
// phase 3
if bw != 0 {
for i = 0; i < bw; i++ {
r.Sqr(mem)
r.Sqr()
}
r.Mul(key, mem)
r.Mul(key)
}
if MODTYPE == GENERALISED_MERSENNE { // Goldilocks ONLY
key.copy(r)
r.Sqr(mem)
r.Mul(F, mem)
r.Sqr()
r.Mul(F)
for i = 0; i < n+1; i++ {
r.Sqr(mem)
r.Sqr()
}
r.Mul(key, mem)
r.Mul(key)
}
for nd > 0 {
r.Sqr(mem)
r.Sqr()
nd--
}
return r
}
// calculates r=x^(p-1-2^e)/2^(e+1) where 2^e|p-1
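// Here e is PM1D2 (the power of 2 dividing p-1); progen is the shared core of Invert, qr and Sqrt below.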
func (F *FP) progen(mem *arena.Arena) {
func (F *FP) progen() {
if MODTYPE == PSEUDO_MERSENNE || MODTYPE == GENERALISED_MERSENNE {
F.copy(F.fpow(mem))
F.copy(F.fpow())
return
}
e := uint(PM1D2)
m := NewBIGints(Modulus, mem)
m := NewBIGints(Modulus)
m.dec(1)
m.shr(e)
m.dec(1)
m.fshr(1)
F.copy(F.pow(m, mem))
F.copy(F.pow(m))
}
/* this=1/this mod Modulus */
func (F *FP) Invert(h *FP, mem *arena.Arena) {
func (F *FP) Invert(h *FP) {
e := int(PM1D2)
F.norm()
s := NewFPcopy(F, mem)
s := NewFPcopy(F)
for i := 0; i < e-1; i++ {
s.Sqr(mem)
s.Mul(F, mem)
s.Sqr()
s.Mul(F)
}
if h == nil {
F.progen(mem)
F.progen()
} else {
F.copy(h)
}
for i := 0; i <= e; i++ {
F.Sqr(mem)
F.Sqr()
}
F.Mul(s, mem)
F.reduce(mem)
F.Mul(s)
F.reduce()
}
/* test for Quadratic residue */
func (F *FP) qr(h *FP) int {
mem := arena.NewArena()
defer mem.Free()
r := NewFPcopy(F, mem)
r := NewFPcopy(F)
e := int(PM1D2)
r.progen(mem)
r.progen()
if h != nil {
h.copy(r)
}
r.Sqr(mem)
r.Mul(F, mem)
r.Sqr()
r.Mul(F)
for i := 0; i < e-1; i++ {
r.Sqr(mem)
r.Sqr()
}
if r.isunity() {
@ -721,29 +666,29 @@ func (F *FP) qr(h *FP) int {
}
/* return sqrt(this) mod Modulus */
func (F *FP) Sqrt(h *FP, mem *arena.Arena) *FP {
func (F *FP) Sqrt(h *FP) *FP {
e := int(PM1D2)
g := NewFPcopy(F, mem)
g := NewFPcopy(F)
if h == nil {
g.progen(mem)
g.progen()
} else {
g.copy(h)
}
m := NewBIGints(ROI, mem)
v := NewFPbig(m, mem)
m := NewBIGints(ROI)
v := NewFPbig(m)
t := NewFPcopy(g, mem)
t.Sqr(mem)
t.Mul(F, mem)
t := NewFPcopy(g)
t.Sqr()
t.Mul(F)
r := NewFPcopy(F, mem)
r.Mul(g, mem)
b := NewFPcopy(t, mem)
r := NewFPcopy(F)
r.Mul(g)
b := NewFPcopy(t)
for k := e; k > 1; k-- {
for j := 1; j < k-1; j++ {
b.Sqr(mem)
b.Sqr()
}
var u int
if b.isunity() {
@ -752,43 +697,41 @@ func (F *FP) Sqrt(h *FP, mem *arena.Arena) *FP {
u = 1
}
g.copy(r)
g.Mul(v, mem)
g.Mul(v)
r.cmove(g, u)
v.Sqr(mem)
v.Sqr()
g.copy(t)
g.Mul(v, mem)
g.Mul(v)
t.cmove(g, u)
b.copy(t)
}
sgn := r.sign(mem)
nr := NewFPcopy(r, mem)
nr.Neg(mem)
sgn := r.sign()
nr := NewFPcopy(r)
nr.Neg()
nr.norm()
r.cmove(nr, sgn)
return r
}
func (F *FP) invsqrt(i *FP, s *FP) int {
mem := arena.NewArena()
defer mem.Free()
h := NewFP(mem)
h := NewFP()
qr := F.qr(h)
s.copy(F.Sqrt(h, mem))
s.copy(F.Sqrt(h))
i.copy(F)
i.Invert(h, mem)
i.Invert(h)
return qr
}
// Two for the price of one - See Hamburg https://eprint.iacr.org/2012/309.pdf
// Calculate Invert of i and square root of s, return QR
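// With w = s*i and t = s*i*i, a single invsqrt of t yields both results:
// i becomes w/t = 1/i and s becomes sqrt(t)/i = sqrt(s) (up to choice of root).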
func FP_tpo(i *FP, s *FP) int {
w := NewFPcopy(s, nil)
t := NewFPcopy(i, nil)
w.Mul(i, nil)
t.Mul(w, nil)
w := NewFPcopy(s)
t := NewFPcopy(i)
w.Mul(i)
t.Mul(w)
qr := t.invsqrt(i, s)
i.Mul(w, nil)
s.Mul(i, nil)
i.Mul(w)
s.Mul(i)
return qr
}


@ -23,8 +23,6 @@
package bls48581
import "arena"
//import "fmt"
type FP16 struct {
@ -32,81 +30,46 @@ type FP16 struct {
b *FP8
}
func NewFP16(mem *arena.Arena) *FP16 {
if mem != nil {
F := arena.New[FP16](mem)
F.a = NewFP8(mem)
F.b = NewFP8(mem)
return F
} else {
F := new(FP16)
F.a = NewFP8(nil)
F.b = NewFP8(nil)
return F
}
func NewFP16() *FP16 {
F := new(FP16)
F.a = NewFP8()
F.b = NewFP8()
return F
}
/* Constructors */
func NewFP16int(a int, mem *arena.Arena) *FP16 {
if mem != nil {
F := arena.New[FP16](mem)
F.a = NewFP8int(a, mem)
F.b = NewFP8(mem)
return F
} else {
F := new(FP16)
F.a = NewFP8int(a, nil)
F.b = NewFP8(nil)
return F
}
func NewFP16int(a int) *FP16 {
F := new(FP16)
F.a = NewFP8int(a)
F.b = NewFP8()
return F
}
func NewFP16copy(x *FP16, mem *arena.Arena) *FP16 {
if mem != nil {
F := arena.New[FP16](mem)
F.a = NewFP8copy(x.a, mem)
F.b = NewFP8copy(x.b, mem)
return F
} else {
F := new(FP16)
F.a = NewFP8copy(x.a, nil)
F.b = NewFP8copy(x.b, nil)
return F
}
func NewFP16copy(x *FP16) *FP16 {
F := new(FP16)
F.a = NewFP8copy(x.a)
F.b = NewFP8copy(x.b)
return F
}
func NewFP16fp8s(c *FP8, d *FP8, mem *arena.Arena) *FP16 {
if mem != nil {
F := arena.New[FP16](mem)
F.a = c
F.b = d
return F
} else {
F := new(FP16)
F.a = c
F.b = d
return F
}
func NewFP16fp8s(c *FP8, d *FP8) *FP16 {
F := new(FP16)
F.a = NewFP8copy(c)
F.b = NewFP8copy(d)
return F
}
func NewFP16fp8(c *FP8, mem *arena.Arena) *FP16 {
if mem != nil {
F := arena.New[FP16](mem)
F.a = c
F.b = NewFP8(mem)
return F
} else {
F := new(FP16)
F.a = c
F.b = NewFP8(nil)
return F
}
func NewFP16fp8(c *FP8) *FP16 {
F := new(FP16)
F.a = NewFP8copy(c)
F.b = NewFP8()
return F
}
/* reduce all components of this mod Modulus */
func (F *FP16) reduce(mem *arena.Arena) {
F.a.reduce(mem)
F.b.reduce(mem)
func (F *FP16) reduce() {
F.a.reduce()
F.b.reduce()
}
/* normalise all components of this mod Modulus */
@ -116,8 +79,8 @@ func (F *FP16) norm() {
}
/* test this==0 ? */
func (F *FP16) IsZero(mem *arena.Arena) bool {
return F.a.IsZero(mem) && F.b.IsZero(mem)
func (F *FP16) IsZero() bool {
return F.a.IsZero() && F.b.IsZero()
}
func (F *FP16) ToBytes(bf []byte) {
@ -144,7 +107,7 @@ func FP16_fromBytes(bf []byte) *FP16 {
t[i] = bf[i+MB]
}
ta := FP8_fromBytes(t[:])
return NewFP16fp8s(ta, tb, nil)
return NewFP16fp8s(ta, tb)
}
/* Conditional move */
@ -155,15 +118,13 @@ func (F *FP16) cmove(g *FP16, d int) {
/* test this==1 ? */
func (F *FP16) isunity() bool {
mem := arena.NewArena()
defer mem.Free()
one := NewFP8int(1, mem)
return F.a.Equals(one) && F.b.IsZero(mem)
one := NewFP8int(1)
return F.a.Equals(one) && F.b.IsZero()
}
/* test whether w is real, i.e. in a+ib check that b is zero */
func (F *FP16) isreal() bool {
return F.b.IsZero(nil)
return F.b.IsZero()
}
/* extract real part a */
@ -204,137 +165,137 @@ func (F *FP16) one() {
}
/* set this=-this */
func (F *FP16) Neg(mem *arena.Arena) {
func (F *FP16) Neg() {
F.norm()
m := NewFP8copy(F.a, mem)
t := NewFP8(mem)
m.Add(F.b, mem)
m.Neg(mem)
m := NewFP8copy(F.a)
t := NewFP8()
m.Add(F.b)
m.Neg()
t.copy(m)
t.Add(F.b, mem)
t.Add(F.b)
F.b.copy(m)
F.b.Add(F.a, mem)
F.b.Add(F.a)
F.a.copy(t)
F.norm()
}
/* this=conjugate(this) */
func (F *FP16) conj(mem *arena.Arena) {
F.b.Neg(mem)
func (F *FP16) conj() {
F.b.Neg()
F.norm()
}
/* this=-conjugate(this) */
func (F *FP16) nconj(mem *arena.Arena) {
F.a.Neg(mem)
func (F *FP16) nconj() {
F.a.Neg()
F.norm()
}
/* this+=x */
func (F *FP16) Add(x *FP16, mem *arena.Arena) {
F.a.Add(x.a, mem)
F.b.Add(x.b, mem)
func (F *FP16) Add(x *FP16) {
F.a.Add(x.a)
F.b.Add(x.b)
}
/* this-=x */
func (F *FP16) Sub(x *FP16, mem *arena.Arena) {
m := NewFP16copy(x, mem)
m.Neg(mem)
F.Add(m, mem)
func (F *FP16) Sub(x *FP16) {
m := NewFP16copy(x)
m.Neg()
F.Add(m)
}
/* this-=x */
func (F *FP16) rsub(x *FP16, mem *arena.Arena) {
F.Neg(mem)
F.Add(x, mem)
func (F *FP16) rsub(x *FP16) {
F.Neg()
F.Add(x)
}
/* this*=s where s is FP8 */
func (F *FP16) pmul(s *FP8, mem *arena.Arena) {
F.a.Mul(s, mem)
F.b.Mul(s, mem)
func (F *FP16) pmul(s *FP8) {
F.a.Mul(s)
F.b.Mul(s)
}
/* this*=s where s is FP2 */
func (F *FP16) qmul(s *FP2, mem *arena.Arena) {
F.a.qmul(s, mem)
F.b.qmul(s, mem)
func (F *FP16) qmul(s *FP2) {
F.a.qmul(s)
F.b.qmul(s)
}
/* this*=s where s is FP */
func (F *FP16) tmul(s *FP, mem *arena.Arena) {
F.a.tmul(s, mem)
F.b.tmul(s, mem)
func (F *FP16) tmul(s *FP) {
F.a.tmul(s)
F.b.tmul(s)
}
/* this*=c where c is int */
func (F *FP16) imul(c int, mem *arena.Arena) {
F.a.imul(c, mem)
F.b.imul(c, mem)
func (F *FP16) imul(c int) {
F.a.imul(c)
F.b.imul(c)
}
/* this*=this */
func (F *FP16) Sqr(mem *arena.Arena) {
t1 := NewFP8copy(F.a, mem)
t2 := NewFP8copy(F.b, mem)
t3 := NewFP8copy(F.a, mem)
func (F *FP16) Sqr() {
t1 := NewFP8copy(F.a)
t2 := NewFP8copy(F.b)
t3 := NewFP8copy(F.a)
t3.Mul(F.b, mem)
t1.Add(F.b, mem)
t2.times_i(mem)
t3.Mul(F.b)
t1.Add(F.b)
t2.times_i()
t2.Add(F.a, mem)
t2.Add(F.a)
t1.norm()
t2.norm()
F.a.copy(t1)
F.a.Mul(t2, mem)
F.a.Mul(t2)
t2.copy(t3)
t2.times_i(mem)
t2.Add(t3, mem)
t2.times_i()
t2.Add(t3)
t2.norm()
t2.Neg(mem)
F.a.Add(t2, mem)
t2.Neg()
F.a.Add(t2)
F.b.copy(t3)
F.b.Add(t3, mem)
F.b.Add(t3)
F.norm()
}
/* this*=y */
func (F *FP16) Mul(y *FP16, mem *arena.Arena) {
t1 := NewFP8copy(F.a, mem)
t2 := NewFP8copy(F.b, mem)
t3 := NewFP8(mem)
t4 := NewFP8copy(F.b, mem)
func (F *FP16) Mul(y *FP16) {
t1 := NewFP8copy(F.a)
t2 := NewFP8copy(F.b)
t3 := NewFP8()
t4 := NewFP8copy(F.b)
t1.Mul(y.a, mem)
t2.Mul(y.b, mem)
t1.Mul(y.a)
t2.Mul(y.b)
t3.copy(y.b)
t3.Add(y.a, mem)
t4.Add(F.a, mem)
t3.Add(y.a)
t4.Add(F.a)
t3.norm()
t4.norm()
t4.Mul(t3, mem)
t4.Mul(t3)
t3.copy(t1)
t3.Neg(mem)
t4.Add(t3, mem)
t3.Neg()
t4.Add(t3)
t4.norm()
t3.copy(t2)
t3.Neg(mem)
t3.Neg()
F.b.copy(t4)
F.b.Add(t3, mem)
F.b.Add(t3)
t2.times_i(mem)
t2.times_i()
F.a.copy(t2)
F.a.Add(t1, mem)
F.a.Add(t1)
F.norm()
}
@ -345,77 +306,77 @@ func (F *FP16) toString() string {
}
/* this=1/this */
func (F *FP16) Invert(mem *arena.Arena) {
t1 := NewFP8copy(F.a, mem)
t2 := NewFP8copy(F.b, mem)
func (F *FP16) Invert() {
t1 := NewFP8copy(F.a)
t2 := NewFP8copy(F.b)
t1.Sqr(mem)
t2.Sqr(mem)
t2.times_i(mem)
t1.Sqr()
t2.Sqr()
t2.times_i()
t2.norm()
t1.Sub(t2, mem)
t1.Sub(t2)
t1.norm()
t1.Invert(nil, mem)
t1.Invert(nil)
F.a.Mul(t1, mem)
t1.Neg(mem)
F.a.Mul(t1)
t1.Neg()
t1.norm()
F.b.Mul(t1, mem)
F.b.Mul(t1)
}
/* this*=i where i = sqrt(sqrt(-1+sqrt(-1))) */
func (F *FP16) times_i(mem *arena.Arena) {
s := NewFP8copy(F.b, mem)
t := NewFP8copy(F.a, mem)
s.times_i(mem)
func (F *FP16) times_i() {
s := NewFP8copy(F.b)
t := NewFP8copy(F.a)
s.times_i()
F.a.copy(s)
F.b.copy(t)
F.norm()
}
func (F *FP16) times_i2(mem *arena.Arena) {
F.a.times_i(mem)
F.b.times_i(mem)
func (F *FP16) times_i2() {
F.a.times_i()
F.b.times_i()
}
func (F *FP16) times_i4(mem *arena.Arena) {
F.a.times_i2(mem)
F.b.times_i2(mem)
func (F *FP16) times_i4() {
F.a.times_i2()
F.b.times_i2()
}
/* this=this^p using Frobenius */
func (F *FP16) frob(f *FP2, mem *arena.Arena) {
ff := NewFP2copy(f, mem)
ff.Sqr(mem)
func (F *FP16) frob(f *FP2) {
ff := NewFP2copy(f)
ff.Sqr()
ff.norm()
F.a.frob(ff, mem)
F.b.frob(ff, mem)
F.b.qmul(f, mem)
F.b.times_i(mem)
F.a.frob(ff)
F.b.frob(ff)
F.b.qmul(f)
F.b.times_i()
}
/* this=this^e */
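// Right-to-left binary exponentiation: w is squared each round and multiplied into r whenever the current bit of e is set.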
func (F *FP16) pow(e *BIG, mem *arena.Arena) *FP16 {
w := NewFP16copy(F, mem)
func (F *FP16) pow(e *BIG) *FP16 {
w := NewFP16copy(F)
w.norm()
z := NewBIGcopy(e, mem)
r := NewFP16int(1, mem)
z := NewBIGcopy(e)
r := NewFP16int(1)
z.norm()
for true {
bt := z.parity()
z.fshr(1)
if bt == 1 {
r.Mul(w, mem)
r.Mul(w)
}
if z.IsZero() {
break
}
w.Sqr(mem)
w.Sqr()
}
r.reduce(mem)
r.reduce()
return r
}


@ -23,11 +23,7 @@
package bls48581
import (
"arena"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
)
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
//import "fmt"
@ -36,128 +32,72 @@ type FP2 struct {
b *FP
}
func NewFP2(mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFP(mem)
F.b = NewFP(mem)
return F
} else {
F := new(FP2)
F.a = NewFP(nil)
F.b = NewFP(nil)
return F
}
func NewFP2() *FP2 {
F := new(FP2)
F.a = NewFP()
F.b = NewFP()
return F
}
/* Constructors */
func NewFP2int(a int, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPint(a, mem)
F.b = NewFP(mem)
return F
} else {
F := new(FP2)
F.a = NewFPint(a, nil)
F.b = NewFP(nil)
return F
}
func NewFP2int(a int) *FP2 {
F := new(FP2)
F.a = NewFPint(a)
F.b = NewFP()
return F
}
func NewFP2ints(a int, b int, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPint(a, mem)
F.b = NewFPint(b, mem)
return F
} else {
F := new(FP2)
F.a = NewFPint(a, nil)
F.b = NewFPint(b, nil)
return F
}
func NewFP2ints(a int, b int) *FP2 {
F := new(FP2)
F.a = NewFPint(a)
F.b = NewFPint(b)
return F
}
func NewFP2copy(x *FP2, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPcopy(x.a, mem)
F.b = NewFPcopy(x.b, mem)
return F
} else {
F := new(FP2)
F.a = NewFPcopy(x.a, nil)
F.b = NewFPcopy(x.b, nil)
return F
}
func NewFP2copy(x *FP2) *FP2 {
F := new(FP2)
F.a = NewFPcopy(x.a)
F.b = NewFPcopy(x.b)
return F
}
func NewFP2fps(c *FP, d *FP, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPcopy(c, mem)
F.b = NewFPcopy(d, mem)
return F
} else {
F := new(FP2)
F.a = NewFPcopy(c, nil)
F.b = NewFPcopy(d, nil)
return F
}
func NewFP2fps(c *FP, d *FP) *FP2 {
F := new(FP2)
F.a = NewFPcopy(c)
F.b = NewFPcopy(d)
return F
}
func NewFP2bigs(c *BIG, d *BIG, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPbig(c, mem)
F.b = NewFPbig(d, mem)
return F
} else {
F := new(FP2)
F.a = NewFPbig(c, nil)
F.b = NewFPbig(d, nil)
return F
}
func NewFP2bigs(c *BIG, d *BIG) *FP2 {
F := new(FP2)
F.a = NewFPbig(c)
F.b = NewFPbig(d)
return F
}
func NewFP2fp(c *FP, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPcopy(c, mem)
F.b = NewFP(mem)
return F
} else {
F := new(FP2)
F.a = NewFPcopy(c, nil)
F.b = NewFP(nil)
return F
}
func NewFP2fp(c *FP) *FP2 {
F := new(FP2)
F.a = NewFPcopy(c)
F.b = NewFP()
return F
}
func NewFP2big(c *BIG, mem *arena.Arena) *FP2 {
if mem != nil {
F := arena.New[FP2](mem)
F.a = NewFPbig(c, mem)
F.b = NewFP(mem)
return F
} else {
F := new(FP2)
F.a = NewFPbig(c, nil)
F.b = NewFP(nil)
return F
}
func NewFP2big(c *BIG) *FP2 {
F := new(FP2)
F.a = NewFPbig(c)
F.b = NewFP()
return F
}
func NewFP2rand(rng *ext.RAND) *FP2 {
F := NewFP2fps(NewFPrand(rng), NewFPrand(rng), nil)
F := NewFP2fps(NewFPrand(rng), NewFPrand(rng))
return F
}
/* reduce components mod Modulus */
func (F *FP2) reduce(mem *arena.Arena) {
F.a.reduce(mem)
F.b.reduce(mem)
func (F *FP2) reduce() {
F.a.reduce()
F.b.reduce()
}
/* normalise components of w */
@ -167,12 +107,12 @@ func (F *FP2) norm() {
}
/* test this=0 ? */
func (F *FP2) IsZero(mem *arena.Arena) bool {
return (F.a.IsZero(mem) && F.b.IsZero(mem))
func (F *FP2) IsZero() bool {
return (F.a.IsZero() && F.b.IsZero())
}
func (F *FP2) islarger() int {
if F.IsZero(nil) {
if F.IsZero() {
return 0
}
cmp := F.b.islarger()
@ -206,7 +146,7 @@ func FP2_fromBytes(bf []byte) *FP2 {
t[i] = bf[i+MB]
}
ta := FP_fromBytes(t[:])
return NewFP2fps(ta, tb, nil)
return NewFP2fps(ta, tb)
}
func (F *FP2) cmove(g *FP2, d int) {
@ -216,10 +156,8 @@ func (F *FP2) cmove(g *FP2, d int) {
/* test this=1 ? */
func (F *FP2) isunity() bool {
mem := arena.NewArena()
defer mem.Free()
one := NewFPint(1, mem)
return (F.a.Equals(one) && F.b.IsZero(mem))
one := NewFPint(1)
return (F.a.Equals(one) && F.b.IsZero())
}
/* test this=x */
@ -228,13 +166,13 @@ func (F *FP2) Equals(x *FP2) bool {
}
/* extract a */
func (F *FP2) GetA(mem *arena.Arena) *BIG {
return F.a.Redc(mem)
func (F *FP2) GetA() *BIG {
return F.a.Redc()
}
/* extract b */
func (F *FP2) GetB(mem *arena.Arena) *BIG {
return F.b.Redc(mem)
func (F *FP2) GetB() *BIG {
return F.b.Redc()
}
/* copy this=x */
@ -256,12 +194,12 @@ func (F *FP2) one() {
}
/* Return sign */
func (F *FP2) sign(mem *arena.Arena) int {
p1 := F.a.sign(mem)
p2 := F.b.sign(mem)
func (F *FP2) sign() int {
p1 := F.a.sign()
p2 := F.b.sign()
var u int
if BIG_ENDIAN_SIGN {
if F.b.IsZero(mem) {
if F.b.IsZero() {
u = 1
} else {
u = 0
@ -269,7 +207,7 @@ func (F *FP2) sign(mem *arena.Arena) int {
p2 ^= (p1 ^ p2) & u
return p2
} else {
if F.a.IsZero(mem) {
if F.a.IsZero() {
u = 1
} else {
u = 0
@ -280,106 +218,106 @@ func (F *FP2) sign(mem *arena.Arena) int {
}
/* negate this mod Modulus */
func (F *FP2) Neg(mem *arena.Arena) {
m := NewFPcopy(F.a, mem)
t := NewFP(mem)
func (F *FP2) Neg() {
m := NewFPcopy(F.a)
t := NewFP()
m.Add(F.b, mem)
m.Neg(mem)
m.Add(F.b)
m.Neg()
t.copy(m)
t.Add(F.b, mem)
t.Add(F.b)
F.b.copy(m)
F.b.Add(F.a, mem)
F.b.Add(F.a)
F.a.copy(t)
}
/* set to a-ib */
func (F *FP2) conj(mem *arena.Arena) {
F.b.Neg(mem)
func (F *FP2) conj() {
F.b.Neg()
F.b.norm()
}
/* this+=a */
func (F *FP2) Add(x *FP2, mem *arena.Arena) {
F.a.Add(x.a, mem)
F.b.Add(x.b, mem)
func (F *FP2) Add(x *FP2) {
F.a.Add(x.a)
F.b.Add(x.b)
}
/* this-=a */
func (F *FP2) Sub(x *FP2, mem *arena.Arena) {
m := NewFP2copy(x, mem)
m.Neg(mem)
F.Add(m, mem)
func (F *FP2) Sub(x *FP2) {
m := NewFP2copy(x)
m.Neg()
F.Add(m)
}
/* this-=a */
func (F *FP2) rsub(x *FP2, mem *arena.Arena) {
F.Neg(mem)
F.Add(x, mem)
func (F *FP2) rsub(x *FP2) {
F.Neg()
F.Add(x)
}
/* this*=s, where s is an FP */
func (F *FP2) pmul(s *FP, mem *arena.Arena) {
F.a.Mul(s, mem)
F.b.Mul(s, mem)
func (F *FP2) pmul(s *FP) {
F.a.Mul(s)
F.b.Mul(s)
}
/* this*=i, where i is an int */
func (F *FP2) imul(c int, mem *arena.Arena) {
F.a.imul(c, mem)
F.b.imul(c, mem)
func (F *FP2) imul(c int) {
F.a.imul(c)
F.b.imul(c)
}
/* this*=this */
func (F *FP2) Sqr(mem *arena.Arena) {
w1 := NewFPcopy(F.a, mem)
w3 := NewFPcopy(F.a, mem)
mb := NewFPcopy(F.b, mem)
w1.Add(F.b, mem)
func (F *FP2) Sqr() {
w1 := NewFPcopy(F.a)
w3 := NewFPcopy(F.a)
mb := NewFPcopy(F.b)
w1.Add(F.b)
w3.Add(F.a, mem)
w3.Add(F.a)
w3.norm()
F.b.Mul(w3, mem)
F.b.Mul(w3)
mb.Neg(mem)
F.a.Add(mb, mem)
mb.Neg()
F.a.Add(mb)
w1.norm()
F.a.norm()
F.a.Mul(w1, mem)
F.a.Mul(w1)
}
/* this*=y */
/* Now using Lazy reduction */
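// Karatsuba-style: three double-length products A = a1*a2, B = b1*b2 and E = (a1+b1)*(a2+b2), with only two reductions (mod) at the end.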
func (F *FP2) Mul(y *FP2, mem *arena.Arena) {
func (F *FP2) Mul(y *FP2) {
if int64(F.a.XES+F.b.XES)*int64(y.a.XES+y.b.XES) > int64(FEXCESS) {
if F.a.XES > 1 {
F.a.reduce(mem)
F.a.reduce()
}
if F.b.XES > 1 {
F.b.reduce(mem)
F.b.reduce()
}
}
pR := NewDBIG(mem)
C := NewBIGcopy(F.a.x, mem)
D := NewBIGcopy(y.a.x, mem)
p := NewBIGints(Modulus, mem)
pR := NewDBIG()
C := NewBIGcopy(F.a.x)
D := NewBIGcopy(y.a.x)
p := NewBIGints(Modulus)
pR.ucopy(p)
A := mul(F.a.x, y.a.x, mem)
B := mul(F.b.x, y.b.x, mem)
A := mul(F.a.x, y.a.x)
B := mul(F.b.x, y.b.x)
C.Add(F.b.x)
C.norm()
D.Add(y.b.x)
D.norm()
E := mul(C, D, mem)
FF := NewDBIGcopy(A, mem)
E := mul(C, D)
FF := NewDBIGcopy(A)
FF.Add(B)
B.rsub(pR)
@ -388,9 +326,9 @@ func (F *FP2) Mul(y *FP2, mem *arena.Arena) {
E.Sub(FF)
E.norm()
F.a.x.copy(mod(A, mem))
F.a.x.copy(mod(A))
F.a.XES = 3
F.b.x.copy(mod(E, mem))
F.b.x.copy(mod(E))
F.b.XES = 2
}
@ -414,58 +352,56 @@ func (F *FP2) Mul(y *FP2, mem *arena.Arena) {
}
*/
func (F *FP2) qr(h *FP) int {
mem := arena.NewArena()
defer mem.Free()
c := NewFP2copy(F, mem)
c.conj(mem)
c.Mul(F, mem)
c := NewFP2copy(F)
c.conj()
c.Mul(F)
return c.a.qr(h)
}
/* sqrt(a+ib) = sqrt((a+sqrt(a*a-n*b*b))/2)+ib/(2*sqrt((a+sqrt(a*a-n*b*b))/2)) */
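/* i.e. with t = sqrt((a+sqrt(a*a-n*b*b))/2) the result is t + i*b/(2*t) */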
func (F *FP2) Sqrt(h *FP, mem *arena.Arena) {
if F.IsZero(mem) {
func (F *FP2) Sqrt(h *FP) {
if F.IsZero() {
return
}
w1 := NewFPcopy(F.b, mem)
w2 := NewFPcopy(F.a, mem)
w3 := NewFP(mem)
w4 := NewFP(mem)
hint := NewFP(mem)
w1.Sqr(mem)
w2.Sqr(mem)
w1.Add(w2, mem)
w1 := NewFPcopy(F.b)
w2 := NewFPcopy(F.a)
w3 := NewFP()
w4 := NewFP()
hint := NewFP()
w1.Sqr()
w2.Sqr()
w1.Add(w2)
w1.norm()
w1 = w1.Sqrt(h, mem)
w1 = w1.Sqrt(h)
w2.copy(F.a)
w3.copy(F.a)
w2.Add(w1, mem)
w2.Add(w1)
w2.norm()
w2.div2(mem)
w2.div2()
w1.copy(F.b)
w1.div2(mem)
w1.div2()
qr := w2.qr(hint)
// tweak hint
w3.copy(hint)
w3.Neg(mem)
w3.Neg()
w3.norm()
w4.copy(w2)
w4.Neg(mem)
w4.Neg()
w4.norm()
w2.cmove(w4, 1-qr)
hint.cmove(w3, 1-qr)
F.a.copy(w2.Sqrt(hint, mem))
F.a.copy(w2.Sqrt(hint))
w3.copy(w2)
w3.Invert(hint, mem)
w3.Mul(F.a, mem)
w3.Invert(hint)
w3.Mul(F.a)
F.b.copy(w3)
F.b.Mul(w1, mem)
F.b.Mul(w1)
w4.copy(F.a)
F.a.cmove(F.b, 1-qr)
@ -489,9 +425,9 @@ func (F *FP2) Sqrt(h *FP, mem *arena.Arena) {
F.b.cmove(w4,1-qr)
*/
sgn := F.sign(mem)
nr := NewFP2copy(F, mem)
nr.Neg(mem)
sgn := F.sign()
nr := NewFP2copy(F)
nr.Neg()
nr.norm()
F.cmove(nr, sgn)
}
@ -507,63 +443,63 @@ func (F *FP2) toString() string {
}
/* this=1/this */
func (F *FP2) Invert(h *FP, mem *arena.Arena) {
func (F *FP2) Invert(h *FP) {
F.norm()
w1 := NewFPcopy(F.a, mem)
w2 := NewFPcopy(F.b, mem)
w1 := NewFPcopy(F.a)
w2 := NewFPcopy(F.b)
w1.Sqr(mem)
w2.Sqr(mem)
w1.Add(w2, mem)
w1.Invert(h, mem)
F.a.Mul(w1, mem)
w1.Neg(mem)
w1.Sqr()
w2.Sqr()
w1.Add(w2)
w1.Invert(h)
F.a.Mul(w1)
w1.Neg()
w1.norm()
F.b.Mul(w1, mem)
F.b.Mul(w1)
}
/* this/=2 */
func (F *FP2) div2(mem *arena.Arena) {
F.a.div2(mem)
F.b.div2(mem)
func (F *FP2) div2() {
F.a.div2()
F.b.div2()
}
/* this*=sqrt(-1) */
func (F *FP2) times_i(mem *arena.Arena) {
z := NewFPcopy(F.a, mem)
func (F *FP2) times_i() {
z := NewFPcopy(F.a)
F.a.copy(F.b)
F.a.Neg(mem)
F.a.Neg()
F.b.copy(z)
}
/* w*=(1+sqrt(-1)) */
/* where X^2-(2^i+sqrt(-1)) is irreducible for FP4 */
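/* i.e. multiply w by (2^QNRI + sqrt(-1)): times_i supplies w*sqrt(-1) and the doubling loop supplies w*2^QNRI */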
func (F *FP2) Mul_ip(mem *arena.Arena) {
t := NewFP2copy(F, mem)
func (F *FP2) Mul_ip() {
t := NewFP2copy(F)
i := QNRI
F.times_i(mem)
F.times_i()
for i > 0 {
t.Add(t, mem)
t.Add(t)
t.norm()
i--
}
F.Add(t, mem)
F.Add(t)
if TOWER == POSITOWER {
F.norm()
F.Neg(mem)
F.Neg()
}
}
/* w/=(2^i+sqrt(-1)) */
func (F *FP2) div_ip(mem *arena.Arena) {
z := NewFP2ints(1<<uint(QNRI), 1, nil)
z.Invert(nil, mem)
func (F *FP2) div_ip() {
z := NewFP2ints(1<<uint(QNRI), 1)
z.Invert(nil)
F.norm()
F.Mul(z, mem)
F.Mul(z)
if TOWER == POSITOWER {
F.Neg(mem)
F.Neg()
F.norm()
}
}


@ -23,11 +23,7 @@
package bls48581
import (
"arena"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
)
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
//import "fmt"
@ -36,115 +32,66 @@ type FP4 struct {
b *FP2
}
func NewFP4(mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2(mem)
F.b = NewFP2(mem)
return F
} else {
F := new(FP4)
F.a = NewFP2(nil)
F.b = NewFP2(nil)
return F
}
func NewFP4() *FP4 {
F := new(FP4)
F.a = NewFP2()
F.b = NewFP2()
return F
}
/* Constructors */
func NewFP4int(a int, mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2int(a, mem)
F.b = NewFP2(mem)
return F
} else {
F := new(FP4)
F.a = NewFP2int(a, nil)
F.b = NewFP2(nil)
return F
}
func NewFP4int(a int) *FP4 {
F := new(FP4)
F.a = NewFP2int(a)
F.b = NewFP2()
return F
}
/* Constructors */
func NewFP4ints(a int, b int, mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2int(a, mem)
F.b = NewFP2int(b, mem)
return F
} else {
F := new(FP4)
F.a = NewFP2int(a, nil)
F.b = NewFP2int(b, nil)
return F
}
func NewFP4ints(a int, b int) *FP4 {
F := new(FP4)
F.a = NewFP2int(a)
F.b = NewFP2int(b)
return F
}
func NewFP4copy(x *FP4, mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2copy(x.a, mem)
F.b = NewFP2copy(x.b, mem)
return F
} else {
F := new(FP4)
F.a = NewFP2copy(x.a, nil)
F.b = NewFP2copy(x.b, nil)
return F
}
func NewFP4copy(x *FP4) *FP4 {
F := new(FP4)
F.a = NewFP2copy(x.a)
F.b = NewFP2copy(x.b)
return F
}
func NewFP4fp2s(c *FP2, d *FP2, mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2copy(c, mem)
F.b = NewFP2copy(d, mem)
return F
} else {
F := new(FP4)
F.a = NewFP2copy(c, nil)
F.b = NewFP2copy(d, nil)
return F
}
func NewFP4fp2s(c *FP2, d *FP2) *FP4 {
F := new(FP4)
F.a = NewFP2copy(c)
F.b = NewFP2copy(d)
return F
}
func NewFP4fp2(c *FP2, mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2copy(c, mem)
F.b = NewFP2(mem)
return F
} else {
F := new(FP4)
F.a = NewFP2copy(c, nil)
F.b = NewFP2(nil)
return F
}
func NewFP4fp2(c *FP2) *FP4 {
F := new(FP4)
F.a = NewFP2copy(c)
F.b = NewFP2()
return F
}
func NewFP4fp(c *FP, mem *arena.Arena) *FP4 {
if mem != nil {
F := arena.New[FP4](mem)
F.a = NewFP2fp(c, mem)
F.b = NewFP2(mem)
return F
} else {
F := new(FP4)
F.a = NewFP2fp(c, nil)
F.b = NewFP2(nil)
return F
}
func NewFP4fp(c *FP) *FP4 {
F := new(FP4)
F.a = NewFP2fp(c)
F.b = NewFP2()
return F
}
func NewFP4rand(rng *ext.RAND) *FP4 {
F := NewFP4fp2s(NewFP2rand(rng), NewFP2rand(rng), nil)
F := NewFP4fp2s(NewFP2rand(rng), NewFP2rand(rng))
return F
}
/* reduce all components of this mod Modulus */
func (F *FP4) reduce(mem *arena.Arena) {
F.a.reduce(mem)
F.b.reduce(mem)
func (F *FP4) reduce() {
F.a.reduce()
F.b.reduce()
}
/* normalise all components of this mod Modulus */
@ -154,12 +101,12 @@ func (F *FP4) norm() {
}
/* test this==0 ? */
func (F *FP4) IsZero(mem *arena.Arena) bool {
return F.a.IsZero(mem) && F.b.IsZero(mem)
func (F *FP4) IsZero() bool {
return F.a.IsZero() && F.b.IsZero()
}
func (F *FP4) islarger() int {
if F.IsZero(nil) {
if F.IsZero() {
return 0
}
cmp := F.b.islarger()
@ -193,7 +140,7 @@ func FP4_fromBytes(bf []byte) *FP4 {
t[i] = bf[i+MB]
}
ta := FP2_fromBytes(t[:])
return NewFP4fp2s(ta, tb, nil)
return NewFP4fp2s(ta, tb)
}
/* Conditional move */
@ -204,17 +151,13 @@ func (F *FP4) cmove(g *FP4, d int) {
/* test this==1 ? */
func (F *FP4) isunity() bool {
mem := arena.NewArena()
defer mem.Free()
one := NewFP2int(1, mem)
return F.a.Equals(one) && F.b.IsZero(mem)
one := NewFP2int(1)
return F.a.Equals(one) && F.b.IsZero()
}
/* test whether w is real, i.e. in a+ib check that b is zero */
func (F *FP4) isreal() bool {
mem := arena.NewArena()
defer mem.Free()
return F.b.IsZero(mem)
return F.b.IsZero()
}
/* extract real part a */
@ -255,12 +198,12 @@ func (F *FP4) one() {
}
/* Return sign */
func (F *FP4) sign(mem *arena.Arena) int {
p1 := F.a.sign(mem)
p2 := F.b.sign(mem)
func (F *FP4) sign() int {
p1 := F.a.sign()
p2 := F.b.sign()
var u int
if BIG_ENDIAN_SIGN {
if F.b.IsZero(mem) {
if F.b.IsZero() {
u = 1
} else {
u = 0
@ -268,7 +211,7 @@ func (F *FP4) sign(mem *arena.Arena) int {
p2 ^= (p1 ^ p2) & u
return p2
} else {
if F.a.IsZero(mem) {
if F.a.IsZero() {
u = 1
} else {
u = 0
@ -279,132 +222,132 @@ func (F *FP4) sign(mem *arena.Arena) int {
}
/* set this=-this */
func (F *FP4) Neg(mem *arena.Arena) {
func (F *FP4) Neg() {
F.norm()
m := NewFP2copy(F.a, mem)
t := NewFP2(mem)
m.Add(F.b, mem)
m.Neg(mem)
m := NewFP2copy(F.a)
t := NewFP2()
m.Add(F.b)
m.Neg()
t.copy(m)
t.Add(F.b, mem)
t.Add(F.b)
F.b.copy(m)
F.b.Add(F.a, mem)
F.b.Add(F.a)
F.a.copy(t)
F.norm()
}
/* this=conjugate(this) */
func (F *FP4) conj(mem *arena.Arena) {
F.b.Neg(mem)
func (F *FP4) conj() {
F.b.Neg()
F.norm()
}
/* this=-conjugate(this) */
func (F *FP4) nconj(mem *arena.Arena) {
F.a.Neg(mem)
func (F *FP4) nconj() {
F.a.Neg()
F.norm()
}
/* this+=x */
func (F *FP4) Add(x *FP4, mem *arena.Arena) {
F.a.Add(x.a, mem)
F.b.Add(x.b, mem)
func (F *FP4) Add(x *FP4) {
F.a.Add(x.a)
F.b.Add(x.b)
}
/* this-=x */
func (F *FP4) Sub(x *FP4, mem *arena.Arena) {
m := NewFP4copy(x, mem)
m.Neg(mem)
F.Add(m, mem)
func (F *FP4) Sub(x *FP4) {
m := NewFP4copy(x)
m.Neg()
F.Add(m)
}
/* this-=x */
func (F *FP4) rsub(x *FP4, mem *arena.Arena) {
F.Neg(mem)
F.Add(x, mem)
func (F *FP4) rsub(x *FP4) {
F.Neg()
F.Add(x)
}
/* this*=s where s is FP2 */
func (F *FP4) pmul(s *FP2, mem *arena.Arena) {
F.a.Mul(s, mem)
F.b.Mul(s, mem)
func (F *FP4) pmul(s *FP2) {
F.a.Mul(s)
F.b.Mul(s)
}
/* this*=s where s is FP2 */
func (F *FP4) qmul(s *FP, mem *arena.Arena) {
F.a.pmul(s, mem)
F.b.pmul(s, mem)
func (F *FP4) qmul(s *FP) {
F.a.pmul(s)
F.b.pmul(s)
}
/* this*=c where c is int */
func (F *FP4) imul(c int, mem *arena.Arena) {
F.a.imul(c, mem)
F.b.imul(c, mem)
func (F *FP4) imul(c int) {
F.a.imul(c)
F.b.imul(c)
}
/* this*=this */
func (F *FP4) Sqr(mem *arena.Arena) {
t1 := NewFP2copy(F.a, mem)
t2 := NewFP2copy(F.b, mem)
t3 := NewFP2copy(F.a, mem)
func (F *FP4) Sqr() {
t1 := NewFP2copy(F.a)
t2 := NewFP2copy(F.b)
t3 := NewFP2copy(F.a)
t3.Mul(F.b, mem)
t1.Add(F.b, mem)
t2.Mul_ip(mem)
t3.Mul(F.b)
t1.Add(F.b)
t2.Mul_ip()
t2.Add(F.a, mem)
t2.Add(F.a)
t1.norm()
t2.norm()
F.a.copy(t1)
F.a.Mul(t2, mem)
F.a.Mul(t2)
t2.copy(t3)
t2.Mul_ip(mem)
t2.Add(t3, mem)
t2.Mul_ip()
t2.Add(t3)
t2.norm()
t2.Neg(mem)
F.a.Add(t2, mem)
t2.Neg()
F.a.Add(t2)
F.b.copy(t3)
F.b.Add(t3, mem)
F.b.Add(t3)
F.norm()
}
/* this*=y */
func (F *FP4) Mul(y *FP4, mem *arena.Arena) {
t1 := NewFP2copy(F.a, mem)
t2 := NewFP2copy(F.b, mem)
t3 := NewFP2(mem)
t4 := NewFP2copy(F.b, mem)
func (F *FP4) Mul(y *FP4) {
t1 := NewFP2copy(F.a)
t2 := NewFP2copy(F.b)
t3 := NewFP2()
t4 := NewFP2copy(F.b)
t1.Mul(y.a, mem)
t2.Mul(y.b, mem)
t1.Mul(y.a)
t2.Mul(y.b)
t3.copy(y.b)
t3.Add(y.a, mem)
t4.Add(F.a, mem)
t3.Add(y.a)
t4.Add(F.a)
t3.norm()
t4.norm()
t4.Mul(t3, mem)
t4.Mul(t3)
t3.copy(t1)
t3.Neg(mem)
t4.Add(t3, mem)
t3.Neg()
t4.Add(t3)
t4.norm()
t3.copy(t2)
t3.Neg(mem)
t3.Neg()
F.b.copy(t4)
F.b.Add(t3, mem)
F.b.Add(t3)
t2.Mul_ip(mem)
t2.Mul_ip()
F.a.copy(t2)
F.a.Add(t1, mem)
F.a.Add(t1)
F.norm()
}
@ -415,41 +358,41 @@ func (F *FP4) toString() string {
}
/* this=1/this */
func (F *FP4) Invert(h *FP, mem *arena.Arena) {
t1 := NewFP2copy(F.a, mem)
t2 := NewFP2copy(F.b, mem)
func (F *FP4) Invert(h *FP) {
t1 := NewFP2copy(F.a)
t2 := NewFP2copy(F.b)
t1.Sqr(mem)
t2.Sqr(mem)
t2.Mul_ip(mem)
t1.Sqr()
t2.Sqr()
t2.Mul_ip()
t2.norm()
t1.Sub(t2, mem)
t1.Sub(t2)
t1.Invert(h, mem)
F.a.Mul(t1, mem)
t1.Neg(mem)
t1.Invert(h)
F.a.Mul(t1)
t1.Neg()
t1.norm()
F.b.Mul(t1, mem)
F.b.Mul(t1)
}
/* this*=i where i = sqrt(2^i+sqrt(-1)) */
func (F *FP4) times_i(mem *arena.Arena) {
t := NewFP2copy(F.b, mem)
func (F *FP4) times_i() {
t := NewFP2copy(F.b)
F.b.copy(F.a)
t.Mul_ip(mem)
t.Mul_ip()
F.a.copy(t)
F.norm()
if TOWER == POSITOWER {
F.Neg(mem)
F.Neg()
F.norm()
}
}
/* this=this^p using Frobenius */
func (F *FP4) frob(f *FP2, mem *arena.Arena) {
F.a.conj(mem)
F.b.conj(mem)
F.b.Mul(f, mem)
func (F *FP4) frob(f *FP2) {
F.a.conj()
F.b.conj()
F.b.Mul(f)
}
/* this=this^e
@ -475,48 +418,48 @@ func (F *FP4) pow(e *BIG) *FP4 {
}
*/
/* XTR xtr_a function */
func (F *FP4) xtr_A(w *FP4, y *FP4, z *FP4, mem *arena.Arena) {
r := NewFP4copy(w, mem)
t := NewFP4copy(w, mem)
r.Sub(y, mem)
func (F *FP4) xtr_A(w *FP4, y *FP4, z *FP4) {
r := NewFP4copy(w)
t := NewFP4copy(w)
r.Sub(y)
r.norm()
r.pmul(F.a, mem)
t.Add(y, mem)
r.pmul(F.a)
t.Add(y)
t.norm()
t.pmul(F.b, mem)
t.times_i(mem)
t.pmul(F.b)
t.times_i()
F.copy(r)
F.Add(t, mem)
F.Add(z, mem)
F.Add(t)
F.Add(z)
F.norm()
}
/* XTR xtr_d function */
func (F *FP4) xtr_D(mem *arena.Arena) {
w := NewFP4copy(F, mem)
F.Sqr(mem)
w.conj(mem)
w.Add(w, mem)
func (F *FP4) xtr_D() {
w := NewFP4copy(F)
F.Sqr()
w.conj()
w.Add(w)
w.norm()
F.Sub(w, mem)
F.reduce(mem)
F.Sub(w)
F.reduce()
}
/* r=x^n using XTR method on traces of FP12s */
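// a (initialised to 3), b and c correspond to the traces c_0, c_1 and c_2; xtr_A and xtr_D are the XTR trace addition and doubling rules applied per bit of n.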
func (F *FP4) xtr_pow(n *BIG, mem *arena.Arena) *FP4 {
a := NewFP4int(3, mem)
b := NewFP4copy(F, mem)
c := NewFP4copy(b, mem)
c.xtr_D(mem)
t := NewFP4(mem)
r := NewFP4(mem)
sf := NewFP4copy(F, mem)
func (F *FP4) xtr_pow(n *BIG) *FP4 {
a := NewFP4int(3)
b := NewFP4copy(F)
c := NewFP4copy(b)
c.xtr_D()
t := NewFP4()
r := NewFP4()
sf := NewFP4copy(F)
sf.norm()
par := n.parity()
v := NewBIGcopy(n, mem)
v := NewBIGcopy(n)
v.norm()
v.fshr(1)
if par == 0 {
@ -528,20 +471,20 @@ func (F *FP4) xtr_pow(n *BIG, mem *arena.Arena) *FP4 {
for i := nb - 1; i >= 0; i-- {
if v.bit(i) != 1 {
t.copy(b)
sf.conj(mem)
c.conj(mem)
b.xtr_A(a, sf, c, mem)
sf.conj(mem)
sf.conj()
c.conj()
b.xtr_A(a, sf, c)
sf.conj()
c.copy(t)
c.xtr_D(mem)
a.xtr_D(mem)
c.xtr_D()
a.xtr_D()
} else {
t.copy(a)
t.conj(mem)
t.conj()
a.copy(b)
a.xtr_D(mem)
b.xtr_A(c, sf, t, mem)
c.xtr_D(mem)
a.xtr_D()
b.xtr_A(c, sf, t)
c.xtr_D()
}
}
if par == 0 {
@ -549,25 +492,25 @@ func (F *FP4) xtr_pow(n *BIG, mem *arena.Arena) *FP4 {
} else {
r.copy(b)
}
r.reduce(mem)
r.reduce()
return r
}
/* r=ck^a.cl^n using XTR double exponentiation method on traces of FP12s. See Stam thesis. */
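// Stam-style double exponentiation on traces: cu, cv, cumv and cum2v appear to track c_u, c_v, c_(u-v) and c_(u-2v) as d and e are reduced, with f2 counting doublings deferred to the end.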
func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *arena.Arena) *FP4 {
func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG) *FP4 {
e := NewBIGcopy(a, mem)
d := NewBIGcopy(b, mem)
w := NewBIGint(0, mem)
e := NewBIGcopy(a)
d := NewBIGcopy(b)
w := NewBIGint(0)
e.norm()
d.norm()
cu := NewFP4copy(ck, mem) // can probably be passed in w/o copying
cv := NewFP4copy(F, mem)
cumv := NewFP4copy(ckml, mem)
cum2v := NewFP4copy(ckm2l, mem)
r := NewFP4(mem)
t := NewFP4(mem)
cu := NewFP4copy(ck) // can probably be passed in w/o copying
cv := NewFP4copy(F)
cumv := NewFP4copy(ckml)
cum2v := NewFP4copy(ckm2l)
r := NewFP4()
t := NewFP4()
f2 := 0
for d.parity() == 0 && e.parity() == 0 {
@ -588,9 +531,9 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *aren
e.norm()
t.copy(cv)
t.xtr_A(cu, cumv, cum2v, mem)
t.xtr_A(cu, cumv, cum2v)
cum2v.copy(cumv)
cum2v.conj(mem)
cum2v.conj()
cumv.copy(cv)
cv.copy(cu)
cu.copy(t)
@ -598,24 +541,24 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *aren
if d.parity() == 0 {
d.fshr(1)
r.copy(cum2v)
r.conj(mem)
r.conj()
t.copy(cumv)
t.xtr_A(cu, cv, r, mem)
t.xtr_A(cu, cv, r)
cum2v.copy(cumv)
cum2v.xtr_D(mem)
cum2v.xtr_D()
cumv.copy(t)
cu.xtr_D(mem)
cu.xtr_D()
} else {
if e.parity() == 1 {
d.Sub(e)
d.norm()
d.fshr(1)
t.copy(cv)
t.xtr_A(cu, cumv, cum2v, mem)
cu.xtr_D(mem)
t.xtr_A(cu, cumv, cum2v)
cu.xtr_D()
cum2v.copy(cv)
cum2v.xtr_D(mem)
cum2v.conj(mem)
cum2v.xtr_D()
cum2v.conj()
cv.copy(t)
} else {
w.copy(d)
@ -623,13 +566,13 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *aren
d.fshr(1)
e.copy(w)
t.copy(cumv)
t.xtr_D(mem)
t.xtr_D()
cumv.copy(cum2v)
cumv.conj(mem)
cumv.conj()
cum2v.copy(t)
cum2v.conj(mem)
cum2v.conj()
t.copy(cv)
t.xtr_D(mem)
t.xtr_D()
cv.copy(cu)
cu.copy(t)
}
@ -644,7 +587,7 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *aren
e.Sub(d)
e.norm()
t.copy(cv)
t.xtr_A(cu, cumv, cum2v, mem)
t.xtr_A(cu, cumv, cum2v)
cum2v.copy(cumv)
cumv.copy(cu)
cu.copy(t)
@ -655,13 +598,13 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *aren
d.fshr(1)
e.copy(w)
t.copy(cumv)
t.xtr_D(mem)
t.xtr_D()
cumv.copy(cum2v)
cumv.conj(mem)
cumv.conj()
cum2v.copy(t)
cum2v.conj(mem)
cum2v.conj()
t.copy(cv)
t.xtr_D(mem)
t.xtr_D()
cv.copy(cu)
cu.copy(t)
} else {
@ -673,52 +616,52 @@ func (F *FP4) xtr_pow2(ck *FP4, ckml *FP4, ckm2l *FP4, a *BIG, b *BIG, mem *aren
d.copy(w)
d.fshr(1)
t.copy(cv)
t.xtr_A(cu, cumv, cum2v, mem)
cumv.conj(mem)
t.xtr_A(cu, cumv, cum2v)
cumv.conj()
cum2v.copy(cu)
cum2v.xtr_D(mem)
cum2v.conj(mem)
cum2v.xtr_D()
cum2v.conj()
cu.copy(cv)
cu.xtr_D(mem)
cu.xtr_D()
cv.copy(t)
} else {
d.fshr(1)
r.copy(cum2v)
r.conj(mem)
r.conj()
t.copy(cumv)
t.xtr_A(cu, cv, r, mem)
t.xtr_A(cu, cv, r)
cum2v.copy(cumv)
cum2v.xtr_D(mem)
cum2v.xtr_D()
cumv.copy(t)
cu.xtr_D(mem)
cu.xtr_D()
}
}
}
}
}
r.copy(cv)
r.xtr_A(cu, cumv, cum2v, mem)
r.xtr_A(cu, cumv, cum2v)
for i := 0; i < f2; i++ {
r.xtr_D(mem)
r.xtr_D()
}
r = r.xtr_pow(d, mem)
r = r.xtr_pow(d)
return r
}
/* this/=2 */
func (F *FP4) div2(mem *arena.Arena) {
F.a.div2(mem)
F.b.div2(mem)
func (F *FP4) div2() {
F.a.div2()
F.b.div2()
}
func (F *FP4) div_i(mem *arena.Arena) {
u := NewFP2copy(F.a, mem)
v := NewFP2copy(F.b, mem)
u.div_ip(mem)
func (F *FP4) div_i() {
u := NewFP2copy(F.a)
v := NewFP2copy(F.b)
u.div_ip()
F.a.copy(v)
F.b.copy(u)
if TOWER == POSITOWER {
F.Neg(mem)
F.Neg()
F.norm()
}
}
@ -745,72 +688,70 @@ func (F *FP4) pow(b *BIG) {
/* */
// Test for Quadratic Residue
func (F *FP4) qr(h *FP) int {
mem := arena.NewArena()
defer mem.Free()
c := NewFP4copy(F, mem)
c.conj(mem)
c.Mul(F, mem)
c := NewFP4copy(F)
c.conj()
c.Mul(F)
return c.a.qr(h)
}
// sqrt(a+ib) = sqrt((a+sqrt(a*a-n*b*b))/2)+ib/(2*sqrt((a+sqrt(a*a-n*b*b))/2))
func (F *FP4) Sqrt(h *FP, mem *arena.Arena) {
if F.IsZero(mem) {
func (F *FP4) Sqrt(h *FP) {
if F.IsZero() {
return
}
a := NewFP2copy(F.a, mem)
b := NewFP2(mem)
s := NewFP2copy(F.b, mem)
t := NewFP2copy(F.a, mem)
hint := NewFP(mem)
a := NewFP2copy(F.a)
b := NewFP2()
s := NewFP2copy(F.b)
t := NewFP2copy(F.a)
hint := NewFP()
s.Sqr(mem)
a.Sqr(mem)
s.Mul_ip(mem)
s.Sqr()
a.Sqr()
s.Mul_ip()
s.norm()
a.Sub(s, mem)
a.Sub(s)
s.copy(a)
s.norm()
s.Sqrt(h, mem)
s.Sqrt(h)
a.copy(t)
b.copy(t)
a.Add(s, mem)
a.Add(s)
a.norm()
a.div2(mem)
a.div2()
b.copy(F.b)
b.div2(mem)
b.div2()
qr := a.qr(hint)
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
s.copy(a)
twk := NewFPbig(NewBIGints(TWK, mem), mem)
twk.Mul(hint, mem)
s.div_ip(mem)
twk := NewFPbig(NewBIGints(TWK))
twk.Mul(hint)
s.div_ip()
s.norm()
a.cmove(s, 1-qr)
hint.cmove(twk, 1-qr)
F.a.copy(a)
F.a.Sqrt(hint, mem)
F.a.Sqrt(hint)
s.copy(a)
s.Invert(hint, mem)
s.Mul(F.a, mem)
s.Invert(hint)
s.Mul(F.a)
F.b.copy(s)
F.b.Mul(b, mem)
F.b.Mul(b)
t.copy(F.a)
F.a.cmove(F.b, 1-qr)
F.b.cmove(t, 1-qr)
sgn := F.sign(mem)
nr := NewFP4copy(F, mem)
nr.Neg(mem)
sgn := F.sign()
nr := NewFP4copy(F)
nr.Neg()
nr.norm()
F.cmove(nr, sgn)
}

File diff suppressed because it is too large


@ -23,11 +23,7 @@
package bls48581
import (
"arena"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
)
import "source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581/ext"
//import "fmt"
@ -36,115 +32,66 @@ type FP8 struct {
b *FP4
}
func NewFP8(mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4(mem)
F.b = NewFP4(mem)
return F
} else {
F := new(FP8)
F.a = NewFP4(nil)
F.b = NewFP4(nil)
return F
}
func NewFP8() *FP8 {
F := new(FP8)
F.a = NewFP4()
F.b = NewFP4()
return F
}
/* Constructors */
func NewFP8int(a int, mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4int(a, mem)
F.b = NewFP4(mem)
return F
} else {
F := new(FP8)
F.a = NewFP4int(a, nil)
F.b = NewFP4(nil)
return F
}
func NewFP8int(a int) *FP8 {
F := new(FP8)
F.a = NewFP4int(a)
F.b = NewFP4()
return F
}
/* Constructors */
func NewFP8ints(a int, b int, mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4int(a, mem)
F.b = NewFP4int(b, mem)
return F
} else {
F := new(FP8)
F.a = NewFP4int(a, nil)
F.b = NewFP4int(b, nil)
return F
}
func NewFP8ints(a int, b int) *FP8 {
F := new(FP8)
F.a = NewFP4int(a)
F.b = NewFP4int(b)
return F
}
func NewFP8copy(x *FP8, mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4copy(x.a, mem)
F.b = NewFP4copy(x.b, mem)
return F
} else {
F := new(FP8)
F.a = NewFP4copy(x.a, nil)
F.b = NewFP4copy(x.b, nil)
return F
}
func NewFP8copy(x *FP8) *FP8 {
F := new(FP8)
F.a = NewFP4copy(x.a)
F.b = NewFP4copy(x.b)
return F
}
func NewFP8fp4s(c *FP4, d *FP4, mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4copy(c, mem)
F.b = NewFP4copy(d, mem)
return F
} else {
F := new(FP8)
F.a = NewFP4copy(c, nil)
F.b = NewFP4copy(d, nil)
return F
}
func NewFP8fp4s(c *FP4, d *FP4) *FP8 {
F := new(FP8)
F.a = NewFP4copy(c)
F.b = NewFP4copy(d)
return F
}
func NewFP8fp4(c *FP4, mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4copy(c, mem)
F.b = NewFP4(mem)
return F
} else {
F := new(FP8)
F.a = NewFP4copy(c, nil)
F.b = NewFP4(nil)
return F
}
func NewFP8fp4(c *FP4) *FP8 {
F := new(FP8)
F.a = NewFP4copy(c)
F.b = NewFP4()
return F
}
func NewFP8fp(c *FP, mem *arena.Arena) *FP8 {
if mem != nil {
F := arena.New[FP8](mem)
F.a = NewFP4fp(c, mem)
F.b = NewFP4(mem)
return F
} else {
F := new(FP8)
F.a = NewFP4fp(c, nil)
F.b = NewFP4(nil)
return F
}
func NewFP8fp(c *FP) *FP8 {
F := new(FP8)
F.a = NewFP4fp(c)
F.b = NewFP4()
return F
}
func NewFP8rand(rng *ext.RAND) *FP8 {
F := NewFP8fp4s(NewFP4rand(rng), NewFP4rand(rng), nil)
F := NewFP8fp4s(NewFP4rand(rng), NewFP4rand(rng))
return F
}
/* reduce all components of this mod Modulus */
func (F *FP8) reduce(mem *arena.Arena) {
F.a.reduce(mem)
F.b.reduce(mem)
func (F *FP8) reduce() {
F.a.reduce()
F.b.reduce()
}
/* normalise all components of this mod Modulus */
@ -154,12 +101,12 @@ func (F *FP8) norm() {
}
/* test this==0 ? */
func (F *FP8) IsZero(mem *arena.Arena) bool {
return F.a.IsZero(mem) && F.b.IsZero(mem)
func (F *FP8) IsZero() bool {
return F.a.IsZero() && F.b.IsZero()
}
func (F *FP8) islarger() int {
if F.IsZero(nil) {
if F.IsZero() {
return 0
}
cmp := F.b.islarger()
@ -193,7 +140,7 @@ func FP8_fromBytes(bf []byte) *FP8 {
t[i] = bf[i+MB]
}
ta := FP4_fromBytes(t[:])
return NewFP8fp4s(ta, tb, nil)
return NewFP8fp4s(ta, tb)
}
/* Conditional move */
@ -204,15 +151,13 @@ func (F *FP8) cmove(g *FP8, d int) {
/* test this==1 ? */
func (F *FP8) isunity() bool {
mem := arena.NewArena()
defer mem.Free()
one := NewFP4int(1, mem)
return F.a.Equals(one) && F.b.IsZero(mem)
one := NewFP4int(1)
return F.a.Equals(one) && F.b.IsZero()
}
/* test whether w is real, i.e. in a+ib check that b is zero */
func (F *FP8) isreal() bool {
return F.b.IsZero(nil)
return F.b.IsZero()
}
/* extract real part a */
@ -253,12 +198,12 @@ func (F *FP8) one() {
}
/* Return sign */
func (F *FP8) sign(mem *arena.Arena) int {
p1 := F.a.sign(mem)
p2 := F.b.sign(mem)
func (F *FP8) sign() int {
p1 := F.a.sign()
p2 := F.b.sign()
var u int
if BIG_ENDIAN_SIGN {
if F.b.IsZero(mem) {
if F.b.IsZero() {
u = 1
} else {
u = 0
@ -266,7 +211,7 @@ func (F *FP8) sign(mem *arena.Arena) int {
p2 ^= (p1 ^ p2) & u
return p2
} else {
if F.a.IsZero(mem) {
if F.a.IsZero() {
u = 1
} else {
u = 0
@ -277,137 +222,137 @@ func (F *FP8) sign(mem *arena.Arena) int {
}
/* set this=-this */
func (F *FP8) Neg(mem *arena.Arena) {
func (F *FP8) Neg() {
F.norm()
m := NewFP4copy(F.a, mem)
t := NewFP4(mem)
m.Add(F.b, mem)
m.Neg(mem)
m := NewFP4copy(F.a)
t := NewFP4()
m.Add(F.b)
m.Neg()
t.copy(m)
t.Add(F.b, mem)
t.Add(F.b)
F.b.copy(m)
F.b.Add(F.a, mem)
F.b.Add(F.a)
F.a.copy(t)
F.norm()
}
/* this=conjugate(this) */
func (F *FP8) conj(mem *arena.Arena) {
F.b.Neg(mem)
func (F *FP8) conj() {
F.b.Neg()
F.norm()
}
/* this=-conjugate(this) */
func (F *FP8) nconj(mem *arena.Arena) {
F.a.Neg(mem)
func (F *FP8) nconj() {
F.a.Neg()
F.norm()
}
/* this+=x */
func (F *FP8) Add(x *FP8, mem *arena.Arena) {
F.a.Add(x.a, mem)
F.b.Add(x.b, mem)
func (F *FP8) Add(x *FP8) {
F.a.Add(x.a)
F.b.Add(x.b)
}
/* this-=x */
func (F *FP8) Sub(x *FP8, mem *arena.Arena) {
m := NewFP8copy(x, mem)
m.Neg(mem)
F.Add(m, mem)
func (F *FP8) Sub(x *FP8) {
m := NewFP8copy(x)
m.Neg()
F.Add(m)
}
/* this-=x */
func (F *FP8) rsub(x *FP8, mem *arena.Arena) {
F.Neg(mem)
F.Add(x, mem)
func (F *FP8) rsub(x *FP8) {
F.Neg()
F.Add(x)
}
/* this*=s where s is FP4 */
func (F *FP8) pmul(s *FP4, mem *arena.Arena) {
F.a.Mul(s, mem)
F.b.Mul(s, mem)
func (F *FP8) pmul(s *FP4) {
F.a.Mul(s)
F.b.Mul(s)
}
/* this*=s where s is FP2 */
func (F *FP8) qmul(s *FP2, mem *arena.Arena) {
F.a.pmul(s, mem)
F.b.pmul(s, mem)
func (F *FP8) qmul(s *FP2) {
F.a.pmul(s)
F.b.pmul(s)
}
/* this*=s where s is FP */
func (F *FP8) tmul(s *FP, mem *arena.Arena) {
F.a.qmul(s, mem)
F.b.qmul(s, mem)
func (F *FP8) tmul(s *FP) {
F.a.qmul(s)
F.b.qmul(s)
}
/* this*=c where c is int */
func (F *FP8) imul(c int, mem *arena.Arena) {
F.a.imul(c, mem)
F.b.imul(c, mem)
func (F *FP8) imul(c int) {
F.a.imul(c)
F.b.imul(c)
}
/* this*=this */
func (F *FP8) Sqr(mem *arena.Arena) {
t1 := NewFP4copy(F.a, mem)
t2 := NewFP4copy(F.b, mem)
t3 := NewFP4copy(F.a, mem)
func (F *FP8) Sqr() {
t1 := NewFP4copy(F.a)
t2 := NewFP4copy(F.b)
t3 := NewFP4copy(F.a)
t3.Mul(F.b, mem)
t1.Add(F.b, mem)
t2.times_i(mem)
t3.Mul(F.b)
t1.Add(F.b)
t2.times_i()
t2.Add(F.a, mem)
t2.Add(F.a)
t1.norm()
t2.norm()
F.a.copy(t1)
F.a.Mul(t2, mem)
F.a.Mul(t2)
t2.copy(t3)
t2.times_i(mem)
t2.Add(t3, mem)
t2.times_i()
t2.Add(t3)
t2.norm()
t2.Neg(mem)
F.a.Add(t2, mem)
t2.Neg()
F.a.Add(t2)
F.b.copy(t3)
F.b.Add(t3, mem)
F.b.Add(t3)
F.norm()
}
/* this*=y */
func (F *FP8) Mul(y *FP8, mem *arena.Arena) {
t1 := NewFP4copy(F.a, mem)
t2 := NewFP4copy(F.b, mem)
t3 := NewFP4(mem)
t4 := NewFP4copy(F.b, mem)
func (F *FP8) Mul(y *FP8) {
t1 := NewFP4copy(F.a)
t2 := NewFP4copy(F.b)
t3 := NewFP4()
t4 := NewFP4copy(F.b)
t1.Mul(y.a, mem)
t2.Mul(y.b, mem)
t1.Mul(y.a)
t2.Mul(y.b)
t3.copy(y.b)
t3.Add(y.a, mem)
t4.Add(F.a, mem)
t3.Add(y.a)
t4.Add(F.a)
t3.norm()
t4.norm()
t4.Mul(t3, mem)
t4.Mul(t3)
t3.copy(t1)
t3.Neg(mem)
t4.Add(t3, mem)
t3.Neg()
t4.Add(t3)
t4.norm()
t3.copy(t2)
t3.Neg(mem)
t3.Neg()
F.b.copy(t4)
F.b.Add(t3, mem)
F.b.Add(t3)
t2.times_i(mem)
t2.times_i()
F.a.copy(t2)
F.a.Add(t1, mem)
F.a.Add(t1)
F.norm()
}
@ -418,55 +363,55 @@ func (F *FP8) toString() string {
}
/* this=1/this */
func (F *FP8) Invert(h *FP, mem *arena.Arena) {
t1 := NewFP4copy(F.a, mem)
t2 := NewFP4copy(F.b, mem)
func (F *FP8) Invert(h *FP) {
t1 := NewFP4copy(F.a)
t2 := NewFP4copy(F.b)
t1.Sqr(mem)
t2.Sqr(mem)
t2.times_i(mem)
t1.Sqr()
t2.Sqr()
t2.times_i()
t2.norm()
t1.Sub(t2, mem)
t1.Sub(t2)
t1.norm()
t1.Invert(h, mem)
t1.Invert(h)
F.a.Mul(t1, mem)
t1.Neg(mem)
F.a.Mul(t1)
t1.Neg()
t1.norm()
F.b.Mul(t1, mem)
F.b.Mul(t1)
}
/* this*=i where i = sqrt(sqrt(-1+sqrt(-1))) */
func (F *FP8) times_i(mem *arena.Arena) {
s := NewFP4copy(F.b, mem)
t := NewFP4copy(F.a, mem)
s.times_i(mem)
func (F *FP8) times_i() {
s := NewFP4copy(F.b)
t := NewFP4copy(F.a)
s.times_i()
F.a.copy(s)
F.b.copy(t)
F.norm()
if TOWER == POSITOWER {
F.Neg(mem)
F.Neg()
F.norm()
}
}
func (F *FP8) times_i2(mem *arena.Arena) {
F.a.times_i(mem)
F.b.times_i(mem)
func (F *FP8) times_i2() {
F.a.times_i()
F.b.times_i()
}
/* this=this^p using Frobenius */
func (F *FP8) frob(f *FP2, mem *arena.Arena) {
ff := NewFP2copy(f, mem)
ff.Sqr(mem)
ff.Mul_ip(mem)
func (F *FP8) frob(f *FP2) {
ff := NewFP2copy(f)
ff.Sqr()
ff.Mul_ip()
ff.norm()
F.a.frob(ff, mem)
F.b.frob(ff, mem)
F.b.pmul(f, mem)
F.b.times_i(mem)
F.a.frob(ff)
F.b.frob(ff)
F.b.pmul(f)
F.b.times_i()
}
/* this=this^e
@ -726,19 +671,19 @@ func (F *FP8) xtr_pow2(ck *FP8, ckml *FP8, ckm2l *FP8, a *BIG, b *BIG) *FP8 {
}
*/
/* this/=2 */
func (F *FP8) div2(mem *arena.Arena) {
F.a.div2(mem)
F.b.div2(mem)
func (F *FP8) div2() {
F.a.div2()
F.b.div2()
}
func (F *FP8) div_i(mem *arena.Arena) {
u := NewFP4copy(F.a, mem)
v := NewFP4copy(F.b, mem)
u.div_i(mem)
func (F *FP8) div_i() {
u := NewFP4copy(F.a)
v := NewFP4copy(F.b)
u.div_i()
F.a.copy(v)
F.b.copy(u)
if TOWER == POSITOWER {
F.Neg(mem)
F.Neg()
F.norm()
}
}
@ -765,72 +710,70 @@ func (F *FP8) pow(b *BIG) {
/* */
// Test for Quadratic Residue
func (F *FP8) qr(h *FP) int {
mem := arena.NewArena()
defer mem.Free()
c := NewFP8copy(F, mem)
c.conj(mem)
c.Mul(F, mem)
c := NewFP8copy(F)
c.conj()
c.Mul(F)
return c.a.qr(h)
}
// sqrt(a+ib) = sqrt((a+sqrt(a*a-n*b*b))/2)+ib/(2*sqrt((a+sqrt(a*a-n*b*b))/2))
func (F *FP8) Sqrt(h *FP, mem *arena.Arena) {
if F.IsZero(mem) {
func (F *FP8) Sqrt(h *FP) {
if F.IsZero() {
return
}
a := NewFP4copy(F.a, mem)
b := NewFP4(mem)
s := NewFP4copy(F.b, mem)
t := NewFP4copy(F.a, mem)
hint := NewFP(mem)
a := NewFP4copy(F.a)
b := NewFP4()
s := NewFP4copy(F.b)
t := NewFP4copy(F.a)
hint := NewFP()
s.Sqr(mem)
a.Sqr(mem)
s.times_i(mem)
s.Sqr()
a.Sqr()
s.times_i()
s.norm()
a.Sub(s, mem)
a.Sub(s)
s.copy(a)
s.norm()
s.Sqrt(h, mem)
s.Sqrt(h)
a.copy(t)
b.copy(t)
a.Add(s, mem)
a.Add(s)
a.norm()
a.div2(mem)
a.div2()
b.copy(F.b)
b.div2(mem)
b.div2()
qr := a.qr(hint)
// tweak hint - multiply old hint by Norm(1/Beta)^e where Beta is irreducible polynomial
s.copy(a)
twk := NewFPbig(NewBIGints(TWK, mem), mem)
twk.Mul(hint, mem)
s.div_i(mem)
twk := NewFPbig(NewBIGints(TWK))
twk.Mul(hint)
s.div_i()
s.norm()
a.cmove(s, 1-qr)
hint.cmove(twk, 1-qr)
F.a.copy(a)
F.a.Sqrt(hint, mem)
F.a.Sqrt(hint)
s.copy(a)
s.Invert(hint, mem)
s.Mul(F.a, mem)
s.Invert(hint)
s.Mul(F.a)
F.b.copy(s)
F.b.Mul(b, mem)
F.b.Mul(b)
t.copy(F.a)
F.a.cmove(F.b, 1-qr)
F.b.cmove(t, 1-qr)
sgn := F.sign(mem)
nr := NewFP8copy(F, mem)
nr.Neg(mem)
sgn := F.sign()
nr := NewFP8copy(F)
nr.Neg()
nr.norm()
F.cmove(nr, sgn)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,735 +0,0 @@
package app
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"math/big"
"os"
"strings"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
mn "github.com/multiformats/go-multiaddr/net"
"github.com/pkg/errors"
"golang.org/x/term"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
var (
textColor = lipgloss.Color("#fff")
primaryColor = lipgloss.Color("#ff0070")
secondaryColor = lipgloss.Color("#ff5c00")
windowHeader = lipgloss.NewStyle().
Foreground(textColor).
Padding(0, 1)
unselectedListStyle = lipgloss.NewStyle().
Foreground(textColor).
Width(28).
Padding(0, 1)
navigatedListStyle = lipgloss.NewStyle().
Foreground(textColor).
Width(28).
Bold(true).
Padding(0, 1)
selectedListStyle = lipgloss.NewStyle().
Foreground(textColor).
Background(primaryColor).
Width(28).
Padding(0, 1)
statusBarStyle = lipgloss.NewStyle().
Foreground(textColor).
Background(primaryColor)
statusStyle = lipgloss.NewStyle().
Foreground(textColor).
Background(primaryColor).
Padding(0, 1)
statusItemStyle = lipgloss.NewStyle().
Foreground(textColor).
Background(secondaryColor).
Padding(0, 1)
docStyle = lipgloss.NewStyle().Padding(0)
border = lipgloss.Border{
Top: "─",
Bottom: "─",
Left: "│",
Right: "│",
TopLeft: "┌",
TopRight: "┐",
BottomLeft: "└",
BottomRight: "┘",
}
)
type DBConsole struct {
nodeConfig *config.Config
}
func newDBConsole(nodeConfig *config.Config) (*DBConsole, error) {
return &DBConsole{
nodeConfig,
}, nil
}
type model struct {
filters []string
cursor int
selectedFilter string
conn *grpc.ClientConn
client protobufs.NodeServiceClient
peerId string
errorMsg string
frame *protobufs.ClockFrame
frames []*protobufs.ClockFrame
frameIndex int
grpcWarn bool
committed bool
lastChecked int64
owned *big.Int
unconfirmedOwned *big.Int
}
func (m model) Init() tea.Cmd {
return nil
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
if m.conn.GetState() == connectivity.Ready {
if m.lastChecked < (time.Now().UnixMilli() - 10_000) {
m.lastChecked = time.Now().UnixMilli()
tokenBalance, err := FetchTokenBalance(m.client)
if err == nil {
m.owned = tokenBalance.Owned
m.unconfirmedOwned = tokenBalance.UnconfirmedOwned
}
}
}
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c", "q":
return m, tea.Quit
case "up", "w":
if m.cursor > 0 {
m.cursor--
}
case "down", "s":
if m.cursor < len(m.filters)-1 {
m.cursor++
}
case "left", "a":
m.committed = false
m.errorMsg = ""
if m.frameIndex > 0 {
m.frameIndex--
if len(m.frames) != 0 && m.conn.GetState() == connectivity.Ready {
filter, _ := hex.DecodeString(m.selectedFilter)
selector, err := m.frames[m.frameIndex].GetSelector()
if err != nil {
m.errorMsg = err.Error()
break
}
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
},
)
if err == nil && bytes.Equal(
frameInfo.ClockFrame.Output,
m.frames[m.frameIndex].Output,
) {
m.committed = true
m.frame = frameInfo.ClockFrame
} else {
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
Selector: selector.FillBytes(make([]byte, 32)),
},
)
if err != nil {
m.errorMsg = hex.EncodeToString(
selector.FillBytes(make([]byte, 32)),
) + ":" + err.Error()
break
}
m.frame = frameInfo.ClockFrame
}
} else {
m.errorMsg = "Not currently connected to node, cannot query."
}
} else {
first := uint64(0)
if len(m.frames) != 0 {
first = m.frames[0].FrameNumber - 1
}
if first == 0 {
break
}
max := uint64(17)
if len(m.frames) != 0 {
max = first
}
min := max - 16
filter, _ := hex.DecodeString(m.selectedFilter)
frames, err := m.client.GetFrames(
context.Background(),
&protobufs.GetFramesRequest{
Filter: filter,
FromFrameNumber: min,
ToFrameNumber: max + 1,
IncludeCandidates: true,
},
)
if err != nil {
m.selectedFilter = ""
m.errorMsg = err.Error()
break
}
if frames.TruncatedClockFrames != nil {
m.frames = frames.TruncatedClockFrames
m.frameIndex = len(m.frames) - 1
selector, err := m.frames[m.frameIndex].GetSelector()
if err != nil {
m.errorMsg = err.Error()
break
}
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
},
)
if err == nil && bytes.Equal(
frameInfo.ClockFrame.Output,
m.frames[m.frameIndex].Output,
) {
m.committed = true
m.frame = frameInfo.ClockFrame
} else {
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
Selector: selector.FillBytes(make([]byte, 32)),
},
)
if err != nil {
m.errorMsg = err.Error()
break
}
m.frame = frameInfo.ClockFrame
}
}
}
case "right", "d":
m.committed = false
m.errorMsg = ""
if m.frameIndex < len(m.frames)-1 {
m.frameIndex++
if len(m.frames) != 0 && m.conn.GetState() == connectivity.Ready {
filter, _ := hex.DecodeString(m.selectedFilter)
selector, err := m.frames[m.frameIndex].GetSelector()
if err != nil {
m.errorMsg = err.Error()
break
}
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
},
)
if err == nil && bytes.Equal(
frameInfo.ClockFrame.Output,
m.frames[m.frameIndex].Output,
) {
m.committed = true
m.frame = frameInfo.ClockFrame
} else {
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
Selector: selector.FillBytes(make([]byte, 32)),
},
)
if err != nil {
m.errorMsg = hex.EncodeToString(
selector.FillBytes(make([]byte, 32)),
) + ":" + err.Error()
break
}
m.frame = frameInfo.ClockFrame
}
} else {
m.errorMsg = "Not currently connected to node, cannot query."
}
} else {
min := uint64(1)
if len(m.frames) != 0 {
min = m.frames[len(m.frames)-1].FrameNumber + 1
}
max := min + 16
filter, _ := hex.DecodeString(m.selectedFilter)
frames, err := m.client.GetFrames(
context.Background(),
&protobufs.GetFramesRequest{
Filter: filter,
FromFrameNumber: min,
ToFrameNumber: max,
IncludeCandidates: true,
},
)
if err != nil {
m.selectedFilter = ""
m.errorMsg = err.Error()
break
}
if frames.TruncatedClockFrames != nil {
m.frames = frames.TruncatedClockFrames
m.frameIndex = 0
selector, err := m.frames[m.frameIndex].GetSelector()
if err != nil {
m.errorMsg = err.Error()
break
}
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
},
)
if err == nil && bytes.Equal(
frameInfo.ClockFrame.Output,
m.frames[m.frameIndex].Output,
) {
m.committed = true
m.frame = frameInfo.ClockFrame
} else {
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
Selector: selector.FillBytes(make([]byte, 32)),
},
)
if err != nil {
m.errorMsg = err.Error()
break
}
m.frame = frameInfo.ClockFrame
}
}
}
case "enter", " ":
m.errorMsg = ""
m.frame = nil
m.committed = false
if m.conn.GetState() == connectivity.Ready {
if m.selectedFilter != m.filters[m.cursor] {
m.selectedFilter = m.filters[m.cursor]
m.frames = []*protobufs.ClockFrame{}
}
min := uint64(1)
if len(m.frames) != 0 {
min = m.frames[len(m.frames)-1].FrameNumber + 1
}
max := min + 16
filter, _ := hex.DecodeString(m.selectedFilter)
frames, err := m.client.GetFrames(
context.Background(),
&protobufs.GetFramesRequest{
Filter: filter,
FromFrameNumber: min,
ToFrameNumber: max,
IncludeCandidates: true,
},
)
if err != nil {
m.selectedFilter = ""
m.errorMsg = err.Error()
break
}
if frames.TruncatedClockFrames != nil {
m.frames = frames.TruncatedClockFrames
m.frameIndex = 0
selector, err := m.frames[m.frameIndex].GetSelector()
if err != nil {
m.errorMsg = err.Error()
break
}
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
},
)
if err == nil && bytes.Equal(
frameInfo.ClockFrame.Output,
m.frames[m.frameIndex].Output,
) {
m.committed = true
m.frame = frameInfo.ClockFrame
} else {
frameInfo, err := m.client.GetFrameInfo(
context.Background(),
&protobufs.GetFrameInfoRequest{
Filter: filter,
FrameNumber: m.frames[m.frameIndex].FrameNumber,
Selector: selector.FillBytes(make([]byte, 32)),
},
)
if err != nil {
m.errorMsg = err.Error()
break
}
m.frame = frameInfo.ClockFrame
}
}
} else {
m.errorMsg = "Not currently connected to node, cannot query."
}
}
}
return m, nil
}
func (m model) View() string {
physicalWidth, physicalHeight, _ := term.GetSize(int(os.Stdout.Fd()))
doc := strings.Builder{}
window := lipgloss.NewStyle().
Border(border, true).
BorderForeground(primaryColor).
Padding(0, 1)
list := []string{}
for i, item := range m.filters {
str := item[0:12] + ".." + item[len(item)-12:]
if m.selectedFilter == item {
list = append(list, selectedListStyle.Render(str))
} else if i == m.cursor {
list = append(list, navigatedListStyle.Render(str))
} else {
list = append(list, unselectedListStyle.Render(str))
}
}
w := lipgloss.Width
statusKey := statusItemStyle.Render("STATUS")
info := statusStyle.Render("(Press Ctrl-C or Q to quit)")
onlineStatus := "gRPC Not Enabled, Please Configure"
if !m.grpcWarn {
switch m.conn.GetState() {
case connectivity.Connecting:
onlineStatus = "CONNECTING"
case connectivity.Idle:
onlineStatus = "IDLE"
case connectivity.Shutdown:
onlineStatus = "SHUTDOWN"
case connectivity.TransientFailure:
onlineStatus = "DISCONNECTED"
default:
onlineStatus = "CONNECTED"
}
}
ownedVal := statusItemStyle.Copy().
Render("Owned: " + m.owned.String())
if m.owned.Cmp(big.NewInt(-1)) == 0 {
ownedVal = statusItemStyle.Copy().
Render("")
}
unconfirmedOwnedVal := statusItemStyle.Copy().
Render("Unconfirmed: " + m.unconfirmedOwned.String())
if m.unconfirmedOwned.Cmp(big.NewInt(-1)) == 0 {
unconfirmedOwnedVal = statusItemStyle.Copy().
Render("")
}
peerIdVal := statusItemStyle.Render(m.peerId)
statusVal := statusBarStyle.Copy().
Width(physicalWidth-w(statusKey)-w(info)-w(peerIdVal)-w(ownedVal)-
w(unconfirmedOwnedVal)).
Padding(0, 1).
Render(onlineStatus)
bar := lipgloss.JoinHorizontal(lipgloss.Top,
statusKey,
statusVal,
info,
peerIdVal,
ownedVal,
unconfirmedOwnedVal,
)
explorerContent := ""
if m.errorMsg != "" {
explorerContent = m.errorMsg
} else if m.frame != nil {
selector, err := m.frame.GetSelector()
if err != nil {
panic(err)
}
committed := "Unconfirmed"
if m.committed {
committed = "Confirmed"
}
explorerContent = fmt.Sprintf(
"Frame %d (Selector: %x, %s):\n\tParent: %x\n\tVDF Proof: %x\n",
m.frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
committed,
m.frame.ParentSelector,
m.frame.Input[:516],
)
for i := 0; i < len(m.frame.Input[516:])/74; i++ {
commit := m.frame.Input[516+(i*74) : 516+((i+1)*74)]
explorerContent += fmt.Sprintf(
"\tCommitment %+x\n",
commit,
)
explorerContent += fmt.Sprintf(
"\t\tType: %s\n",
m.frame.AggregateProofs[i].InclusionCommitments[0].TypeUrl,
)
}
} else {
explorerContent = logoVersion(physicalWidth - 34)
}
doc.WriteString(
lipgloss.JoinVertical(
lipgloss.Left,
lipgloss.JoinHorizontal(
lipgloss.Top,
lipgloss.JoinVertical(
lipgloss.Left,
windowHeader.Render("Filters (Up/Down, Enter)"),
window.Width(30).Height(physicalHeight-4).Render(lipgloss.JoinVertical(lipgloss.Left, list...)),
),
lipgloss.JoinVertical(
lipgloss.Left,
windowHeader.Render("Explorer (Left/Right)"),
window.Width(physicalWidth-34).Height(physicalHeight-4).Render(explorerContent),
),
),
statusBarStyle.Width(physicalWidth).Render(bar),
),
)
if physicalWidth > 0 {
docStyle = docStyle.MaxWidth(physicalWidth)
docStyle = docStyle.MaxHeight(physicalHeight)
}
return docStyle.Render(doc.String())
}
func consoleModel(
conn *grpc.ClientConn,
nodeConfig *config.Config,
grpcWarn bool,
) model {
peerPrivKey, err := hex.DecodeString(nodeConfig.P2P.PeerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
}
privKey, err := crypto.UnmarshalEd448PrivateKey(peerPrivKey)
if err != nil {
panic(errors.Wrap(err, "error unmarshaling peerkey"))
}
pub := privKey.GetPublic()
id, err := peer.IDFromPublicKey(pub)
if err != nil {
panic(errors.Wrap(err, "error getting peer id"))
}
return model{
filters: []string{
hex.EncodeToString([]byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}),
},
cursor: 0,
conn: conn,
client: protobufs.NewNodeServiceClient(conn),
owned: big.NewInt(-1),
unconfirmedOwned: big.NewInt(-1),
peerId: id.String(),
grpcWarn: grpcWarn,
}
}
var defaultGrpcAddress = "localhost:8337"
// Connect to the node via GRPC
func ConnectToNode(nodeConfig *config.Config) (*grpc.ClientConn, error) {
addr := defaultGrpcAddress
if nodeConfig.ListenGRPCMultiaddr != "" {
ma, err := multiaddr.NewMultiaddr(nodeConfig.ListenGRPCMultiaddr)
if err != nil {
panic(err)
}
_, addr, err = mn.DialArgs(ma)
if err != nil {
panic(err)
}
}
return grpc.Dial(
addr,
grpc.WithTransportCredentials(
insecure.NewCredentials(),
),
grpc.WithDefaultCallOptions(
grpc.MaxCallSendMsgSize(600*1024*1024),
grpc.MaxCallRecvMsgSize(600*1024*1024),
),
)
}
type TokenBalance struct {
Owned *big.Int
UnconfirmedOwned *big.Int
}
func FetchTokenBalance(client protobufs.NodeServiceClient) (TokenBalance, error) {
info, err := client.GetTokenInfo(
context.Background(),
&protobufs.GetTokenInfoRequest{},
)
if err != nil {
return TokenBalance{}, errors.Wrap(err, "error getting token info")
}
// owned := new(big.Int).SetBytes(info.OwnedTokens)
unconfirmedOwned := new(big.Int).SetBytes(info.UnconfirmedOwnedTokens)
return TokenBalance{
// Owned: owned,
UnconfirmedOwned: unconfirmedOwned,
}, nil
}
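A minimal caller sketch for the two helpers above (illustrative only; `printUnconfirmedBalance` is a hypothetical wrapper and a loaded `*config.Config` is assumed):

```go
// Sketch: dial the node over gRPC and print the unconfirmed balance.
func printUnconfirmedBalance(cfg *config.Config) error {
	conn, err := ConnectToNode(cfg)
	if err != nil {
		return err
	}
	defer conn.Close()

	client := protobufs.NewNodeServiceClient(conn)
	balance, err := FetchTokenBalance(client)
	if err != nil {
		return err
	}
	fmt.Println("unconfirmed:", balance.UnconfirmedOwned.String())
	return nil
}
```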
func FetchNodeInfo(client protobufs.NodeServiceClient) (*protobufs.NodeInfoResponse, error) {
info, err := client.GetNodeInfo(
context.Background(),
&protobufs.GetNodeInfoRequest{},
)
if err != nil {
return nil, errors.Wrap(err, "error getting node info")
}
return info, nil
}
// Runs the DB console
func (c *DBConsole) Run() {
conn, err := ConnectToNode(c.nodeConfig)
if err != nil {
panic(err)
}
defer conn.Close()
grpcWarn := c.nodeConfig.ListenGRPCMultiaddr == ""
p := tea.NewProgram(consoleModel(conn, c.nodeConfig, grpcWarn))
if _, err := p.Run(); err != nil {
panic(err)
}
}
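A sketch of the corresponding call site (the surrounding CLI wiring is assumed; `NewDBConsole` is the wire provider shown later in this commit):

```go
// Sketch: open the DB console against a loaded node config.
console, err := NewDBConsole(nodeConfig)
if err != nil {
	panic(err)
}
console.Run() // blocks until the bubbletea program exits
```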
func logoVersion(width int) string {
var out string
if width >= 83 {
out = " ..-------..\n"
out += " ..---'''' ''''---..\n"
out += " .---'' ''---.\n"
out += " .-' '-.\n"
out += " ..-' ..--''''''''''''''--.. '-..\n"
out += " .' .--'' ''--. ''.\n"
out += " .'' ..-' ''-. '.\n"
out += " ' ' ''. '.\n"
out += " '' .'' '. '\n"
out += " ' '' '. '\n"
out += " ' ' ########## . '\n"
out += " ' ' ############## ' '\n"
out += " ' ' ############## ' '\n"
out += " ' ' ############## ' '\n"
out += "' ' ########## ' '\n"
out += "' ' ' '\n"
out += "' ' ' '\n"
out += "' ' ####### ####### ' '\n"
out += "' ' &######################### ' '\n"
out += "' ' ##############% ############## ' '\n"
out += " ' ' &############## ############### ' '\n"
out += " ' ' ############### ##############% '. '\n"
out += " ' '. ########## ############### '-. '\n"
out += " '. . ##### ##############% '-.'\n"
out += " ' '. ###############\n"
out += " '. '.. ##############%\n"
out += " '. '-. ###############\n"
out += " '-. ''-.. .. ##############%\n"
out += " '-. ''---............----' '. ###############\n"
out += " '-.. '. ############\n"
out += " ''-.. ..' ########\n"
out += " ''---.. ...---'' ##\n"
out += " ''----------''\n"
out += " \n"
out += " Quilibrium Node - v" + config.GetVersionString() + " Centauri\n"
out += " \n"
out += " DB Console\n"
} else {
out = "Quilibrium Node - v" + config.GetVersionString() + " Centauri - DB Console\n"
}
return out
}


@@ -1,224 +1,55 @@
package app
import (
"errors"
"fmt"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
type Node struct {
logger *zap.Logger
dataProofStore store.DataProofStore
clockStore store.ClockStore
keyManager keys.KeyManager
pubSub p2p.PubSub
execEngines map[string]execution.ExecutionEngine
engine consensus.ConsensusEngine
}
type DHTNode struct {
pubSub p2p.PubSub
quit chan struct{}
}
func newDHTNode(
pubSub p2p.PubSub,
) (*DHTNode, error) {
return &DHTNode{
pubSub: pubSub,
quit: make(chan struct{}),
}, nil
logger *zap.Logger
keyManager keys.KeyManager
pubSub p2p.PubSub
quit chan struct{}
}
func newNode(
logger *zap.Logger,
dataProofStore store.DataProofStore,
clockStore store.ClockStore,
keyManager keys.KeyManager,
pubSub p2p.PubSub,
// execution engines wire in here
engine consensus.ConsensusEngine,
) (*Node, error) {
if engine == nil {
return nil, errors.New("engine must not be nil")
}
execEngines := make(map[string]execution.ExecutionEngine)
return &Node{
logger,
dataProofStore,
clockStore,
keyManager,
pubSub,
execEngines,
engine,
logger: logger,
keyManager: keyManager,
pubSub: pubSub,
quit: make(chan struct{}),
}, nil
}
func (n *Node) VerifyProofIntegrity() {
i, _, _, e := n.dataProofStore.GetLatestDataTimeProof(n.pubSub.GetPeerID())
if e != nil {
panic(e)
}
dataProver := crypto.NewKZGInclusionProver(n.logger)
wesoProver := crypto.NewWesolowskiFrameProver(n.logger)
for j := int(i); j >= 0; j-- {
fmt.Println(j)
_, _, input, o, err := n.dataProofStore.GetDataTimeProof(n.pubSub.GetPeerID(), uint32(j))
if err != nil {
panic(err)
}
idx, idxProof, idxCommit, idxKP := master.GetOutputs(o)
ip := sha3.Sum512(idxProof)
v, err := dataProver.VerifyRaw(ip[:], idxCommit, int(idx), idxKP, 128)
if err != nil {
panic(err)
}
if !v {
panic(fmt.Sprintf("bad kzg proof at increment %d", j))
}
wp := []byte{}
wp = append(wp, n.pubSub.GetPeerID()...)
wp = append(wp, input...)
fmt.Printf("%x\n", wp)
v = wesoProver.VerifyChallengeProof(wp, uint32(j), idx, idxProof)
if !v {
panic(fmt.Sprintf("bad weso proof at increment %d", j))
}
}
}
func (n *Node) RunRepair() {
// intrinsicFilter := append(
// p2p.GetBloomFilter(application.CEREMONY_ADDRESS, 256, 3),
// p2p.GetBloomFilterIndices(application.CEREMONY_ADDRESS, 65536, 24)...,
// )
// n.logger.Info("check store and repair if needed, this may take a few minutes")
// proverTrie := &tries.RollingFrecencyCritbitTrie{}
// head, err := n.clockStore.GetLatestDataClockFrame(intrinsicFilter, proverTrie)
// if err == nil && head != nil {
// for head != nil && head.FrameNumber != 0 {
// prev := head
// head, err = n.clockStore.GetStagedDataClockFrame(
// intrinsicFilter,
// head.FrameNumber-1,
// head.ParentSelector,
// true,
// )
// if err != nil {
// panic(err)
// }
// compare, _, err := n.clockStore.GetDataClockFrame(
// intrinsicFilter,
// prev.FrameNumber-1,
// true,
// )
// if err != nil {
// panic(err)
// }
// if !bytes.Equal(head.Output, compare.Output) {
// n.logger.Warn(
// "repairing frame",
// zap.Uint64("frame_number", head.FrameNumber),
// )
// head, err = n.clockStore.GetStagedDataClockFrame(
// intrinsicFilter,
// prev.FrameNumber-1,
// prev.ParentSelector,
// true,
// )
// if err != nil {
// panic(err)
// }
// txn, err := n.clockStore.NewTransaction()
// if err != nil {
// panic(err)
// }
// selector, err := head.GetSelector()
// if err != nil {
// panic(err)
// }
// err = n.clockStore.CommitDataClockFrame(
// intrinsicFilter,
// head.FrameNumber,
// selector.FillBytes(make([]byte, 32)),
// proverTrie,
// txn,
// true,
// )
// if err != nil {
// panic(err)
// }
// if err = txn.Commit(); err != nil {
// panic(err)
// }
// }
// }
// }
// n.logger.Info("check complete")
}
func (d *DHTNode) Start() {
func (d *Node) Start() {
d.pubSub.Subscribe(
[]byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
},
func(message *pb.Message) error { return nil },
)
<-d.quit
}
func (d *DHTNode) Stop() {
func (d *Node) Stop() {
go func() {
d.quit <- struct{}{}
}()
}
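For orientation, a sketch of how the slimmed-down `Node` is driven end to end (a loaded `*config.Config` is assumed; `NewNode` is the wire-generated constructor shown further below):

```go
// Sketch: construct the bootstrap-only node and run it until shutdown.
node, err := NewNode(cfg)
if err != nil {
	panic(err)
}
go node.Start() // Start blocks on the quit channel until Stop is called
// ... later, on a shutdown signal:
node.Stop()
```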
func (n *Node) Start() {
err := <-n.engine.Start()
if err != nil {
panic(err)
}
// TODO: add config mapping to engine name/frame registration
for _, e := range n.execEngines {
n.engine.RegisterExecutor(e, 0)
}
}
func (n *Node) Stop() {
err := <-n.engine.Stop(false)
if err != nil {
panic(err)
}
}
func (n *Node) GetLogger() *zap.Logger {
return n.logger
}
func (n *Node) GetClockStore() store.ClockStore {
return n.clockStore
}
func (n *Node) GetDataProofStore() store.DataProofStore {
return n.dataProofStore
}
func (n *Node) GetKeyManager() keys.KeyManager {
return n.keyManager
}
@@ -226,15 +57,3 @@ func (n *Node) GetKeyManager() keys.KeyManager {
func (n *Node) GetPubSub() p2p.PubSub {
return n.pubSub
}
func (n *Node) GetMasterClock() *master.MasterClockConsensusEngine {
return n.engine.(*master.MasterClockConsensusEngine)
}
func (n *Node) GetExecutionEngines() []execution.ExecutionEngine {
list := []execution.ExecutionEngine{}
for _, e := range n.execEngines {
list = append(list, e)
}
return list
}


@@ -7,25 +7,10 @@ import (
"github.com/google/wire"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
func logger() *zap.Logger {
log, err := zap.NewProduction()
if err != nil {
panic(err)
}
return log
}
func debugLogger() *zap.Logger {
log, err := zap.NewDevelopment()
if err != nil {
@@ -35,10 +20,6 @@ func debugLogger() *zap.Logger {
return log
}
var loggerSet = wire.NewSet(
logger,
)
var debugLoggerSet = wire.NewSet(
debugLogger,
)
@@ -49,20 +30,6 @@ var keyManagerSet = wire.NewSet(
wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)),
)
var storeSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "DB"),
store.NewPebbleDB,
wire.Bind(new(store.KVDB), new(*store.PebbleDB)),
store.NewPebbleClockStore,
store.NewPebbleKeyStore,
store.NewPebbleDataProofStore,
store.NewPeerstoreDatastore,
wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)),
wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)),
wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)),
wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)),
)
var pubSubSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "P2P"),
p2p.NewInMemoryPeerInfoManager,
@@ -71,60 +38,11 @@ var pubSubSet = wire.NewSet(
wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)),
)
var engineSet = wire.NewSet(
wire.FieldsOf(new(*config.Config), "Engine"),
crypto.NewWesolowskiFrameProver,
wire.Bind(new(crypto.FrameProver), new(*crypto.WesolowskiFrameProver)),
crypto.NewKZGInclusionProver,
wire.Bind(new(crypto.InclusionProver), new(*crypto.KZGInclusionProver)),
time.NewMasterTimeReel,
)
var consensusSet = wire.NewSet(
master.NewMasterClockConsensusEngine,
wire.Bind(
new(consensus.ConsensusEngine),
new(*master.MasterClockConsensusEngine),
),
)
func NewDHTNode(*config.Config) (*DHTNode, error) {
panic(wire.Build(
debugLoggerSet,
storeSet,
pubSubSet,
newDHTNode,
))
}
func NewDebugNode(*config.Config, *protobufs.SelfTestReport) (*Node, error) {
func NewNode(*config.Config) (*Node, error) {
panic(wire.Build(
debugLoggerSet,
keyManagerSet,
storeSet,
pubSubSet,
engineSet,
consensusSet,
newNode,
))
}
func NewNode(*config.Config, *protobufs.SelfTestReport) (*Node, error) {
panic(wire.Build(
loggerSet,
keyManagerSet,
storeSet,
pubSubSet,
engineSet,
consensusSet,
newNode,
))
}
func NewDBConsole(*config.Config) (*DBConsole, error) {
panic(wire.Build(newDBConsole))
}
func NewClockStore(*config.Config) (store.ClockStore, error) {
panic(wire.Build(loggerSet, storeSet))
}


@@ -10,116 +10,27 @@ import (
"github.com/google/wire"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/master"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
// Injectors from wire.go:
func NewDHTNode(configConfig *config.Config) (*DHTNode, error) {
p2PConfig := configConfig.P2P
dbConfig := configConfig.DB
pebbleDB := store.NewPebbleDB(dbConfig)
peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
if err != nil {
return nil, err
}
zapLogger := debugLogger()
blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
dhtNode, err := newDHTNode(blossomSub)
if err != nil {
return nil, err
}
return dhtNode, nil
}
func NewDebugNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestReport) (*Node, error) {
zapLogger := debugLogger()
dbConfig := configConfig.DB
pebbleDB := store.NewPebbleDB(dbConfig)
pebbleDataProofStore := store.NewPebbleDataProofStore(pebbleDB, zapLogger)
pebbleClockStore := store.NewPebbleClockStore(pebbleDB, zapLogger)
func NewNode(configConfig *config.Config) (*Node, error) {
logger := debugLogger()
keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
fileKeyManager := keys.NewFileKeyManager(keyConfig, logger)
p2PConfig := configConfig.P2P
peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
if err != nil {
return nil, err
}
blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
engineConfig := configConfig.Engine
kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, wesolowskiFrameProver)
inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger)
masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleDataProofStore, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, wesolowskiFrameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport)
node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, fileKeyManager, blossomSub, masterClockConsensusEngine)
blossomSub := p2p.NewBlossomSub(p2PConfig, logger)
node, err := newNode(logger, fileKeyManager, blossomSub)
if err != nil {
return nil, err
}
return node, nil
}
func NewNode(configConfig *config.Config, selfTestReport *protobufs.SelfTestReport) (*Node, error) {
zapLogger := logger()
dbConfig := configConfig.DB
pebbleDB := store.NewPebbleDB(dbConfig)
pebbleDataProofStore := store.NewPebbleDataProofStore(pebbleDB, zapLogger)
pebbleClockStore := store.NewPebbleClockStore(pebbleDB, zapLogger)
keyConfig := configConfig.Key
fileKeyManager := keys.NewFileKeyManager(keyConfig, zapLogger)
p2PConfig := configConfig.P2P
peerstoreDatastore, err := store.NewPeerstoreDatastore(pebbleDB)
if err != nil {
return nil, err
}
blossomSub := p2p.NewBlossomSub(p2PConfig, peerstoreDatastore, zapLogger)
engineConfig := configConfig.Engine
kzgInclusionProver := crypto.NewKZGInclusionProver(zapLogger)
wesolowskiFrameProver := crypto.NewWesolowskiFrameProver(zapLogger)
masterTimeReel := time.NewMasterTimeReel(zapLogger, pebbleClockStore, engineConfig, wesolowskiFrameProver)
inMemoryPeerInfoManager := p2p.NewInMemoryPeerInfoManager(zapLogger)
masterClockConsensusEngine := master.NewMasterClockConsensusEngine(engineConfig, zapLogger, pebbleDataProofStore, pebbleClockStore, fileKeyManager, blossomSub, kzgInclusionProver, wesolowskiFrameProver, masterTimeReel, inMemoryPeerInfoManager, selfTestReport)
node, err := newNode(zapLogger, pebbleDataProofStore, pebbleClockStore, fileKeyManager, blossomSub, masterClockConsensusEngine)
if err != nil {
return nil, err
}
return node, nil
}
func NewDBConsole(configConfig *config.Config) (*DBConsole, error) {
dbConsole, err := newDBConsole(configConfig)
if err != nil {
return nil, err
}
return dbConsole, nil
}
func NewClockStore(configConfig *config.Config) (store.ClockStore, error) {
dbConfig := configConfig.DB
pebbleDB := store.NewPebbleDB(dbConfig)
zapLogger := logger()
pebbleClockStore := store.NewPebbleClockStore(pebbleDB, zapLogger)
return pebbleClockStore, nil
}
// wire.go:
func logger() *zap.Logger {
log, err := zap.NewProduction()
if err != nil {
panic(err)
}
return log
}
func debugLogger() *zap.Logger {
log, err := zap.NewDevelopment()
if err != nil {
@@ -129,24 +40,10 @@ func debugLogger() *zap.Logger {
return log
}
var loggerSet = wire.NewSet(
logger,
)
var debugLoggerSet = wire.NewSet(
debugLogger,
)
var keyManagerSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Key"), keys.NewFileKeyManager, wire.Bind(new(keys.KeyManager), new(*keys.FileKeyManager)))
var storeSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "DB"), store.NewPebbleDB, wire.Bind(new(store.KVDB), new(*store.PebbleDB)), store.NewPebbleClockStore, store.NewPebbleKeyStore, store.NewPebbleDataProofStore, store.NewPeerstoreDatastore, wire.Bind(new(store.ClockStore), new(*store.PebbleClockStore)), wire.Bind(new(store.KeyStore), new(*store.PebbleKeyStore)), wire.Bind(new(store.DataProofStore), new(*store.PebbleDataProofStore)), wire.Bind(new(store.Peerstore), new(*store.PeerstoreDatastore)))
var pubSubSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "P2P"), p2p.NewInMemoryPeerInfoManager, p2p.NewBlossomSub, wire.Bind(new(p2p.PubSub), new(*p2p.BlossomSub)), wire.Bind(new(p2p.PeerInfoManager), new(*p2p.InMemoryPeerInfoManager)))
var engineSet = wire.NewSet(wire.FieldsOf(new(*config.Config), "Engine"), crypto.NewWesolowskiFrameProver, wire.Bind(new(crypto.FrameProver), new(*crypto.WesolowskiFrameProver)), crypto.NewKZGInclusionProver, wire.Bind(new(crypto.InclusionProver), new(*crypto.KZGInclusionProver)), time.NewMasterTimeReel)
var consensusSet = wire.NewSet(master.NewMasterClockConsensusEngine, wire.Bind(
new(consensus.ConsensusEngine),
new(*master.MasterClockConsensusEngine),
),
)

File diff suppressed because it is too large

@@ -11,23 +11,20 @@ BINARIES_DIR="$ROOT_DIR/target/release"
pushd "$NODE_DIR" > /dev/null
export CGO_ENABLED=1
export GOEXPERIMENT=arenas
os_type="$(uname)"
case "$os_type" in
"Darwin")
# Check if the architecture is ARM
if [[ "$(uname -m)" == "arm64" ]]; then
# MacOS ld doesn't support -Bstatic and -Bdynamic, so it's important that there is only a static version of the library
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -lvdf -lbls48581 -ldl -lm'" "$@"
go build "$@"
else
echo "Unsupported platform"
exit 1
fi
;;
"Linux")
go build -ldflags "-linkmode 'external' -extldflags '-L$BINARIES_DIR -Wl,-Bstatic -lvdf -lbls48581 -Wl,-Bdynamic -ldl -lm'" "$@"
go build "$@"
;;
*)
echo "Unsupported platform"


@@ -64,17 +64,7 @@ var BootstrapPeers = []string{
"/ip4/70.36.102.32/udp/8336/quic-v1/p2p/QmYriGRXCUiwFodqSoS4GgEcD7UVyxXPeCgQKmYne3iLSF",
"/ip4/204.12.220.2/udp/8336/quic-v1/p2p/QmRw5Tw4p5v2vLPvVSAkQEiRPQGnWk9HM4xiSvgxF82CCw",
"/ip4/209.159.149.14/udp/8336/quic-v1/p2p/Qmcq4Lmw45tbodvdRWZ8iGgy3rUcR3dikHTj1fBXP8VJqv",
"/ip4/148.251.9.90/udp/8336/quic-v1/p2p/QmRpKmQ1W83s6moBFpG6D6nrttkqdQSbdCJpvfxDVGcs38",
"/ip4/35.232.113.144/udp/8336/quic-v1/p2p/QmWxkBc7a17ZsLHhszLyTvKsoHMKvKae2XwfQXymiU66md",
"/ip4/34.87.85.78/udp/8336/quic-v1/p2p/QmTGguT5XhtvZZwTLnNQTN8Bg9eUm1THWEneXXHGhMDPrz",
"/ip4/34.81.199.27/udp/8336/quic-v1/p2p/QmTMMKpzCKJCwrnUzNu6tNj4P1nL7hVqz251245wsVpGNg",
"/ip4/34.143.255.235/udp/8336/quic-v1/p2p/QmeifsP6Kvq8A3yabQs6CBg7prSpDSqdee8P2BDQm9EpP8",
"/ip4/34.34.125.238/udp/8336/quic-v1/p2p/QmZdSyBJLm9UiDaPZ4XDkgRGXUwPcHJCmKoH6fS9Qjyko4",
"/ip4/34.80.245.52/udp/8336/quic-v1/p2p/QmNmbqobt82Vre5JxUGVNGEWn2HsztQQ1xfeg6mx7X5u3f",
"/dns/bravo-1.qcommander.sh/udp/8336/quic-v1/p2p/QmWFK1gVuhEqZdr8phTo3QbyLwjYmyivx31Zubqt7oR4XB",
"/ip4/109.199.100.108/udp/8336/quic-v1/p2p/Qma9fgugQc17MDu4YRSvnhfhVre6AYZ3nZdW8dSUYbsWvm",
"/ip4/47.251.49.193/udp/8336/quic-v1/p2p/QmP6ADPmMCsB8y82oFbrKTrwYWXt1CTMJ3jGNDXRHyYJgR",
"/ip4/138.201.203.208/udp/8336/quic-v1/p2p/QmbNhSTd4Y64ZCbV2gAXYR4ZFDdfRBMfrgWsNg99JHxsJo",
// purged peers (keep your node online to return to this list)
// "/ip4/204.186.74.47/udp/8317/quic-v1/p2p/Qmd233pLUDvcDW3ama27usfbG1HxKNh1V9dmWVW1SXp1pd",
// "/ip4/186.233.184.181/udp/8336/quic-v1/p2p/QmW6QDvKuYqJYYMP5tMZSp12X3nexywK28tZNgqtqNpEDL",


@@ -13,7 +13,6 @@ type P2PConfig struct {
HistoryLength int `yaml:"historyLength"`
HistoryGossip int `yaml:"historyGossip"`
DLazy int `yaml:"dLazy"`
GossipFactor float64 `yaml:"gossipFactor"`
GossipRetransmission int `yaml:"gossipRetransmission"`
HeartbeatInitialDelay time.Duration `yaml:"heartbeatInitialDelay"`
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`


@@ -6,15 +6,15 @@ import (
)
func GetMinimumVersionCutoff() time.Time {
return time.Date(2024, time.May, 8, 5, 0, 0, 0, time.UTC)
return time.Date(2024, time.August, 7, 5, 0, 0, 0, time.UTC)
}
func GetMinimumVersion() []byte {
return []byte{0x01, 0x04, 0x13}
return []byte{0x02, 0x00, 0x00}
}
func GetVersion() []byte {
return []byte{0x01, 0x04, 0x15}
return []byte{0x02, 0x00, 0x00}
}
func GetVersionString() string {
@@ -36,5 +36,5 @@ func FormatVersion(version []byte) string {
}
func GetPatchNumber() byte {
return 0x01
return 0x00
}
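For clarity, the three bytes above are read here as major/minor/patch, so this release encodes as 2.0.0; a tiny sketch (the helper name is hypothetical and `fmt` is an assumed import; `FormatVersion` elsewhere in this file remains the canonical formatter):

```go
// Sketch: {0x02, 0x00, 0x00} -> "2.0.0" under the major/minor/patch reading.
func versionString(v []byte) string {
	return fmt.Sprintf("%d.%d.%d", v[0], v[1], v[2])
}
```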


@@ -1,49 +0,0 @@
package consensus
import (
"crypto"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
type EngineState int
const (
EngineStateStopped EngineState = iota
EngineStateStarting
EngineStateLoading
EngineStateCollecting
EngineStateProving
EngineStatePublishing
EngineStateVerifying
EngineStateStopping
)
type ConsensusEngine interface {
Start() <-chan error
Stop(force bool) <-chan error
RegisterExecutor(exec execution.ExecutionEngine, frame uint64) <-chan error
UnregisterExecutor(name string, frame uint64, force bool) <-chan error
GetFrame() *protobufs.ClockFrame
GetDifficulty() uint32
GetState() EngineState
GetFrameChannel() <-chan *protobufs.ClockFrame
}
type DataConsensusEngine interface {
Start() <-chan error
Stop(force bool) <-chan error
RegisterExecutor(exec execution.ExecutionEngine, frame uint64) <-chan error
UnregisterExecutor(name string, frame uint64, force bool) <-chan error
GetFrame() *protobufs.ClockFrame
GetDifficulty() uint32
GetState() EngineState
GetFrameChannel() <-chan *protobufs.ClockFrame
GetProvingKey(
engineConfig *config.EngineConfig,
) (crypto.Signer, keys.KeyType, []byte, []byte)
IsInProverTrie(key []byte) bool
GetPeerInfo() *protobufs.PeerInfoResponse
}


@@ -1,247 +0,0 @@
package master
import (
"bytes"
"context"
"encoding/binary"
"strings"
"time"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/mr-tron/base58"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub/pb"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (e *MasterClockConsensusEngine) handleMessage(message *pb.Message) error {
e.logger.Debug(
"received message",
zap.Binary("data", message.Data),
zap.Binary("from", message.From),
zap.Binary("signature", message.Signature),
)
msg := &protobufs.Message{}
if err := proto.Unmarshal(message.Data, msg); err != nil {
return errors.Wrap(err, "handle message")
}
any := &anypb.Any{}
if err := proto.Unmarshal(msg.Payload, any); err != nil {
return errors.Wrap(err, "handle message")
}
switch any.TypeUrl {
case protobufs.SelfTestReportType:
if err := e.handleSelfTestReport(
message.From,
any,
); err != nil {
return errors.Wrap(err, "handle message")
}
return nil
}
return errors.Wrap(errors.New("invalid message"), "handle message")
}
func (e *MasterClockConsensusEngine) handleSelfTestReport(
peerID []byte,
any *anypb.Any,
) error {
report := &protobufs.SelfTestReport{}
if err := any.UnmarshalTo(report); err != nil {
return errors.Wrap(err, "handle self test report")
}
if bytes.Equal(peerID, e.pubSub.GetPeerID()) {
info := e.peerInfoManager.GetPeerInfo(peerID)
info.LastSeen = time.Now().UnixMilli()
info.DifficultyMetric = report.DifficultyMetric
info.MasterHeadFrame = report.MasterHeadFrame
return nil
}
if len(report.Proof) != 520 {
e.logger.Warn(
"received invalid proof from peer",
zap.String("peer_id", peer.ID(peerID).String()),
zap.Int("proof_size", len(report.Proof)),
zap.Uint32("cores", report.Cores),
)
e.pubSub.SetPeerScore(peerID, -1000)
return errors.Wrap(errors.New("invalid report"), "handle self test report")
}
e.logger.Debug(
"received proof from peer",
zap.String("peer_id", peer.ID(peerID).String()),
)
info := e.peerInfoManager.GetPeerInfo(peerID)
if info != nil {
if (time.Now().UnixMilli() - info.LastSeen) < (270 * 1000) {
return nil
}
info.DifficultyMetric = report.DifficultyMetric
info.MasterHeadFrame = report.MasterHeadFrame
if info.Bandwidth == 0 {
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
defer cancel()
ch := e.pubSub.GetMultiaddrOfPeerStream(ctx, peerID)
select {
case <-ch:
select {
case e.bandwidthTestCh <- peerID:
default:
}
case <-ctx.Done():
}
}()
}
proof := report.Proof
challenge := []byte{}
challenge = append(challenge, peerID...)
challenge = append(challenge, report.Challenge...)
proofs := make([][]byte, (len(report.Proof)-8)/516)
for i := 0; i < len(proofs); i++ {
proofs[i] = proof[i*516 : (i+1)*516]
}
select {
case e.verifyTestCh <- verifyChallenge{
peerID: peerID,
challenge: challenge,
cores: report.Cores,
increment: report.Increment,
proof: proof,
}:
default:
}
return nil
}
e.addPeerManifestReport(peerID, report)
memory := binary.BigEndian.Uint64(report.Memory)
e.logger.Debug(
"received self test report",
zap.String("peer_id", base58.Encode(peerID)),
zap.Uint32("difficulty", report.Difficulty),
zap.Int64("difficulty_metric", report.DifficultyMetric),
zap.Int64("commit_16_metric", report.Commit_16Metric),
zap.Int64("commit_128_metric", report.Commit_128Metric),
zap.Int64("commit_1024_metric", report.Commit_1024Metric),
zap.Int64("commit_65536_metric", report.Commit_65536Metric),
zap.Int64("proof_16_metric", report.Proof_16Metric),
zap.Int64("proof_128_metric", report.Proof_128Metric),
zap.Int64("proof_1024_metric", report.Proof_1024Metric),
zap.Int64("proof_65536_metric", report.Proof_65536Metric),
zap.Uint32("cores", report.Cores),
zap.Uint64("memory", memory),
zap.Uint64("storage", binary.BigEndian.Uint64(report.Storage)),
)
if report.Cores < 3 || memory < 16000000000 {
e.logger.Debug(
"peer reported invalid configuration",
zap.String("peer_id", base58.Encode(peerID)),
zap.Uint32("difficulty", report.Difficulty),
zap.Int64("difficulty_metric", report.DifficultyMetric),
zap.Int64("commit_16_metric", report.Commit_16Metric),
zap.Int64("commit_128_metric", report.Commit_128Metric),
zap.Int64("commit_1024_metric", report.Commit_1024Metric),
zap.Int64("commit_65536_metric", report.Commit_65536Metric),
zap.Int64("proof_16_metric", report.Proof_16Metric),
zap.Int64("proof_128_metric", report.Proof_128Metric),
zap.Int64("proof_1024_metric", report.Proof_1024Metric),
zap.Int64("proof_65536_metric", report.Proof_65536Metric),
zap.Uint32("cores", report.Cores),
zap.Uint64("memory", memory),
zap.Uint64("storage", binary.BigEndian.Uint64(report.Storage)),
)
// tag: dusk nuke this peer for now
e.pubSub.SetPeerScore(peerID, -1000)
return nil
}
go func() {
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
defer cancel()
ch := e.pubSub.GetMultiaddrOfPeerStream(ctx, peerID)
select {
case <-ch:
go func() {
e.bandwidthTestCh <- peerID
}()
case <-ctx.Done():
}
}()
return nil
}
func (e *MasterClockConsensusEngine) publishProof(
frame *protobufs.ClockFrame,
) error {
e.logger.Debug(
"publishing frame",
zap.Uint64("frame_number", frame.FrameNumber),
)
e.masterTimeReel.Insert(frame, false)
e.state = consensus.EngineStateCollecting
return nil
}
func (e *MasterClockConsensusEngine) publishMessage(
filter []byte,
message proto.Message,
) error {
any := &anypb.Any{}
if err := any.MarshalFrom(message); err != nil {
return errors.Wrap(err, "publish message")
}
// annoying protobuf any hack
any.TypeUrl = strings.Replace(
any.TypeUrl,
"type.googleapis.com",
"types.quilibrium.com",
1,
)
payload, err := proto.Marshal(any)
if err != nil {
return errors.Wrap(err, "publish message")
}
h, err := poseidon.HashBytes(payload)
if err != nil {
return errors.Wrap(err, "publish message")
}
msg := &protobufs.Message{
Hash: h.Bytes(),
Address: e.filter,
Payload: payload,
}
data, err := proto.Marshal(msg)
if err != nil {
return errors.Wrap(err, "publish message")
}
return e.pubSub.PublishToBitmask(filter, data)
}
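The `strings.Replace` above swaps the well-known registry host exactly once and leaves the fully-qualified message name untouched; roughly (the concrete type name below is a placeholder):

```go
// "type.googleapis.com/<fully.qualified.Type>" -> "types.quilibrium.com/<fully.qualified.Type>"
rewritten := strings.Replace(
	"type.googleapis.com/example.SomeMessage", // placeholder type URL
	"type.googleapis.com",
	"types.quilibrium.com",
	1,
)
_ = rewritten // "types.quilibrium.com/example.SomeMessage"
```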


@@ -1,71 +0,0 @@
package master
import (
"time"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/node/consensus"
"source.quilibrium.com/quilibrium/monorepo/node/p2p"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (e *MasterClockConsensusEngine) prove(
previousFrame *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
e.logger.Debug("proving new frame")
frame, err := e.frameProver.ProveMasterClockFrame(
previousFrame,
time.Now().UnixMilli(),
e.difficulty,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
e.state = consensus.EngineStatePublishing
e.logger.Debug("returning new proven frame")
return frame, nil
}
func (e *MasterClockConsensusEngine) GetMostAheadPeers() (
[][]byte,
error,
) {
frame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
// Needs to be enough to make the sync worthwhile:
max := frame.FrameNumber + 10
var peers [][]byte = [][]byte{}
peerMap := e.peerInfoManager.GetPeerMap()
for peerId, v := range peerMap {
if v.MasterHeadFrame > max {
peers = append(peers, []byte(peerId))
}
if len(peers) >= 30 {
break
}
}
if len(peers) == 0 {
return nil, p2p.ErrNoPeersAvailable
}
return peers, nil
}
func (e *MasterClockConsensusEngine) collect(
currentFramePublished *protobufs.ClockFrame,
) (*protobufs.ClockFrame, error) {
latest, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
return latest, nil
}


@@ -1,124 +0,0 @@
package master
import (
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/execution"
)
func (e *MasterClockConsensusEngine) RegisterExecutor(
exec execution.ExecutionEngine,
frame uint64,
) <-chan error {
logger := e.logger.With(zap.String("execution_engine_name", exec.GetName()))
logger.Info("registering execution engine")
errChan := make(chan error)
go func() {
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
logger.Info(
"starting execution engine at frame",
zap.Uint64("current_frame", masterFrame.FrameNumber),
)
err = <-exec.Start()
if err != nil {
logger.Error("could not start execution engine", zap.Error(err))
errChan <- err
return
}
for {
masterFrame, err = e.masterTimeReel.Head()
if err != nil {
panic(err)
}
logger.Info(
"awaiting frame",
zap.Uint64("current_frame", masterFrame.FrameNumber),
zap.Uint64("target_frame", frame),
)
newFrame := masterFrame.FrameNumber
if newFrame >= frame {
logger.Info(
"injecting execution engine at frame",
zap.Uint64("current_frame", newFrame),
)
e.engineMx.Lock()
e.executionEngines[exec.GetName()] = exec
e.engineMx.Unlock()
errChan <- nil
break
}
}
}()
return errChan
}
func (e *MasterClockConsensusEngine) UnregisterExecutor(
name string,
frame uint64,
force bool,
) <-chan error {
logger := e.logger.With(zap.String("execution_engine_name", name))
logger.Info("unregistering execution engine")
errChan := make(chan error)
exec, ok := e.executionEngines[name]
if !ok {
logger.Error(
"could not unregister execution engine",
zap.Error(errors.New("execution engine not registered")),
)
errChan <- errors.New("execution engine not registered")
return errChan
}
go func() {
for {
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
logger.Info(
"awaiting frame",
zap.Uint64("current_frame", masterFrame.FrameNumber),
zap.Uint64("target_frame", frame),
)
newFrame := masterFrame.FrameNumber
if newFrame >= frame {
logger.Info(
"removing execution engine at frame",
zap.Uint64("current_frame", newFrame),
)
e.engineMx.Lock()
delete(e.executionEngines, name)
e.engineMx.Unlock()
logger.Info(
"stopping execution engine at frame",
zap.Uint64("current_frame", newFrame),
)
err := <-exec.Stop(force)
if err != nil {
logger.Error("could not stop execution engine", zap.Error(err))
}
errChan <- err
break
}
}
}()
return errChan
}

File diff suppressed because it is too large

@@ -1,108 +0,0 @@
package master
import (
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
func (e *MasterClockConsensusEngine) Sync(
request *protobufs.SyncRequest,
server protobufs.ValidationService_SyncServer,
) error {
e.currentReceivingSyncPeersMx.Lock()
if e.currentReceivingSyncPeers > 4 {
e.currentReceivingSyncPeersMx.Unlock()
e.logger.Debug("currently processing maximum sync requests, returning")
return nil
}
e.currentReceivingSyncPeers++
e.currentReceivingSyncPeersMx.Unlock()
defer func() {
e.currentReceivingSyncPeersMx.Lock()
e.currentReceivingSyncPeers--
e.currentReceivingSyncPeersMx.Unlock()
}()
from := request.FramesRequest.FromFrameNumber
masterFrame, err := e.masterTimeReel.Head()
if err != nil {
panic(err)
}
if masterFrame.FrameNumber < from || len(e.historicFrames) == 0 {
e.logger.Debug(
"peer asked for undiscovered frame",
zap.Uint64("frame_number", request.FramesRequest.FromFrameNumber),
)
return nil
}
to := request.FramesRequest.ToFrameNumber
if to == 0 || to-request.FramesRequest.FromFrameNumber > 16 {
to = request.FramesRequest.FromFrameNumber + 15
}
for {
if int(to) > int(masterFrame.FrameNumber) {
to = masterFrame.FrameNumber
}
e.logger.Debug(
"sending response",
zap.Uint64("from", from),
zap.Uint64("to", to),
zap.Uint64("total_frames", uint64(to-from+1)),
)
iter, err := e.clockStore.RangeMasterClockFrames(
e.filter,
from,
to,
)
if err != nil {
return errors.Wrap(err, "sync")
}
response := []*protobufs.ClockFrame{}
for iter.First(); iter.Valid(); iter.Next() {
frame, err := iter.Value()
if err != nil {
return errors.Wrap(err, "sync")
}
response = append(response, frame)
}
if err = iter.Close(); err != nil {
return errors.Wrap(err, "sync")
}
if len(response) == 0 {
return nil
}
if err := server.Send(&protobufs.SyncResponse{
FramesResponse: &protobufs.ClockFramesResponse{
Filter: e.filter,
FromFrameNumber: from,
ToFrameNumber: to,
ClockFrames: response,
},
}); err != nil {
return errors.Wrap(err, "sync")
}
from = response[len(response)-1].FrameNumber + 1
to = from + 15
time.Sleep(1 * time.Second)
}
}
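The stream above pages frames out sixteen at a time until the requested range is exhausted; a client-side consumption sketch, assuming the standard protoc-gen-go-grpc names for this service (`NewValidationServiceClient`, `Recv`) and an existing `conn`/`ctx`:

```go
// Sketch: drain a sync stream until the server closes it.
client := protobufs.NewValidationServiceClient(conn)
req := &protobufs.SyncRequest{} // populate req.FramesRequest with the desired range
stream, err := client.Sync(ctx, req)
if err != nil {
	panic(err)
}
for {
	resp, err := stream.Recv()
	if err != nil {
		break // io.EOF once the server has sent everything it has
	}
	_ = resp.FramesResponse.ClockFrames // hand each batch to the caller
}
```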


@@ -1,871 +0,0 @@
package time
import (
"bytes"
"encoding/hex"
"math/big"
"sort"
"sync"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/pkg/errors"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
var allBitmaskFilter = []byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}
var unknownDistance = new(big.Int).SetBytes([]byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
})
type pendingFrame struct {
selector *big.Int
parentSelector *big.Int
frameNumber uint64
}
type DataTimeReel struct {
rwMutex sync.RWMutex
running bool
filter []byte
engineConfig *config.EngineConfig
logger *zap.Logger
clockStore store.ClockStore
frameProver crypto.FrameProver
parentTimeReel TimeReel
origin []byte
initialInclusionProof *crypto.InclusionAggregateProof
initialProverKeys [][]byte
head *protobufs.ClockFrame
totalDistance *big.Int
headDistance *big.Int
lruFrames *lru.Cache[string, string]
proverTrie *tries.RollingFrecencyCritbitTrie
pending map[uint64][]*pendingFrame
incompleteForks map[uint64][]*pendingFrame
frames chan *pendingFrame
newFrameCh chan *protobufs.ClockFrame
badFrameCh chan *protobufs.ClockFrame
done chan bool
}
func NewDataTimeReel(
filter []byte,
logger *zap.Logger,
clockStore store.ClockStore,
engineConfig *config.EngineConfig,
frameProver crypto.FrameProver,
origin []byte,
initialInclusionProof *crypto.InclusionAggregateProof,
initialProverKeys [][]byte,
) *DataTimeReel {
if filter == nil {
panic("filter is nil")
}
if logger == nil {
panic("logger is nil")
}
if clockStore == nil {
panic("clock store is nil")
}
if engineConfig == nil {
panic("engine config is nil")
}
if frameProver == nil {
panic("frame prover is nil")
}
cache, err := lru.New[string, string](10000)
if err != nil {
panic(err)
}
return &DataTimeReel{
running: false,
logger: logger,
filter: filter,
engineConfig: engineConfig,
clockStore: clockStore,
frameProver: frameProver,
origin: origin,
initialInclusionProof: initialInclusionProof,
initialProverKeys: initialProverKeys,
lruFrames: cache,
pending: make(map[uint64][]*pendingFrame),
incompleteForks: make(map[uint64][]*pendingFrame),
frames: make(chan *pendingFrame),
newFrameCh: make(chan *protobufs.ClockFrame),
badFrameCh: make(chan *protobufs.ClockFrame),
done: make(chan bool),
}
}
func (d *DataTimeReel) Start() error {
trie := &tries.RollingFrecencyCritbitTrie{}
frame, err := d.clockStore.GetLatestDataClockFrame(d.filter, trie)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
}
if frame == nil {
d.head, d.proverTrie = d.createGenesisFrame()
d.totalDistance = big.NewInt(0)
d.headDistance = big.NewInt(0)
} else {
d.head = frame
if err != nil {
panic(err)
}
d.totalDistance = big.NewInt(0)
d.proverTrie = trie
d.headDistance, err = d.GetDistance(frame)
}
go d.runLoop()
return nil
}
func (d *DataTimeReel) Head() (*protobufs.ClockFrame, error) {
return d.head, nil
}
// Insert enqueues a structurally valid frame into the time reel. If the frame
// is the next one in sequence, it advances the reel head forward and emits a
// new frame on the new frame channel.
func (d *DataTimeReel) Insert(frame *protobufs.ClockFrame, isSync bool) error {
if !d.running {
return nil
}
d.logger.Debug(
"insert frame",
zap.Uint64("frame_number", frame.FrameNumber),
zap.String("output_tag", hex.EncodeToString(frame.Output[:64])),
)
if d.lruFrames.Contains(string(frame.Output[:64])) {
return nil
}
d.lruFrames.Add(string(frame.Output[:64]), string(frame.ParentSelector))
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
distance, _ := d.GetDistance(frame)
d.storePending(selector, parent, distance, frame)
if !isSync {
go func() {
d.frames <- &pendingFrame{
selector: selector,
parentSelector: parent,
frameNumber: frame.FrameNumber,
}
}()
}
return nil
}
func (d *DataTimeReel) GetFrameProverTrie() *tries.RollingFrecencyCritbitTrie {
return d.proverTrie
}
func (d *DataTimeReel) NewFrameCh() <-chan *protobufs.ClockFrame {
return d.newFrameCh
}
func (d *DataTimeReel) BadFrameCh() <-chan *protobufs.ClockFrame {
return d.badFrameCh
}
func (d *DataTimeReel) Stop() {
d.done <- true
}
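A consumer sketch for the reel's public surface above (all constructor arguments and `incomingFrame` are assumed to come from the surrounding wiring; only calls declared in this file are used):

```go
// Sketch: start a data time reel and react to head changes.
reel := NewDataTimeReel(
	filter, logger, clockStore, engineConfig, frameProver,
	origin, inclusionProof, proverKeys,
)
if err := reel.Start(); err != nil {
	panic(err)
}
go func() {
	for frame := range reel.NewFrameCh() {
		logger.Info("new head", zap.Uint64("frame_number", frame.FrameNumber))
	}
}()
// structurally validated frames are handed to the reel as they arrive:
_ = reel.Insert(incomingFrame, false)
// ... and on shutdown:
reel.Stop()
```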
func (d *DataTimeReel) createGenesisFrame() (
*protobufs.ClockFrame,
*tries.RollingFrecencyCritbitTrie,
) {
if d.origin == nil {
panic("origin is nil")
}
if d.initialInclusionProof == nil {
panic("initial inclusion proof is nil")
}
if d.initialProverKeys == nil {
panic("initial prover keys is nil")
}
difficulty := d.engineConfig.Difficulty
if difficulty == 0 || difficulty == 10000 {
difficulty = 100000
}
frame, trie, err := d.frameProver.CreateDataGenesisFrame(
d.filter,
d.origin,
difficulty,
d.initialInclusionProof,
d.initialProverKeys,
true,
)
if err != nil {
panic(err)
}
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
txn, err := d.clockStore.NewTransaction()
if err != nil {
panic(err)
}
err = d.clockStore.StageDataClockFrame(
selector.FillBytes(make([]byte, 32)),
frame,
txn,
)
if err != nil {
txn.Abort()
panic(err)
}
if err := d.clockStore.CommitDataClockFrame(
d.filter,
0,
selector.FillBytes(make([]byte, 32)),
trie,
txn,
false,
); err != nil {
panic(err)
}
if err := txn.Commit(); err != nil {
panic(err)
}
return frame, trie
}
// Main data consensus loop
func (d *DataTimeReel) runLoop() {
d.running = true
for {
select {
case frame := <-d.frames:
// Most common scenario: the incoming frame is in order and higher than the head
if d.head.FrameNumber < frame.frameNumber {
d.logger.Debug(
"frame is higher",
zap.Uint64("head_frame_number", d.head.FrameNumber),
zap.Uint64("frame_number", frame.frameNumber),
)
// tag: equinox master filter changes
_, err := d.clockStore.GetMasterClockFrame(
allBitmaskFilter,
frame.frameNumber)
if err != nil {
d.logger.Debug("no master, add pending")
// If the frame arrived ahead of a master, e.g. the master data is not
// synced, we'll go ahead and mark it as pending and process it when
// we can, but if we had a general fault, panic:
if !errors.Is(err, store.ErrNotFound) {
panic(err)
}
continue
}
rawFrame, err := d.clockStore.GetStagedDataClockFrame(
d.filter,
frame.frameNumber,
frame.selector.FillBytes(make([]byte, 32)),
false,
)
if err != nil {
panic(err)
}
distance, err := d.GetDistance(rawFrame)
if err != nil {
panic(err)
}
// Otherwise set it as the next and process all pending
d.setHead(rawFrame, distance)
} else if d.head.FrameNumber == frame.frameNumber {
// frames are equivalent, no need to act
headSelector, err := d.head.GetSelector()
if err != nil {
panic(err)
}
if headSelector.Cmp(frame.selector) == 0 {
d.logger.Debug("equivalent frame")
continue
}
rawFrame, err := d.clockStore.GetStagedDataClockFrame(
d.filter,
frame.frameNumber,
frame.selector.FillBytes(make([]byte, 32)),
false,
)
if err != nil {
panic(err)
}
distance, err := d.GetDistance(rawFrame)
if err != nil {
panic(err)
}
// Optimization: if competing frames share a parent we can short-circuit
// fork choice
if new(big.Int).SetBytes(d.head.ParentSelector).Cmp(
frame.parentSelector,
) == 0 && distance.Cmp(d.headDistance) < 0 {
d.logger.Debug(
"frame shares parent, has shorter distance, short circuit",
)
d.setHead(rawFrame, distance)
continue
}
} else {
d.logger.Debug("frame is lower height")
}
case <-d.done:
return
}
}
}
func (d *DataTimeReel) addPending(
selector *big.Int,
parent *big.Int,
frameNumber uint64,
) {
d.logger.Debug(
"add pending",
zap.Uint64("head_frame_number", d.head.FrameNumber),
zap.Uint64("add_frame_number", frameNumber),
zap.String("selector", selector.Text(16)),
zap.String("parent", parent.Text(16)),
)
if d.head.FrameNumber <= frameNumber {
if _, ok := d.pending[frameNumber]; !ok {
d.pending[frameNumber] = []*pendingFrame{}
}
// avoid heavy thrashing
for _, frame := range d.pending[frameNumber] {
if frame.selector.Cmp(selector) == 0 {
d.logger.Debug("exists in pending already")
return
}
}
}
if d.head.FrameNumber <= frameNumber {
d.logger.Debug(
"accumulate in pending",
zap.Int("pending_neighbors", len(d.pending[frameNumber])),
)
d.pending[frameNumber] = append(
d.pending[frameNumber],
&pendingFrame{
selector: selector,
parentSelector: parent,
frameNumber: frameNumber,
},
)
}
}
func (d *DataTimeReel) storePending(
selector *big.Int,
parent *big.Int,
distance *big.Int,
frame *protobufs.ClockFrame,
) {
// avoid db thrashing
if existing, err := d.clockStore.GetStagedDataClockFrame(
frame.Filter,
frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
true,
); err != nil && existing == nil {
d.logger.Debug(
"not stored yet, save data candidate",
zap.Uint64("frame_number", frame.FrameNumber),
zap.String("selector", selector.Text(16)),
zap.String("parent", parent.Text(16)),
zap.String("distance", distance.Text(16)),
)
txn, err := d.clockStore.NewTransaction()
if err != nil {
panic(err)
}
err = d.clockStore.StageDataClockFrame(
selector.FillBytes(make([]byte, 32)),
frame,
txn,
)
if err != nil {
txn.Abort()
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
}
}
func (d *DataTimeReel) processPending(
frame *protobufs.ClockFrame,
lastReceived *pendingFrame,
) {
d.logger.Debug(
"process pending",
zap.Int("pending_frame_numbers", len(d.pending)),
)
frameNumbers := []uint64{}
for f := range d.pending {
frameNumbers = append(frameNumbers, f)
d.logger.Debug(
"pending per frame number",
zap.Uint64("pending_frame_number", f),
zap.Int("pending_frames", len(d.pending[f])),
)
}
sort.Slice(frameNumbers, func(i, j int) bool {
return frameNumbers[i] > frameNumbers[j]
})
lastSelector := lastReceived.selector
for _, f := range frameNumbers {
if f < d.head.FrameNumber {
delete(d.pending, f)
}
nextPending := d.pending[f]
d.logger.Debug(
"checking frame set",
zap.Uint64("pending_frame_number", f),
zap.Uint64("frame_number", frame.FrameNumber),
)
if f < frame.FrameNumber {
d.logger.Debug(
"purging frame set",
zap.Uint64("pending_frame_number", f),
zap.Uint64("frame_number", frame.FrameNumber),
)
delete(d.pending, f)
continue
}
// Pull the next
for len(nextPending) != 0 {
d.logger.Debug("try process next")
next := nextPending[0]
d.pending[f] = d.pending[f][1:]
if f == lastReceived.frameNumber && next.selector.Cmp(lastSelector) == 0 {
d.pending[f] = append(d.pending[f], next)
if len(d.pending[f]) == 1 {
nextPending = nil
}
continue
}
go func() {
d.frames <- next
}()
return
}
}
}
func (d *DataTimeReel) setHead(frame *protobufs.ClockFrame, distance *big.Int) {
d.logger.Debug(
"set frame to head",
zap.Uint64("frame_number", frame.FrameNumber),
zap.String("output_tag", hex.EncodeToString(frame.Output[:64])),
zap.Uint64("head_number", d.head.FrameNumber),
zap.String("head_output_tag", hex.EncodeToString(d.head.Output[:64])),
)
txn, err := d.clockStore.NewTransaction()
if err != nil {
panic(err)
}
d.logger.Debug(
"save data",
zap.Uint64("frame_number", frame.FrameNumber),
zap.String("distance", distance.Text(16)),
)
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
if err := d.clockStore.CommitDataClockFrame(
d.filter,
frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
d.proverTrie,
txn,
false,
); err != nil {
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
d.head = frame
d.headDistance = distance
go func() {
d.newFrameCh <- frame
}()
}
// tag: dusk store the distance with the frame
func (d *DataTimeReel) getTotalDistance(frame *protobufs.ClockFrame) *big.Int {
selector, err := frame.GetSelector()
if err != nil {
panic(err)
}
total, err := d.clockStore.GetTotalDistance(
d.filter,
frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
)
if err == nil && total != nil {
return total
}
total, err = d.GetDistance(frame)
if err != nil {
panic(err)
}
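// A rough sketch of what the walk below does: starting from this frame, it
// follows ParentSelector links back through the staged store and accumulates
// each ancestor's distance into the running total, then memoizes the result
// via SetTotalDistance so subsequent lookups can return early.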
for index := frame; err == nil &&
index.FrameNumber > 0; index, err = d.clockStore.GetStagedDataClockFrame(
d.filter,
index.FrameNumber-1,
index.ParentSelector,
true,
) {
distance, err := d.GetDistance(index)
if err != nil {
panic(err)
}
total.Add(total, distance)
}
d.clockStore.SetTotalDistance(
d.filter,
frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
total,
)
return total
}
func (d *DataTimeReel) GetDistance(frame *protobufs.ClockFrame) (
*big.Int,
error,
) {
// tag: equinox master filter changes
master, err := d.clockStore.GetMasterClockFrame(
allBitmaskFilter,
frame.FrameNumber)
if err != nil {
return unknownDistance, errors.Wrap(err, "get distance")
}
masterSelector, err := master.GetSelector()
if err != nil {
return unknownDistance, errors.Wrap(err, "get distance")
}
discriminatorNode :=
d.proverTrie.FindNearest(masterSelector.FillBytes(make([]byte, 32)))
discriminator := discriminatorNode.External.Key
addr, err := frame.GetAddress()
if err != nil {
return unknownDistance, errors.Wrap(err, "get distance")
}
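// The distance metric used throughout the reel: the prover address nearest
// to the master frame's selector acts as the discriminator, and a frame's
// distance is the absolute difference between that discriminator and the
// frame's own prover address, i.e. |discriminator - addr|.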
distance := new(big.Int).Sub(
new(big.Int).SetBytes(discriminator),
new(big.Int).SetBytes(addr),
)
distance.Abs(distance)
return distance, nil
}
func (d *DataTimeReel) forkChoice(
frame *protobufs.ClockFrame,
distance *big.Int,
) {
d.logger.Debug(
"fork choice",
zap.Uint64("frame_number", frame.FrameNumber),
zap.String("output_tag", hex.EncodeToString(frame.Output[:64])),
zap.Uint64("head_number", d.head.FrameNumber),
zap.String("head_output_tag", hex.EncodeToString(d.head.Output[:64])),
)
parentSelector, selector, err := frame.GetParentAndSelector()
if err != nil {
panic(err)
}
leftIndex := d.head
rightIndex := frame
leftTotal := new(big.Int).Set(d.headDistance)
overweight := big.NewInt(0)
rightTotal := new(big.Int).Set(distance)
left := d.head.ParentSelector
right := frame.ParentSelector
rightReplaySelectors := [][]byte{}
for rightIndex.FrameNumber > leftIndex.FrameNumber {
rightReplaySelectors = append(
append(
[][]byte{},
right,
),
rightReplaySelectors...,
)
rightIndex, err = d.clockStore.GetStagedDataClockFrame(
d.filter,
rightIndex.FrameNumber-1,
rightIndex.ParentSelector,
true,
)
if err != nil {
// If lineage cannot be verified, set it for later
if errors.Is(err, store.ErrNotFound) {
d.addPending(selector, parentSelector, frame.FrameNumber)
return
} else {
panic(err)
}
}
right = rightIndex.ParentSelector
rightIndexDistance, err := d.GetDistance(rightIndex)
if err != nil {
panic(err)
}
// When the right chain is longer we also credit its extra distance to the
// left-side allowance, because we cannot know where the left chain will lead
// and don't want its shorter length to disadvantage the comparison
overweight.Add(overweight, rightIndexDistance)
rightTotal.Add(rightTotal, rightIndexDistance)
}
// Walk backwards through the parents, until we find a matching parent
// selector:
for !bytes.Equal(left, right) {
d.logger.Debug(
"scan backwards",
zap.String("left_parent", hex.EncodeToString(leftIndex.ParentSelector)),
zap.String("right_parent", hex.EncodeToString(rightIndex.ParentSelector)),
)
rightReplaySelectors = append(
append(
[][]byte{},
right,
),
rightReplaySelectors...,
)
leftIndex, err = d.clockStore.GetStagedDataClockFrame(
d.filter,
leftIndex.FrameNumber-1,
leftIndex.ParentSelector,
true,
)
if err != nil {
d.logger.Error(
"store corruption: a discontinuity has been found in your time reel",
zap.String(
"selector",
hex.EncodeToString(leftIndex.ParentSelector),
),
zap.Uint64("frame_number", leftIndex.FrameNumber-1),
)
panic(err)
}
rightIndex, err = d.clockStore.GetStagedDataClockFrame(
d.filter,
rightIndex.FrameNumber-1,
rightIndex.ParentSelector,
true,
)
if err != nil {
// If lineage cannot be verified, set it for later
if errors.Is(err, store.ErrNotFound) {
d.addPending(selector, parentSelector, frame.FrameNumber)
return
} else {
panic(err)
}
}
left = leftIndex.ParentSelector
right = rightIndex.ParentSelector
leftIndexDistance, err := d.GetDistance(leftIndex)
if err != nil {
panic(err)
}
rightIndexDistance, err := d.GetDistance(rightIndex)
if err != nil {
panic(err)
}
leftTotal.Add(leftTotal, leftIndexDistance)
rightTotal.Add(rightTotal, rightIndexDistance)
}
d.logger.Debug("found mutual root")
frameNumber := rightIndex.FrameNumber
overweight.Add(overweight, leftTotal)
// Choose new fork based on lightest distance sub-tree
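// At this point overweight holds the head-side total plus the allowance
// accumulated for the proposed fork's extra frames, so the proposal only
// wins when its accumulated distance does not exceed that budget.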
if rightTotal.Cmp(overweight) > 0 {
d.logger.Debug("proposed fork has greater distance",
zap.String("right_total", rightTotal.Text(16)),
zap.String("left_total", overweight.Text(16)),
)
d.addPending(selector, parentSelector, frame.FrameNumber)
return
}
for {
if len(rightReplaySelectors) == 0 {
break
}
next := rightReplaySelectors[0]
rightReplaySelectors =
rightReplaySelectors[1:]
txn, err := d.clockStore.NewTransaction()
if err != nil {
panic(err)
}
if err := d.clockStore.CommitDataClockFrame(
d.filter,
frameNumber,
next,
d.proverTrie,
txn,
rightIndex.FrameNumber < d.head.FrameNumber,
); err != nil {
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
frameNumber++
}
txn, err := d.clockStore.NewTransaction()
if err != nil {
panic(err)
}
if err := d.clockStore.CommitDataClockFrame(
d.filter,
frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
d.proverTrie,
txn,
false,
); err != nil {
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
d.head = frame
d.totalDistance.Sub(d.totalDistance, leftTotal)
d.totalDistance.Add(d.totalDistance, rightTotal)
d.headDistance = distance
d.logger.Debug(
"set total distance",
zap.String("total_distance", d.totalDistance.Text(16)),
)
d.clockStore.SetTotalDistance(
d.filter,
frame.FrameNumber,
selector.FillBytes(make([]byte, 32)),
d.totalDistance,
)
go func() {
d.newFrameCh <- frame
}()
}
func (d *DataTimeReel) GetTotalDistance() *big.Int {
return new(big.Int).Set(d.totalDistance)
}
var _ TimeReel = (*DataTimeReel)(nil)

View File

@@ -1,426 +0,0 @@
package time_test
import (
"bytes"
"fmt"
"strings"
"sync"
"testing"
gotime "time"
"github.com/cloudflare/circl/sign/ed448"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
qcrypto "source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
func generateTestProvers() (
keys.KeyManager,
[]peer.ID,
[][]byte,
[][]byte,
map[string]string,
*tries.RollingFrecencyCritbitTrie,
) {
keyManager := keys.NewInMemoryKeyManager()
peers := []peer.ID{}
pubKeys := [][]byte{}
privKeys := [][]byte{}
addrMap := map[string]string{}
for i := 0; i < 1000; i++ {
keyManager.CreateSigningKey(
fmt.Sprintf("test-key-%d", i),
keys.KeyTypeEd448,
)
k, err := keyManager.GetRawKey(fmt.Sprintf("test-key-%d", i))
if err != nil {
panic(err)
}
privKey, err := crypto.UnmarshalEd448PrivateKey([]byte(k.PrivateKey))
if err != nil {
panic(err)
}
privKeys = append(privKeys, []byte(k.PrivateKey))
pub := privKey.GetPublic()
id, err := peer.IDFromPublicKey(pub)
if err != nil {
panic(err)
}
peers = append(peers, id)
keyManager.CreateSigningKey(
fmt.Sprintf("proving-key-%d", i),
keys.KeyTypeEd448,
)
pk, err := keyManager.GetRawKey(fmt.Sprintf("proving-key-%d", i))
if err != nil {
panic(err)
}
pprivKey, err := crypto.UnmarshalEd448PrivateKey([]byte(pk.PrivateKey))
if err != nil {
panic(err)
}
ppub := pprivKey.GetPublic()
ppubKey, err := ppub.Raw()
if err != nil {
panic(err)
}
pubKeys = append(pubKeys, ppubKey)
}
proverTrie := &tries.RollingFrecencyCritbitTrie{}
for i, s := range pubKeys {
addr, err := poseidon.HashBytes(s)
if err != nil {
panic(err)
}
addrBytes := addr.Bytes()
addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
proverTrie.Add(addrBytes, 0)
addrMap[string(addrBytes)] = fmt.Sprintf("proving-key-%d", i)
}
return keyManager,
peers,
pubKeys,
privKeys,
addrMap,
proverTrie
}
func TestDataTimeReel(t *testing.T) {
logger, _ := zap.NewDevelopment()
db := store.NewInMemKVDB()
clockStore := store.NewPebbleClockStore(db, logger)
prover := qcrypto.NewWesolowskiFrameProver(logger)
filter := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
keyManager,
_,
pubKeys,
_,
addrMap,
proverTrie := generateTestProvers()
// We set this up by churning 40 master frames first so we don't have to
// zig-zag between master and data frames while confirming data time reel
// behaviors.
m := time.NewMasterTimeReel(
logger,
clockStore,
&config.EngineConfig{
Filter: filter,
GenesisSeed: strings.Repeat("00", 516),
Difficulty: 10,
},
prover,
)
err := m.Start()
assert.NoError(t, err)
frame, err := m.Head()
assert.NoError(t, err)
frames := []*protobufs.ClockFrame{}
wg := sync.WaitGroup{}
wg.Add(1)
frameCh := m.NewFrameCh()
go func() {
for i := 0; i < 40; i++ {
frames = append(frames, <-frameCh)
}
wg.Done()
}()
// in order
for i := int64(0); i < 40; i++ {
frame, err = prover.ProveMasterClockFrame(frame, i+1, 10)
assert.NoError(t, err)
err := m.Insert(frame, false)
assert.NoError(t, err)
}
wg.Wait()
for i := 0; i < 40; i++ {
assert.NotNil(t, frames[i])
assert.Equal(t, frames[i].FrameNumber, uint64(i+1))
}
filterBytes := []byte{
0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff,
}
// Ok, now onto the data time reel. We're going to test the following
// scenarios:
// 1. Z-dist optimal, in order
// 2. Z-dist optimal, out of order
// 3. 90% optimal, out of order
// 4. Malicious majority, out of order
d := time.NewDataTimeReel(
filterBytes,
logger,
clockStore,
&config.EngineConfig{
Filter: filter,
GenesisSeed: strings.Repeat("00", 516),
Difficulty: 10,
},
prover,
frames[0].Output,
&qcrypto.InclusionAggregateProof{
InclusionCommitments: []*qcrypto.InclusionCommitment{},
AggregateCommitment: []byte{},
Proof: []byte{},
},
pubKeys,
)
err = d.Start()
assert.NoError(t, err)
frame, err = d.Head()
assert.NoError(t, err)
dataFrames := []*protobufs.ClockFrame{}
datawg := sync.WaitGroup{}
datawg.Add(1)
dataFrameCh := d.NewFrameCh()
targetFrameParentSelector := []byte{}
go func() {
for {
frame := <-dataFrameCh
dataFrames = append(dataFrames, frame)
if frame.FrameNumber == 40 && bytes.Equal(
frame.ParentSelector,
targetFrameParentSelector,
) {
break
}
}
datawg.Done()
}()
// 1. z-dist optimal proof submission is strictly master-frame evoked leader
for i := int64(0); i < 10; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
)
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
optimalSigner,
i+1,
10,
)
d.Insert(frame, false)
}
// 2. z-dist optimal, out of order proof submission is strictly master-frame
// evoked leader, but arrived completely backwards
insertFrames := []*protobufs.ClockFrame{}
for i := int64(10); i < 20; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
)
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
optimalSigner,
i+1,
10,
)
insertFrames = append(insertFrames, frame)
}
for i := 9; i >= 0; i-- {
err := d.Insert(insertFrames[i], false)
assert.NoError(t, err)
}
// 3. 90% optimal, out of order
insertFrames = []*protobufs.ClockFrame{}
for i := int64(20); i < 25; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
)
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
optimalSigner,
i+1,
10,
)
d.Insert(frame, false)
}
masterSelector, err := frames[25].GetSelector()
assert.NoError(t, err)
proverSelections := proverTrie.FindNearestAndApproximateNeighbors(
masterSelector.FillBytes(make([]byte, 32)),
)
suboptimalSigner2, _ := keyManager.GetSigningKey(
addrMap[string(proverSelections[2].External.Key)],
)
// What we're trying to simulate: consensus heads progressed on a slightly
// less optimal prover.
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
suboptimalSigner2,
26,
10,
)
insertFrames = append(insertFrames, frame)
for i := int64(26); i < 30; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
proverSelection := proverTrie.FindNearest(
masterSelector.FillBytes(make([]byte, 32)),
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelection.External.Key)],
)
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
optimalSigner,
i+1,
10,
)
insertFrames = append(insertFrames, frame)
}
for i := 4; i >= 0; i-- {
err := d.Insert(insertFrames[i], false)
assert.NoError(t, err)
}
// 4. Malicious majority, out of order: handle a suppressive majority and
// force consensus on the lowest-distance sub-tree:
insertFrames = []*protobufs.ClockFrame{}
conflictFrames := []*protobufs.ClockFrame{}
optimalKeySet := [][]byte{}
suppressedFrame := frame
for i := int64(30); i < 40; i++ {
masterSelector, err := frames[i].GetSelector()
assert.NoError(t, err)
proverSelections := proverTrie.FindNearestAndApproximateNeighbors(
masterSelector.FillBytes(make([]byte, 32)),
)
optimalSigner, _ := keyManager.GetSigningKey(
addrMap[string(proverSelections[0].External.Key)],
)
suboptimalSigner2, _ := keyManager.GetSigningKey(
addrMap[string(proverSelections[2].External.Key)],
)
optimalKeySet = append(optimalKeySet, []byte(
(optimalSigner.Public()).(ed448.PublicKey),
))
// What we're trying to simulate: the majority is intentionally ignoring
// the most optimal signer
suppressedFrame, err = prover.ProveDataClockFrame(
suppressedFrame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
optimalSigner,
i+1,
10,
)
insertFrames = append(insertFrames, suppressedFrame)
if i == 39 {
targetFrameParentSelector = suppressedFrame.ParentSelector
}
frame, err = prover.ProveDataClockFrame(
frame,
[][]byte{},
[]*protobufs.InclusionAggregateProof{},
suboptimalSigner2,
i+1,
10,
)
conflictFrames = append(conflictFrames, frame)
}
for i := 9; i >= 0; i-- {
err := d.Insert(conflictFrames[i], false)
// force linear ordering
gotime.Sleep(1 * gotime.Second)
assert.NoError(t, err)
}
// Someone is honest, but running backwards:
for i := 9; i >= 0; i-- {
err := d.Insert(insertFrames[i], false)
gotime.Sleep(1 * gotime.Second)
assert.NoError(t, err)
}
datawg.Wait()
assert.Equal(t, uint64(40), dataFrames[len(dataFrames)-1].FrameNumber)
assert.Equal(
t,
optimalKeySet[len(optimalKeySet)-1],
dataFrames[len(dataFrames)-1].GetPublicKeySignatureEd448().PublicKey.KeyValue,
)
}

View File

@@ -1,316 +0,0 @@
package time
import (
"encoding/hex"
"errors"
"math/big"
"sync"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
type MasterTimeReel struct {
rwMutex sync.RWMutex
filter []byte
engineConfig *config.EngineConfig
logger *zap.Logger
clockStore store.ClockStore
frameProver crypto.FrameProver
head *protobufs.ClockFrame
pending map[uint64][]*protobufs.ClockFrame
frames chan *protobufs.ClockFrame
newFrameCh chan *protobufs.ClockFrame
badFrameCh chan *protobufs.ClockFrame
done chan bool
}
func NewMasterTimeReel(
logger *zap.Logger,
clockStore store.ClockStore,
engineConfig *config.EngineConfig,
frameProver crypto.FrameProver,
) *MasterTimeReel {
if logger == nil {
panic("logger is nil")
}
if clockStore == nil {
panic("clock store is nil")
}
if engineConfig == nil {
panic("engine config is nil")
}
if frameProver == nil {
panic("frame prover is nil")
}
filter, err := hex.DecodeString(engineConfig.Filter)
if err != nil {
panic(err)
}
return &MasterTimeReel{
logger: logger,
filter: filter,
engineConfig: engineConfig,
clockStore: clockStore,
frameProver: frameProver,
pending: make(map[uint64][]*protobufs.ClockFrame),
frames: make(chan *protobufs.ClockFrame),
newFrameCh: make(chan *protobufs.ClockFrame),
badFrameCh: make(chan *protobufs.ClockFrame),
done: make(chan bool),
}
}
// Start implements TimeReel.
func (m *MasterTimeReel) Start() error {
frame, err := m.clockStore.GetLatestMasterClockFrame(m.filter)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
}
genesis, err := m.clockStore.GetMasterClockFrame(m.filter, 0)
if err != nil && !errors.Is(err, store.ErrNotFound) {
panic(err)
}
rebuildGenesisFrame := false
if genesis != nil && genesis.Difficulty == 0 {
m.logger.Warn("corrupted genesis frame detected, rebuilding")
err = m.clockStore.ResetMasterClockFrames(m.filter)
if err != nil {
panic(err)
}
rebuildGenesisFrame = true
}
if frame == nil || rebuildGenesisFrame {
m.head = m.createGenesisFrame()
} else {
m.head = frame
}
go m.runLoop()
return nil
}
// Head implements TimeReel.
func (m *MasterTimeReel) Head() (*protobufs.ClockFrame, error) {
return m.head, nil
}
// Insert enqueues a structurally valid frame into the time reel. If the frame
// is the next one in sequence, it advances the reel head forward and emits a
// new frame on the new frame channel.
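// A minimal (hypothetical) caller sketch — names other than Insert and
// NewFrameCh are illustrative only: verify the frame first, hand it to the
// reel, then watch the new-frame channel for the head to advance:
//
//	if err := prover.VerifyMasterClockFrame(frame); err == nil {
//		_ = reel.Insert(frame, false)
//		head := <-reel.NewFrameCh()
//		_ = head.FrameNumber
//	}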
func (m *MasterTimeReel) Insert(
frame *protobufs.ClockFrame,
isSync bool,
) error {
go func() {
m.frames <- frame
}()
return nil
}
// NewFrameCh implements TimeReel.
func (m *MasterTimeReel) NewFrameCh() <-chan *protobufs.ClockFrame {
return m.newFrameCh
}
func (m *MasterTimeReel) BadFrameCh() <-chan *protobufs.ClockFrame {
return m.badFrameCh
}
// Stop implements TimeReel.
func (m *MasterTimeReel) Stop() {
m.done <- true
}
func (m *MasterTimeReel) createGenesisFrame() *protobufs.ClockFrame {
seed, err := hex.DecodeString(m.engineConfig.GenesisSeed)
if err != nil {
panic(errors.New("genesis seed is nil"))
}
difficulty := m.engineConfig.Difficulty
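// Assumption (not stated elsewhere in this file): 0 means the difficulty was
// never configured and 10000 is treated as a stale legacy value; both fall
// back to the 100000 default used below.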
if difficulty == 0 || difficulty == 10000 {
difficulty = 100000
}
frame, err := m.frameProver.CreateMasterGenesisFrame(
m.filter,
seed,
difficulty,
)
if err != nil {
panic(err)
}
txn, err := m.clockStore.NewTransaction()
if err != nil {
panic(err)
}
if err = m.clockStore.PutMasterClockFrame(frame, txn); err != nil {
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
return frame
}
func (m *MasterTimeReel) runLoop() {
for {
select {
case frame := <-m.frames:
if m.head.FrameNumber < frame.FrameNumber {
m.logger.Debug(
"new frame has higher number",
zap.Uint32("new_frame_number", uint32(frame.FrameNumber)),
zap.Uint32("frame_number", uint32(m.head.FrameNumber)),
)
if frame.FrameNumber-m.head.FrameNumber == 1 {
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := m.head.GetSelector()
if err != nil {
panic(err)
}
// master frames cannot fork, this is invalid
if parent.Cmp(selector) != 0 {
m.logger.Debug(
"invalid parent selector for frame",
zap.Binary("frame_parent_selector", frame.ParentSelector),
zap.Binary("actual_parent_selector", selector.FillBytes(
make([]byte, 32),
)),
)
go func() {
m.badFrameCh <- frame
}()
continue
}
txn, err := m.clockStore.NewTransaction()
if err != nil {
panic(err)
}
if err := m.clockStore.PutMasterClockFrame(frame, txn); err != nil {
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
m.head = frame
go func() {
m.newFrameCh <- frame
}()
} else {
if _, ok := m.pending[frame.FrameNumber]; !ok {
m.pending[frame.FrameNumber] = []*protobufs.ClockFrame{}
}
m.pending[frame.FrameNumber] = append(
m.pending[frame.FrameNumber],
frame,
)
}
m.processPending()
} else {
m.logger.Debug(
"new frame has same or lower frame number",
zap.Uint32("new_frame_number", uint32(frame.FrameNumber)),
zap.Uint32("frame_number", uint32(m.head.FrameNumber)),
)
continue
}
case <-m.done:
return
}
}
}
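// processPending repeatedly promotes a stored frame numbered head+1 whose
// parent selector matches the current head, emits it on the new-frame
// channel, and then prunes pending entries that fall below the new head.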
func (m *MasterTimeReel) processPending() {
for pending, ok := m.pending[m.head.FrameNumber+1]; ok; pending,
ok = m.pending[m.head.FrameNumber+1] {
prev := m.head
for _, frame := range pending {
frame := frame
parent := new(big.Int).SetBytes(frame.ParentSelector)
selector, err := m.head.GetSelector()
if err != nil {
panic(err)
}
// master frames cannot fork, this is invalid
if parent.Cmp(selector) != 0 {
m.logger.Debug(
"invalid parent selector for frame",
zap.Binary("frame_parent_selector", frame.ParentSelector),
zap.Binary("actual_parent_selector", selector.FillBytes(
make([]byte, 32),
)),
)
go func() {
m.badFrameCh <- frame
}()
continue
}
txn, err := m.clockStore.NewTransaction()
if err != nil {
panic(err)
}
if err := m.clockStore.PutMasterClockFrame(frame, txn); err != nil {
panic(err)
}
if err = txn.Commit(); err != nil {
panic(err)
}
m.head = frame
go func() {
m.newFrameCh <- frame
}()
break
}
if m.head.FrameNumber != prev.FrameNumber {
delete(m.pending, m.head.FrameNumber)
} else {
delete(m.pending, m.head.FrameNumber+1)
}
}
deletes := []uint64{}
for number := range m.pending {
if number < m.head.FrameNumber {
deletes = append(deletes, number)
}
}
for _, number := range deletes {
delete(m.pending, number)
}
}
var _ TimeReel = (*MasterTimeReel)(nil)

View File

@@ -1,82 +0,0 @@
package time_test
import (
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/config"
"source.quilibrium.com/quilibrium/monorepo/node/consensus/time"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/store"
)
func TestMasterTimeReel(t *testing.T) {
logger, _ := zap.NewProduction()
db := store.NewInMemKVDB()
clockStore := store.NewPebbleClockStore(db, logger)
prover := crypto.NewWesolowskiFrameProver(logger)
filter := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
m := time.NewMasterTimeReel(
logger,
clockStore,
&config.EngineConfig{
Filter: filter,
GenesisSeed: strings.Repeat("00", 516),
Difficulty: 10,
},
prover,
)
err := m.Start()
assert.NoError(t, err)
frame, err := m.Head()
assert.NoError(t, err)
frames := []*protobufs.ClockFrame{}
wg := sync.WaitGroup{}
wg.Add(1)
frameCh := m.NewFrameCh()
go func() {
for i := 0; i < 200; i++ {
frames = append(frames, <-frameCh)
}
wg.Done()
}()
// in order
for i := int64(0); i < 100; i++ {
frame, err = prover.ProveMasterClockFrame(frame, i+1, 10)
assert.NoError(t, err)
err := m.Insert(frame, false)
assert.NoError(t, err)
}
insertFrames := []*protobufs.ClockFrame{}
// reverse order
for i := int64(100); i < 200; i++ {
frame, err = prover.ProveMasterClockFrame(frame, i+1, 10)
assert.NoError(t, err)
insertFrames = append(insertFrames, frame)
}
for i := 99; i >= 0; i-- {
err := m.Insert(insertFrames[i], false)
assert.NoError(t, err)
}
wg.Wait()
for i := 0; i < 200; i++ {
assert.NotNil(t, frames[i])
assert.Equal(t, frames[i].FrameNumber, uint64(i+1))
}
}

View File

@@ -1,14 +0,0 @@
package time
import (
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
type TimeReel interface {
Start() error
Stop()
Insert(frame *protobufs.ClockFrame, isSync bool) error
Head() (*protobufs.ClockFrame, error)
NewFrameCh() <-chan *protobufs.ClockFrame
BadFrameCh() <-chan *protobufs.ClockFrame
}

View File

@@ -1,451 +0,0 @@
package channel
import (
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/rand"
"crypto/sha512"
"crypto/subtle"
"encoding/binary"
"github.com/pkg/errors"
"golang.org/x/crypto/hkdf"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
const DOUBLE_RATCHET_PROTOCOL_VERSION = 1
const DOUBLE_RATCHET_PROTOCOL = 1<<8 + DOUBLE_RATCHET_PROTOCOL_VERSION
const CHAIN_KEY = 0x01
const MESSAGE_KEY = 0x02
const AEAD_KEY = 0x03
// Note: If an HSM with raw primitive access becomes available, the raw crypto
// mechanisms should be refactored into calls in KeyManager and implemented
// through the driver
type DoubleRatchetParticipant struct {
sendingEphemeralPrivateKey curves.Scalar
receivingEphemeralKey curves.Point
curve *curves.Curve
keyManager keys.KeyManager
rootKey []byte
sendingChainKey []byte
currentSendingHeaderKey []byte
currentReceivingHeaderKey []byte
nextSendingHeaderKey []byte
nextReceivingHeaderKey []byte
receivingChainKey []byte
currentSendingChainLength uint32
previousSendingChainLength uint32
currentReceivingChainLength uint32
previousReceivingChainLength uint32
skippedKeysMap map[string]map[uint32][]byte
}
func NewDoubleRatchetParticipant(
sessionKey []byte,
sendingHeaderKey []byte,
nextReceivingHeaderKey []byte,
isSender bool,
sendingEphemeralPrivateKey curves.Scalar,
receivingEphemeralKey curves.Point,
curve *curves.Curve,
keyManager keys.KeyManager,
) (*DoubleRatchetParticipant, error) {
participant := &DoubleRatchetParticipant{}
participant.sendingEphemeralPrivateKey = sendingEphemeralPrivateKey
participant.skippedKeysMap = make(map[string]map[uint32][]byte)
participant.keyManager = keyManager
participant.currentSendingChainLength = 0
participant.previousSendingChainLength = 0
participant.currentReceivingChainLength = 0
participant.previousReceivingChainLength = 0
if sendingEphemeralPrivateKey.Point().CurveName() !=
receivingEphemeralKey.CurveName() || receivingEphemeralKey.CurveName() !=
curve.Name {
return nil, errors.New("curve mismatch")
}
participant.curve = curve
if isSender {
hash := hkdf.New(
sha512.New,
receivingEphemeralKey.Mul(
sendingEphemeralPrivateKey,
).ToAffineCompressed(),
sessionKey,
[]byte("quilibrium-double-ratchet"),
)
rkck := make([]byte, 96)
if _, err := hash.Read(rkck[:]); err != nil {
return nil, errors.Wrap(err, "failed establishing root key")
}
participant.currentSendingHeaderKey = sendingHeaderKey
participant.nextReceivingHeaderKey = nextReceivingHeaderKey
participant.rootKey = rkck[:32]
participant.sendingChainKey = rkck[32:64]
participant.nextSendingHeaderKey = rkck[64:96]
participant.receivingEphemeralKey = receivingEphemeralKey
} else {
participant.rootKey = sessionKey
participant.nextReceivingHeaderKey = sendingHeaderKey
participant.nextSendingHeaderKey = nextReceivingHeaderKey
}
return participant, nil
}
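// RatchetEncrypt advances the sending side of the symmetric-key ratchet by
// one step: the current sending chain key is split into a new chain key, a
// message key, and an AEAD key (see ratchetKeys), the header is encrypted
// under the current sending header key, and the body is encrypted under the
// message key with the AEAD key and the header ciphertext bound as
// associated data.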
func (r *DoubleRatchetParticipant) RatchetEncrypt(
message []byte,
) (*protobufs.P2PChannelEnvelope, error) {
envelope := &protobufs.P2PChannelEnvelope{
ProtocolIdentifier: DOUBLE_RATCHET_PROTOCOL,
MessageHeader: &protobufs.MessageCiphertext{},
MessageBody: &protobufs.MessageCiphertext{},
}
newChainKey, messageKey, aeadKey := ratchetKeys(r.sendingChainKey)
r.sendingChainKey = newChainKey
var err error
header := r.encodeHeader()
envelope.MessageHeader, err = r.encrypt(
header,
r.currentSendingHeaderKey,
nil,
)
if err != nil {
return nil, errors.Wrap(err, "could not encrypt header")
}
envelope.MessageBody, err = r.encrypt(
message,
messageKey,
append(append([]byte{}, aeadKey...), envelope.MessageHeader.Ciphertext...),
)
if err != nil {
return nil, errors.Wrap(err, "could not encrypt message")
}
r.currentSendingChainLength++
return envelope, nil
}
func (r *DoubleRatchetParticipant) RatchetDecrypt(
envelope *protobufs.P2PChannelEnvelope,
) ([]byte, error) {
plaintext, err := r.trySkippedMessageKeys(envelope)
if err != nil {
return nil, errors.Wrap(err, "could not decrypt from matching skipped key")
}
if plaintext != nil {
return plaintext, nil
}
header, shouldRatchet, err := r.decryptHeader(
envelope.MessageHeader,
r.currentReceivingHeaderKey,
)
if err != nil {
return nil, errors.Wrap(err, "could not decrypt header")
}
receivingEphemeralKey,
previousReceivingChainLength,
currentReceivingChainLength,
err := r.decodeHeader(header)
if err != nil {
return nil, errors.Wrap(err, "could not decode header")
}
if shouldRatchet {
if err := r.skipMessageKeys(previousReceivingChainLength); err != nil {
return nil, errors.Wrap(err, "could not skip previous message keys")
}
if err := r.ratchetEphemeralKeys(receivingEphemeralKey); err != nil {
return nil, errors.Wrap(err, "could not ratchet ephemeral keys")
}
}
if err := r.skipMessageKeys(currentReceivingChainLength); err != nil {
return nil, errors.Wrap(err, "could not skip message keys")
}
newChainKey, messageKey, aeadKey := ratchetKeys(r.receivingChainKey)
plaintext, err = r.decrypt(
envelope.MessageBody,
messageKey,
append(
append([]byte{}, aeadKey...),
envelope.MessageHeader.Ciphertext...,
),
)
r.receivingChainKey = newChainKey
r.currentReceivingChainLength++
return plaintext, errors.Wrap(err, "could not decrypt message")
}
func (r *DoubleRatchetParticipant) ratchetEphemeralKeys(
newReceivingEphemeralKey curves.Point,
) error {
r.previousSendingChainLength = r.currentSendingChainLength
r.currentSendingChainLength = 0
r.currentReceivingChainLength = 0
r.currentSendingHeaderKey = r.nextSendingHeaderKey
r.currentReceivingHeaderKey = r.nextReceivingHeaderKey
r.receivingEphemeralKey = newReceivingEphemeralKey
hash := hkdf.New(
sha512.New,
newReceivingEphemeralKey.Mul(
r.sendingEphemeralPrivateKey,
).ToAffineCompressed(),
r.rootKey,
[]byte("quilibrium-double-ratchet"),
)
rkck := make([]byte, 96)
if _, err := hash.Read(rkck[:]); err != nil {
return errors.Wrap(err, "failed ratcheting root key")
}
r.rootKey = rkck[:32]
r.receivingChainKey = rkck[32:64]
r.nextReceivingHeaderKey = rkck[64:]
r.sendingEphemeralPrivateKey = r.curve.NewScalar().Random(rand.Reader)
hash = hkdf.New(
sha512.New,
newReceivingEphemeralKey.Mul(
r.sendingEphemeralPrivateKey,
).ToAffineCompressed(),
r.rootKey,
[]byte("quilibrium-double-ratchet"),
)
rkck2 := make([]byte, 96)
if _, err := hash.Read(rkck2[:]); err != nil {
return errors.Wrap(err, "failed ratcheting root key")
}
r.rootKey = rkck2[:32]
r.sendingChainKey = rkck2[32:64]
r.nextSendingHeaderKey = rkck2[64:]
return nil
}
func (r *DoubleRatchetParticipant) trySkippedMessageKeys(
envelope *protobufs.P2PChannelEnvelope,
) ([]byte, error) {
for receivingHeaderKey, skippedKeys := range r.skippedKeysMap {
header, _, err := r.decryptHeader(
envelope.MessageHeader,
[]byte(receivingHeaderKey),
)
if err == nil {
_, _, current, err := r.decodeHeader(header)
if err != nil {
return nil, errors.Wrap(err, "malformed header")
}
messageKey := skippedKeys[current][:32]
aeadKey := skippedKeys[current][32:]
plaintext, err := r.decrypt(
envelope.MessageBody,
messageKey,
append(
append([]byte{}, aeadKey...),
envelope.MessageHeader.Ciphertext[:]...,
),
)
if err != nil {
return nil, errors.Wrap(err, "could not decrypt from skipped key")
}
delete(r.skippedKeysMap[receivingHeaderKey], current)
if len(r.skippedKeysMap[receivingHeaderKey]) == 0 {
delete(r.skippedKeysMap, receivingHeaderKey)
}
return plaintext, nil
}
}
return nil, nil
}
func (r *DoubleRatchetParticipant) skipMessageKeys(until uint32) error {
if r.currentReceivingChainLength+100 < until {
return errors.New("skip limit exceeded")
}
if r.receivingChainKey != nil {
for r.currentReceivingChainLength < until {
newChainKey, messageKey, aeadKey := ratchetKeys(r.receivingChainKey)
skippedKeys := r.skippedKeysMap[string(r.currentReceivingHeaderKey)]
if skippedKeys == nil {
// keep the local reference in sync with the stored map; writing into a
// nil map below would otherwise panic
skippedKeys = make(map[uint32][]byte)
r.skippedKeysMap[string(r.currentReceivingHeaderKey)] = skippedKeys
}
skippedKeys[r.currentReceivingChainLength] = append(
append([]byte{}, messageKey...),
aeadKey...,
)
r.receivingChainKey = newChainKey
r.currentReceivingChainLength++
}
}
return nil
}
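// encodeHeader lays the header out as: the compressed sending ephemeral
// public key, followed by the previous sending chain length and the current
// sending chain length, each as a 4-byte big-endian integer (decodeHeader
// parses it from the tail for exactly this reason).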
func (r *DoubleRatchetParticipant) encodeHeader() []byte {
header := []byte{}
header = append(
header,
r.curve.NewGeneratorPoint().Mul(
r.sendingEphemeralPrivateKey,
).ToAffineCompressed()[:]...,
)
header = binary.BigEndian.AppendUint32(header, r.previousSendingChainLength)
header = binary.BigEndian.AppendUint32(header, r.currentSendingChainLength)
return header
}
func (r *DoubleRatchetParticipant) decryptHeader(
ciphertext *protobufs.MessageCiphertext,
receivingHeaderKey []byte,
) ([]byte, bool, error) {
header, err := r.decrypt(
ciphertext,
receivingHeaderKey,
nil,
)
if err != nil && subtle.ConstantTimeCompare(
r.currentReceivingHeaderKey,
receivingHeaderKey,
) == 1 {
if header, err = r.decrypt(
ciphertext,
r.nextReceivingHeaderKey,
nil,
); err != nil {
return nil, false, errors.Wrap(err, "could not decrypt header")
}
return header, true, nil
}
return header, false, errors.Wrap(err, "could not decrypt header")
}
func (r *DoubleRatchetParticipant) decodeHeader(
header []byte,
) (curves.Point, uint32, uint32, error) {
if len(header) < 9 {
return nil, 0, 0, errors.New("malformed header")
}
currentReceivingChainLength := binary.BigEndian.Uint32(header[len(header)-4:])
previousReceivingChainLength := binary.BigEndian.Uint32(
header[len(header)-8 : len(header)-4],
)
receivingEphemeralKeyBytes := header[:len(header)-8]
receivingEphemeralKey, err := r.curve.Point.FromAffineCompressed(
receivingEphemeralKeyBytes,
)
return receivingEphemeralKey,
previousReceivingChainLength,
currentReceivingChainLength,
errors.Wrap(err, "could not decode receiving dh key")
}
func (r *DoubleRatchetParticipant) encrypt(
plaintext []byte,
key []byte,
associatedData []byte,
) (*protobufs.MessageCiphertext, error) {
iv := [12]byte{}
rand.Read(iv[:])
aesCipher, err := aes.NewCipher(key)
if err != nil {
return nil, errors.Wrap(err, "could not construct cipher")
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
return nil, errors.Wrap(err, "could not construct block")
}
ciphertext := &protobufs.MessageCiphertext{}
if associatedData == nil {
associatedData = make([]byte, 32)
if _, err := rand.Read(associatedData); err != nil {
return nil, errors.Wrap(err, "could not obtain entropy")
}
ciphertext.AssociatedData = associatedData
}
ciphertext.Ciphertext = gcm.Seal(nil, iv[:], plaintext, associatedData)
ciphertext.InitializationVector = iv[:]
return ciphertext, nil
}
func (r *DoubleRatchetParticipant) decrypt(
ciphertext *protobufs.MessageCiphertext,
key []byte,
associatedData []byte,
) ([]byte, error) {
if associatedData == nil {
associatedData = ciphertext.AssociatedData
}
aesCipher, err := aes.NewCipher(key)
if err != nil {
return nil, errors.Wrap(err, "could not construct cipher")
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
return nil, errors.Wrap(err, "could not construct block")
}
plaintext, err := gcm.Open(
nil,
ciphertext.InitializationVector,
ciphertext.Ciphertext,
associatedData,
)
return plaintext, errors.Wrap(err, "could not decrypt ciphertext")
}
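// ratchetKeys derives the next chain key, message key, and AEAD key from the
// current chain key by keying an HMAC-SHA512 with it and hashing the
// distinct single-byte labels CHAIN_KEY, MESSAGE_KEY, and AEAD_KEY,
// truncating each result to 32 bytes.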
func ratchetKeys(inputKey []byte) ([]byte, []byte, []byte) {
buf := hmac.New(sha512.New, inputKey)
buf.Write([]byte{AEAD_KEY})
aeadKey := buf.Sum(nil)
buf.Reset()
buf.Write([]byte{MESSAGE_KEY})
messageKey := buf.Sum(nil)
buf.Reset()
buf.Write([]byte{CHAIN_KEY})
chainKey := buf.Sum(nil)
return chainKey[:32], messageKey[:32], aeadKey[:32]
}

View File

@@ -1,129 +0,0 @@
package channel_test
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/channel"
)
func TestRatchetEncrypt(t *testing.T) {
x448SendingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448SendingEphemeralPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448ReceivingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448ReceivingSignedPrePrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448SendingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingIdentityPrivateKey)
x448SendingEphemeralKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingEphemeralPrivateKey)
x448ReceivingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingIdentityPrivateKey)
x448ReceivingSignedPreKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingSignedPrePrivateKey)
senderResult := channel.SenderX3DH(
x448SendingIdentityPrivateKey,
x448SendingEphemeralPrivateKey,
x448ReceivingIdentityKey,
x448ReceivingSignedPreKey,
96,
)
receiverResult := channel.ReceiverX3DH(
x448ReceivingIdentityPrivateKey,
x448ReceivingSignedPrePrivateKey,
x448SendingIdentityKey,
x448SendingEphemeralKey,
96,
)
sender, err := channel.NewDoubleRatchetParticipant(
senderResult[:32],
senderResult[32:64],
senderResult[64:],
true,
x448SendingEphemeralPrivateKey,
x448ReceivingSignedPreKey,
curves.ED448(),
nil,
)
require.NoError(t, err)
receiver, err := channel.NewDoubleRatchetParticipant(
receiverResult[:32],
receiverResult[32:64],
receiverResult[64:],
false,
x448ReceivingSignedPrePrivateKey,
x448SendingEphemeralKey,
curves.ED448(),
nil,
)
require.NoError(t, err)
envelope1, err := sender.RatchetEncrypt([]byte("hello there"))
require.NoError(t, err)
envelope2, err := sender.RatchetEncrypt([]byte("general kenobi"))
require.NoError(t, err)
plaintext1, err := receiver.RatchetDecrypt(envelope1)
require.NoError(t, err)
plaintext2, err := receiver.RatchetDecrypt(envelope2)
require.NoError(t, err)
envelope3, err := receiver.RatchetEncrypt([]byte("you are a bold one"))
require.NoError(t, err)
envelope4, err := receiver.RatchetEncrypt([]byte("[mechanical laughing]"))
require.NoError(t, err)
plaintext3, err := sender.RatchetDecrypt(envelope3)
require.NoError(t, err)
plaintext4, err := sender.RatchetDecrypt(envelope4)
require.NoError(t, err)
// confirm large messages
msg5 := make([]byte, 1024*1024*10)
msg6 := make([]byte, 1024*1024*10)
msg7 := make([]byte, 1024*1024*10)
msg8 := make([]byte, 1024*1024*10)
rand.Read(msg5)
rand.Read(msg6)
rand.Read(msg7)
rand.Read(msg8)
envelope5, err := sender.RatchetEncrypt(msg5)
require.NoError(t, err)
envelope6, err := sender.RatchetEncrypt(msg6)
require.NoError(t, err)
plaintext5, err := receiver.RatchetDecrypt(envelope5)
require.NoError(t, err)
plaintext6, err := receiver.RatchetDecrypt(envelope6)
require.NoError(t, err)
envelope7, err := receiver.RatchetEncrypt(msg7)
require.NoError(t, err)
envelope8, err := receiver.RatchetEncrypt(msg8)
require.NoError(t, err)
plaintext7, err := sender.RatchetDecrypt(envelope7)
require.NoError(t, err)
plaintext8, err := sender.RatchetDecrypt(envelope8)
require.NoError(t, err)
require.Equal(t, []byte("hello there"), plaintext1)
require.Equal(t, []byte("general kenobi"), plaintext2)
require.Equal(t, []byte("you are a bold one"), plaintext3)
require.Equal(t, []byte("[mechanical laughing]"), plaintext4)
require.Equal(t, msg5, plaintext5)
require.Equal(t, msg6, plaintext6)
require.Equal(t, msg7, plaintext7)
require.Equal(t, msg8, plaintext8)
}

View File

@@ -1,337 +0,0 @@
package channel
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"fmt"
"math/big"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
type Feldman struct {
threshold int
total int
id int
fragsForCounterparties map[int][]byte
fragsFromCounterparties map[int]curves.Scalar
zkpok curves.Scalar
secret curves.Scalar
scalar curves.Scalar
generator curves.Point
publicKey curves.Point
point curves.Point
randomCommitmentPoint curves.Point
round FeldmanRound
zkcommitsFromCounterparties map[int][]byte
pointsFromCounterparties map[int]curves.Point
curve curves.Curve
}
type FeldmanReveal struct {
Point []byte
RandomCommitmentPoint []byte
ZKPoK []byte
}
var ErrWrongRound = errors.New("wrong round for feldman")
type FeldmanRound int
const (
FELDMAN_ROUND_UNINITIALIZED = FeldmanRound(0)
FELDMAN_ROUND_INITIALIZED = FeldmanRound(1)
FELDMAN_ROUND_COMMITTED = FeldmanRound(2)
FELDMAN_ROUND_REVEALED = FeldmanRound(3)
FELDMAN_ROUND_RECONSTRUCTED = FeldmanRound(4)
)
func NewFeldman(
threshold, total, id int,
secret curves.Scalar,
curve curves.Curve,
generator curves.Point,
) (*Feldman, error) {
return &Feldman{
threshold: threshold,
total: total,
id: id,
fragsForCounterparties: make(map[int][]byte),
fragsFromCounterparties: make(map[int]curves.Scalar),
zkpok: nil,
secret: secret,
scalar: nil,
generator: generator,
publicKey: secret.Point().Generator(),
point: secret.Point().Generator(),
round: FELDMAN_ROUND_UNINITIALIZED,
zkcommitsFromCounterparties: make(map[int][]byte),
pointsFromCounterparties: make(map[int]curves.Point),
curve: curve,
}, nil
}
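// SamplePolynomial performs the Shamir share generation step: it draws a
// random polynomial of degree threshold-1 whose constant term is the local
// secret, evaluates it at x = 1..total, keeps the evaluation at the local id
// as this party's scalar, and stores the remaining evaluations as fragments
// for counterparties.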
func (f *Feldman) SamplePolynomial() error {
if f.round != FELDMAN_ROUND_UNINITIALIZED {
return errors.Wrap(ErrWrongRound, "sample polynomial")
}
coeffs := append([]curves.Scalar{}, f.secret)
for i := 1; i < f.threshold; i++ {
secret := f.curve.NewScalar()
secret = secret.Random(rand.Reader)
coeffs = append(coeffs, secret)
}
for i := 1; i <= f.total; i++ {
result := coeffs[0].Clone()
x := f.curve.Scalar.New(i)
for j := 1; j < f.threshold; j++ {
term := coeffs[j].Mul(x)
result = result.Add(term)
x = x.Mul(f.curve.Scalar.New(i))
}
if i == f.id {
f.scalar = result
} else {
fragBytes := result.Bytes()
f.fragsForCounterparties[i] = fragBytes
}
}
f.round = FELDMAN_ROUND_INITIALIZED
return nil
}
func (f *Feldman) Scalar() curves.Scalar {
return f.scalar
}
func (f *Feldman) GetPolyFrags() (map[int][]byte, error) {
if f.round != FELDMAN_ROUND_INITIALIZED {
return nil, errors.Wrap(ErrWrongRound, "get poly frags")
}
return f.fragsForCounterparties, nil
}
func (f *Feldman) SetPolyFragForParty(id int, frag []byte) ([]byte, error) {
if f.round != FELDMAN_ROUND_INITIALIZED {
return nil, errors.Wrap(ErrWrongRound, "set poly frag for party")
}
var err error
f.fragsFromCounterparties[id], err = f.curve.NewScalar().SetBytes(frag)
if err != nil {
return nil, errors.Wrap(err, "set poly frag for party")
}
if len(f.fragsFromCounterparties) == f.total-1 {
for _, v := range f.fragsFromCounterparties {
f.scalar = f.scalar.Add(v)
}
f.point = f.generator.Mul(f.scalar)
randCommitment := f.curve.NewScalar().Random(rand.Reader)
f.randomCommitmentPoint = f.generator.Mul(randCommitment)
randCommitmentPointBytes := f.randomCommitmentPoint.ToAffineCompressed()
publicPointBytes := f.point.ToAffineCompressed()
challenge := sha256.Sum256(
append(
append([]byte{}, publicPointBytes...),
randCommitmentPointBytes...,
),
)
challengeBig, err := f.curve.NewScalar().SetBigInt(
new(big.Int).SetBytes(challenge[:]),
)
if err != nil {
return nil, errors.Wrap(err, "set poly frag for party")
}
f.zkpok = f.scalar.Mul(challengeBig).Add(randCommitment)
zkpokBytes := f.zkpok.Bytes()
zkcommit := sha256.Sum256(
append(
append([]byte{}, randCommitmentPointBytes...),
zkpokBytes...,
),
)
f.round = FELDMAN_ROUND_COMMITTED
return zkcommit[:], nil
}
return []byte{}, nil
}
func (f *Feldman) ReceiveCommitments(
id int,
zkcommit []byte,
) (*FeldmanReveal, error) {
if f.round != FELDMAN_ROUND_COMMITTED {
return nil, errors.Wrap(ErrWrongRound, "receive commitments")
}
f.zkcommitsFromCounterparties[id] = zkcommit
if len(f.zkcommitsFromCounterparties) == f.total-1 {
publicPointBytes := f.point.ToAffineCompressed()
randCommitmentPointBytes := f.randomCommitmentPoint.ToAffineCompressed()
f.round = FELDMAN_ROUND_REVEALED
zkpokBytes := f.zkpok.Bytes()
return &FeldmanReveal{
Point: publicPointBytes,
RandomCommitmentPoint: randCommitmentPointBytes,
ZKPoK: zkpokBytes,
}, nil
}
return nil, nil
}
func (f *Feldman) Recombine(id int, reveal *FeldmanReveal) (bool, error) {
if f.round != FELDMAN_ROUND_REVEALED {
return false, errors.Wrap(ErrWrongRound, "recombine")
}
counterpartyPoint, err := f.curve.NewGeneratorPoint().FromAffineCompressed(
reveal.Point,
)
if err != nil {
return false, errors.Wrap(err, "recombine")
}
if counterpartyPoint.Equal(f.curve.NewGeneratorPoint()) ||
counterpartyPoint.Equal(f.generator) {
return false, errors.Wrap(errors.New("counterparty sent generator"), "recombine")
}
counterpartyRandomCommitmentPoint, err := f.curve.NewGeneratorPoint().
FromAffineCompressed(reveal.RandomCommitmentPoint)
if err != nil {
return false, errors.Wrap(err, "recombine")
}
if counterpartyRandomCommitmentPoint.Equal(f.curve.NewGeneratorPoint()) ||
counterpartyRandomCommitmentPoint.Equal(f.generator) {
return false, errors.Wrap(errors.New("counterparty sent generator"), "recombine")
}
counterpartyZKPoK, err := f.curve.NewScalar().SetBytes(reveal.ZKPoK)
if err != nil {
return false, errors.Wrap(err, "recombine")
}
counterpartyZKCommit := f.zkcommitsFromCounterparties[id]
challenge := sha256.Sum256(append(
append([]byte{}, reveal.Point...),
reveal.RandomCommitmentPoint...,
))
challengeBig, err := f.curve.NewScalar().SetBigInt(
new(big.Int).SetBytes(challenge[:]),
)
if err != nil {
return false, errors.Wrap(err, "recombine")
}
proof := f.generator.Mul(counterpartyZKPoK)
counterpartyRandomCommitmentPoint = counterpartyRandomCommitmentPoint.Add(
counterpartyPoint.Mul(challengeBig),
)
if !proof.Equal(counterpartyRandomCommitmentPoint) {
return false, errors.Wrap(
errors.New(fmt.Sprintf("invalid proof from %d", id)),
"recombine",
)
}
verifier := sha256.Sum256(append(
append([]byte{}, reveal.RandomCommitmentPoint...),
reveal.ZKPoK...,
))
if !bytes.Equal(counterpartyZKCommit, verifier[:]) {
return false, errors.Wrap(
errors.New(fmt.Sprintf("%d changed zkpok after commit", id)),
"recombine",
)
}
f.pointsFromCounterparties[id] = counterpartyPoint
if len(f.pointsFromCounterparties) == f.total-1 {
f.pointsFromCounterparties[f.id] = f.point
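// The loop below is Lagrange interpolation in the exponent over each window
// of `threshold` consecutive shares: every point P_j is weighted by
// lambda_j = prod_{k != j} k / (k - j) and the weighted points are summed,
// which must yield the same group public key for every window.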
for i := 1; i <= f.total-f.threshold+1; i++ {
var reconstructedSum curves.Point = nil
for j := i; j < f.threshold+i; j++ {
num := f.curve.Scalar.One()
den := f.curve.Scalar.One()
for k := i; k < f.threshold+i; k++ {
if j != k {
j := f.curve.NewScalar().New(j)
k := f.curve.NewScalar().New(k)
num = num.Mul(k)
den = den.Mul(k.Sub(j))
}
}
den, _ = den.Invert()
reconstructedFragment := f.pointsFromCounterparties[j].Mul(num.Mul(den))
if reconstructedSum == nil {
reconstructedSum = reconstructedFragment
} else {
reconstructedSum = reconstructedSum.Add(reconstructedFragment)
}
}
if f.publicKey.Equal(f.curve.NewGeneratorPoint()) ||
f.publicKey.Equal(f.generator) {
f.publicKey = reconstructedSum
} else if !f.publicKey.Equal(reconstructedSum) {
return false, errors.Wrap(
errors.New("recombination mismatch"),
"recombine",
)
}
}
f.round = FELDMAN_ROUND_RECONSTRUCTED
}
return f.round == FELDMAN_ROUND_RECONSTRUCTED, nil
}
func (f *Feldman) PublicKey() curves.Point {
return f.publicKey
}
func (f *Feldman) PublicKeyBytes() []byte {
return f.publicKey.ToAffineCompressed()
}
func ReverseScalarBytes(inBytes []byte, length int) []byte {
outBytes := make([]byte, length)
for i, j := 0, len(inBytes)-1; j >= 0; i, j = i+1, j-1 {
outBytes[i] = inBytes[j]
}
return outBytes
}

View File

@@ -1,446 +0,0 @@
package channel_test
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/assert"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
crypto "source.quilibrium.com/quilibrium/monorepo/node/crypto/channel"
)
func TestFeldman(t *testing.T) {
s1 := curves.ED25519().NewScalar().Random(rand.Reader)
f1, err := crypto.NewFeldman(
3,
5,
1,
s1,
*curves.ED25519(),
curves.ED25519().NewGeneratorPoint(),
)
assert.NoError(t, err)
s2 := curves.ED25519().NewScalar().Random(rand.Reader)
f2, err := crypto.NewFeldman(
3,
5,
2,
s2,
*curves.ED25519(),
curves.ED25519().NewGeneratorPoint(),
)
assert.NoError(t, err)
s3 := curves.ED25519().NewScalar().Random(rand.Reader)
f3, err := crypto.NewFeldman(
3,
5,
3,
s3,
*curves.ED25519(),
curves.ED25519().NewGeneratorPoint(),
)
assert.NoError(t, err)
s4 := curves.ED25519().NewScalar().Random(rand.Reader)
f4, err := crypto.NewFeldman(
3,
5,
4,
s4,
*curves.ED25519(),
curves.ED25519().NewGeneratorPoint(),
)
assert.NoError(t, err)
s5 := curves.ED25519().NewScalar().Random(rand.Reader)
f5, err := crypto.NewFeldman(
3,
5,
5,
s5,
*curves.ED25519(),
curves.ED25519().NewGeneratorPoint(),
)
assert.NoError(t, err)
err = f1.SamplePolynomial()
assert.NoError(t, err)
err = f2.SamplePolynomial()
assert.NoError(t, err)
err = f3.SamplePolynomial()
assert.NoError(t, err)
err = f4.SamplePolynomial()
assert.NoError(t, err)
err = f5.SamplePolynomial()
assert.NoError(t, err)
m1, err := f1.GetPolyFrags()
assert.NoError(t, err)
m2, err := f2.GetPolyFrags()
assert.NoError(t, err)
m3, err := f3.GetPolyFrags()
assert.NoError(t, err)
m4, err := f4.GetPolyFrags()
assert.NoError(t, err)
m5, err := f5.GetPolyFrags()
assert.NoError(t, err)
m1[1] = f1.Scalar().Bytes()
_, err = f1.SetPolyFragForParty(2, m2[1])
assert.NoError(t, err)
_, err = f1.SetPolyFragForParty(3, m3[1])
assert.NoError(t, err)
_, err = f1.SetPolyFragForParty(4, m4[1])
assert.NoError(t, err)
z1, err := f1.SetPolyFragForParty(5, m5[1])
assert.NoError(t, err)
_, err = f2.SetPolyFragForParty(1, m1[2])
assert.NoError(t, err)
_, err = f2.SetPolyFragForParty(3, m3[2])
assert.NoError(t, err)
_, err = f2.SetPolyFragForParty(4, m4[2])
assert.NoError(t, err)
z2, err := f2.SetPolyFragForParty(5, m5[2])
assert.NoError(t, err)
_, err = f3.SetPolyFragForParty(1, m1[3])
assert.NoError(t, err)
_, err = f3.SetPolyFragForParty(2, m2[3])
assert.NoError(t, err)
_, err = f3.SetPolyFragForParty(4, m4[3])
assert.NoError(t, err)
z3, err := f3.SetPolyFragForParty(5, m5[3])
assert.NoError(t, err)
_, err = f4.SetPolyFragForParty(1, m1[4])
assert.NoError(t, err)
_, err = f4.SetPolyFragForParty(2, m2[4])
assert.NoError(t, err)
_, err = f4.SetPolyFragForParty(3, m3[4])
assert.NoError(t, err)
z4, err := f4.SetPolyFragForParty(5, m5[4])
assert.NoError(t, err)
_, err = f5.SetPolyFragForParty(1, m1[5])
assert.NoError(t, err)
_, err = f5.SetPolyFragForParty(2, m2[5])
assert.NoError(t, err)
_, err = f5.SetPolyFragForParty(3, m3[5])
assert.NoError(t, err)
z5, err := f5.SetPolyFragForParty(4, m4[5])
assert.NoError(t, err)
_, err = f1.ReceiveCommitments(2, z2)
assert.NoError(t, err)
assert.NoError(t, err)
_, err = f1.ReceiveCommitments(3, z3)
assert.NoError(t, err)
assert.NoError(t, err)
_, err = f1.ReceiveCommitments(4, z4)
assert.NoError(t, err)
assert.NoError(t, err)
r1, err := f1.ReceiveCommitments(5, z5)
assert.NoError(t, err)
assert.NoError(t, err)
_, err = f2.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f2.ReceiveCommitments(3, z3)
assert.NoError(t, err)
_, err = f2.ReceiveCommitments(4, z4)
assert.NoError(t, err)
r2, err := f2.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f3.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f3.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f3.ReceiveCommitments(4, z4)
assert.NoError(t, err)
r3, err := f3.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f4.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f4.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f4.ReceiveCommitments(3, z3)
assert.NoError(t, err)
r4, err := f4.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f5.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f5.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f5.ReceiveCommitments(3, z3)
assert.NoError(t, err)
r5, err := f5.ReceiveCommitments(4, z4)
assert.NoError(t, err)
_, err = f1.Recombine(2, r2)
assert.NoError(t, err)
_, err = f1.Recombine(3, r3)
assert.NoError(t, err)
_, err = f1.Recombine(4, r4)
assert.NoError(t, err)
_, err = f1.Recombine(5, r5)
assert.NoError(t, err)
_, err = f2.Recombine(1, r1)
assert.NoError(t, err)
_, err = f2.Recombine(3, r3)
assert.NoError(t, err)
_, err = f2.Recombine(4, r4)
assert.NoError(t, err)
_, err = f2.Recombine(5, r5)
assert.NoError(t, err)
_, err = f3.Recombine(1, r1)
assert.NoError(t, err)
_, err = f3.Recombine(2, r2)
assert.NoError(t, err)
_, err = f3.Recombine(4, r4)
assert.NoError(t, err)
_, err = f3.Recombine(5, r5)
assert.NoError(t, err)
_, err = f4.Recombine(1, r1)
assert.NoError(t, err)
_, err = f4.Recombine(2, r2)
assert.NoError(t, err)
_, err = f4.Recombine(3, r3)
assert.NoError(t, err)
_, err = f4.Recombine(5, r5)
assert.NoError(t, err)
_, err = f5.Recombine(1, r1)
assert.NoError(t, err)
_, err = f5.Recombine(2, r2)
assert.NoError(t, err)
_, err = f5.Recombine(3, r3)
assert.NoError(t, err)
_, err = f5.Recombine(4, r4)
assert.NoError(t, err)
s := s1.Add(s2.Add(s3.Add(s4.Add(s5))))
assert.True(t, curves.ED25519().NewGeneratorPoint().Mul(s).Equal(f1.PublicKey()))
assert.True(t, f5.PublicKey().Equal(f1.PublicKey()))
}
func TestFeldmanCustomGenerator(t *testing.T) {
gen := curves.ED25519().Point.Random(rand.Reader)
f1, err := crypto.NewFeldman(
3,
5,
1,
curves.ED25519().NewScalar().Random(rand.Reader),
*curves.ED25519(),
gen,
)
assert.NoError(t, err)
f2, err := crypto.NewFeldman(
3,
5,
2,
curves.ED25519().NewScalar().Random(rand.Reader),
*curves.ED25519(),
gen,
)
assert.NoError(t, err)
f3, err := crypto.NewFeldman(
3,
5,
3,
curves.ED25519().NewScalar().Random(rand.Reader),
*curves.ED25519(),
gen,
)
assert.NoError(t, err)
f4, err := crypto.NewFeldman(
3,
5,
4,
curves.ED25519().NewScalar().Random(rand.Reader),
*curves.ED25519(),
gen,
)
assert.NoError(t, err)
f5, err := crypto.NewFeldman(
3,
5,
5,
curves.ED25519().NewScalar().Random(rand.Reader),
*curves.ED25519(),
gen,
)
assert.NoError(t, err)
err = f1.SamplePolynomial()
assert.NoError(t, err)
err = f2.SamplePolynomial()
assert.NoError(t, err)
err = f3.SamplePolynomial()
assert.NoError(t, err)
err = f4.SamplePolynomial()
assert.NoError(t, err)
err = f5.SamplePolynomial()
assert.NoError(t, err)
m1, err := f1.GetPolyFrags()
assert.NoError(t, err)
m2, err := f2.GetPolyFrags()
assert.NoError(t, err)
m3, err := f3.GetPolyFrags()
assert.NoError(t, err)
m4, err := f4.GetPolyFrags()
assert.NoError(t, err)
m5, err := f5.GetPolyFrags()
assert.NoError(t, err)
_, err = f1.SetPolyFragForParty(2, m2[1])
assert.NoError(t, err)
_, err = f1.SetPolyFragForParty(3, m3[1])
assert.NoError(t, err)
_, err = f1.SetPolyFragForParty(4, m4[1])
assert.NoError(t, err)
z1, err := f1.SetPolyFragForParty(5, m5[1])
assert.NoError(t, err)
_, err = f2.SetPolyFragForParty(1, m1[2])
assert.NoError(t, err)
_, err = f2.SetPolyFragForParty(3, m3[2])
assert.NoError(t, err)
_, err = f2.SetPolyFragForParty(4, m4[2])
assert.NoError(t, err)
z2, err := f2.SetPolyFragForParty(5, m5[2])
assert.NoError(t, err)
_, err = f3.SetPolyFragForParty(1, m1[3])
assert.NoError(t, err)
_, err = f3.SetPolyFragForParty(2, m2[3])
assert.NoError(t, err)
_, err = f3.SetPolyFragForParty(4, m4[3])
assert.NoError(t, err)
z3, err := f3.SetPolyFragForParty(5, m5[3])
assert.NoError(t, err)
_, err = f4.SetPolyFragForParty(1, m1[4])
assert.NoError(t, err)
_, err = f4.SetPolyFragForParty(2, m2[4])
assert.NoError(t, err)
_, err = f4.SetPolyFragForParty(3, m3[4])
assert.NoError(t, err)
z4, err := f4.SetPolyFragForParty(5, m5[4])
assert.NoError(t, err)
_, err = f5.SetPolyFragForParty(1, m1[5])
assert.NoError(t, err)
_, err = f5.SetPolyFragForParty(2, m2[5])
assert.NoError(t, err)
_, err = f5.SetPolyFragForParty(3, m3[5])
assert.NoError(t, err)
z5, err := f5.SetPolyFragForParty(4, m4[5])
assert.NoError(t, err)
_, err = f1.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f1.ReceiveCommitments(3, z3)
assert.NoError(t, err)
_, err = f1.ReceiveCommitments(4, z4)
assert.NoError(t, err)
r1, err := f1.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f2.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f2.ReceiveCommitments(3, z3)
assert.NoError(t, err)
_, err = f2.ReceiveCommitments(4, z4)
assert.NoError(t, err)
r2, err := f2.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f3.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f3.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f3.ReceiveCommitments(4, z4)
assert.NoError(t, err)
r3, err := f3.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f4.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f4.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f4.ReceiveCommitments(3, z3)
assert.NoError(t, err)
r4, err := f4.ReceiveCommitments(5, z5)
assert.NoError(t, err)
_, err = f5.ReceiveCommitments(1, z1)
assert.NoError(t, err)
_, err = f5.ReceiveCommitments(2, z2)
assert.NoError(t, err)
_, err = f5.ReceiveCommitments(3, z3)
assert.NoError(t, err)
r5, err := f5.ReceiveCommitments(4, z4)
assert.NoError(t, err)
_, err = f1.Recombine(2, r2)
assert.NoError(t, err)
_, err = f1.Recombine(3, r3)
assert.NoError(t, err)
_, err = f1.Recombine(4, r4)
assert.NoError(t, err)
_, err = f1.Recombine(5, r5)
assert.NoError(t, err)
_, err = f2.Recombine(1, r1)
assert.NoError(t, err)
_, err = f2.Recombine(3, r3)
assert.NoError(t, err)
_, err = f2.Recombine(4, r4)
assert.NoError(t, err)
_, err = f2.Recombine(5, r5)
assert.NoError(t, err)
_, err = f3.Recombine(1, r1)
assert.NoError(t, err)
_, err = f3.Recombine(2, r2)
assert.NoError(t, err)
_, err = f3.Recombine(4, r4)
assert.NoError(t, err)
_, err = f3.Recombine(5, r5)
assert.NoError(t, err)
_, err = f4.Recombine(1, r1)
assert.NoError(t, err)
_, err = f4.Recombine(2, r2)
assert.NoError(t, err)
_, err = f4.Recombine(3, r3)
assert.NoError(t, err)
_, err = f4.Recombine(5, r5)
assert.NoError(t, err)
_, err = f5.Recombine(1, r1)
assert.NoError(t, err)
_, err = f5.Recombine(2, r2)
assert.NoError(t, err)
_, err = f5.Recombine(3, r3)
assert.NoError(t, err)
_, err = f5.Recombine(4, r4)
assert.NoError(t, err)
}

View File

@@ -1,755 +0,0 @@
package channel
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha512"
"crypto/subtle"
"encoding/binary"
"encoding/json"
"fmt"
"sort"
"github.com/pkg/errors"
"golang.org/x/crypto/hkdf"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
const TRIPLE_RATCHET_PROTOCOL_VERSION = 1
const TRIPLE_RATCHET_PROTOCOL = 2<<8 + TRIPLE_RATCHET_PROTOCOL_VERSION
type TripleRatchetRound int
const (
TRIPLE_RATCHET_ROUND_UNINITIALIZED = TripleRatchetRound(0)
TRIPLE_RATCHET_ROUND_INITIALIZED = TripleRatchetRound(1)
TRIPLE_RATCHET_ROUND_COMMITTED = TripleRatchetRound(2)
TRIPLE_RATCHET_ROUND_REVEALED = TripleRatchetRound(3)
TRIPLE_RATCHET_ROUND_RECONSTRUCTED = TripleRatchetRound(4)
)
// Note: If an HSM with raw primitive access becomes available, the raw crypto
// mechanisms should be refactored into calls in KeyManager and implemented
// through the driver.
type TripleRatchetParticipant struct {
peerKey curves.Scalar
sendingEphemeralPrivateKey curves.Scalar
receivingEphemeralKeys map[string]curves.Scalar
receivingGroupKey curves.Point
curve curves.Curve
keyManager keys.KeyManager
rootKey []byte
sendingChainKey []byte
currentHeaderKey []byte
nextHeaderKey []byte
receivingChainKey map[string][]byte
currentSendingChainLength uint32
previousSendingChainLength uint32
currentReceivingChainLength map[string]uint32
previousReceivingChainLength map[string]uint32
peerIdMap map[string]int
idPeerMap map[int]*PeerInfo
skippedKeysMap map[string]map[string]map[uint32][]byte
peerChannels map[string]*DoubleRatchetParticipant
dkgRatchet *Feldman
}
type PeerInfo struct {
PublicKey curves.Point
IdentityPublicKey curves.Point
SignedPrePublicKey curves.Point
}
// Weak-mode synchronous group modification TR. This is not the asynchronous
// TR; it does not ratchet the group key automatically. Know what your use
// case is before adopting this.
func NewTripleRatchetParticipant(
peers []*PeerInfo,
curve curves.Curve,
keyManager keys.KeyManager,
peerKey curves.Scalar,
identityKey curves.Scalar,
signedPreKey curves.Scalar,
) (
*TripleRatchetParticipant,
map[string]*protobufs.P2PChannelEnvelope,
error,
) {
participant := &TripleRatchetParticipant{}
participant.skippedKeysMap = make(map[string]map[string]map[uint32][]byte)
participant.receivingEphemeralKeys = make(map[string]curves.Scalar)
participant.receivingChainKey = make(map[string][]byte)
participant.peerChannels = make(map[string]*DoubleRatchetParticipant)
participant.keyManager = keyManager
participant.currentSendingChainLength = 0
participant.previousSendingChainLength = 0
participant.currentReceivingChainLength = make(map[string]uint32)
participant.previousReceivingChainLength = make(map[string]uint32)
peerBasis := append([]*PeerInfo{}, peers...)
peerBasis = append(peerBasis, &PeerInfo{
PublicKey: peerKey.Point().Generator().Mul(peerKey),
IdentityPublicKey: identityKey.Point().Generator().Mul(identityKey),
SignedPrePublicKey: signedPreKey.Point().Generator().Mul(signedPreKey),
})
sort.Slice(peerBasis, func(i, j int) bool {
return bytes.Compare(
peerBasis[i].PublicKey.ToAffineCompressed(),
peerBasis[j].PublicKey.ToAffineCompressed(),
) <= 0
})
initMessages := make(map[string]*protobufs.P2PChannelEnvelope)
peerIdMap := map[string]int{}
idPeerMap := map[int]*PeerInfo{}
sender := false
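// Establish a pairwise Double Ratchet channel with every other peer, keyed by
// an X3DH exchange. Our position in the sorted peer basis decides the role:
// once our own key has been passed in the iteration, we act as the X3DH sender
// for the remaining peers and emit the pairwise "init" message.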
for i := 0; i < len(peerBasis); i++ {
peerIdMap[string(peerBasis[i].PublicKey.ToAffineCompressed())] = i + 1
idPeerMap[i+1] = peerBasis[i]
if bytes.Equal(
peerBasis[i].PublicKey.ToAffineCompressed(),
peerKey.Point().Generator().Mul(peerKey).ToAffineCompressed(),
) {
sender = true
} else {
participant.skippedKeysMap[string(
peerBasis[i].PublicKey.ToAffineCompressed(),
)] = make(map[string]map[uint32][]byte)
participant.currentReceivingChainLength[string(
peerBasis[i].PublicKey.ToAffineCompressed(),
)] = 0
participant.previousReceivingChainLength[string(
peerBasis[i].PublicKey.ToAffineCompressed(),
)] = 0
var sessionKey []byte
if sender {
sessionKey = SenderX3DH(
identityKey,
signedPreKey,
peerBasis[i].IdentityPublicKey,
peerBasis[i].SignedPrePublicKey,
96,
)
} else {
sessionKey = ReceiverX3DH(
identityKey,
signedPreKey,
peerBasis[i].IdentityPublicKey,
peerBasis[i].SignedPrePublicKey,
96,
)
}
var err error
participant.peerChannels[string(
peerBasis[i].PublicKey.ToAffineCompressed(),
)], err = NewDoubleRatchetParticipant(
sessionKey[:32],
sessionKey[32:64],
sessionKey[64:],
sender,
signedPreKey,
peerBasis[i].SignedPrePublicKey,
&curve,
keyManager,
)
if err != nil {
return nil, nil, errors.Wrap(err, "new triple ratchet participant")
}
if sender {
initMessages[string(peerBasis[i].PublicKey.ToAffineCompressed())], err =
participant.peerChannels[string(
peerBasis[i].PublicKey.ToAffineCompressed(),
)].RatchetEncrypt([]byte("init"))
if err != nil {
return nil, nil, errors.Wrap(err, "new triple ratchet participant")
}
}
}
}
feldman, err := NewFeldman(
2,
len(peers)+1,
peerIdMap[string(
peerKey.Point().Generator().Mul(peerKey).ToAffineCompressed(),
)],
curve.NewScalar().Random(rand.Reader),
curve,
curve.Point.Generator(),
)
if err != nil {
return nil, nil, errors.Wrap(err, "new triple ratchet participant")
}
participant.peerIdMap = peerIdMap
participant.idPeerMap = idPeerMap
participant.dkgRatchet = feldman
participant.curve = curve
participant.peerKey = peerKey
return participant, initMessages, nil
}
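// Initialize verifies the "init" message on every pairwise channel, samples
// this participant's Feldman polynomial, and returns the resulting polynomial
// fragments, each encrypted for the peer it is addressed to.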
func (r *TripleRatchetParticipant) Initialize(
initMessages map[string]*protobufs.P2PChannelEnvelope,
) (map[string]*protobufs.P2PChannelEnvelope, error) {
for k, m := range initMessages {
msg, err := r.peerChannels[k].RatchetDecrypt(m)
if err != nil {
return nil, errors.Wrap(err, "initialize")
}
if string(msg) != "init" {
return nil, errors.Wrap(errors.New("invalid init message"), "initialize")
}
}
if err := r.dkgRatchet.SamplePolynomial(); err != nil {
return nil, errors.Wrap(err, "initialize")
}
result, err := r.dkgRatchet.GetPolyFrags()
if err != nil {
return nil, errors.Wrap(err, "initialize")
}
resultMap := make(map[string]*protobufs.P2PChannelEnvelope)
for k, v := range result {
if r.idPeerMap[k].PublicKey.Equal(
r.peerKey.Point().Generator().Mul(r.peerKey),
) {
continue
}
envelope, err := r.peerChannels[string(
r.idPeerMap[k].PublicKey.ToAffineCompressed(),
)].RatchetEncrypt(v)
if err != nil {
return nil, errors.Wrap(err, "initialize")
}
resultMap[string(r.idPeerMap[k].PublicKey.ToAffineCompressed())] = envelope
}
return resultMap, nil
}
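// ReceivePolyFrag decrypts a polynomial fragment from the given peer and feeds
// it into the DKG; once fragments from all peers are present, the resulting
// commitment payload is encrypted for and returned to every pairwise channel.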
func (r *TripleRatchetParticipant) ReceivePolyFrag(
peerId []byte,
frag *protobufs.P2PChannelEnvelope,
) (map[string]*protobufs.P2PChannelEnvelope, error) {
b, err := r.peerChannels[string(peerId)].RatchetDecrypt(frag)
if err != nil {
return nil, errors.Wrap(err, "receive poly frag")
}
result, err := r.dkgRatchet.SetPolyFragForParty(
r.peerIdMap[string(peerId)],
b,
)
if err != nil {
return nil, errors.Wrap(err, "receive poly frag")
}
if len(result) != 0 {
envelopes := make(map[string]*protobufs.P2PChannelEnvelope)
for k, c := range r.peerChannels {
envelope, err := c.RatchetEncrypt(result)
if err != nil {
return nil, errors.Wrap(err, "receive poly frag")
}
envelopes[k] = envelope
}
return envelopes, nil
}
return nil, nil
}
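// ReceiveCommitment decrypts a commitment from the given peer and passes it to
// the DKG; once commitments from all peers are present, the reveal produced by
// the DKG is JSON-encoded and returned for broadcast on every pairwise channel.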
func (r *TripleRatchetParticipant) ReceiveCommitment(
peerId []byte,
zkcommit *protobufs.P2PChannelEnvelope,
) (map[string]*protobufs.P2PChannelEnvelope, error) {
b, err := r.peerChannels[string(peerId)].RatchetDecrypt(zkcommit)
if err != nil {
return nil, errors.Wrap(err, "receive commitment")
}
result, err := r.dkgRatchet.ReceiveCommitments(
r.peerIdMap[string(peerId)],
b,
)
if err != nil {
return nil, errors.Wrap(err, "receive commitment")
}
d, err := json.Marshal(result)
if err != nil {
return nil, errors.Wrap(err, "receive commitment")
}
if result != nil {
envelopes := make(map[string]*protobufs.P2PChannelEnvelope)
for k, c := range r.peerChannels {
envelope, err := c.RatchetEncrypt(d)
if err != nil {
return nil, errors.Wrap(err, "receive commitment")
}
envelopes[k] = envelope
}
return envelopes, nil
}
return nil, nil
}
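// Recombine decrypts a peer's reveal and hands it to the DKG. When the final
// reveal arrives, the recombined public key seeds the root key and header keys
// via HKDF, becomes the receiving group key, and a fresh sending ephemeral
// scalar is sampled.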
func (r *TripleRatchetParticipant) Recombine(
peerId []byte,
reveal *protobufs.P2PChannelEnvelope,
) error {
b, err := r.peerChannels[string(peerId)].RatchetDecrypt(reveal)
if err != nil {
return errors.Wrap(err, "recombine")
}
rev := &FeldmanReveal{}
if err = json.Unmarshal(b, rev); err != nil {
return errors.Wrap(err, "recombine")
}
done, err := r.dkgRatchet.Recombine(
r.peerIdMap[string(peerId)],
rev,
)
if err != nil {
return errors.Wrap(err, "recombine")
}
if !done {
return nil
}
sess := sha512.Sum512_256(r.dkgRatchet.PublicKeyBytes())
hash := hkdf.New(
sha512.New,
r.dkgRatchet.PublicKeyBytes(),
sess[:],
[]byte("quilibrium-triple-ratchet"),
)
rkck := make([]byte, 96)
if _, err := hash.Read(rkck[:]); err != nil {
return errors.Wrap(err, "recombine")
}
r.rootKey = rkck[:32]
r.currentHeaderKey = rkck[32:64]
r.nextHeaderKey = rkck[64:]
r.receivingGroupKey = r.dkgRatchet.PublicKey()
r.sendingEphemeralPrivateKey = r.curve.Scalar.Random(rand.Reader)
return nil
}
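// RatchetEncrypt advances the sending chain one step, encrypts the header
// under the current header key, and encrypts the body under the derived
// message key, binding the header ciphertext into the AEAD associated data.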
func (r *TripleRatchetParticipant) RatchetEncrypt(
message []byte,
) (*protobufs.P2PChannelEnvelope, error) {
envelope := &protobufs.P2PChannelEnvelope{
ProtocolIdentifier: TRIPLE_RATCHET_PROTOCOL,
MessageHeader: &protobufs.MessageCiphertext{},
MessageBody: &protobufs.MessageCiphertext{},
}
newChainKey, messageKey, aeadKey := ratchetKeys(r.sendingChainKey)
r.sendingChainKey = newChainKey
var err error
header := r.encodeHeader()
envelope.MessageHeader, err = r.encrypt(
header,
r.currentHeaderKey,
nil,
)
if err != nil {
return nil, errors.Wrap(err, "could not encrypt header")
}
envelope.MessageBody, err = r.encrypt(
message,
messageKey,
append(append([]byte{}, aeadKey...), envelope.MessageHeader.Ciphertext...),
)
if err != nil {
return nil, errors.Wrap(err, "could not encrypt message")
}
r.currentSendingChainLength++
return envelope, nil
}
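// RatchetDecrypt first tries any cached skipped-message keys, then decrypts
// the header (ratcheting the receiving keys when the next header key was
// required), skips over any missed message keys, and finally decrypts the body.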
func (r *TripleRatchetParticipant) RatchetDecrypt(
envelope *protobufs.P2PChannelEnvelope,
) ([]byte, error) {
plaintext, err := r.trySkippedMessageKeys(envelope)
if err != nil {
return nil, errors.Wrap(err, "ratchet decrypt")
}
if plaintext != nil {
return plaintext, nil
}
header, shouldRatchet, err := r.decryptHeader(
envelope.MessageHeader,
r.currentHeaderKey,
)
if err != nil {
return nil, errors.Wrap(err, "ratchet decrypt")
}
senderKey,
receivingEphemeralKey,
previousReceivingChainLength,
currentReceivingChainLength,
err := r.decodeHeader(header)
if err != nil {
return nil, errors.Wrap(err, "ratchet decrypt")
}
if shouldRatchet {
if err := r.skipMessageKeys(
senderKey,
previousReceivingChainLength,
); err != nil {
return nil, errors.Wrap(err, "ratchet decrypt")
}
if err := r.ratchetReceiverEphemeralKeys(
senderKey,
receivingEphemeralKey,
); err != nil {
return nil, errors.Wrap(err, "ratchet decrypt")
}
}
if err := r.skipMessageKeys(
senderKey,
currentReceivingChainLength,
); err != nil {
return nil, errors.Wrap(err, "ratchet decrypt")
}
newChainKey, messageKey, aeadKey := ratchetKeys(
r.receivingChainKey[string(senderKey.ToAffineCompressed())],
)
r.receivingChainKey[string(senderKey.ToAffineCompressed())] = newChainKey
r.currentReceivingChainLength[string(senderKey.ToAffineCompressed())]++
plaintext, err = r.decrypt(
envelope.MessageBody,
messageKey,
append(
append([]byte{}, aeadKey...),
envelope.MessageHeader.Ciphertext...,
),
)
return plaintext, errors.Wrap(err, "ratchet decrypt")
}
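// ratchetSenderEphemeralKeys performs a sending-side DH ratchet step: the
// group key combined with the sending ephemeral scalar is fed through HKDF to
// derive a new root key, sending chain key and next header key.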
func (r *TripleRatchetParticipant) ratchetSenderEphemeralKeys() error {
hash := hkdf.New(
sha512.New,
r.receivingGroupKey.Mul(
r.sendingEphemeralPrivateKey,
).ToAffineCompressed(),
r.rootKey,
[]byte("quilibrium-triple-ratchet"),
)
rkck2 := make([]byte, 96)
if _, err := hash.Read(rkck2[:]); err != nil {
return errors.Wrap(err, "failed ratcheting root key")
}
r.rootKey = rkck2[:32]
r.sendingChainKey = rkck2[32:64]
r.nextHeaderKey = rkck2[64:]
return nil
}
func (r *TripleRatchetParticipant) ratchetReceiverEphemeralKeys(
peerKey curves.Point,
newEphemeralKey curves.Scalar,
) error {
r.previousSendingChainLength = r.currentSendingChainLength
r.currentSendingChainLength = 0
r.currentReceivingChainLength[string(peerKey.ToAffineCompressed())] = 0
r.currentHeaderKey = r.nextHeaderKey
r.receivingEphemeralKeys[string(
peerKey.ToAffineCompressed(),
)] = newEphemeralKey
hash := hkdf.New(
sha512.New,
r.receivingGroupKey.Mul(
newEphemeralKey,
).ToAffineCompressed(),
r.rootKey,
[]byte("quilibrium-triple-ratchet"),
)
rkck := make([]byte, 96)
if _, err := hash.Read(rkck[:]); err != nil {
return errors.Wrap(err, "failed ratcheting root key")
}
r.rootKey = rkck[:32]
r.receivingChainKey[string(peerKey.ToAffineCompressed())] = rkck[32:64]
r.nextHeaderKey = rkck[64:]
r.sendingEphemeralPrivateKey = r.curve.NewScalar().Random(rand.Reader)
return nil
}
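// trySkippedMessageKeys attempts decryption with message keys cached for
// out-of-order delivery; it returns a nil plaintext when no cached key applies.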
func (r *TripleRatchetParticipant) trySkippedMessageKeys(
envelope *protobufs.P2PChannelEnvelope,
) ([]byte, error) {
for receivingHeaderKey, skippedKeys := range r.skippedKeysMap {
header, _, err := r.decryptHeader(
envelope.MessageHeader,
[]byte(receivingHeaderKey),
)
if err == nil {
peerKey, _, _, current, err := r.decodeHeader(header)
if err != nil {
return nil, errors.Wrap(err, "try skipped message keys")
}
messageKey := skippedKeys[string(
peerKey.ToAffineCompressed(),
)][current][:32]
aeadKey := skippedKeys[string(
peerKey.ToAffineCompressed(),
)][current][32:]
plaintext, err := r.decrypt(
envelope.MessageBody,
messageKey,
append(
append([]byte{}, aeadKey...),
envelope.MessageHeader.Ciphertext[:]...,
),
)
if err != nil {
return nil, errors.Wrap(err, "try skipped message keys")
}
delete(r.skippedKeysMap[string(
peerKey.ToAffineCompressed(),
)][receivingHeaderKey], current)
if len(r.skippedKeysMap[string(
peerKey.ToAffineCompressed(),
)][receivingHeaderKey]) == 0 {
delete(r.skippedKeysMap[string(
peerKey.ToAffineCompressed(),
)], receivingHeaderKey)
}
return plaintext, nil
}
}
return nil, nil
}
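// skipMessageKeys advances the receiving chain for the given sender up to the
// target index, caching each skipped message key (at most 100 ahead) so that
// late-arriving messages can still be decrypted.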
func (r *TripleRatchetParticipant) skipMessageKeys(
senderKey curves.Point,
until uint32,
) error {
if r.currentReceivingChainLength[string(
senderKey.ToAffineCompressed(),
)]+100 < until {
return errors.Wrap(errors.New("skip limit exceeded"), "skip message keys")
}
if r.receivingChainKey != nil {
for r.currentReceivingChainLength[string(
senderKey.ToAffineCompressed(),
)] < until {
newChainKey, messageKey, aeadKey := ratchetKeys(
r.receivingChainKey[string(
senderKey.ToAffineCompressed(),
)],
)
skippedKeys := r.skippedKeysMap[string(
senderKey.ToAffineCompressed(),
)][string(r.currentHeaderKey)]
if skippedKeys == nil {
skippedKeys = make(map[uint32][]byte)
r.skippedKeysMap[string(
senderKey.ToAffineCompressed(),
)][string(r.currentHeaderKey)] = skippedKeys
}
skippedKeys[r.currentReceivingChainLength[string(
senderKey.ToAffineCompressed(),
)]] = append(
append([]byte{}, messageKey...),
aeadKey...,
)
r.receivingChainKey[string(
senderKey.ToAffineCompressed(),
)] = newChainKey
r.currentReceivingChainLength[string(
senderKey.ToAffineCompressed(),
)]++
}
}
return nil
}
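// encodeHeader lays out the sender's public key, the sending ephemeral scalar,
// and the previous and current sending chain lengths as big-endian uint32s.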
func (r *TripleRatchetParticipant) encodeHeader() []byte {
header := []byte{}
header = append(
header,
r.peerKey.Point().Generator().Mul(r.peerKey).ToAffineCompressed()...,
)
header = append(
header,
r.sendingEphemeralPrivateKey.Bytes()...,
)
header = binary.BigEndian.AppendUint32(header, r.previousSendingChainLength)
header = binary.BigEndian.AppendUint32(header, r.currentSendingChainLength)
return header
}
func (r *TripleRatchetParticipant) decryptHeader(
ciphertext *protobufs.MessageCiphertext,
receivingHeaderKey []byte,
) ([]byte, bool, error) {
header, err := r.decrypt(
ciphertext,
receivingHeaderKey,
nil,
)
if err != nil && subtle.ConstantTimeCompare(
r.currentHeaderKey,
receivingHeaderKey,
) == 1 {
if header, err = r.decrypt(
ciphertext,
r.nextHeaderKey,
nil,
); err != nil {
return nil, false, errors.Wrap(err, "could not decrypt header")
}
fmt.Println("should ratchet")
return header, true, nil
}
return header, false, errors.Wrap(err, "could not decrypt header")
}
func (r *TripleRatchetParticipant) decodeHeader(
header []byte,
) (curves.Point, curves.Scalar, uint32, uint32, error) {
if len(header) < 9 {
return nil, nil, 0, 0, errors.Wrap(
errors.New("malformed header"),
"decode header",
)
}
currentReceivingChainLength := binary.BigEndian.Uint32(header[len(header)-4:])
previousReceivingChainLength := binary.BigEndian.Uint32(
header[len(header)-8 : len(header)-4],
)
sender := header[:len(r.curve.Point.ToAffineCompressed())]
senderKey, err := r.curve.Point.FromAffineCompressed(sender)
if err != nil {
return nil, nil, 0, 0, errors.Wrap(err, "decode header")
}
receivingEphemeralKeyBytes := header[len(
r.curve.Point.ToAffineCompressed(),
) : len(header)-8]
receivingEphemeralKey, err := r.curve.Scalar.Clone().SetBytes(
receivingEphemeralKeyBytes,
)
return senderKey,
receivingEphemeralKey,
previousReceivingChainLength,
currentReceivingChainLength,
errors.Wrap(err, "decode header")
}
func (r *TripleRatchetParticipant) encrypt(
plaintext []byte,
key []byte,
associatedData []byte,
) (*protobufs.MessageCiphertext, error) {
iv := [12]byte{}
if _, err := rand.Read(iv[:]); err != nil {
return nil, errors.Wrap(err, "encrypt")
}
aesCipher, err := aes.NewCipher(key)
if err != nil {
return nil, errors.Wrap(err, "encrypt")
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
return nil, errors.Wrap(err, "encrypt")
}
ciphertext := &protobufs.MessageCiphertext{}
if associatedData == nil {
associatedData = make([]byte, 32)
if _, err := rand.Read(associatedData); err != nil {
return nil, errors.Wrap(err, "encrypt")
}
ciphertext.AssociatedData = associatedData
}
ciphertext.Ciphertext = gcm.Seal(nil, iv[:], plaintext, associatedData)
ciphertext.InitializationVector = iv[:]
return ciphertext, nil
}
func (r *TripleRatchetParticipant) decrypt(
ciphertext *protobufs.MessageCiphertext,
key []byte,
associatedData []byte,
) ([]byte, error) {
if associatedData == nil {
associatedData = ciphertext.AssociatedData
}
aesCipher, err := aes.NewCipher(key)
if err != nil {
return nil, errors.Wrap(err, "decrypt")
}
gcm, err := cipher.NewGCM(aesCipher)
if err != nil {
return nil, errors.Wrap(err, "decrypt")
}
plaintext, err := gcm.Open(
nil,
ciphertext.InitializationVector,
ciphertext.Ciphertext,
associatedData,
)
return plaintext, errors.Wrap(err, "decrypt")
}

View File

@@ -1,85 +0,0 @@
package channel
import (
"crypto/sha512"
"golang.org/x/crypto/hkdf"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
var domainSeparators = map[string][]byte{
curves.ED448().Name: {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF,
},
}
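// SenderX3DH derives the session key on the initiator side from three DH
// exchanges (identity key with the peer's signed pre-key, ephemeral key with
// the peer's identity key, and ephemeral key with the peer's signed pre-key),
// domain-separated per curve and expanded with HKDF-SHA512.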
func SenderX3DH(
sendingIdentityPrivateKey curves.Scalar,
sendingEphemeralPrivateKey curves.Scalar,
receivingIdentityKey curves.Point,
receivingSignedPreKey curves.Point,
sessionKeyLength uint8,
) []byte {
xdh1 := receivingSignedPreKey.Mul(
sendingIdentityPrivateKey,
).ToAffineCompressed()
xdh2 := receivingIdentityKey.Mul(
sendingEphemeralPrivateKey,
).ToAffineCompressed()
xdh3 := receivingSignedPreKey.Mul(
sendingEphemeralPrivateKey,
).ToAffineCompressed()
salt := make([]byte, sessionKeyLength)
x3dh := hkdf.New(sha512.New, append(
append(
append(
append([]byte{}, domainSeparators[receivingIdentityKey.CurveName()]...),
xdh1[:]...),
xdh2[:]...),
xdh3[:]...), salt, []byte("quilibrium-x3dh"))
sessionKey := make([]byte, sessionKeyLength)
if _, err := x3dh.Read(sessionKey[:]); err != nil {
return nil
}
return sessionKey
}
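// ReceiverX3DH mirrors SenderX3DH from the responder's side so that both
// parties derive the same session key.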
func ReceiverX3DH(
sendingIdentityPrivateKey curves.Scalar,
sendingSignedPrePrivateKey curves.Scalar,
receivingIdentityKey curves.Point,
receivingEphemeralKey curves.Point,
sessionKeyLength uint8,
) []byte {
xdh1 := receivingIdentityKey.Mul(
sendingSignedPrePrivateKey,
).ToAffineCompressed()
xdh2 := receivingEphemeralKey.Mul(
sendingIdentityPrivateKey,
).ToAffineCompressed()
xdh3 := receivingEphemeralKey.Mul(
sendingSignedPrePrivateKey,
).ToAffineCompressed()
salt := make([]byte, sessionKeyLength)
x3dh := hkdf.New(sha512.New, append(
append(
append(
append([]byte{}, domainSeparators[receivingIdentityKey.CurveName()]...),
xdh1[:]...),
xdh2[:]...),
xdh3[:]...), salt, []byte("quilibrium-x3dh"))
sessionKey := make([]byte, sessionKeyLength)
if _, err := x3dh.Read(sessionKey[:]); err != nil {
return nil
}
return sessionKey
}

View File

@@ -1,40 +0,0 @@
package channel_test
import (
"crypto/rand"
"testing"
"github.com/stretchr/testify/require"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/channel"
)
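// TestX3DHMatches checks that the sender- and receiver-side derivations agree
// on the same session key for ED448 keys.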
func TestX3DHMatches(t *testing.T) {
x448SendingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448SendingEphemeralPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448ReceivingIdentityPrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448ReceivingSignedPrePrivateKey := curves.ED448().Scalar.Random(rand.Reader)
x448SendingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingIdentityPrivateKey)
x448SendingEphemeralKey := curves.ED448().NewGeneratorPoint().Mul(x448SendingEphemeralPrivateKey)
x448ReceivingIdentityKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingIdentityPrivateKey)
x448ReceivingSignedPreKey := curves.ED448().NewGeneratorPoint().Mul(x448ReceivingSignedPrePrivateKey)
result := channel.SenderX3DH(
x448SendingIdentityPrivateKey,
x448SendingEphemeralPrivateKey,
x448ReceivingIdentityKey,
x448ReceivingSignedPreKey,
32,
)
compare := channel.ReceiverX3DH(
x448ReceivingIdentityPrivateKey,
x448ReceivingSignedPrePrivateKey,
x448SendingIdentityKey,
x448SendingEphemeralKey,
32,
)
require.Equal(t, result, compare)
}

View File

@@ -1,65 +0,0 @@
package crypto
import (
"crypto"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
)
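// FrameProver creates and verifies master and data clock frames, and produces
// the weak recursive and challenge proofs that accompany them.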
type FrameProver interface {
ProveMasterClockFrame(
previousFrame *protobufs.ClockFrame,
timestamp int64,
difficulty uint32,
) (*protobufs.ClockFrame, error)
ProveDataClockFrame(
previousFrame *protobufs.ClockFrame,
commitments [][]byte,
aggregateProofs []*protobufs.InclusionAggregateProof,
provingKey crypto.Signer,
timestamp int64,
difficulty uint32,
) (*protobufs.ClockFrame, error)
CreateMasterGenesisFrame(
filter []byte,
seed []byte,
difficulty uint32,
) (*protobufs.ClockFrame, error)
CreateDataGenesisFrame(
filter []byte,
origin []byte,
difficulty uint32,
inclusionProof *InclusionAggregateProof,
proverKeys [][]byte,
preDusk bool,
) (*protobufs.ClockFrame, *tries.RollingFrecencyCritbitTrie, error)
VerifyMasterClockFrame(
frame *protobufs.ClockFrame,
) error
VerifyDataClockFrame(
frame *protobufs.ClockFrame,
) error
GenerateWeakRecursiveProofIndex(
frame *protobufs.ClockFrame,
) (uint64, error)
FetchRecursiveProof(
frame *protobufs.ClockFrame,
) []byte
VerifyWeakRecursiveProof(
frame *protobufs.ClockFrame,
proof []byte,
deepVerifier *protobufs.ClockFrame,
) bool
CalculateChallengeProof(
challenge []byte,
core uint32,
increment uint32,
) ([]byte, error)
VerifyChallengeProof(
challenge []byte,
increment uint32,
core uint32,
proof []byte,
) bool
}

View File

@@ -1,42 +0,0 @@
package crypto
type InclusionCommitment struct {
TypeUrl string
Data []byte
Commitment []byte
}
type InclusionAggregateProof struct {
InclusionCommitments []*InclusionCommitment
AggregateCommitment []byte
Proof []byte
}
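// InclusionProver exposes raw commitment, proof and verification operations
// over byte data at a fixed polynomial size; the typed aggregate methods below
// are currently commented out.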
type InclusionProver interface {
// Commit(
// data []byte,
// typeUrl string,
// ) (*InclusionCommitment, error)
// ProveAggregate(commits []*InclusionCommitment) (
// *InclusionAggregateProof,
// error,
// )
// VerifyAggregate(proof *InclusionAggregateProof) (bool, error)
// VerifyFrame(frame *protobufs.ClockFrame) error
CommitRaw(
data []byte,
polySize uint64,
) ([]byte, error)
ProveRaw(
data []byte,
index int,
polySize uint64,
) ([]byte, error)
VerifyRaw(
data []byte,
commit []byte,
index int,
proof []byte,
polySize uint64,
) (bool, error)
}

File diff suppressed because it is too large

View File

@@ -1,266 +0,0 @@
package kzg
import (
"math/big"
"github.com/pkg/errors"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
)
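// recurseFFT is a radix-2 Cooley-Tukey butterfly over the scalar field: even-
// and odd-indexed values are transformed recursively and recombined with the
// precomputed roots of unity, with small leaves falling back to the naive DFT.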
func recurseFFT(
values []curves.PairingScalar,
offset uint64,
stride uint64,
rootsStride uint64,
out []curves.PairingScalar,
fftWidth uint64,
inverse bool,
) {
roots := RootsOfUnityBLS48581
if inverse {
roots = ReverseRootsOfUnityBLS48581
}
if len(out) <= 16 {
l := uint64(len(out))
for i := uint64(0); i < l; i++ {
last := values[offset].Mul(roots[fftWidth][0])
for j := uint64(1); j < l; j++ {
last = last.Add(values[offset+j*stride].Mul(
roots[fftWidth][((i*j)%l)*rootsStride],
))
}
out[i] = last.(curves.PairingScalar)
}
return
}
half := uint64(len(out)) >> 1
// slide to the left
recurseFFT(
values,
offset,
stride<<1,
rootsStride<<1,
out[:half],
fftWidth,
inverse,
)
// slide to the right
recurseFFT(
values,
offset+stride,
stride<<1,
rootsStride<<1,
out[half:],
fftWidth,
inverse,
)
// cha cha now, y'all
for i := uint64(0); i < half; i++ {
mul := out[i+half].Mul(
roots[fftWidth][i*rootsStride],
).(curves.PairingScalar)
mulAdd := out[i].Add(mul).(curves.PairingScalar)
out[i+half] = out[i].Sub(mul).(curves.PairingScalar)
out[i] = mulAdd
}
}
func FFT(
values []curves.PairingScalar,
curve curves.PairingCurve,
fftWidth uint64,
inverse bool,
) ([]curves.PairingScalar, error) {
width := uint64(len(values))
if width > fftWidth {
return nil, errors.New("invalid width of values")
}
if width&(width-1) != 0 {
width = nearestPowerOfTwo(width)
}
// We make a copy so we can mutate it during the work.
workingValues := make([]curves.PairingScalar, width)
for i := 0; i < len(values); i++ {
workingValue := values[i].Clone()
workingValues[i] = workingValue.(curves.PairingScalar)
}
for i := uint64(len(values)); i < width; i++ {
workingValue, err := curve.NewScalar().SetBigInt(
big.NewInt(0),
)
if err != nil {
return nil, errors.Wrap(err, "invalid scalar")
}
workingValues[i] = workingValue.(curves.PairingScalar)
}
out := make([]curves.PairingScalar, width)
stride := fftWidth / width
for i := 0; i < len(out); i++ {
out[i] = curve.NewScalar()
}
if inverse {
invLen, err := curve.NewScalar().SetBigInt(big.NewInt((int64(width))))
if err != nil {
return nil, errors.Wrap(err, "invalid int")
}
inv, err := invLen.Invert()
if err != nil {
return nil, errors.Wrap(err, "could not invert")
}
invLen = inv.(curves.PairingScalar)
recurseFFT(workingValues, 0, 1, stride, out, fftWidth, inverse)
for i := 0; i < len(out); i++ {
out[i] = out[i].Mul(invLen).(curves.PairingScalar)
}
return out, nil
} else {
recurseFFT(workingValues, 0, 1, stride, out, fftWidth, inverse)
return out, nil
}
}
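// recurseFFTG1 applies the same radix-2 butterfly as recurseFFT, but over G1
// points instead of scalars.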
func recurseFFTG1(
values []curves.PairingPoint,
offset uint64,
stride uint64,
rootsStride uint64,
out []curves.PairingPoint,
fftWidth uint64,
inverse bool,
) {
roots := RootsOfUnityBLS48581
if inverse {
roots = ReverseRootsOfUnityBLS48581
}
if len(out) <= 16 {
l := uint64(len(out))
for i := uint64(0); i < l; i++ {
last := values[offset].Mul(roots[fftWidth][0])
for j := uint64(1); j < l; j++ {
last = last.Add(values[offset+j*stride].Mul(
roots[fftWidth][((i*j)%l)*rootsStride],
))
}
out[i] = last.(curves.PairingPoint)
}
return
}
half := uint64(len(out)) >> 1
// slide to the left
recurseFFTG1(
values,
offset,
stride<<1,
rootsStride<<1,
out[:half],
fftWidth,
inverse,
)
// slide to the right
recurseFFTG1(
values,
offset+stride,
stride<<1,
rootsStride<<1,
out[half:],
fftWidth,
inverse,
)
// cha cha now, y'all
for i := uint64(0); i < half; i++ {
mul := out[i+half].Mul(roots[fftWidth][i*rootsStride]).(curves.PairingPoint)
mulAdd := out[i].Add(mul).(curves.PairingPoint)
out[i+half] = out[i].Sub(mul).(curves.PairingPoint)
out[i] = mulAdd
}
}
func FFTG1(
values []curves.PairingPoint,
curve curves.PairingCurve,
fftWidth uint64,
inverse bool,
) ([]curves.PairingPoint, error) {
width := uint64(len(values))
if width > fftWidth {
return nil, errors.New("invalid width of values")
}
if width&(width-1) != 0 {
width = nearestPowerOfTwo(width)
}
workingValues := make([]curves.PairingPoint, width)
for i := 0; i < len(values); i++ {
workingValue, err := curve.NewG1GeneratorPoint().FromAffineCompressed(
values[i].ToAffineCompressed(),
)
if err != nil {
return nil, errors.Wrap(err, "invalid point")
}
workingValues[i] = workingValue.(curves.PairingPoint)
}
for i := uint64(len(values)); i < width; i++ {
workingValues[i] = curve.NewG1IdentityPoint()
}
out := make([]curves.PairingPoint, width)
stride := fftWidth / width
for i := 0; i < len(out); i++ {
out[i] = curve.NewG1IdentityPoint()
}
if inverse {
invLen, err := curve.NewScalar().SetBigInt(big.NewInt((int64(width))))
if err != nil {
return nil, errors.Wrap(err, "invalid int")
}
inv, err := invLen.Invert()
if err != nil {
return nil, errors.Wrap(err, "could not invert")
}
invLen = inv.(curves.PairingScalar)
recurseFFTG1(workingValues, 0, 1, stride, out, fftWidth, inverse)
for i := 0; i < len(out); i++ {
out[i] = out[i].Mul(invLen).(curves.PairingPoint)
}
return out, nil
} else {
recurseFFTG1(workingValues, 0, 1, stride, out, fftWidth, inverse)
return out, nil
}
}
func nearestPowerOfTwo(number uint64) uint64 {
power := uint64(1)
for number > power {
power = power << 1
}
return power
}

View File

@@ -1,703 +0,0 @@
package kzg
import (
_ "embed"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"math/big"
"os"
"sync"
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
)
type PowersOfTauJson struct {
G1Affines []string `json:"G1Powers"`
G2Affines []string `json:"G2Powers"`
G1FFT []string `json:"G1FFT"`
}
type ContributionJson struct {
PowersOfTau PowersOfTauJson `json:"powersOfTau"`
PotPubKey string `json:"potPubKey"`
Witness Witness `json:"witness"`
VoucherPubKey string `json:"voucherPubKey"`
}
type BatchContribution struct {
Contribution Contribution
}
type PowersOfTau struct {
G1Affines []*bls48581.ECP
G2Affines []*bls48581.ECP8
G1FFT []*bls48581.ECP
}
type CeremonyState struct {
PowersOfTau PowersOfTauJson `json:"powersOfTau"`
PotPubKey string `json:"potPubKey"`
Witness Witness `json:"witness"`
VoucherPubKeys []string `json:"voucherPubKeys"`
}
type Witness struct {
RunningProducts []string `json:"runningProducts"`
PotPubKeys []string `json:"potPubKeys"`
}
type Contribution struct {
NumG1Powers int
NumG2Powers int
PowersOfTau PowersOfTau
PotPubKey *bls48581.ECP8
}
type KZGProver struct {
bytesPerScalar int
curve *curves.PairingCurve
hashFunc func() hash.Hash
orderBI *big.Int
}
var RootOfUnityBLS48581 map[uint64]curves.PairingScalar = make(map[uint64]curves.PairingScalar)
var RootsOfUnityBLS48581 map[uint64][]curves.PairingScalar = make(map[uint64][]curves.PairingScalar)
var ReverseRootsOfUnityBLS48581 map[uint64][]curves.PairingScalar = make(map[uint64][]curves.PairingScalar)
var CeremonyBLS48581G1 []curves.PairingPoint
var CeremonyBLS48581G2 []curves.PairingPoint
var CeremonyRunningProducts []curves.PairingPoint
var CeremonyPotPubKeys []curves.PairingPoint
var CeremonySignatories []curves.Point
var FFTBLS48581 map[uint64][]curves.PairingPoint = make(map[uint64][]curves.PairingPoint)
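// TestInit loads a ceremony transcript from the given file and precomputes the
// roots of unity, their reverses, and the Lagrange-basis (FFT) form of the G1
// powers for each supported width; it is intended for tests and tooling that
// read the transcript directly rather than using the embedded ceremony.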
func TestInit(file string) {
// start with phase 1 ceremony:
csBytes, err := os.ReadFile(file)
if err != nil {
panic(err)
}
bls48581.Init()
cs := &CeremonyState{}
if err := json.Unmarshal(csBytes, cs); err != nil {
panic(err)
}
g1s := make([]curves.PairingPoint, 65536)
g2s := make([]curves.PairingPoint, 257)
g1ffts := make([]curves.PairingPoint, 65536)
wg := sync.WaitGroup{}
wg.Add(65536)
for i := 0; i < 65536; i++ {
i := i
go func() {
b, err := hex.DecodeString(cs.PowersOfTau.G1Affines[i][2:])
if err != nil {
panic(err)
}
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
g1s[i] = g1.(curves.PairingPoint)
f, err := hex.DecodeString(cs.PowersOfTau.G1FFT[i][2:])
if err != nil {
panic(err)
}
g1fft, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(f)
if err != nil {
panic(err)
}
g1ffts[i] = g1fft.(curves.PairingPoint)
if i < 257 {
b, err := hex.DecodeString(cs.PowersOfTau.G2Affines[i][2:])
if err != nil {
panic(err)
}
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(
b,
)
if err != nil {
panic(err)
}
g2s[i] = g2.(curves.PairingPoint)
}
wg.Done()
}()
}
wg.Wait()
wg.Add(len(cs.Witness.RunningProducts))
CeremonyRunningProducts = make([]curves.PairingPoint, len(cs.Witness.RunningProducts))
for i, s := range cs.Witness.RunningProducts {
i, s := i, s
go func() {
b, err := hex.DecodeString(s[2:])
if err != nil {
panic(err)
}
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
CeremonyRunningProducts[i] = g1.(curves.PairingPoint)
wg.Done()
}()
}
wg.Wait()
wg.Add(len(cs.Witness.PotPubKeys))
CeremonyPotPubKeys = make([]curves.PairingPoint, len(cs.Witness.PotPubKeys))
for i, s := range cs.Witness.PotPubKeys {
i, s := i, s
go func() {
b, err := hex.DecodeString(s[2:])
if err != nil {
panic(err)
}
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
CeremonyPotPubKeys[i] = g2.(curves.PairingPoint)
wg.Done()
}()
}
wg.Wait()
wg.Add(len(cs.VoucherPubKeys))
CeremonySignatories = make([]curves.Point, len(cs.VoucherPubKeys))
for i, s := range cs.VoucherPubKeys {
i, s := i, s
go func() {
b, err := hex.DecodeString(s[2:])
if err != nil {
panic(err)
}
CeremonySignatories[i], err = curves.ED448().Point.FromAffineCompressed(b)
if err != nil {
panic(err)
}
wg.Done()
}()
}
wg.Wait()
CeremonyBLS48581G1 = g1s
CeremonyBLS48581G2 = g2s
// Post-ceremony, precompute everything and put it in the finalized ceremony
// state
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
sizes := []int64{16, 32, 64, 128, 256, 512, 1024, 2048, 65536}
wg.Add(len(sizes))
root := make([]curves.PairingScalar, 9)
roots := make([][]curves.PairingScalar, 9)
reverseRoots := make([][]curves.PairingScalar, 9)
ffts := make([][]curves.PairingPoint, 9)
for idx, i := range sizes {
i := i
idx := idx
go func() {
exp := new(big.Int).Quo(
new(big.Int).Sub(q, big.NewInt(1)),
big.NewInt(i),
)
rootOfUnity := new(big.Int).Exp(big.NewInt(int64(37)), exp, q)
roots[idx] = make([]curves.PairingScalar, i+1)
reverseRoots[idx] = make([]curves.PairingScalar, i+1)
wg2 := sync.WaitGroup{}
wg2.Add(int(i))
for j := int64(0); j < i; j++ {
j := j
go func() {
rev := big.NewInt(int64(j))
r := new(big.Int).Exp(
rootOfUnity,
rev,
q,
)
scalar, _ := (&curves.ScalarBls48581{}).SetBigInt(r)
if rev.Cmp(big.NewInt(1)) == 0 {
root[idx] = scalar.(curves.PairingScalar)
}
roots[idx][j] = scalar.(curves.PairingScalar)
reverseRoots[idx][i-j] = roots[idx][j]
wg2.Done()
}()
}
wg2.Wait()
roots[idx][i] = roots[idx][0]
reverseRoots[idx][0] = reverseRoots[idx][i]
wg.Done()
}()
}
wg.Wait()
wg.Add(len(sizes))
for i := range root {
i := i
RootOfUnityBLS48581[uint64(sizes[i])] = root[i]
RootsOfUnityBLS48581[uint64(sizes[i])] = roots[i]
ReverseRootsOfUnityBLS48581[uint64(sizes[i])] = reverseRoots[i]
go func() {
// We precomputed 65536, others are cheap and will be fully precomputed
// post-ceremony
if sizes[i] < 65536 {
fftG1, err := FFTG1(
CeremonyBLS48581G1[:sizes[i]],
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
uint64(sizes[i]),
true,
)
if err != nil {
panic(err)
}
ffts[i] = fftG1
} else {
ffts[i] = g1ffts
}
wg.Done()
}()
}
wg.Wait()
for i := range root {
FFTBLS48581[uint64(sizes[i])] = ffts[i]
}
}
//go:embed ceremony.json
var csBytes []byte
func Init() {
rbls48581.Init()
}
func NewKZGProver(
curve *curves.PairingCurve,
hashFunc func() hash.Hash,
orderBI *big.Int,
) *KZGProver {
if curve.Name != curves.BLS48581Name {
// kzg ceremony transcript not available for any other curve
return nil
}
return &KZGProver{
bytesPerScalar: 64,
curve: curve,
hashFunc: hashFunc,
orderBI: orderBI,
}
}
func DefaultKZGProver() *KZGProver {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
return NewKZGProver(
curves.BLS48581(curves.BLS48581G1().Point),
sha3.New256,
q,
)
}
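// BytesToPolynomial chunks the input into bytesPerScalar-sized field elements
// (64 bytes for BLS48-581), with a final shorter chunk kept as its own scalar.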
func (p *KZGProver) BytesToPolynomial(
bytes []byte,
) ([]curves.PairingScalar, error) {
size := len(bytes) / p.bytesPerScalar
truncLast := false
if len(bytes)%p.bytesPerScalar > 0 {
truncLast = true
}
poly := []curves.PairingScalar{}
var i int
for i = 0; i < size; i++ {
scalar, err := p.curve.NewScalar().SetBytes(
bytes[i*p.bytesPerScalar : (i+1)*p.bytesPerScalar],
)
if err != nil {
return nil, errors.Wrap(err, "could not set bytes for scalar")
}
poly = append(
poly,
scalar.(curves.PairingScalar),
)
}
if truncLast {
scalar, err := p.curve.NewScalar().SetBytes(
bytes[i*p.bytesPerScalar:],
)
if err != nil {
return nil, errors.Wrap(err, "could not set bytes for scalar")
}
poly = append(
poly,
scalar.(curves.PairingScalar),
)
}
return poly, nil
}
func (p *KZGProver) PointLinearCombination(
points []curves.PairingPoint,
scalars []curves.PairingScalar,
) (curves.PairingPoint, error) {
if len(points) != len(scalars) {
return nil, fmt.Errorf(
"length mismatch between arguments, points: %d, scalars: %d",
len(points),
len(scalars),
)
}
result := p.curve.NewG1IdentityPoint()
for i, p := range points {
result = result.Add(p.Mul(scalars[i])).(curves.PairingPoint)
}
return result, nil
}
func (p *KZGProver) PolynomialLinearCombination(
polynomials [][]curves.PairingScalar,
scalars []curves.PairingScalar,
) ([]curves.PairingScalar, error) {
if len(polynomials) != len(scalars) {
return nil, errors.New("length mismatch between arguments")
}
result := make([]curves.PairingScalar, len(polynomials[0]))
for i := range polynomials[0] {
result[i] = p.curve.NewScalar()
}
for j, ps := range polynomials {
for i, p := range ps {
result[i] = result[i].Add(p.Mul(scalars[j])).(curves.PairingScalar)
}
}
return result, nil
}
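// EvaluateLagrangeForm evaluates a polynomial given in evaluation (Lagrange)
// form at an arbitrary point x using the barycentric formula
// f(x) = (x^n - 1)/n * sum_i f(w^i) * w^i / (x - w^i).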
func (p *KZGProver) EvaluateLagrangeForm(
polynomial []curves.PairingScalar,
x curves.PairingScalar,
fftWidth uint64,
scale uint8,
) (curves.PairingScalar, error) {
if uint64(len(polynomial)) != fftWidth>>scale {
return nil, errors.Wrap(
errors.New("polynomial length does not match stride"),
"evaluate lagrange form",
)
}
width := p.curve.NewScalar().New(len(polynomial))
y := p.curve.NewScalar()
for i := 0; i < len(polynomial); i++ {
numerator := polynomial[i].Mul(RootsOfUnityBLS48581[fftWidth][i<<scale])
value := numerator.Div(x.Sub(
RootsOfUnityBLS48581[fftWidth][i<<scale]))
y = y.Add(value).(curves.PairingScalar)
}
xBI := x.BigInt()
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
xBI.Exp(xBI, width.BigInt(), q)
xBI.Sub(xBI, big.NewInt(1))
value, err := p.curve.NewScalar().SetBigInt(xBI)
if err != nil {
return nil, errors.Wrap(err, "evaluate lagrange form")
}
value = value.Div(width)
return y.Mul(value).(curves.PairingScalar), nil
}
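// ComputeChallenges derives Fiat-Shamir challenges by hashing the polynomials
// and commitments: the successive powers of one scalar weight the aggregation,
// and a second scalar serves as the evaluation point.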
func (p *KZGProver) ComputeChallenges(
polynomials [][]curves.PairingScalar,
commitments []curves.PairingPoint,
) ([]curves.PairingScalar, curves.Scalar, error) {
l := len(polynomials)
degree := len(polynomials[0])
h := p.hashFunc()
if _, err := h.Write([]byte("q_kzg_challenges")); err != nil {
return nil, nil, errors.Wrap(err, "could not write to hash")
}
if _, err := h.Write(binary.BigEndian.AppendUint32(
[]byte{},
uint32(l),
)); err != nil {
return nil, nil, errors.Wrap(err, "could not write to hash")
}
if _, err := h.Write(binary.BigEndian.AppendUint32(
[]byte{},
uint32(degree),
)); err != nil {
return nil, nil, errors.Wrap(err, "could not write to hash")
}
for _, poly := range polynomials {
for _, scalar := range poly {
if _, err := h.Write(scalar.Bytes()); err != nil {
return nil, nil, errors.Wrap(err, "could not write to hash")
}
}
}
for _, commitment := range commitments {
if _, err := h.Write(commitment.ToAffineCompressed()); err != nil {
return nil, nil, errors.Wrap(err, "could not write to hash")
}
}
result := h.Sum(nil)
powers := make([]curves.PairingScalar, len(commitments))
resultPow := append([]byte{}, result...)
resultPow = append(resultPow, 0x00)
rs := p.curve.NewScalar().Hash(resultPow)
eval := append([]byte{}, result...)
eval = append(eval, 0x01)
evalScalar := p.curve.NewScalar().Hash(eval)
s, err := p.curve.NewScalar().SetBigInt(big.NewInt(1))
if err != nil {
return nil, nil, errors.Wrap(err, "could not set bytes")
}
for i := range powers {
powers[i] = s.Clone().(curves.PairingScalar)
s = s.Mul(rs)
}
return powers, evalScalar, nil
}
func (p *KZGProver) AggregatePolynomialCommitment(
polynomials [][]curves.PairingScalar,
commitments []curves.PairingPoint,
) ([]curves.PairingScalar, curves.PairingPoint, curves.PairingScalar, error) {
powers, evalScalar, err := p.ComputeChallenges(
polynomials,
commitments,
)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "aggregate polynomial commitment")
}
pairEval, ok := evalScalar.(curves.PairingScalar)
if !ok {
return nil, nil, nil, errors.Wrap(
errors.New("invalid scalar"),
"aggregate polynomial commitment",
)
}
aggregatePolynomial, err := p.PolynomialLinearCombination(polynomials, powers)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "aggregate polynomial commitment")
}
aggregateCommitment, err := p.PointLinearCombination(commitments, powers)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "aggregate polynomial commitment")
}
return aggregatePolynomial, aggregateCommitment, pairEval, nil
}
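// Prove produces a KZG opening proof at z: it evaluates y = f(z), forms the
// quotient (f(w^i) - y) / (w^i - z) in evaluation form, and commits to that
// quotient against the Lagrange-basis SRS.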
func (p *KZGProver) Prove(
polynomial []curves.PairingScalar,
commitment curves.PairingPoint,
z curves.PairingScalar,
) (
curves.PairingPoint,
error,
) {
if nearestPowerOfTwo(uint64(len(polynomial))) != uint64(len(polynomial)) {
return nil, errors.Wrap(
errors.New("polynomial must be power of two"),
"prove",
)
}
y, err := p.EvaluateLagrangeForm(
polynomial,
z,
uint64(len(polynomial)),
0,
)
if err != nil {
return nil, errors.Wrap(err, "prove")
}
quotient := make([]curves.PairingScalar, len(polynomial))
for i := range quotient {
shifted := polynomial[i].Sub(y).(curves.PairingScalar)
if z.Cmp(RootsOfUnityBLS48581[uint64(len(polynomial))][i]) == 0 {
return nil, errors.Wrap(
errors.New("invalid challenge"),
"prove",
)
}
denominator := RootsOfUnityBLS48581[uint64(len(polynomial))][i].Sub(
z,
).(curves.PairingScalar)
quotient[i] = shifted.Div(denominator).(curves.PairingScalar)
}
r, err := p.PointLinearCombination(
FFTBLS48581[uint64(len(polynomial))],
quotient,
)
return r, errors.Wrap(err, "prove")
}
func (p *KZGProver) Commit(
polynomial []curves.PairingScalar,
) (curves.PairingPoint, error) {
commitment, err := p.PointLinearCombination(
FFTBLS48581[uint64(len(polynomial))],
polynomial,
)
return commitment, errors.Wrap(err, "commit")
}
func (p *KZGProver) CommitAggregate(
polynomials [][]curves.PairingScalar,
) ([]curves.PairingPoint, error) {
commitments := make([]curves.PairingPoint, len(polynomials))
for i, poly := range polynomials {
if nearestPowerOfTwo(uint64(len(poly))) != uint64(len(poly)) {
return nil, errors.Wrap(
errors.New("polynomial must be power of two"),
"prove aggregate",
)
}
var err error
commitments[i], err = p.Commit(poly)
if err != nil {
return nil, errors.Wrap(err, "commit aggregate")
}
}
return commitments, nil
}
func (p *KZGProver) ProveAggregate(
polynomials [][]curves.PairingScalar,
commitments []curves.PairingPoint,
) (
curves.PairingPoint,
curves.PairingPoint,
error,
) {
poly, commitment, challenge, err := p.AggregatePolynomialCommitment(
polynomials,
commitments,
)
if err != nil {
return nil, nil, errors.Wrap(err, "prove aggregate")
}
proof, err := p.Prove(poly, commitment, challenge)
return proof, commitment, errors.Wrap(err, "prove aggregate")
}
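// Verify checks the pairing relation e(proof, [tau - z]_2) == e(C - [y]_1, G2),
// i.e. that the committed polynomial evaluates to y at z.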
func (p *KZGProver) Verify(
commitment curves.PairingPoint,
z curves.PairingScalar,
y curves.PairingScalar,
proof curves.PairingPoint,
) bool {
z2 := p.curve.NewG2GeneratorPoint().Mul(z).(curves.PairingPoint)
y1 := p.curve.NewG1GeneratorPoint().Mul(y).(curves.PairingPoint)
xz := CeremonyBLS48581G2[1].Sub(z2).(curves.PairingPoint)
cy := commitment.Sub(y1).(curves.PairingPoint)
gt := xz.MultiPairing(
proof,
xz,
cy.Neg().(curves.PairingPoint),
p.curve.NewG2GeneratorPoint(),
)
return gt.IsOne()
}
func (p *KZGProver) VerifyAggregateProof(
polynomials [][]curves.PairingScalar,
commitments []curves.PairingPoint,
commitment curves.PairingPoint,
proof curves.PairingPoint,
) (bool, error) {
aggregatedPolynomial, aggregatedCommitment, challenge, err :=
p.AggregatePolynomialCommitment(polynomials, commitments)
if err != nil {
return false, errors.Wrap(err, "verify aggregate proof")
}
if !aggregatedCommitment.Equal(commitment) {
return false, errors.Wrap(
errors.New("aggregate commitment does not match"),
"verify aggregate proof",
)
}
y, err := p.EvaluateLagrangeForm(
aggregatedPolynomial,
challenge,
uint64(len(aggregatedPolynomial)),
0,
)
if err != nil {
return false, errors.Wrap(err, "verify aggregate proof")
}
return p.Verify(
aggregatedCommitment,
challenge,
y,
proof,
), nil
}

View File

@@ -1,358 +0,0 @@
package kzg_test
import (
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"os"
"sync"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves"
"source.quilibrium.com/quilibrium/monorepo/nekryptology/pkg/core/curves/native/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
)
func TestMain(m *testing.M) {
csBytes, err := os.ReadFile("./ceremony.json")
if err != nil {
panic(err)
}
cs := &kzg.CeremonyState{}
if err := json.Unmarshal(csBytes, cs); err != nil {
panic(err)
}
g1s := make([]curves.PairingPoint, 16)
g2s := make([]curves.PairingPoint, 2)
g1ffts := make([]curves.PairingPoint, 16)
wg := sync.WaitGroup{}
wg.Add(16)
for i := 0; i < 16; i++ {
i := i
go func() {
b, err := hex.DecodeString(cs.PowersOfTau.G1Affines[i][2:])
if err != nil {
panic(err)
}
g1, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(b)
if err != nil {
panic(err)
}
g1s[i] = g1.(curves.PairingPoint)
f, err := hex.DecodeString(cs.PowersOfTau.G1FFT[i][2:])
if err != nil {
panic(err)
}
g1fft, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(f)
if err != nil {
panic(err)
}
g1ffts[i] = g1fft.(curves.PairingPoint)
if i < 2 {
b, err := hex.DecodeString(cs.PowersOfTau.G2Affines[i][2:])
if err != nil {
panic(err)
}
g2, err := curves.BLS48581G2().NewGeneratorPoint().FromAffineCompressed(
b,
)
if err != nil {
panic(err)
}
g2s[i] = g2.(curves.PairingPoint)
}
wg.Done()
}()
}
wg.Wait()
kzg.CeremonyBLS48581G1 = g1s
kzg.CeremonyBLS48581G2 = g2s
// Post-ceremony, precompute everything and put it in the finalized ceremony
// state
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
sizes := []int64{16}
wg.Add(len(sizes))
root := make([]curves.PairingScalar, 1)
roots := make([][]curves.PairingScalar, 1)
reverseRoots := make([][]curves.PairingScalar, 1)
ffts := make([][]curves.PairingPoint, 1)
for idx, i := range sizes {
i := i
idx := idx
go func() {
exp := new(big.Int).Quo(
new(big.Int).Sub(q, big.NewInt(1)),
big.NewInt(i),
)
rootOfUnity := new(big.Int).Exp(big.NewInt(int64(37)), exp, q)
roots[idx] = make([]curves.PairingScalar, i+1)
reverseRoots[idx] = make([]curves.PairingScalar, i+1)
wg2 := sync.WaitGroup{}
wg2.Add(int(i))
for j := int64(0); j < i; j++ {
j := j
go func() {
rev := big.NewInt(int64(j))
r := new(big.Int).Exp(
rootOfUnity,
rev,
q,
)
scalar, _ := (&curves.ScalarBls48581{}).SetBigInt(r)
if rev.Cmp(big.NewInt(1)) == 0 {
root[idx] = scalar.(curves.PairingScalar)
}
roots[idx][j] = scalar.(curves.PairingScalar)
reverseRoots[idx][i-j] = roots[idx][j]
wg2.Done()
}()
}
wg2.Wait()
roots[idx][i] = roots[idx][0]
reverseRoots[idx][0] = reverseRoots[idx][i]
wg.Done()
}()
}
wg.Wait()
wg.Add(len(sizes))
for i := range root {
i := i
kzg.RootOfUnityBLS48581[uint64(sizes[i])] = root[i]
kzg.RootsOfUnityBLS48581[uint64(sizes[i])] = roots[i]
kzg.ReverseRootsOfUnityBLS48581[uint64(sizes[i])] = reverseRoots[i]
go func() {
// We precomputed 65536, others are cheap and will be fully precomputed
// post-ceremony
if sizes[i] < 65536 {
fftG1, err := kzg.FFTG1(
kzg.CeremonyBLS48581G1[:sizes[i]],
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
uint64(sizes[i]),
true,
)
if err != nil {
panic(err)
}
ffts[i] = fftG1
} else {
ffts[i] = g1ffts
}
wg.Done()
}()
}
wg.Wait()
for i := range root {
kzg.FFTBLS48581[uint64(sizes[i])] = ffts[i]
}
code := m.Run()
os.Exit(code)
}
func TestKzgBytesToPoly(t *testing.T) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
p := kzg.NewKZGProver(curves.BLS48581(curves.BLS48581G1().Point), sha3.New256, q)
poly, err := p.BytesToPolynomial([]byte(
"Did you ever hear the tragedy of Darth Plagueis The Wise? I thought not." +
" It's not a story the Jedi would tell you. It's a Sith legend. Darth " +
"Plagueis was a Dark Lord of the Sith, so powerful and so wise he could " +
"use the Force to influence the midichlorians to create life… He had such" +
" a knowledge of the dark side that he could even keep the ones he cared " +
"about from dying. The dark side of the Force is a pathway to many " +
"abilities some consider to be unnatural. He became so powerful… the only" +
" thing he was afraid of was losing his power, which eventually, of " +
"course, he did. Unfortunately, he taught his apprentice everything he " +
"knew, then his apprentice killed him in his sleep. Ironic. He could " +
"save others from death, but not himself."))
require.NoError(t, err)
t1, _ := hex.DecodeString("00000000000000000044696420796f7520657665722068656172207468652074726167656479206f6620446172746820506c6167756569732054686520576973653f20492074686f75")
t2, _ := hex.DecodeString("000000000000000000676874206e6f742e2049742773206e6f7420612073746f727920746865204a65646920776f756c642074656c6c20796f752e204974277320612053697468206c")
t3, _ := hex.DecodeString("0000000000000000006567656e642e20446172746820506c616775656973207761732061204461726b204c6f7264206f662074686520536974682c20736f20706f77657266756c2061")
t4, _ := hex.DecodeString("0000000000000000006e6420736f207769736520686520636f756c64207573652074686520466f72636520746f20696e666c75656e636520746865206d69646963686c6f7269616e73")
t5, _ := hex.DecodeString("00000000000000000020746f20637265617465206c696665e280a62048652068616420737563682061206b6e6f776c65646765206f6620746865206461726b20736964652074686174")
t6, _ := hex.DecodeString("00000000000000000020686520636f756c64206576656e206b65657020746865206f6e65732068652063617265642061626f75742066726f6d206479696e672e20546865206461726b")
t7, _ := hex.DecodeString("0000000000000000002073696465206f662074686520466f7263652069732061207061746877617920746f206d616e79206162696c697469657320736f6d6520636f6e736964657220")
t8, _ := hex.DecodeString("000000000000000000746f20626520756e6e61747572616c2e20486520626563616d6520736f20706f77657266756ce280a620746865206f6e6c79207468696e672068652077617320")
t9, _ := hex.DecodeString("000000000000000000616672616964206f6620776173206c6f73696e672068697320706f7765722c207768696368206576656e7475616c6c792c206f6620636f757273652c20686520")
t10, _ := hex.DecodeString("0000000000000000006469642e20556e666f7274756e6174656c792c20686520746175676874206869732061707072656e746963652065766572797468696e67206865206b6e65772c")
t11, _ := hex.DecodeString("000000000000000000207468656e206869732061707072656e74696365206b696c6c65642068696d20696e2068697320736c6565702e2049726f6e69632e20486520636f756c642073")
t12, _ := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000000000617665206f74686572732066726f6d2064656174682c20627574206e6f742068696d73656c662e")
target := [][]byte{t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12}
actual := [][]byte{}
for _, p := range poly {
actual = append(actual, p.Bytes())
fmt.Println(hex.EncodeToString(p.Bytes()))
}
require.Equal(t, target, actual)
}
func TestPolynomialCommitment(t *testing.T) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
p := kzg.NewKZGProver(curves.BLS48581(curves.BLS48581G1().Point), sha3.New256, q)
poly, err := p.BytesToPolynomial([]byte(
"Did you ever hear the tragedy of Darth Plagueis The Wise? I thought not." +
" It's not a story the Jedi would tell you. It's a Sith legend. Darth " +
"Plagueis was a Dark Lord of the Sith, so powerful and so wise he could " +
"use the Force to influence the midichlorians to create life… He had such" +
" a knowledge of the dark side that he could even keep the ones he cared " +
"about from dying. The dark side of the Force is a pathway to many " +
"abilities some consider to be unnatural. He became so powerful… the only" +
" thing he was afraid of was losing his power, which eventually, of " +
"course, he did. Unfortunately, he taught his apprentice everything he " +
"knew, then his apprentice killed him in his sleep. Ironic. He could " +
"save others from death, but not himself."))
require.NoError(t, err)
for i := len(poly); i < 16; i++ {
poly = append(poly, curves.BLS48581G1().NewScalar().(curves.PairingScalar))
}
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
16,
false,
)
require.NoError(t, err)
commitByCoeffs, err := p.PointLinearCombination(
kzg.CeremonyBLS48581G1[:16],
poly,
)
require.NoError(t, err)
commitByEval, err := p.PointLinearCombination(
kzg.FFTBLS48581[16],
evalPoly,
)
require.NoError(t, err)
fmt.Println(commitByCoeffs.ToAffineCompressed())
fmt.Println(commitByEval.ToAffineCompressed())
require.True(t, commitByCoeffs.Equal(commitByEval))
}
func TestKZGProof(t *testing.T) {
modulus := make([]byte, 73)
bls48581.NewBIGints(bls48581.CURVE_Order, nil).ToBytes(modulus)
q := new(big.Int).SetBytes(modulus)
p := kzg.NewKZGProver(curves.BLS48581(curves.BLS48581G1().Point), sha3.New256, q)
poly, err := p.BytesToPolynomial([]byte(
"Did you ever hear the tragedy of Darth Plagueis The Wise? I thought not." +
" It's not a story the Jedi would tell you. It's a Sith legend. Darth " +
"Plagueis was a Dark Lord of the Sith, so powerful and so wise he could " +
"use the Force to influence the midichlorians to create life… He had such" +
" a knowledge of the dark side that he could even keep the ones he cared " +
"about from dying. The dark side of the Force is a pathway to many " +
"abilities some consider to be unnatural. He became so powerful… the only" +
" thing he was afraid of was losing his power, which eventually, of " +
"course, he did. Unfortunately, he taught his apprentice everything he " +
"knew, then his apprentice killed him in his sleep. Ironic. He could " +
"save others from death, but not himself."))
require.NoError(t, err)
for i := len(poly); i < 16; i++ {
poly = append(poly, curves.BLS48581G1().NewScalar().(curves.PairingScalar))
}
evalPoly, err := kzg.FFT(
poly,
*curves.BLS48581(
curves.BLS48581G1().NewGeneratorPoint(),
),
16,
true,
)
require.NoError(t, err)
commit, err := p.Commit(poly)
require.NoError(t, err)
z := kzg.RootsOfUnityBLS48581[16][2]
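// Evaluate the polynomial at z via Horner's rule; checky is the claimed opening value P(z).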
checky := evalPoly[len(poly)-1]
for i := len(evalPoly) - 2; i >= 0; i-- {
checky = checky.Mul(z).Add(evalPoly[i]).(curves.PairingScalar)
}
fmt.Printf("%+x\n", checky.Bytes())
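// divisors encodes the linear divisor (x - z) as the coefficient pair [-z, 1].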
divisors := make([]curves.PairingScalar, 2)
divisors[0] = (&curves.ScalarBls48581{}).Zero().Sub(z).(*curves.ScalarBls48581)
divisors[1] = (&curves.ScalarBls48581{}).One().(*curves.ScalarBls48581)
a := make([]curves.PairingScalar, len(evalPoly))
for i := 0; i < len(a); i++ {
a[i] = evalPoly[i].Clone().(*curves.ScalarBls48581)
}
// Adapted from Feist's amortized proofs:
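// The loop below is a synthetic division of the polynomial by (x - z); the
// quotient accumulates in out and serves as the witness for the opening at z.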
aPos := len(a) - 1
bPos := len(divisors) - 1
diff := aPos - bPos
out := make([]curves.PairingScalar, diff+1, diff+1)
for diff >= 0 {
out[diff] = a[aPos].Div(divisors[bPos]).(*curves.ScalarBls48581)
for i := bPos; i >= 0; i-- {
a[diff+i] = a[diff+i].Sub(
out[diff].Mul(divisors[i]),
).(*curves.ScalarBls48581)
}
aPos -= 1
diff -= 1
}
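// Commit to the quotient against the ceremony G1 powers to obtain the opening proof.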
proof, err := p.PointLinearCombination(kzg.CeremonyBLS48581G1[:15], out)
// proof, err := p.Prove(evalPoly, commit, z.(curves.PairingScalar))
require.NoError(t, err)
require.True(t, p.Verify(commit, z, checky, proof))
commitments, err := p.CommitAggregate(
[][]curves.PairingScalar{evalPoly},
)
require.NoError(t, err)
proof, commitment, err := p.ProveAggregate(
[][]curves.PairingScalar{evalPoly},
commitments,
)
require.NoError(t, err)
valid, err := p.VerifyAggregateProof(
[][]curves.PairingScalar{evalPoly},
commitments,
commitment,
proof,
)
require.False(t, proof.IsIdentity())
require.NoError(t, err)
require.False(t, valid)
}

View File

@ -1,486 +0,0 @@
package crypto
import (
"go.uber.org/zap"
rbls48581 "source.quilibrium.com/quilibrium/monorepo/bls48581"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
)
type KZGInclusionProver struct {
prover *kzg.KZGProver
logger *zap.Logger
}
func NewKZGInclusionProver(logger *zap.Logger) *KZGInclusionProver {
return &KZGInclusionProver{
prover: kzg.DefaultKZGProver(),
logger: logger,
}
}
// // Commit implements InclusionProver.
// func (k *KZGInclusionProver) Commit(
// data []byte,
// typeUrl string,
// ) (*InclusionCommitment, error) {
// if typeUrl == protobufs.IntrinsicExecutionOutputType {
// digest := sha3.NewShake256()
// _, err := digest.Write(data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// expand := make([]byte, 1024)
// _, err = digest.Read(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// poly, err := k.prover.BytesToPolynomial(expand)
// if err != nil {
// return nil, errors.Wrap(err, "commit")
// }
// k.logger.Debug("proving execution output for inclusion")
// polys, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 16,
// false,
// )
// if err != nil {
// return nil, errors.Wrap(err, "prove")
// }
// k.logger.Debug("converted execution output chunk to evaluation form")
// k.logger.Debug("creating kzg commitment")
// points, err := k.prover.Commit(polys)
// if err != nil {
// return nil, errors.Wrap(err, "prove")
// }
// return &InclusionCommitment{
// TypeUrl: typeUrl,
// Data: data,
// Commitment: points.ToAffineCompressed(),
// }, nil
// }
// poly, err := k.prover.BytesToPolynomial(data)
// if err != nil {
// return nil, errors.Wrap(err, "commit")
// }
// points, err := k.prover.Commit(poly)
// if err != nil {
// return nil, errors.Wrap(err, "commit")
// }
// return &InclusionCommitment{
// TypeUrl: typeUrl,
// Data: data,
// Commitment: points.ToAffineCompressed(),
// }, nil
// }
// // ProveAggregate implements InclusionProver.
// func (k *KZGInclusionProver) ProveAggregate(
// commits []*InclusionCommitment,
// ) (*InclusionAggregateProof, error) {
// polys := [][]curves.PairingScalar{}
// commitPoints := []curves.PairingPoint{}
// for _, commit := range commits {
// switch commit.TypeUrl {
// case protobufs.IntrinsicExecutionOutputType:
// k.logger.Debug("confirming inclusion in aggregate")
// digest := sha3.NewShake256()
// _, err := digest.Write(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// expand := make([]byte, 1024)
// _, err = digest.Read(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// poly, err := k.prover.BytesToPolynomial(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 16,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
// polys = append(polys, evalPoly)
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// return nil, errors.Wrap(err, "prove aggregate")
// }
// commitPoints = append(commitPoints, c.(curves.PairingPoint))
// default:
// k.logger.Debug("confirming inclusion in aggregate")
// poly, err := k.prover.BytesToPolynomial(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// for i := 0; i < 1024-len(poly); i++ {
// poly = append(
// poly,
// curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
// )
// }
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 1024,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return nil, errors.Wrap(err, "prove aggregate")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
// polys = append(polys, evalPoly)
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return nil, errors.Wrap(err, "prove aggregate")
// }
// commitPoints = append(commitPoints, c.(curves.PairingPoint))
// }
// }
// proof, commitment, err := k.prover.ProveAggregate(
// polys,
// commitPoints,
// )
// if err != nil {
// return nil, errors.Wrap(err, "prove aggregate")
// }
// if proof.IsIdentity() {
// return nil, errors.Wrap(errors.New("invalid proof"), "prove aggregate")
// }
// return &InclusionAggregateProof{
// InclusionCommitments: commits,
// AggregateCommitment: commitment.ToAffineCompressed(),
// Proof: proof.ToAffineCompressed(),
// }, nil
// }
// // VerifyAggregate implements InclusionProver.
// func (k *KZGInclusionProver) VerifyAggregate(
// proof *InclusionAggregateProof,
// ) (bool, error) {
// polys := [][]curves.PairingScalar{}
// commitPoints := []curves.PairingPoint{}
// for _, commit := range proof.InclusionCommitments {
// poly, err := k.prover.BytesToPolynomial(commit.Data)
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
// polys = append(polys, poly)
// point, err := curves.BLS48581G1().Point.FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
// commitPoints = append(commitPoints, point.(curves.PairingPoint))
// }
// aggregate, err := curves.BLS48581G1().Point.FromAffineCompressed(
// proof.AggregateCommitment,
// )
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
// proofPoint, err := curves.BLS48581G1().Point.FromAffineCompressed(
// proof.Proof,
// )
// if err != nil {
// return false, errors.Wrap(err, "verify aggregate")
// }
// verify, err := k.prover.VerifyAggregateProof(
// polys,
// commitPoints,
// aggregate.(curves.PairingPoint),
// proofPoint.(curves.PairingPoint),
// )
// return verify, errors.Wrap(err, "verify aggregate")
// }
// func (k *KZGInclusionProver) VerifyFrame(
// frame *protobufs.ClockFrame,
// ) error {
// aggregateCommitments := []curves.PairingPoint{}
// for i := 0; i < (len(frame.Input)-516)/74; i++ {
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// frame.Input[516+(i*74) : 516+(i*74)+74],
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// aggregateCommitments = append(aggregateCommitments, c.(curves.PairingPoint))
// }
// if len(aggregateCommitments) != len(frame.AggregateProofs) {
// k.logger.Error(
// "commit length mismatched proof for frame",
// zap.Int("commit_length", len(aggregateCommitments)),
// zap.Int("proof_length", len(frame.AggregateProofs)),
// )
// return errors.Wrap(
// errors.New("commit length mismatched proof for frame"),
// "verify frame",
// )
// }
// for i, proof := range frame.AggregateProofs {
// aggregatePoly := [][]curves.PairingScalar{}
// commitments := []curves.PairingPoint{}
// for _, commit := range proof.GetInclusionCommitments() {
// switch commit.TypeUrl {
// case protobufs.IntrinsicExecutionOutputType:
// k.logger.Debug("confirming inclusion in aggregate")
// digest := sha3.NewShake256()
// _, err := digest.Write(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// expand := make([]byte, 1024)
// _, err = digest.Read(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// poly, err := k.prover.BytesToPolynomial(expand)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 16,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
// aggregatePoly = append(aggregatePoly, evalPoly)
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// commitments = append(commitments, c.(curves.PairingPoint))
// default:
// k.logger.Debug("confirming inclusion in aggregate")
// poly, err := k.prover.BytesToPolynomial(commit.Data)
// if err != nil {
// k.logger.Error(
// "error converting key bundle to polynomial",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// for i := 0; i < 1024-len(poly); i++ {
// poly = append(
// poly,
// curves.BLS48581G1().Scalar.Zero().(curves.PairingScalar),
// )
// }
// evalPoly, err := kzg.FFT(
// poly,
// *curves.BLS48581(
// curves.BLS48581G1().NewGeneratorPoint(),
// ),
// 1024,
// false,
// )
// if err != nil {
// k.logger.Error(
// "error performing fast fourier transform on key bundle",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// k.logger.Debug(
// "created fft of polynomial",
// zap.Int("poly_size", len(evalPoly)),
// )
// aggregatePoly = append(aggregatePoly, evalPoly)
// c, err := curves.BLS48581G1().NewGeneratorPoint().FromAffineCompressed(
// commit.Commitment,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// commitments = append(commitments, c.(curves.PairingPoint))
// }
// }
// p, err := curves.BLS48581G1().Point.FromAffineCompressed(
// proof.Proof,
// )
// if err != nil {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(err, "verify frame")
// }
// result, err := k.prover.VerifyAggregateProof(
// aggregatePoly,
// commitments,
// aggregateCommitments[i],
// p.(curves.PairingPoint),
// )
// if err != nil {
// k.logger.Error(
// "could not verify clock frame",
// zap.Error(err),
// )
// return errors.Wrap(err, "verify frame")
// }
// if !result {
// k.logger.Error("could not verify clock frame", zap.Error(err))
// return errors.Wrap(
// errors.New("invalid proof"),
// "verify frame",
// )
// }
// }
// return nil
// }
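// CommitRaw commits to data interpreted as a polynomial of polySize chunks,
// delegating to the external bls48581 implementation.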
func (k *KZGInclusionProver) CommitRaw(
data []byte,
polySize uint64,
) ([]byte, error) {
return rbls48581.CommitRaw(data, polySize), nil
}
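// ProveRaw produces an opening proof for the chunk at index within data,
// delegating to the external bls48581 implementation.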
func (k *KZGInclusionProver) ProveRaw(
data []byte,
index int,
polySize uint64,
) ([]byte, error) {
return rbls48581.ProveRaw(data, uint64(index), polySize), nil
}
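// VerifyRaw verifies an opening proof for the chunk at index against commit,
// delegating to the external bls48581 implementation.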
func (k *KZGInclusionProver) VerifyRaw(
data []byte,
commit []byte,
index int,
proof []byte,
polySize uint64,
) (bool, error) {
return rbls48581.VerifyRaw(data, commit, uint64(index), proof, polySize), nil
}
var _ InclusionProver = (*KZGInclusionProver)(nil)

View File

@ -1,86 +0,0 @@
package crypto_test
import (
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/kzg"
)
// func TestKZGVerifyFrame(t *testing.T) {
// kzg.TestInit("./kzg/ceremony.json")
// data := make([]byte, 1024)
// rand.Read(data)
// l, _ := zap.NewProduction()
// inclusionProver := crypto.NewKZGInclusionProver(l)
// commitment, err := inclusionProver.Commit(
// data,
// protobufs.IntrinsicExecutionOutputType,
// )
// assert.NoError(t, err)
// proof, err := inclusionProver.ProveAggregate(
// []*crypto.InclusionCommitment{commitment},
// )
// assert.NoError(t, err)
// frame := &protobufs.ClockFrame{
// Filter: []byte{0x00},
// FrameNumber: 1,
// Input: bytes.Repeat([]byte{0x00}, 516),
// Output: bytes.Repeat([]byte{0x00}, 516),
// }
// _, priv, _ := ed448.GenerateKey(rand.Reader)
// w := crypto.NewWesolowskiFrameProver(l)
// frame, err = w.ProveDataClockFrame(
// frame,
// [][]byte{proof.AggregateCommitment},
// []*protobufs.InclusionAggregateProof{
// {
// Filter: []byte{0x00},
// FrameNumber: 1,
// InclusionCommitments: []*protobufs.InclusionCommitment{
// {
// Filter: []byte{0x00},
// FrameNumber: 1,
// TypeUrl: proof.InclusionCommitments[0].TypeUrl,
// Commitment: proof.InclusionCommitments[0].Commitment,
// Data: data,
// Position: 0,
// },
// },
// Proof: proof.Proof,
// },
// },
// priv,
// time.Now().UnixMilli(),
// 100,
// )
// err = inclusionProver.VerifyFrame(frame)
// assert.NoError(t, err)
// }
func TestKZGInclusionProverRawFuncs(t *testing.T) {
kzg.TestInit("./kzg/ceremony.json")
data, _ := hex.DecodeString("408f9f0a63a1c463579a1fdaf82b37e0f397476e87c524915870ce7f5ede9c248493ea4ffefae154b8a55f10add4d75846b273a7f57433b438ae72880a29ab7cab6c3187a14651bac085329778526ebb31d14c9beb7b0983ff5e71a47c96ed9e7149e9e896cd4d604191583a282bdb5a92ea71334f296fd06498323b0c5d0e60c04180a7141813f6f9a6c766c450898ffc437ebed07a2fbd9201207171a0a8f5006a83d9e2430687952dd42237b7d77de61c0655b91bb1943ed4b9337449ded69ef8f2f83fba58827be7b7082db048b799f1bb590f61c558976910e77357562eb4d66fc97636c26ea562fe18b4cc397e679acad23cfd003ae93efe2903534ce1fe475eba3c82fef71554b4d63b593f2da3fea3b1b3f91379c6ff1989c91eaab70e336d96f3c46de987ef7165d111f692fe8205f7df0eb854fc550aa0d10942049dec4c60d99a51b0a7cde49a6d5e9364d0162cb86af1a51efeffacf7935f796f18cb868756e693aa967339efb8e45071da835ff8b6897fe56dc14edb49352edc88d3a6866873ecfa2bf968907e86c0dd139ab9a23bae341ec6aa5f1fbac2390a9d7f5ef9346d5c433268bf85e34e98295233f5e0d2ceb35c47b33b93e8ae9445c3b9f6ec32d8e3a1a1bc95b013dd36a84d803e468e873420c71b6473e44300f4d2702ccb452146c675d5ac1511a0b0a61a857b58ed3365ecdc1cafafbdfe5f0f2420389ae5f54d2fb9d12de314b416fdb12786fb66d0517229347ecc347eb8207a88abeffbdb9acfc582047a9343efae6c21cf67566e2d949920bdff1f4cea376332dd503c9dcd72a776744724c29a25038ef582f1103b406321e14d0f232c709b3d5a3568c75a1bc244b65e18d9ca7c53e2e13bb5638c325f6d43601de131aa2e3b7ffcc23accf6c69e9c6360cf8f4d48de3f11354855ec281f8a9c85caec0b8284c99c66a43ed0c37d6ce0f5c349e4551da6a1d9edcfa02f6be27ed037c5ec79c0519ba60725f89b3fe7826ca1a7b157ef9360bc2007bc2b9dd2ba8fdc225047a9f66b832e2da1dc6019f480e3aadb46ba93cccbd1e7b221a5d36e0fc96cbf497bfb40ff0276f14b7d45c4738a1b755e2754c5c352ac4af96c1a9be1d92942200b325cc3c53e9b3099c99a466bdc6c001179f6c63f828936b1c33f651a150c080b2eac8ed7cb9cfe599daee477f9ba88a6d1cbdeb08995c3c7bcce18ee2946c2beb138b8c797f61c6c33800ffeda74b77dab186cc4c7e91e9aca954d4863de6b04a82ef563a6eefbedec8fdc9284fb33e15197d2512e4928019fc29aa9c0a199797ef02c8daeb8706dd21a0e6b25b0e73795bac18dfaac2abc1defddf530f6a14046c2a918fa581b7ab0240bbd4f2e570a527581cb0a39bb544ceeabeedf891bc2417ac1e1fa558c09a9ceffef108a5778ff99a8575b4fb69cbbfb2c474d58")
l, _ := zap.NewProduction()
inclusionProver := crypto.NewKZGInclusionProver(l)
c, err := inclusionProver.CommitRaw(data, 1024)
assert.NoError(t, err)
p, err := inclusionProver.ProveRaw(data, 0, 1024)
assert.NoError(t, err)
v, err := inclusionProver.VerifyRaw(data[64*0:64*1], c, 0, p, 1024)
assert.NoError(t, err)
assert.False(t, v)
}

View File

@ -1,468 +0,0 @@
package shuffle
import (
"crypto/rand"
"math/big"
"filippo.io/edwards25519"
)
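// lBE is the order of the Ed25519 prime-order subgroup,
// l = 2^252 + 27742317777372353535851937790883648493, in big-endian form.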
var lBE = []byte{
16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 222, 249, 222, 162, 247,
156, 214, 88, 18, 99, 26, 92, 245, 211, 236,
}
var lBigInt = big.NewInt(0).SetBytes(lBE)
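// genPolyFrags Shamir-splits secret into total shares with the given
// reconstruction threshold: it samples a random polynomial of degree
// threshold-1 with constant term secret and evaluates it at x = 1..total.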
func genPolyFrags(
secret *edwards25519.Scalar,
total, threshold int,
) []*edwards25519.Scalar {
coeffs := []*edwards25519.Scalar{}
coeffs = append(coeffs, secret)
for i := 1; i < threshold; i++ {
coeffBI, _ := rand.Int(rand.Reader, lBigInt)
coeff := BigIntToLEBytes(coeffBI)
scalar, err := edwards25519.NewScalar().SetCanonicalBytes(coeff[:])
if err != nil {
panic(err)
}
coeffs = append(coeffs, scalar)
}
frags := []*edwards25519.Scalar{}
for i := 1; i <= total; i++ {
result, _ := edwards25519.NewScalar().SetCanonicalBytes(coeffs[0].Bytes())
iBytes := BigIntToLEBytes(big.NewInt(int64(i)))
x, err := edwards25519.NewScalar().SetCanonicalBytes(iBytes)
if err != nil {
panic(err)
}
for j := 1; j <= threshold-1; j++ {
xi := edwards25519.NewScalar().Multiply(coeffs[j], x)
result.Add(result, xi)
xmul, _ := edwards25519.NewScalar().SetCanonicalBytes(iBytes)
x.Multiply(x, xmul)
}
frags = append(frags, result)
}
return frags
}
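// ShamirSplitMatrix splits every entry of matrix into total Shamir shares
// with the given threshold; the result is indexed as [x][y][share].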
func ShamirSplitMatrix(
matrix [][]*edwards25519.Scalar,
total, threshold int,
) [][][]*edwards25519.Scalar {
shamirMatrix := make([][][]*edwards25519.Scalar, len(matrix))
for x := 0; x < len(matrix); x++ {
shamirMatrix[x] = make([][]*edwards25519.Scalar, len(matrix[0]))
for y := 0; y < len(matrix[0]); y++ {
shamirMatrix[x][y] = genPolyFrags(matrix[x][y], total, threshold)
}
}
return shamirMatrix
}
func AddMatrices(matrices ...[][]*edwards25519.Scalar) [][]*edwards25519.Scalar {
result := make([][]*edwards25519.Scalar, len(matrices[0]))
for x := 0; x < len(matrices[0]); x++ {
result[x] = make([]*edwards25519.Scalar, len(matrices[0][0]))
for y := 0; y < len(matrices[0][0]); y++ {
result[x][y] = edwards25519.NewScalar()
for i := 0; i < len(matrices); i++ {
result[x][y].Add(result[x][y], matrices[i][x][y])
}
}
}
return result
}
func GenerateRandomVectorShares(
length, total, threshold int,
) [][]*edwards25519.Scalar {
result := make([][]*edwards25519.Scalar, length)
for i := 0; i < length; i++ {
bi, _ := rand.Int(rand.Reader, lBigInt)
biBytes := BigIntToLEBytes(bi)
scalar, _ := edwards25519.NewScalar().SetCanonicalBytes(biBytes[:])
result[i] = genPolyFrags(scalar, total, threshold)
}
return result
}
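// InterpolatePolynomialShares reconstructs the shared secret (the polynomial
// evaluated at x = 0) from the shares belonging to the given ids via Lagrange
// interpolation.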
func InterpolatePolynomialShares(
shares []*edwards25519.Scalar,
ids []int,
) *edwards25519.Scalar {
var reconstructedSum *edwards25519.Scalar
for j := 0; j < len(ids); j++ {
oneLENumBytes := BigIntToLEBytes(big.NewInt(1))
coeffNum, _ := edwards25519.NewScalar().SetCanonicalBytes(oneLENumBytes)
coeffDenom, _ := edwards25519.NewScalar().SetCanonicalBytes(oneLENumBytes)
for k := 0; k < len(ids); k++ {
if j != k {
ikBytes := BigIntToLEBytes(big.NewInt(int64(ids[k])))
ijBytes := BigIntToLEBytes(big.NewInt(int64(ids[j])))
ikScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(ikBytes)
ijScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(ijBytes)
coeffNum.Multiply(coeffNum, ikScalar)
ikScalar.Subtract(ikScalar, ijScalar)
coeffDenom.Multiply(coeffDenom, ikScalar)
}
}
coeffDenom.Invert(coeffDenom)
coeffNum.Multiply(coeffNum, coeffDenom)
reconstructedFrag := edwards25519.NewScalar().Multiply(
coeffNum,
shares[ids[j]-1],
)
if reconstructedSum == nil {
reconstructedSum = reconstructedFrag
} else {
reconstructedSum.Add(reconstructedSum, reconstructedFrag)
}
}
return reconstructedSum
}
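// LUDecompose performs an LU decomposition of matrix with partial pivoting
// over the scalar field, returning the combined LU matrix and the row
// permutation matrix.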
func LUDecompose(
matrix [][]*edwards25519.Scalar,
) ([][]*edwards25519.Scalar, [][]*edwards25519.Scalar) {
imax := 0
maxA := edwards25519.NewScalar()
N := len(matrix)
p := make([]int, N)
pm := make([][]*edwards25519.Scalar, N)
newA := make([][]*edwards25519.Scalar, N)
for i := 0; i < N; i++ {
newA[i] = make([]*edwards25519.Scalar, N)
pm[i] = make([]*edwards25519.Scalar, N)
p[i] = i
for j := 0; j < N; j++ {
newA[i][j], _ = edwards25519.NewScalar().SetCanonicalBytes(
matrix[i][j].Bytes(),
)
}
}
scalarOne, _ := edwards25519.NewScalar().SetCanonicalBytes(
BigIntToLEBytes(big.NewInt(int64(1))),
)
for i := 0; i < N; i++ {
maxA = edwards25519.NewScalar()
imax = i
for k := i; k < N; k++ {
if LEBytesToBigInt(newA[k][i].Bytes()).Cmp(
LEBytesToBigInt(maxA.Bytes()),
) > 0 {
maxA = newA[k][i]
imax = k
}
}
if imax != i {
//pivoting P
j := p[i]
p[i] = p[imax]
p[imax] = j
//pivoting rows of A
ptr := newA[i]
newA[i] = newA[imax]
newA[imax] = ptr
}
for j := i + 1; j < N; j++ {
newA[j][i].Multiply(
newA[j][i],
edwards25519.NewScalar().Invert(newA[i][i]),
)
for k := i + 1; k < N; k++ {
newA[j][k].Subtract(newA[j][k], edwards25519.NewScalar().Multiply(
newA[j][i],
newA[i][k],
))
}
}
}
for i := 0; i < N; i++ {
for j := 0; j < N; j++ {
if p[i] == j {
pm[i][j] = scalarOne
} else {
pm[i][j] = edwards25519.NewScalar()
}
}
}
return newA, pm
}
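// Invert computes the matrix inverse from the LU decomposition by forward and
// back substitution against each column of the permutation matrix.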
func Invert(matrix [][]*edwards25519.Scalar) [][]*edwards25519.Scalar {
a, p := LUDecompose(matrix)
ia := make([][]*edwards25519.Scalar, len(matrix))
for i := 0; i < len(matrix); i++ {
ia[i] = make([]*edwards25519.Scalar, len(matrix))
}
for j := 0; j < len(matrix); j++ {
for i := 0; i < len(matrix); i++ {
ia[i][j] = edwards25519.NewScalar().Set(p[i][j])
for k := 0; k < i; k++ {
ia[i][j].Subtract(ia[i][j], edwards25519.NewScalar().Multiply(
a[i][k],
ia[k][j],
))
}
}
for i := len(matrix) - 1; i >= 0; i-- {
for k := i + 1; k < len(matrix); k++ {
ia[i][j].Subtract(ia[i][j], edwards25519.NewScalar().Multiply(
a[i][k],
ia[k][j],
))
}
ia[i][j].Multiply(ia[i][j], edwards25519.NewScalar().Invert(a[i][i]))
}
}
return ia
}
func InterpolateMatrixShares(
matrixShares [][][]*edwards25519.Scalar,
ids []int,
) [][]*edwards25519.Scalar {
matrix := make([][]*edwards25519.Scalar, len(matrixShares))
for x := 0; x < len(matrix); x++ {
matrix[x] = make([]*edwards25519.Scalar, len(matrixShares[0]))
for y := 0; y < len(matrix[0]); y++ {
matrix[x][y] = InterpolatePolynomialShares(matrixShares[x][y], ids)
}
}
return matrix
}
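// ScalarMult multiplies every entry of b by the integer a, mapping negative
// values into the field as 0 - |a|.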
func ScalarMult(a int, b [][]*edwards25519.Scalar) [][]*edwards25519.Scalar {
prod := make([][]*edwards25519.Scalar, len(b))
for x := 0; x < len(b); x++ {
prod[x] = make([]*edwards25519.Scalar, len(b[0]))
for y := 0; y < len(b[0]); y++ {
if a >= 0 {
prod[x][y], _ = edwards25519.NewScalar().SetCanonicalBytes(
BigIntToLEBytes(big.NewInt(int64(a))),
)
} else {
negA, _ := edwards25519.NewScalar().SetCanonicalBytes(
BigIntToLEBytes(big.NewInt(int64(-a))),
)
prod[x][y] = edwards25519.NewScalar().Subtract(
edwards25519.NewScalar(),
negA,
)
}
prod[x][y] = prod[x][y].Multiply(prod[x][y], b[x][y])
}
}
return prod
}
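// GenerateDotProduct returns the matrix product a * b and panics if the inner
// dimensions do not match.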
func GenerateDotProduct(
a, b [][]*edwards25519.Scalar,
) [][]*edwards25519.Scalar {
if len(a[0]) != len(b) {
panic("cannot generate dot product of a and b - mismatched length")
}
abMatrix := make([][]*edwards25519.Scalar, len(a))
for x := 0; x < len(a); x++ {
abMatrix[x] = make([]*edwards25519.Scalar, len(b[0]))
for y := 0; y < len(b[0]); y++ {
abMatrix[x][y] = edwards25519.NewScalar()
for ay := 0; ay < len(a[0]); ay++ {
abMatrix[x][y].MultiplyAdd(a[x][ay], b[ay][y], abMatrix[x][y])
}
}
}
return abMatrix
}
func GenerateRandomMatrixAndInverseShares(
size, total, threshold int,
) [2][][][]*edwards25519.Scalar {
output := make([][]*edwards25519.Scalar, size)
for x := 0; x < size; x++ {
output[x] = make([]*edwards25519.Scalar, size)
for y := 0; y < size; y++ {
i, _ := rand.Int(rand.Reader, lBigInt)
iBytes := BigIntToLEBytes(i)
iScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(iBytes[:])
output[x][y] = iScalar
}
}
splitOutput := ShamirSplitMatrix(output, total, threshold)
splitInverse := ShamirSplitMatrix(Invert(output), total, threshold)
return [2][][][]*edwards25519.Scalar{splitOutput, splitInverse}
}
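// GenerateRandomBeaverTripleMatrixShares produces Shamir shares of a Beaver
// triple of size x size matrices (U, V, UV) with UV = U * V.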
func GenerateRandomBeaverTripleMatrixShares(
size, total, threshold int,
) [3][][][]*edwards25519.Scalar {
uMatrix := make([][]*edwards25519.Scalar, size)
vMatrix := make([][]*edwards25519.Scalar, size)
for i := 0; i < size; i++ {
uMatrix[i] = make([]*edwards25519.Scalar, size)
vMatrix[i] = make([]*edwards25519.Scalar, size)
for j := 0; j < size; j++ {
uj, _ := rand.Int(rand.Reader, lBigInt)
ujBytes := BigIntToLEBytes(uj)
ujScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(ujBytes[:])
vj, _ := rand.Int(rand.Reader, lBigInt)
vjBytes := BigIntToLEBytes(vj)
vjScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(vjBytes[:])
uMatrix[i][j] = ujScalar
vMatrix[i][j] = vjScalar
}
}
uvMatrix := GenerateDotProduct(uMatrix, vMatrix)
uMatrixShares := ShamirSplitMatrix(uMatrix, total, threshold)
vMatrixShares := ShamirSplitMatrix(vMatrix, total, threshold)
uvMatrixShares := ShamirSplitMatrix(uvMatrix, total, threshold)
return [3][][][]*edwards25519.Scalar{
uMatrixShares, vMatrixShares, uvMatrixShares,
}
}
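// GeneratePermutationMatrix returns a uniformly random size x size
// permutation matrix: exactly one 1 in each row and column, 0 elsewhere.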
func GeneratePermutationMatrix(size int) [][]*edwards25519.Scalar {
matrix := [][]*edwards25519.Scalar{}
elements := []int{}
for i := 0; i < size; i++ {
elements = append(elements, i)
}
for i := 0; i < size; i++ {
pos, _ := rand.Int(rand.Reader, big.NewInt(int64(len(elements))))
var vecPos int
elements, vecPos = remove(elements, int(pos.Int64()))
scalarOne, err := edwards25519.NewScalar().SetCanonicalBytes(
BigIntToLEBytes(big.NewInt(1)),
)
if err != nil {
panic(err)
}
vector := []*edwards25519.Scalar{}
for j := 0; j < vecPos; j++ {
scalarZero, err := edwards25519.NewScalar().SetCanonicalBytes(
BigIntToLEBytes(big.NewInt(0)),
)
if err != nil {
panic(err)
}
vector = append(vector, scalarZero)
}
vector = append(vector, scalarOne)
for j := vecPos + 1; j < size; j++ {
scalarZero, err := edwards25519.NewScalar().SetCanonicalBytes(
BigIntToLEBytes(big.NewInt(0)),
)
if err != nil {
panic(err)
}
vector = append(vector, scalarZero)
}
matrix = append(matrix, vector)
}
return matrix
}
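// BigIntToLEBytes encodes bi as little-endian bytes, zero-padded to 32 bytes.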
func BigIntToLEBytes(bi *big.Int) []byte {
b := bi.Bytes()
last := len(b) - 1
for i := 0; i < len(b)/2; i++ {
b[i], b[last-i] = b[last-i], b[i]
}
for i := len(b); i < 32; i++ {
b = append(b, 0x00)
}
return b
}
func LEBytesToBigInt(bytes []byte) *big.Int {
b := make([]byte, len(bytes))
last := len(b) - 1
for i := 0; i < len(b)/2; i++ {
b[i], b[last-i] = b[last-i], b[i]
}
res := big.NewInt(0)
return res.SetBytes(b)
}
func remove(elements []int, i int) ([]int, int) {
ret := elements[i]
elements[i] = elements[len(elements)-1]
newElements := []int{}
newElements = append(newElements, elements[:len(elements)-1]...)
return newElements, ret
}

View File

@ -1,536 +0,0 @@
package shuffle_test
import (
"fmt"
"math/big"
"testing"
"filippo.io/edwards25519"
"github.com/stretchr/testify/assert"
"source.quilibrium.com/quilibrium/monorepo/node/crypto/shuffle"
)
func TestGeneratePermutationMatrix(t *testing.T) {
m := shuffle.GeneratePermutationMatrix(6)
for _, x := range m {
ySum := byte(0x00)
for _, y := range x {
ySum += y.Bytes()[0]
}
assert.Equal(t, ySum, byte(0x01))
}
for x := 0; x < len(m); x++ {
xSum := byte(0x00)
for y := 0; y < len(m); y++ {
xSum += m[y][x].Bytes()[0]
}
assert.Equal(t, xSum, byte(0x01))
}
}
func verifyLagrange(t *testing.T, shares []*edwards25519.Scalar, expected *edwards25519.Scalar, total, threshold int) {
var result *edwards25519.Scalar
for i := 1; i <= total-threshold+1; i++ {
var reconstructedSum *edwards25519.Scalar
for j := 0; j < threshold; j++ {
oneLENumBytes := shuffle.BigIntToLEBytes(big.NewInt(1))
coeffNum, _ := edwards25519.NewScalar().SetCanonicalBytes(oneLENumBytes)
coeffDenom, _ := edwards25519.NewScalar().SetCanonicalBytes(oneLENumBytes)
for k := 0; k < threshold; k++ {
if j != k {
ikBytes := shuffle.BigIntToLEBytes(big.NewInt(int64(i + k)))
ijBytes := shuffle.BigIntToLEBytes(big.NewInt(int64(i + j)))
ikScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(ikBytes)
ijScalar, _ := edwards25519.NewScalar().SetCanonicalBytes(ijBytes)
coeffNum.Multiply(coeffNum, ikScalar)
ikScalar.Subtract(ikScalar, ijScalar)
coeffDenom.Multiply(coeffDenom, ikScalar)
}
}
coeffDenom.Invert(coeffDenom)
coeffNum.Multiply(coeffNum, coeffDenom)
reconstructedFrag := edwards25519.NewScalar().Multiply(coeffNum, shares[i+j-1])
if reconstructedSum == nil {
reconstructedSum = reconstructedFrag
} else {
reconstructedSum.Add(reconstructedSum, reconstructedFrag)
}
}
if result == nil {
result = reconstructedSum
assert.Equal(t, expected.Bytes(), result.Bytes())
} else if result.Equal(reconstructedSum) == 0 {
fmt.Println("mismatched reconstruction")
t.FailNow()
}
}
}
func TestGenerateShamirMatrix(t *testing.T) {
m := shuffle.GeneratePermutationMatrix(6)
sm := shuffle.ShamirSplitMatrix(m, 10, 3)
for xi, x := range sm {
for yi, y := range x {
verifyLagrange(t, y, m[xi][yi], 10, 3)
}
}
}
func TestMatrixDotProduct(t *testing.T) {
zeroBytes := shuffle.BigIntToLEBytes(big.NewInt(0))
oneBytes := shuffle.BigIntToLEBytes(big.NewInt(1))
twoBytes := shuffle.BigIntToLEBytes(big.NewInt(2))
threeBytes := shuffle.BigIntToLEBytes(big.NewInt(3))
fourBytes := shuffle.BigIntToLEBytes(big.NewInt(4))
zero, _ := edwards25519.NewScalar().SetCanonicalBytes(zeroBytes)
one, _ := edwards25519.NewScalar().SetCanonicalBytes(oneBytes)
two, _ := edwards25519.NewScalar().SetCanonicalBytes(twoBytes)
three, _ := edwards25519.NewScalar().SetCanonicalBytes(threeBytes)
four, _ := edwards25519.NewScalar().SetCanonicalBytes(fourBytes)
aMatrix := [][]*edwards25519.Scalar{
{two, two},
{zero, three},
{zero, four},
}
bMatrix := [][]*edwards25519.Scalar{
{two, one, two},
{three, two, four},
}
abMatrix := shuffle.GenerateDotProduct(aMatrix, bMatrix)
assert.Equal(t, byte(0x0a), abMatrix[0][0].Bytes()[0])
assert.Equal(t, byte(0x06), abMatrix[0][1].Bytes()[0])
assert.Equal(t, byte(0x0c), abMatrix[0][2].Bytes()[0])
assert.Equal(t, byte(0x09), abMatrix[1][0].Bytes()[0])
assert.Equal(t, byte(0x06), abMatrix[1][1].Bytes()[0])
assert.Equal(t, byte(0x0c), abMatrix[1][2].Bytes()[0])
assert.Equal(t, byte(0x0c), abMatrix[2][0].Bytes()[0])
assert.Equal(t, byte(0x08), abMatrix[2][1].Bytes()[0])
assert.Equal(t, byte(0x10), abMatrix[2][2].Bytes()[0])
}
func TestGenerateRandomBeaverTripleMatrixShares(t *testing.T) {
beaverTripleShares := shuffle.GenerateRandomBeaverTripleMatrixShares(6, 10, 3)
uMatrixShares := beaverTripleShares[0]
vMatrixShares := beaverTripleShares[1]
uvMatrixShares := beaverTripleShares[2]
uMatrix := shuffle.InterpolateMatrixShares(uMatrixShares, []int{1, 2, 3})
vMatrix := shuffle.InterpolateMatrixShares(vMatrixShares, []int{1, 2, 3})
uvMatrix := shuffle.InterpolateMatrixShares(uvMatrixShares, []int{1, 2, 3})
for x := 0; x < len(uMatrixShares); x++ {
for y := 0; y < len(uMatrixShares[0]); y++ {
verifyLagrange(t, uMatrixShares[x][y], uMatrix[x][y], 10, 3)
verifyLagrange(t, vMatrixShares[x][y], vMatrix[x][y], 10, 3)
verifyLagrange(t, uvMatrixShares[x][y], uvMatrix[x][y], 10, 3)
}
}
uvCheck := shuffle.GenerateDotProduct(uMatrix, vMatrix)
assert.Equal(t, uvMatrix, uvCheck)
}
func TestPermutationMatrix(t *testing.T) {
permutationMatrix1 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix2 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix3 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix4 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix := shuffle.GenerateDotProduct(permutationMatrix1, permutationMatrix2)
permutationMatrix = shuffle.GenerateDotProduct(permutationMatrix, permutationMatrix3)
permutationMatrix = shuffle.GenerateDotProduct(permutationMatrix, permutationMatrix4)
one, _ := edwards25519.NewScalar().SetCanonicalBytes(shuffle.BigIntToLEBytes(big.NewInt(1)))
for x := 0; x < 6; x++ {
sumX := edwards25519.NewScalar()
for y := 0; y < 6; y++ {
sumX.Add(sumX, permutationMatrix[x][y])
}
assert.Equal(t, sumX, one)
}
for y := 0; y < 6; y++ {
sumY := edwards25519.NewScalar()
for x := 0; x < 6; x++ {
sumY.Add(sumY, permutationMatrix[x][y])
}
assert.Equal(t, sumY, one)
}
}
func TestPermutationSharing(t *testing.T) {
permutationMatrix1 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix2 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix3 := shuffle.GeneratePermutationMatrix(6)
permutationMatrix4 := shuffle.GeneratePermutationMatrix(6)
permutationMatrixShares1 := shuffle.ShamirSplitMatrix(permutationMatrix1, 4, 3)
permutationMatrixShares2 := shuffle.ShamirSplitMatrix(permutationMatrix2, 4, 3)
permutationMatrixShares3 := shuffle.ShamirSplitMatrix(permutationMatrix3, 4, 3)
permutationMatrixShares4 := shuffle.ShamirSplitMatrix(permutationMatrix4, 4, 3)
inverseShareMatrix1 := make([][][]*edwards25519.Scalar, 4)
inverseShareMatrix2 := make([][][]*edwards25519.Scalar, 4)
inverseShareMatrix3 := make([][][]*edwards25519.Scalar, 4)
inverseShareMatrix4 := make([][][]*edwards25519.Scalar, 4)
for i := 0; i < 4; i++ {
inverseShareMatrix1[i] = make([][]*edwards25519.Scalar, 6)
inverseShareMatrix2[i] = make([][]*edwards25519.Scalar, 6)
inverseShareMatrix3[i] = make([][]*edwards25519.Scalar, 6)
inverseShareMatrix4[i] = make([][]*edwards25519.Scalar, 6)
for x := 0; x < 6; x++ {
inverseShareMatrix1[i][x] = make([]*edwards25519.Scalar, 6)
inverseShareMatrix2[i][x] = make([]*edwards25519.Scalar, 6)
inverseShareMatrix3[i][x] = make([]*edwards25519.Scalar, 6)
inverseShareMatrix4[i][x] = make([]*edwards25519.Scalar, 6)
for y := 0; y < 6; y++ {
inverseShareMatrix1[i][x][y] = permutationMatrixShares1[x][y][i]
inverseShareMatrix2[i][x][y] = permutationMatrixShares2[x][y][i]
inverseShareMatrix3[i][x][y] = permutationMatrixShares3[x][y][i]
inverseShareMatrix4[i][x][y] = permutationMatrixShares4[x][y][i]
}
}
}
beaverTripleShares1 := shuffle.GenerateRandomBeaverTripleMatrixShares(6, 4, 3)
beaverTripleShares2 := shuffle.GenerateRandomBeaverTripleMatrixShares(6, 4, 3)
beaverTripleShares3 := shuffle.GenerateRandomBeaverTripleMatrixShares(6, 4, 3)
beaverTriplesAShares1 := beaverTripleShares1[0]
beaverTriplesBShares1 := beaverTripleShares1[1]
beaverTriplesABShares1 := beaverTripleShares1[2]
beaverTriplesAShares2 := beaverTripleShares2[0]
beaverTriplesBShares2 := beaverTripleShares2[1]
beaverTriplesABShares2 := beaverTripleShares2[2]
beaverTriplesAShares3 := beaverTripleShares3[0]
beaverTriplesBShares3 := beaverTripleShares3[1]
beaverTriplesABShares3 := beaverTripleShares3[2]
inverseBeaverTriplesAShares1 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesBShares1 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesABShares1 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesAShares2 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesBShares2 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesABShares2 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesAShares3 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesBShares3 := make([][][]*edwards25519.Scalar, 4)
inverseBeaverTriplesABShares3 := make([][][]*edwards25519.Scalar, 4)
for i := 0; i < 4; i++ {
inverseBeaverTriplesAShares1[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesBShares1[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesABShares1[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesAShares2[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesBShares2[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesABShares2[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesAShares3[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesBShares3[i] = make([][]*edwards25519.Scalar, 6)
inverseBeaverTriplesABShares3[i] = make([][]*edwards25519.Scalar, 6)
for x := 0; x < 6; x++ {
inverseBeaverTriplesAShares1[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesBShares1[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesABShares1[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesAShares2[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesBShares2[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesABShares2[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesAShares3[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesBShares3[i][x] = make([]*edwards25519.Scalar, 6)
inverseBeaverTriplesABShares3[i][x] = make([]*edwards25519.Scalar, 6)
for y := 0; y < 6; y++ {
inverseBeaverTriplesAShares1[i][x][y] = beaverTriplesAShares1[x][y][i]
inverseBeaverTriplesBShares1[i][x][y] = beaverTriplesBShares1[x][y][i]
inverseBeaverTriplesABShares1[i][x][y] = beaverTriplesABShares1[x][y][i]
inverseBeaverTriplesAShares2[i][x][y] = beaverTriplesAShares2[x][y][i]
inverseBeaverTriplesBShares2[i][x][y] = beaverTriplesBShares2[x][y][i]
inverseBeaverTriplesABShares2[i][x][y] = beaverTriplesABShares2[x][y][i]
inverseBeaverTriplesAShares3[i][x][y] = beaverTriplesAShares3[x][y][i]
inverseBeaverTriplesBShares3[i][x][y] = beaverTriplesBShares3[x][y][i]
inverseBeaverTriplesABShares3[i][x][y] = beaverTriplesABShares3[x][y][i]
}
}
}
es1 := make([][][]*edwards25519.Scalar, 6)
fs1 := make([][][]*edwards25519.Scalar, 6)
es2 := make([][][]*edwards25519.Scalar, 6)
fs2 := make([][][]*edwards25519.Scalar, 6)
es3 := make([][][]*edwards25519.Scalar, 6)
fs3 := make([][][]*edwards25519.Scalar, 6)
for x := 0; x < 6; x++ {
es1[x] = make([][]*edwards25519.Scalar, 6)
fs1[x] = make([][]*edwards25519.Scalar, 6)
es2[x] = make([][]*edwards25519.Scalar, 6)
fs2[x] = make([][]*edwards25519.Scalar, 6)
es3[x] = make([][]*edwards25519.Scalar, 6)
fs3[x] = make([][]*edwards25519.Scalar, 6)
for y := 0; y < 6; y++ {
es1[x][y] = make([]*edwards25519.Scalar, 4)
fs1[x][y] = make([]*edwards25519.Scalar, 4)
es2[x][y] = make([]*edwards25519.Scalar, 4)
fs2[x][y] = make([]*edwards25519.Scalar, 4)
es3[x][y] = make([]*edwards25519.Scalar, 4)
fs3[x][y] = make([]*edwards25519.Scalar, 4)
for i := 0; i < 4; i++ {
es1[x][y][i] = edwards25519.NewScalar().Subtract(inverseShareMatrix1[i][x][y], inverseBeaverTriplesAShares1[i][x][y])
fs1[x][y][i] = edwards25519.NewScalar().Subtract(inverseShareMatrix2[i][x][y], inverseBeaverTriplesBShares1[i][x][y])
es2[x][y][i] = edwards25519.NewScalar().Subtract(inverseShareMatrix2[i][x][y], inverseBeaverTriplesAShares2[i][x][y])
fs2[x][y][i] = edwards25519.NewScalar().Subtract(inverseShareMatrix3[i][x][y], inverseBeaverTriplesBShares2[i][x][y])
es3[x][y][i] = edwards25519.NewScalar().Subtract(inverseShareMatrix3[i][x][y], inverseBeaverTriplesAShares3[i][x][y])
fs3[x][y][i] = edwards25519.NewScalar().Subtract(inverseShareMatrix4[i][x][y], inverseBeaverTriplesBShares3[i][x][y])
}
}
}
// e = a - u
// f = b - v
// ab = -e*f + f*a + e*b + uv, since -(a-u)(b-v) + a(b-v) + b(a-u) + uv = ab
e1 := shuffle.InterpolateMatrixShares(es1, []int{1, 2, 3, 4})
f1 := shuffle.InterpolateMatrixShares(fs1, []int{1, 2, 3, 4})
e2 := shuffle.InterpolateMatrixShares(es2, []int{1, 2, 3, 4})
f2 := shuffle.InterpolateMatrixShares(fs2, []int{1, 2, 3, 4})
e3 := shuffle.InterpolateMatrixShares(es3, []int{1, 2, 3, 4})
f3 := shuffle.InterpolateMatrixShares(fs3, []int{1, 2, 3, 4})
// mul(a, b) => <e> = <a> - <u>, <f> = <b> - <v>, <c> = -e * f + f * <a> + e * <b> + <uv>
ef1 := shuffle.GenerateDotProduct(e1, f1)
ef2 := shuffle.GenerateDotProduct(e2, f2)
ef3 := shuffle.GenerateDotProduct(e3, f3)
fa1 := make([][][]*edwards25519.Scalar, 4)
fa2 := make([][][]*edwards25519.Scalar, 4)
fa3 := make([][][]*edwards25519.Scalar, 4)
eb1 := make([][][]*edwards25519.Scalar, 4)
eb2 := make([][][]*edwards25519.Scalar, 4)
eb3 := make([][][]*edwards25519.Scalar, 4)
cs1 := make([][][]*edwards25519.Scalar, 4)
cs2 := make([][][]*edwards25519.Scalar, 4)
cs3 := make([][][]*edwards25519.Scalar, 4)
// cs := make([][][]*edwards25519.Scalar, 4)
inverseCS1 := make([][][]*edwards25519.Scalar, 6)
inverseCS3 := make([][][]*edwards25519.Scalar, 6)
for i := 0; i < 4; i++ {
fa1[i] = shuffle.GenerateDotProduct(inverseShareMatrix1[i], f1)
eb1[i] = shuffle.GenerateDotProduct(e1, inverseShareMatrix2[i])
fa2[i] = shuffle.GenerateDotProduct(inverseShareMatrix2[i], f2)
eb2[i] = shuffle.GenerateDotProduct(e2, inverseShareMatrix3[i])
fa3[i] = shuffle.GenerateDotProduct(inverseShareMatrix3[i], f3)
eb3[i] = shuffle.GenerateDotProduct(e3, inverseShareMatrix4[i])
cs1[i] = shuffle.AddMatrices(shuffle.ScalarMult(-1, ef1), fa1[i], eb1[i], inverseBeaverTriplesABShares1[i])
cs2[i] = shuffle.AddMatrices(shuffle.ScalarMult(-1, ef2), fa2[i], eb2[i], inverseBeaverTriplesABShares2[i])
cs3[i] = shuffle.AddMatrices(shuffle.ScalarMult(-1, ef3), fa3[i], eb3[i], inverseBeaverTriplesABShares3[i])
}
for x := 0; x < 6; x++ {
inverseCS1[x] = make([][]*edwards25519.Scalar, 6)
inverseCS3[x] = make([][]*edwards25519.Scalar, 6)
for y := 0; y < 6; y++ {
inverseCS1[x][y] = make([]*edwards25519.Scalar, 4)
inverseCS3[x][y] = make([]*edwards25519.Scalar, 4)
for i := 0; i < 4; i++ {
inverseCS1[x][y][i] = cs1[i][x][y]
inverseCS3[x][y][i] = cs3[i][x][y]
}
}
}
c1 := shuffle.InterpolateMatrixShares(inverseCS1, []int{1, 2, 3, 4})
c3 := shuffle.InterpolateMatrixShares(inverseCS3, []int{1, 2, 3, 4})
c := shuffle.GenerateDotProduct(c1, c3)
ab := shuffle.GenerateDotProduct(permutationMatrix1, permutationMatrix2)
abc := shuffle.GenerateDotProduct(ab, permutationMatrix3)
abcd := shuffle.GenerateDotProduct(abc, permutationMatrix4)
for x := 0; x < 6; x++ {
for y := 0; y < 6; y++ {
assert.ElementsMatch(t, c[x][y].Bytes(), abcd[x][y].Bytes())
}
}
}
// func TestIlanBeaverMultiMatrixSharing(t *testing.T) {
// fmt.Println("start")
// start := time.Now()
// ri := [65][][][]*edwards25519.Scalar{}
// rj := [65][][][]*edwards25519.Scalar{}
// next := time.Now()
// diff := next.Sub(start)
// fmt.Println(diff)
// start = next
// fmt.Println("generating random and inverse matrices")
// var wg sync.WaitGroup
// for i := 0; i <= 64; i++ {
// wg.Add(1)
// i := i
// go func() {
// defer wg.Done()
// rs := crypto.GenerateRandomMatrixAndInverseShares(80, 4, 3)
// ri[i] = make([][][]*edwards25519.Scalar, 4)
// rj[i] = make([][][]*edwards25519.Scalar, 4)
// for j := 0; j < 4; j++ {
// ri[i][j] = make([][]*edwards25519.Scalar, 80)
// rj[i][j] = make([][]*edwards25519.Scalar, 80)
// for x := 0; x < 80; x++ {
// ri[i][j][x] = make([]*edwards25519.Scalar, 80)
// rj[i][j][x] = make([]*edwards25519.Scalar, 80)
// for y := 0; y < 80; y++ {
// ri[i][j][x][y] = rs[0][x][y][j]
// rj[i][j][x][y] = rs[1][x][y][j]
// }
// }
// }
// }()
// }
// wg.Wait()
// next = time.Now()
// diff = next.Sub(start)
// fmt.Println(diff)
// start = next
// fmt.Println("generating permutation matrices")
// rxr := [64][][][]*edwards25519.Scalar{}
// for i := 1; i <= 64; i++ {
// wg.Add(1)
// i := i
// go func() {
// defer wg.Done()
// x := crypto.GeneratePermutationMatrix(80)
// xs := crypto.ShamirSplitMatrix(x, 4, 3)
// ixs := make([][][]*edwards25519.Scalar, 4)
// rxr[i-1] = make([][][]*edwards25519.Scalar, 4)
// for j := 0; j < 4; j++ {
// ixs[j] = make([][]*edwards25519.Scalar, 80)
// rxr[i-1][j] = make([][]*edwards25519.Scalar, 80)
// for x := 0; x < 80; x++ {
// ixs[j][x] = make([]*edwards25519.Scalar, 80)
// rxr[i-1][j][x] = make([]*edwards25519.Scalar, 80)
// for y := 0; y < 80; y++ {
// ixs[j][x][y] = xs[x][y][j]
// }
// }
// }
// for j := 0; j < 4; j++ {
// rxrij := crypto.GenerateDotProduct(ri[i-1][j], ixs[j])
// rxr[i-1][j] = crypto.GenerateDotProduct(rxrij, rj[i][j])
// }
// }()
// }
// wg.Wait()
// next = time.Now()
// diff = next.Sub(start)
// fmt.Println(diff)
// start = next
// fmt.Println("swapping elements for interpolation")
// irxr := [64][][][]*edwards25519.Scalar{}
// for i := 0; i < 64; i++ {
// wg.Add(1)
// i := i
// go func() {
// defer wg.Done()
// irxr[i] = make([][][]*edwards25519.Scalar, 80)
// for x := 0; x < 80; x++ {
// irxr[i][x] = make([][]*edwards25519.Scalar, 80)
// for y := 0; y < 80; y++ {
// irxr[i][x][y] = make([]*edwards25519.Scalar, 4)
// for j := 0; j < 4; j++ {
// irxr[i][x][y][j] = rxr[i][j][x][y]
// }
// }
// }
// }()
// }
// wg.Wait()
// rxri := [][]*edwards25519.Scalar{}
// next = time.Now()
// diff = next.Sub(start)
// fmt.Println(diff)
// start = next
// fmt.Println("interpolating")
// for i := 0; i < 64; i++ {
// next := crypto.InterpolateMatrixShares(irxr[i], []int{1, 2, 3})
// if i == 0 {
// rxri = next
// } else {
// rxri = crypto.GenerateDotProduct(rxri, next)
// }
// }
// rpms := make([][][]*edwards25519.Scalar, 4)
// next = time.Now()
// diff = next.Sub(start)
// fmt.Println(diff)
// start = next
// fmt.Println("generating intermediary dot products")
// for i := 1; i <= 4; i++ {
// rpms[i-1] = crypto.GenerateDotProduct(crypto.GenerateDotProduct(rj[0][i-1], rxri), ri[64][i-1])
// }
// final := make([][][]*edwards25519.Scalar, 80)
// for x := 0; x < 80; x++ {
// final[x] = make([][]*edwards25519.Scalar, 80)
// for y := 0; y < 80; y++ {
// final[x][y] = make([]*edwards25519.Scalar, 4)
// for j := 0; j < 4; j++ {
// final[x][y][j] = rpms[j][x][y]
// }
// }
// }
// next = time.Now()
// diff = next.Sub(start)
// fmt.Println(diff)
// start = next
// fmt.Println("final interpolation")
// rpm := crypto.InterpolateMatrixShares(final, []int{1, 2, 3})
// for x := 0; x < 80; x++ {
// for y := 0; y < 80; y++ {
// fmt.Printf("%x, ", rpm[x][y].Bytes()[0])
// }
// fmt.Println()
// }
// t.Fail()
// }

View File

@ -1,624 +0,0 @@
package crypto
import (
"bytes"
"crypto"
"crypto/rand"
"encoding/binary"
"math/big"
"github.com/cloudflare/circl/sign/ed448"
"github.com/iden3/go-iden3-crypto/poseidon"
"github.com/pkg/errors"
"go.uber.org/zap"
"golang.org/x/crypto/sha3"
"source.quilibrium.com/quilibrium/monorepo/node/keys"
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
"source.quilibrium.com/quilibrium/monorepo/node/tries"
"source.quilibrium.com/quilibrium/monorepo/vdf"
)
type WesolowskiFrameProver struct {
logger *zap.Logger
}
func NewWesolowskiFrameProver(logger *zap.Logger) *WesolowskiFrameProver {
return &WesolowskiFrameProver{
logger,
}
}
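// ProveMasterClockFrame produces the next master clock frame by running the
// Wesolowski VDF over the filter, frame number, difficulty and previous
// frame output.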
func (w *WesolowskiFrameProver) ProveMasterClockFrame(
previousFrame *protobufs.ClockFrame,
timestamp int64,
difficulty uint32,
) (*protobufs.ClockFrame, error) {
input := []byte{}
input = append(input, previousFrame.Filter...)
input = binary.BigEndian.AppendUint64(input, previousFrame.FrameNumber+1)
input = binary.BigEndian.AppendUint32(input, difficulty)
input = append(input, previousFrame.Output[:]...)
b := sha3.Sum256(input)
o := vdf.WesolowskiSolve(b, difficulty)
previousSelectorBytes := [516]byte{}
copy(previousSelectorBytes[:], previousFrame.Output[:516])
parent, err := poseidon.HashBytes(previousSelectorBytes[:])
if err != nil {
return nil, errors.Wrap(err, "prove clock frame")
}
frame := &protobufs.ClockFrame{
Filter: previousFrame.Filter,
FrameNumber: previousFrame.FrameNumber + 1,
Timestamp: timestamp,
Difficulty: difficulty,
ParentSelector: parent.FillBytes(make([]byte, 32)),
Input: previousFrame.Output,
AggregateProofs: []*protobufs.InclusionAggregateProof{},
Output: o[:],
}
return frame, nil
}
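// VerifyMasterClockFrame checks the structure, VDF proof and parent selector
// of a master clock frame.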
func (w *WesolowskiFrameProver) VerifyMasterClockFrame(
frame *protobufs.ClockFrame,
) error {
input := []byte{}
input = append(input, frame.Filter...)
input = binary.BigEndian.AppendUint64(input, frame.FrameNumber)
input = binary.BigEndian.AppendUint32(input, frame.Difficulty)
input = append(input, frame.Input...)
if len(frame.Input) < 516 {
return errors.Wrap(
errors.New("invalid input"),
"verify clock frame",
)
}
if len(frame.AggregateProofs) > 0 {
return errors.Wrap(
errors.New("invalid input"),
"verify clock frame",
)
}
if frame.PublicKeySignature != nil {
return errors.Wrap(
errors.New("invalid input"),
"verify clock frame",
)
}
if len(frame.Input) != 516 {
return errors.Wrap(
errors.New("invalid input"),
"verify clock frame",
)
}
if len(frame.Output) != 516 {
return errors.Wrap(
errors.New("invalid output"),
"verify clock frame",
)
}
b := sha3.Sum256(input)
proof := [516]byte{}
copy(proof[:], frame.Output)
if !vdf.WesolowskiVerify(b, frame.Difficulty, proof) {
w.logger.Error("invalid proof",
zap.Binary("filter", frame.Filter),
zap.Uint64("frame_number", frame.FrameNumber),
zap.Uint32("difficulty", frame.Difficulty),
zap.Binary("frame_input", frame.Input),
zap.Binary("frame_output", frame.Output),
)
return errors.Wrap(
errors.New("invalid proof"),
"verify clock frame",
)
}
previousSelectorBytes := [516]byte{}
copy(previousSelectorBytes[:], frame.Input[:516])
parent, err := poseidon.HashBytes(previousSelectorBytes[:])
if err != nil {
return errors.Wrap(err, "verify clock frame")
}
selector := new(big.Int).SetBytes(frame.ParentSelector)
if parent.Cmp(selector) != 0 {
return errors.Wrap(
errors.New("selector did not match input"),
"verify clock frame",
)
}
return nil
}
func (w *WesolowskiFrameProver) CreateMasterGenesisFrame(
filter []byte,
seed []byte,
difficulty uint32,
) (
*protobufs.ClockFrame,
error,
) {
b := sha3.Sum256(seed)
o := vdf.WesolowskiSolve(b, difficulty)
inputMessage := o[:]
w.logger.Debug("proving genesis frame")
input := []byte{}
input = append(input, filter...)
input = binary.BigEndian.AppendUint64(input, 0)
input = binary.BigEndian.AppendUint32(input, difficulty)
if bytes.Equal(seed, []byte{0x00}) {
value := [516]byte{}
input = append(input, value[:]...)
} else {
input = append(input, seed...)
}
b = sha3.Sum256(input)
o = vdf.WesolowskiSolve(b, difficulty)
frame := &protobufs.ClockFrame{
Filter: filter,
FrameNumber: 0,
Timestamp: 0,
Difficulty: difficulty,
Input: inputMessage,
Output: o[:],
ParentSelector: []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
AggregateProofs: []*protobufs.InclusionAggregateProof{},
PublicKeySignature: nil,
}
return frame, nil
}
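// ProveDataClockFrame produces and signs the next data clock frame, binding
// the prover address, inclusion commitments and previous frame output into
// the VDF input.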
func (w *WesolowskiFrameProver) ProveDataClockFrame(
previousFrame *protobufs.ClockFrame,
commitments [][]byte,
aggregateProofs []*protobufs.InclusionAggregateProof,
provingKey crypto.Signer,
timestamp int64,
difficulty uint32,
) (*protobufs.ClockFrame, error) {
var pubkey []byte
pubkeyType := keys.KeyTypeEd448
ed448PublicKey, ok := provingKey.Public().(ed448.PublicKey)
if ok {
pubkey = []byte(ed448PublicKey)
} else {
return nil, errors.Wrap(
errors.New("no valid signature provided"),
"prove clock frame",
)
}
h, err := poseidon.HashBytes(pubkey)
if err != nil {
return nil, errors.Wrap(
errors.New("could not hash proving key"),
"prove clock frame",
)
}
address := h.Bytes()
input := []byte{}
input = append(input, previousFrame.Filter...)
input = binary.BigEndian.AppendUint64(input, previousFrame.FrameNumber+1)
input = binary.BigEndian.AppendUint64(input, uint64(timestamp))
input = binary.BigEndian.AppendUint32(input, difficulty)
input = append(input, address...)
input = append(input, previousFrame.Output[:]...)
commitmentInput := []byte{}
for _, commitment := range commitments {
commitmentInput = append(commitmentInput, commitment...)
}
input = append(input, commitmentInput...)
b := sha3.Sum256(input)
o := vdf.WesolowskiSolve(b, difficulty)
// TODO: make this configurable for signing algorithms that allow
// user-supplied hash functions
signature, err := provingKey.Sign(
rand.Reader,
append(append([]byte{}, b[:]...), o[:]...),
crypto.Hash(0),
)
if err != nil {
return nil, errors.Wrap(
err,
"prove",
)
}
previousSelectorBytes := [516]byte{}
copy(previousSelectorBytes[:], previousFrame.Output[:516])
parent, err := poseidon.HashBytes(previousSelectorBytes[:])
if err != nil {
return nil, errors.Wrap(err, "prove clock frame")
}
frame := &protobufs.ClockFrame{
Filter: previousFrame.Filter,
FrameNumber: previousFrame.FrameNumber + 1,
Timestamp: timestamp,
Difficulty: difficulty,
ParentSelector: parent.FillBytes(make([]byte, 32)),
Input: append(
append([]byte{}, previousFrame.Output...),
commitmentInput...,
),
AggregateProofs: aggregateProofs,
Output: o[:],
}
switch pubkeyType {
case keys.KeyTypeEd448:
frame.PublicKeySignature = &protobufs.ClockFrame_PublicKeySignatureEd448{
PublicKeySignatureEd448: &protobufs.Ed448Signature{
Signature: signature,
PublicKey: &protobufs.Ed448PublicKey{
KeyValue: pubkey,
},
},
}
default:
return nil, errors.Wrap(
errors.New("unsupported proving key"),
"prove clock frame",
)
}
return frame, nil
}
func (w *WesolowskiFrameProver) CreateDataGenesisFrame(
filter []byte,
origin []byte,
difficulty uint32,
inclusionProof *InclusionAggregateProof,
proverKeys [][]byte,
preDusk bool,
) (*protobufs.ClockFrame, *tries.RollingFrecencyCritbitTrie, error) {
frameProverTrie := &tries.RollingFrecencyCritbitTrie{}
for _, s := range proverKeys {
addr, err := poseidon.HashBytes(s)
if err != nil {
panic(err)
}
addrBytes := addr.Bytes()
addrBytes = append(make([]byte, 32-len(addrBytes)), addrBytes...)
frameProverTrie.Add(addrBytes, 0)
}
w.logger.Info("proving genesis frame")
input := []byte{}
input = append(input, filter...)
input = binary.BigEndian.AppendUint64(input, 0)
input = binary.BigEndian.AppendUint64(input, 0)
input = binary.BigEndian.AppendUint32(input, difficulty)
input = append(input, origin...)
if !preDusk {
input = append(input, inclusionProof.AggregateCommitment...)
}
b := sha3.Sum256(input)
o := vdf.WesolowskiSolve(b, difficulty)
commitments := []*protobufs.InclusionCommitment{}
for i, commit := range inclusionProof.InclusionCommitments {
commitments = append(commitments, &protobufs.InclusionCommitment{
Filter: filter,
FrameNumber: 0,
Position: uint32(i),
TypeUrl: commit.TypeUrl,
Data: commit.Data,
Commitment: commit.Commitment,
})
}
frame := &protobufs.ClockFrame{
Filter: filter,
FrameNumber: 0,
Timestamp: 0,
Difficulty: difficulty,
Input: append(
append([]byte{}, origin...),
inclusionProof.AggregateCommitment...,
),
Output: o[:],
ParentSelector: []byte{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
AggregateProofs: []*protobufs.InclusionAggregateProof{
{
Filter: filter,
FrameNumber: 0,
InclusionCommitments: commitments,
Proof: inclusionProof.Proof,
},
},
PublicKeySignature: nil,
}
return frame, frameProverTrie, nil
}
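// VerifyDataClockFrame checks the issuer signature, VDF proof and parent
// selector of a data clock frame.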
func (w *WesolowskiFrameProver) VerifyDataClockFrame(
frame *protobufs.ClockFrame,
) error {
var pubkey []byte
var signature []byte
pubkeyType := keys.KeyTypeEd448
ed448PublicKey := frame.GetPublicKeySignatureEd448()
if ed448PublicKey != nil {
pubkey = ed448PublicKey.PublicKey.KeyValue
signature = ed448PublicKey.Signature
} else {
return errors.Wrap(
errors.New("no valid signature provided"),
"verify clock frame",
)
}
h, err := poseidon.HashBytes(pubkey)
if err != nil {
return errors.Wrap(
errors.New("could not hash proving key"),
"verify clock frame",
)
}
address := h.Bytes()
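// reconstruct the VDF challenge input: filter || frame number || timestamp ||
// difficulty || prover address || frame input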
input := []byte{}
input = append(input, frame.Filter...)
input = binary.BigEndian.AppendUint64(input, frame.FrameNumber)
input = binary.BigEndian.AppendUint64(input, uint64(frame.Timestamp))
input = binary.BigEndian.AppendUint32(input, frame.Difficulty)
input = append(input, address...)
input = append(input, frame.Input...)
if len(frame.Input) < 516 {
return errors.Wrap(
errors.New("invalid input"),
"verify clock frame",
)
}
if len(frame.Output) != 516 {
return errors.Wrap(
errors.New("invalid output"),
"verify clock frame",
)
}
b := sha3.Sum256(input)
proof := [516]byte{}
copy(proof[:], frame.Output)
// TODO: make this configurable for signing algorithms that allow
// user-supplied hash functions
switch pubkeyType {
case keys.KeyTypeEd448:
if len(pubkey) != 57 || len(signature) != 114 || !ed448.VerifyAny(
pubkey,
append(append([]byte{}, b[:]...), frame.Output...),
signature,
crypto.Hash(0),
) {
return errors.Wrap(
errors.New("invalid signature for issuer"),
"verify clock frame",
)
}
}
if !vdf.WesolowskiVerify(b, frame.Difficulty, proof) {
return errors.Wrap(
errors.New("invalid proof"),
"verify clock frame",
)
}
previousSelectorBytes := [516]byte{}
copy(previousSelectorBytes[:], frame.Input[:516])
parent, err := poseidon.HashBytes(previousSelectorBytes[:])
if err != nil {
return errors.Wrap(err, "verify clock frame")
}
selector := new(big.Int).SetBytes(frame.ParentSelector)
if parent.Cmp(selector) != 0 {
return errors.Wrap(
errors.New("selector did not match input"),
"verify clock frame",
)
}
return nil
}
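// GenerateWeakRecursiveProofIndex derives the frame number referenced by a weak
// recursive proof: the Poseidon hash of the frame output reduced modulo the
// current frame number.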
func (w *WesolowskiFrameProver) GenerateWeakRecursiveProofIndex(
frame *protobufs.ClockFrame,
) (uint64, error) {
hash, err := poseidon.HashBytes(frame.Output)
if err != nil {
return 0, errors.Wrap(err, "generate weak recursive proof")
}
return hash.Mod(
hash,
new(big.Int).SetUint64(frame.FrameNumber),
).Uint64(), nil
}
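// FetchRecursiveProof serializes the frame's verification material — filter,
// frame number, timestamp, difficulty, prover address, input and output — for
// use as a weak recursive proof.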
func (w *WesolowskiFrameProver) FetchRecursiveProof(
frame *protobufs.ClockFrame,
) []byte {
var pubkey []byte
ed448PublicKey := frame.GetPublicKeySignatureEd448()
if ed448PublicKey != nil {
pubkey = ed448PublicKey.PublicKey.KeyValue
} else {
return nil
}
h, err := poseidon.HashBytes(pubkey)
if err != nil {
return nil
}
address := h.Bytes()
input := []byte{}
input = append(input, frame.Filter...)
input = binary.BigEndian.AppendUint64(input, frame.FrameNumber)
input = binary.BigEndian.AppendUint64(input, uint64(frame.Timestamp))
input = binary.BigEndian.AppendUint32(input, frame.Difficulty)
input = append(input, address...)
input = append(input, frame.Input...)
input = append(input, frame.Output...)
return input
}
func (w *WesolowskiFrameProver) VerifyWeakRecursiveProof(
frame *protobufs.ClockFrame,
proof []byte,
deepVerifier *protobufs.ClockFrame,
) bool {
hash, err := poseidon.HashBytes(frame.Output)
if err != nil {
w.logger.Debug("could not hash output")
return false
}
frameNumber := hash.Mod(
hash,
new(big.Int).SetUint64(frame.FrameNumber),
).Uint64()
if len(proof) < 1084 {
w.logger.Debug("invalid proof size")
return false
}
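// proof layout: filter || frame number (8) || timestamp (8) || difficulty (4) ||
// prover address (32, skipped here) || input, where the final 516 bytes of
// input are the VDF output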
filter := proof[:len(frame.Filter)]
check := binary.BigEndian.Uint64(
proof[len(frame.Filter) : len(frame.Filter)+8],
)
timestamp := binary.BigEndian.Uint64(
proof[len(frame.Filter)+8 : len(frame.Filter)+16],
)
difficulty := binary.BigEndian.Uint32(
proof[len(frame.Filter)+16 : len(frame.Filter)+20],
)
input := proof[len(frame.Filter)+52:]
if check != frameNumber ||
!bytes.Equal(filter, frame.Filter) ||
int64(timestamp) >= frame.Timestamp ||
difficulty > frame.Difficulty ||
len(input) < 1032 {
w.logger.Debug(
"check failed",
zap.Bool("failed_frame_number", check != frameNumber),
zap.Bool("failed_filter", !bytes.Equal(filter, frame.Filter)),
zap.Bool("failed_timestamp", int64(timestamp) >= frame.Timestamp),
zap.Bool("failed_difficulty", difficulty > frame.Difficulty),
zap.Bool("failed_input_size", len(input) < 1032),
)
return false
}
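// when a deep-verifier frame is supplied, the decoded header and input must
// match that frame exactly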
if deepVerifier != nil && (check != deepVerifier.FrameNumber ||
!bytes.Equal(filter, deepVerifier.Filter) ||
int64(timestamp) != deepVerifier.Timestamp ||
difficulty != deepVerifier.Difficulty ||
!bytes.Equal(input[:len(input)-516], deepVerifier.Input)) {
return false
}
b := sha3.Sum256(input[:len(input)-516])
output := [516]byte{}
copy(output[:], input[len(input)-516:])
if vdf.WesolowskiVerify(b, difficulty, output) {
w.logger.Debug("verification succeeded")
return true
} else {
w.logger.Debug("verification failed")
return false
}
}
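// CalculateChallengeProof runs the Wesolowski VDF over a challenge prefixed
// with the core index; difficulty starts at 200000, decreases by increment/4,
// and is floored at 25000.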
func (w *WesolowskiFrameProver) CalculateChallengeProof(
challenge []byte,
core uint32,
increment uint32,
) ([]byte, error) {
difficulty := 200000 - (increment / 4)
if difficulty < 25000 || increment > 800000 {
difficulty = 25000
}
instanceInput := binary.BigEndian.AppendUint32([]byte{}, core)
instanceInput = append(instanceInput, challenge...)
b := sha3.Sum256(instanceInput)
o := vdf.WesolowskiSolve(b, uint32(difficulty))
output := make([]byte, 516)
copy(output[:], o[:])
return output, nil
}
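// VerifyChallengeProof recomputes the same difficulty schedule and verifies the
// 516-byte proof against the core-prefixed challenge.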
func (w *WesolowskiFrameProver) VerifyChallengeProof(
challenge []byte,
increment uint32,
core uint32,
proof []byte,
) bool {
difficulty := 200000 - (increment / 4)
if difficulty < 25000 || increment > 800000 {
difficulty = 25000
}
if len(proof) != 516 {
return false
}
instanceInput := binary.BigEndian.AppendUint32([]byte{}, core)
instanceInput = append(instanceInput, challenge...)
b := sha3.Sum256(instanceInput)
check := vdf.WesolowskiVerify(b, difficulty, [516]byte(proof))
return check
}

@ -1,36 +0,0 @@
package crypto_test
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
"source.quilibrium.com/quilibrium/monorepo/node/crypto"
)
func TestMasterProve(t *testing.T) {
l, _ := zap.NewProduction()
w := crypto.NewWesolowskiFrameProver(l)
m, err := w.CreateMasterGenesisFrame([]byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}, bytes.Repeat([]byte{0x00}, 516), 10000)
assert.NoError(t, err)
next, err := w.ProveMasterClockFrame(m, time.Now().UnixMilli(), 10000)
assert.NoError(t, err)
err = w.VerifyMasterClockFrame(next)
assert.NoError(t, err)
}
func TestChallengeProof(t *testing.T) {
l, _ := zap.NewProduction()
w := crypto.NewWesolowskiFrameProver(l)
proofs, err := w.CalculateChallengeProof([]byte{0x01, 0x02, 0x03}, 0, 1)
assert.NoError(t, err)
assert.True(t, w.VerifyChallengeProof([]byte{0x01, 0x02, 0x03}, 1, 0, proofs))
}

@ -1,18 +0,0 @@
package execution
import (
"source.quilibrium.com/quilibrium/monorepo/node/protobufs"
)
type ExecutionEngine interface {
GetName() string
GetSupportedApplications() []*protobufs.Application
Start() <-chan error
Stop(force bool) <-chan error
ProcessMessage(
address []byte,
message *protobufs.Message,
) ([]*protobufs.Message, error)
GetPeerInfo() *protobufs.PeerInfoResponse
GetFrame() *protobufs.ClockFrame
}

File diff suppressed because it is too large

@ -7,40 +7,26 @@ toolchain go1.22.1
// A necessary hack until source.quilibrium.com is open to all
replace source.quilibrium.com/quilibrium/monorepo/nekryptology => ../nekryptology
replace source.quilibrium.com/quilibrium/monorepo/bls48581 => ../bls48581
replace source.quilibrium.com/quilibrium/monorepo/vdf => ../vdf
replace github.com/libp2p/go-libp2p => ../go-libp2p
replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream
replace source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub => ../go-libp2p-blossomsub
replace github.com/cockroachdb/pebble => ../pebble
require (
filippo.io/edwards25519 v1.0.0-rc.1
github.com/cockroachdb/pebble v0.0.0-20231210175920-b4d301aeb46a
github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9
github.com/libp2p/go-libp2p v0.35.1
github.com/libp2p/go-libp2p-gostream v0.6.0
github.com/libp2p/go-libp2p v0.35.4
github.com/libp2p/go-libp2p-kad-dht v0.23.0
google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v2 v2.4.0
source.quilibrium.com/quilibrium/monorepo/bls48581 v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/go-libp2p-blossomsub v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/nekryptology v0.0.0-00010101000000-000000000000
source.quilibrium.com/quilibrium/monorepo/vdf v0.0.0-00010101000000-000000000000
)
require (
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597 // indirect
github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326 // indirect
filippo.io/edwards25519 v1.0.0-rc.1 // indirect
github.com/pion/datachannel v1.5.6 // indirect
github.com/pion/dtls/v2 v2.2.11 // indirect
github.com/pion/ice/v2 v2.3.24 // indirect
github.com/pion/ice/v2 v2.3.25 // indirect
github.com/pion/interceptor v0.1.29 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/mdns v0.0.12 // indirect
@ -54,43 +40,20 @@ require (
github.com/pion/transport/v2 v2.2.5 // indirect
github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/webrtc/v3 v3.2.40 // indirect
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f // indirect
go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.uber.org/mock v0.4.0 // indirect
)
require (
github.com/DataDog/zstd v1.4.5 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401 // indirect
github.com/bwesterb/go-ristretto v1.2.3 // indirect
github.com/charmbracelet/bubbletea v0.24.2
github.com/charmbracelet/lipgloss v0.9.1
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/gnark-crypto v0.5.3 // indirect
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/reflow v0.3.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/multiformats/go-multiaddr v0.12.4
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
golang.org/x/term v0.21.0
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
@ -122,12 +85,12 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.15
github.com/iden3/go-iden3-crypto v0.0.16
github.com/ipfs/boxo v0.8.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-datastore v0.6.0 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
@ -155,7 +118,7 @@ require (
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
@ -168,7 +131,7 @@ require (
github.com/onsi/ginkgo/v2 v2.15.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pkg/errors v0.9.1
github.com/polydawn/refmt v0.89.0 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
@ -179,13 +142,12 @@ require (
github.com/quic-go/quic-go v0.44.0 // indirect
github.com/quic-go/webtransport-go v0.8.0 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/stretchr/testify v1.9.0
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/dig v1.17.1 // indirect
go.uber.org/fx v1.21.1 // indirect
go.uber.org/fx v1.22.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.24.0
@ -193,7 +155,7 @@ require (
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
gonum.org/v1/gonum v0.11.0 // indirect

@ -12,12 +12,8 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
@ -47,42 +43,23 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbletea v0.24.2 h1:uaQIKx9Ai6Gdh5zpTbGiWpytMU+CfsPp06RaW2cx/SY=
github.com/charmbracelet/bubbletea v0.24.2/go.mod h1:XdrNrV4J8GiyshTtx3DNuYkR1FDaJmO3l2nejekbsgg=
github.com/charmbracelet/lipgloss v0.9.1 h1:PNyd3jvaJbg4jRHKWXnCj1akQm4rh8dbEzN1p/u1KWg=
github.com/charmbracelet/lipgloss v0.9.1/go.mod h1:1mPmG4cxScwUQALAAnacHaigiiHB9Pmr+v1VEawJl6I=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/consensys/bavard v0.1.8-0.20210915155054-088da2f7f54a/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.5.3 h1:4xLFGZR3NWEH2zy+YzvzHicpToQR8FXFbfLNvpGB+rE=
github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -94,10 +71,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPc
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597 h1:xKCSqM+c9FjQIr0Qacn9m7x0kv/opDWGr/nvCowFCok=
github.com/deiu/gon3 v0.0.0-20230411081920-f0f8f879f597/go.mod h1:r8Pv5x6dxChq4mb1ZqzTyK3y9w8wDzWt55XAJpfSq34=
github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9 h1:xs255gi9FPRuCW+Ud8lQOBXBGHqM8cqqmoRfGokK3f0=
github.com/deiu/rdf2go v0.0.0-20240619132609-81222e324bb9/go.mod h1:d+9YsU6N5OuirjLEOp23T2/+S7OLByerfuv1f89iy90=
github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
@ -122,13 +95,9 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -201,8 +170,8 @@ github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE0
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
@ -221,8 +190,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs=
github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
@ -306,10 +275,6 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326 h1:YP3lfXXYiQV5MKeUqVnxRP5uuMQTLPx+PGYm1UBoU98=
github.com/linkeddata/gojsonld v0.0.0-20170418210642-4f5db6791326/go.mod h1:nfqkuSNlsk1bvti/oa7TThx4KmRMBmSxf3okHI9wp3E=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
@ -317,11 +282,6 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
@ -343,14 +303,6 @@ github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
@ -395,15 +347,13 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg=
github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI=
github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs=
github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
@ -441,7 +391,6 @@ github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU=
github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -470,17 +419,11 @@ github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f h1:L2/fBPABieQnQzfV40k2Zw7IcvZbt0CN5TgwUl8zDCs=
github.com/rychipman/easylex v0.0.0-20160129204217-49ee7767142f/go.mod h1:MZ2GRTcqmve6EoSbErWgCR+Ash4p8Gc5esHe8MDErss=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
@ -556,8 +499,8 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0=
go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@ -679,7 +622,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -702,8 +644,6 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

Some files were not shown because too many files have changed in this diff