Mirror of https://github.com/0glabs/0g-chain.git (synced 2024-11-14 12:05:18 +00:00)

commit 655bb69313 (parent 81f8c541ca)
remove das module
@@ -5,8 +5,6 @@ import (
"testing"
"time"

"github.com/0glabs/0g-chain/chaincfg"

"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"

@@ -5,8 +5,6 @@ import (
"testing"
"time"

"github.com/0glabs/0g-chain/chaincfg"

sdkmath "cosmossdk.io/math"
"github.com/cosmos/cosmos-sdk/client"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"

app/app.go (12 lines changed)

@@ -115,9 +115,7 @@ import (
council "github.com/0glabs/0g-chain/x/council/v1"
councilkeeper "github.com/0glabs/0g-chain/x/council/v1/keeper"
counciltypes "github.com/0glabs/0g-chain/x/council/v1/types"
das "github.com/0glabs/0g-chain/x/das/v1"
daskeeper "github.com/0glabs/0g-chain/x/das/v1/keeper"
dastypes "github.com/0glabs/0g-chain/x/das/v1/types"

evmutil "github.com/0glabs/0g-chain/x/evmutil"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"

@@ -192,7 +190,6 @@ var (
metrics.AppModuleBasic{},
consensus.AppModuleBasic{},
council.AppModuleBasic{},
das.AppModuleBasic{},
)

// module account permissions

@@ -300,7 +297,6 @@ type App struct {
consensusParamsKeeper consensusparamkeeper.Keeper

CouncilKeeper councilkeeper.Keeper
DasKeeper daskeeper.Keeper

// make scoped keepers public for test purposes
ScopedIBCKeeper capabilitykeeper.ScopedKeeper

@@ -356,7 +352,6 @@ func NewApp(
consensusparamtypes.StoreKey, crisistypes.StoreKey,

counciltypes.StoreKey,
dastypes.StoreKey,
)
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey)
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)

@@ -778,7 +773,6 @@ func NewApp(
app.CouncilKeeper = councilkeeper.NewKeeper(
keys[counciltypes.StoreKey], appCodec, app.stakingKeeper,
)
app.DasKeeper = daskeeper.NewKeeper(keys[dastypes.StoreKey], appCodec, app.stakingKeeper)

// create the module manager (Note: Any module instantiated in the module manager that is later modified
// must be passed by reference here.)

@@ -825,7 +819,6 @@ func NewApp(
metrics.NewAppModule(options.TelemetryOptions),

council.NewAppModule(app.CouncilKeeper, app.stakingKeeper),
das.NewAppModule(app.DasKeeper),
)

// Warning: Some begin blockers must run before others. Ensure the dependencies are understood before modifying this list.

@@ -883,7 +876,6 @@ func NewApp(
packetforwardtypes.ModuleName,

counciltypes.ModuleName,
dastypes.ModuleName,
)

// Warning: Some end blockers must run before others. Ensure the dependencies are understood before modifying this list.

@@ -931,7 +923,6 @@ func NewApp(
packetforwardtypes.ModuleName,

counciltypes.ModuleName,
dastypes.ModuleName,
)

// Warning: Some init genesis methods must run before others. Ensure the dependencies are understood before modifying this list

@@ -977,7 +968,6 @@ func NewApp(
packetforwardtypes.ModuleName,

counciltypes.ModuleName,
dastypes.ModuleName,

crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
)

@@ -5,7 +5,6 @@ import (
"time"

sdkmath "cosmossdk.io/math"
"github.com/0glabs/0g-chain/chaincfg"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
sdk "github.com/cosmos/cosmos-sdk/types"

@@ -44,7 +43,7 @@ func (suite *tallyHandlerSuite) SetupTest() {

stakingKeeper := *suite.app.GetStakingKeeper()
suite.staking = stakingHelper{stakingKeeper}
suite.staking.setBondDenom(suite.ctx, chaincfg.DisplayDenom)
suite.staking.setBondDenom(suite.ctx, "ua0gi")

suite.tallier = NewTallyHandler(
suite.app.GetGovKeeper(),

@@ -302,7 +301,7 @@ func (suite *tallyHandlerSuite) mintDerivative(owner sdk.AccAddress, validator s
// suite.Require().NoError(err)

// return minted
return sdk.NewCoin(chaincfg.DisplayDenom, amount)
return sdk.NewCoin("ua0gi", amount)
}

func (suite *tallyHandlerSuite) delegateToNewBondedValidator(delegator sdk.AccAddress, amount sdkmath.Int) stakingtypes.ValidatorI {

go.mod (2 lines changed)

@@ -19,7 +19,6 @@ require (
github.com/ethereum/go-ethereum v1.10.26
github.com/evmos/ethermint v0.21.0
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.3
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0

@@ -108,6 +107,7 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.1.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
@@ -1,61 +0,0 @@
package client

import (
"context"
"time"

"github.com/0glabs/0g-chain/helper/da/light"

"github.com/pkg/errors"
)

type DaLightRpcClient interface {
Sample(ctx context.Context, streamId, headerHash []byte, blobIdx, times uint32) (bool, error)
Destroy()
GetInstanceCount() int
}

type daLightClient struct {
maxInstance int
pool ConnectionPool
}

func NewDaLightClient(address string, instanceLimit int) DaLightRpcClient {
return &daLightClient{
maxInstance: instanceLimit,
pool: NewConnectionPool(address, instanceLimit, 10*time.Minute),
}
}

func (c *daLightClient) Sample(ctx context.Context, streamId, headerHash []byte, blobIdx, times uint32) (bool, error) {
connection, err := c.pool.GetConnection()
if err != nil {
return false, errors.Wrap(err, "failed to connect to da light server")
}
defer c.pool.ReleaseConnection(connection)

req := &light.SampleRequest{
StreamId: streamId,
BatchHeaderHash: headerHash,
BlobIndex: blobIdx,
Times: times,
}
client := light.NewLightClient(connection)
reply, err := client.Sample(ctx, req)
if err != nil {
return false, errors.Wrap(err, "failed to sample from da light server")
}

return reply.Success, nil
}

func (c *daLightClient) Destroy() {
if c.pool != nil {
c.pool.Close()
c.pool = nil
}
}

func (c *daLightClient) GetInstanceCount() int {
return c.maxInstance
}
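The removed DaLightRpcClient above wraps the generated Light gRPC client behind a small interface. A minimal usage sketch, not taken from the repository — the address, stream ID, header hash, and blob index are placeholder values, and the import path is the one that existed before this commit:

package main

import (
	"context"
	"fmt"
	"time"

	// removed by this commit; path as it existed before removal
	"github.com/0glabs/0g-chain/helper/da/client"
)

func main() {
	// placeholder address and instance limit
	c := client.NewDaLightClient("127.0.0.1:34000", 4)
	defer c.Destroy()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// sample blob index 0 once; arguments are illustrative only
	ok, err := c.Sample(ctx, []byte{0x01}, []byte{0x02}, 0, 1)
	if err != nil {
		fmt.Println("sample failed:", err)
		return
	}
	fmt.Println("data availability sample succeeded:", ok)
}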
@@ -1,101 +0,0 @@
package client

import (
"errors"
"sync"
"time"

"google.golang.org/grpc"
"google.golang.org/grpc/backoff"
"google.golang.org/grpc/credentials/insecure"
)

type ConnectionPool interface {
GetConnection() (*grpc.ClientConn, error)
ReleaseConnection(*grpc.ClientConn)
Close()
}

type connectionPoolImpl struct {
address string
maxSize int
timeout time.Duration
param grpc.ConnectParams

mu sync.Mutex
pool []*grpc.ClientConn
}

func NewConnectionPool(address string, maxSize int, timeout time.Duration) ConnectionPool {
return &connectionPoolImpl{
address: address,
maxSize: maxSize,
timeout: timeout,
param: grpc.ConnectParams{
Backoff: backoff.Config{
BaseDelay: 1.0 * time.Second,
Multiplier: 1.5,
Jitter: 0.2,
MaxDelay: 30 * time.Second,
},
MinConnectTimeout: 30 * time.Second,
},
pool: make([]*grpc.ClientConn, 0, maxSize),
}
}

func (p *connectionPoolImpl) GetConnection() (*grpc.ClientConn, error) {
p.mu.Lock()
defer p.mu.Unlock()

if p.pool == nil {
return nil, errors.New("connection pool is closed")
}

// Check if there's any available connection in the pool
if len(p.pool) > 0 {
conn := p.pool[0]
p.pool = p.pool[1:]
return conn, nil
}

// If the pool is empty, create a new connection
conn, err := grpc.Dial(p.address, grpc.WithBlock(),
grpc.WithConnectParams(p.param),
grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return nil, err
}
return conn, nil
}

func (p *connectionPoolImpl) ReleaseConnection(conn *grpc.ClientConn) {
p.mu.Lock()
defer p.mu.Unlock()

if p.pool != nil {
// If the pool is full, close the connection
if len(p.pool) >= p.maxSize {
conn.Close()
return
}

// Add the connection back to the pool
p.pool = append(p.pool, conn)
} else {
conn.Close()
}
}

func (p *connectionPoolImpl) Close() {
p.mu.Lock()
defer p.mu.Unlock()

if p.pool != nil {
for _, conn := range p.pool {
conn.Close()
}

p.pool = nil
}
}
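As the code above shows, GetConnection never blocks when the pool is empty — it dials a fresh connection — and ReleaseConnection caps the number of idle connections kept at maxSize, closing any surplus. A small sketch of the intended borrow/return pattern; withConnection is a hypothetical helper, not part of the repository:

package client

import "google.golang.org/grpc"

// withConnection borrows a connection, runs fn, and always returns the connection
// so the pool can either reuse it or close it once maxSize idle connections are kept.
func withConnection(p ConnectionPool, fn func(*grpc.ClientConn) error) error {
	conn, err := p.GetConnection()
	if err != nil {
		return err
	}
	defer p.ReleaseConnection(conn)
	return fn(conn)
}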
@@ -1,26 +0,0 @@
module github.com/0glabs/0g-chain/helper/da

go 1.20

require (
github.com/json-iterator/go v1.1.12
github.com/lesismal/nbio v1.5.4
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.32.0
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
)

require (
github.com/lesismal/llib v1.1.13 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/stretchr/testify v1.8.4 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
)

@@ -1,60 +0,0 @@
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/lesismal/llib v1.1.13 h1:+w1+t0PykXpj2dXQck0+p6vdC9/mnbEXHgUy/HXDGfE=
github.com/lesismal/llib v1.1.13/go.mod h1:70tFXXe7P1FZ02AU9l8LgSOK7d7sRrpnkUr3rd3gKSg=
github.com/lesismal/nbio v1.5.4 h1:fZ6FOVZOBm7nFuudYsq+WyHJuM2UNuPdlvF/1LVa6lo=
github.com/lesismal/nbio v1.5.4/go.mod h1:mvfYBAA1jmrafXf2XvkM28jWkMTfA5jGks+HKDBMmOc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/crypto v0.0.0-20210513122933-cd7d49e622d5/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
@@ -1,397 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v4.25.3
// source: light/light.proto

package light

import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)

const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// SampleRequest contains the blob to sample (by batch and blob index) and required sample times
type SampleRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

StreamId []byte `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"`
BatchHeaderHash []byte `protobuf:"bytes,2,opt,name=batch_header_hash,json=batchHeaderHash,proto3" json:"batch_header_hash,omitempty"`
BlobIndex uint32 `protobuf:"varint,3,opt,name=blob_index,json=blobIndex,proto3" json:"blob_index,omitempty"`
Times uint32 `protobuf:"varint,4,opt,name=times,proto3" json:"times,omitempty"`
}

func (x *SampleRequest) Reset() {
*x = SampleRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_light_light_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SampleRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SampleRequest) ProtoMessage() {}

func (x *SampleRequest) ProtoReflect() protoreflect.Message {
mi := &file_light_light_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SampleRequest.ProtoReflect.Descriptor instead.
func (*SampleRequest) Descriptor() ([]byte, []int) {
return file_light_light_proto_rawDescGZIP(), []int{0}
}

func (x *SampleRequest) GetStreamId() []byte {
if x != nil {
return x.StreamId
}
return nil
}

func (x *SampleRequest) GetBatchHeaderHash() []byte {
if x != nil {
return x.BatchHeaderHash
}
return nil
}

func (x *SampleRequest) GetBlobIndex() uint32 {
if x != nil {
return x.BlobIndex
}
return 0
}

func (x *SampleRequest) GetTimes() uint32 {
if x != nil {
return x.Times
}
return 0
}

// SampleReply contains the sample result
type SampleReply struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
}

func (x *SampleReply) Reset() {
*x = SampleReply{}
if protoimpl.UnsafeEnabled {
mi := &file_light_light_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SampleReply) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SampleReply) ProtoMessage() {}

func (x *SampleReply) ProtoReflect() protoreflect.Message {
mi := &file_light_light_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SampleReply.ProtoReflect.Descriptor instead.
func (*SampleReply) Descriptor() ([]byte, []int) {
return file_light_light_proto_rawDescGZIP(), []int{1}
}

func (x *SampleReply) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}

type RetrieveRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

BatchHeaderHash []byte `protobuf:"bytes,1,opt,name=batch_header_hash,json=batchHeaderHash,proto3" json:"batch_header_hash,omitempty"`
BlobIndex uint32 `protobuf:"varint,2,opt,name=blob_index,json=blobIndex,proto3" json:"blob_index,omitempty"`
}

func (x *RetrieveRequest) Reset() {
*x = RetrieveRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_light_light_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *RetrieveRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*RetrieveRequest) ProtoMessage() {}

func (x *RetrieveRequest) ProtoReflect() protoreflect.Message {
mi := &file_light_light_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use RetrieveRequest.ProtoReflect.Descriptor instead.
func (*RetrieveRequest) Descriptor() ([]byte, []int) {
return file_light_light_proto_rawDescGZIP(), []int{2}
}

func (x *RetrieveRequest) GetBatchHeaderHash() []byte {
if x != nil {
return x.BatchHeaderHash
}
return nil
}

func (x *RetrieveRequest) GetBlobIndex() uint32 {
if x != nil {
return x.BlobIndex
}
return 0
}

type RetrieveReply struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Status bool `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}

func (x *RetrieveReply) Reset() {
*x = RetrieveReply{}
if protoimpl.UnsafeEnabled {
mi := &file_light_light_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *RetrieveReply) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*RetrieveReply) ProtoMessage() {}

func (x *RetrieveReply) ProtoReflect() protoreflect.Message {
mi := &file_light_light_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use RetrieveReply.ProtoReflect.Descriptor instead.
func (*RetrieveReply) Descriptor() ([]byte, []int) {
return file_light_light_proto_rawDescGZIP(), []int{3}
}

func (x *RetrieveReply) GetStatus() bool {
if x != nil {
return x.Status
}
return false
}

func (x *RetrieveReply) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}

var File_light_light_proto protoreflect.FileDescriptor

var file_light_light_proto_rawDesc = []byte{
0x0a, 0x11, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x05, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8d, 0x01, 0x0a, 0x0d, 0x53,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09,
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x61, 0x74,
0x63, 0x68, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x48, 0x65, 0x61, 0x64, 0x65,
0x72, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x69, 0x6e,
0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x05, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0x27, 0x0a, 0x0b, 0x53, 0x61,
0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63,
0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63,
0x65, 0x73, 0x73, 0x22, 0x5c, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f,
0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x0f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61,
0x73, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x22, 0x3b, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x70,
0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01,
0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, 0x79,
0x0a, 0x05, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x12, 0x34, 0x0a, 0x06, 0x53, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x12, 0x14, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x2e,
0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x3a, 0x0a,
0x08, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x12, 0x16, 0x2e, 0x6c, 0x69, 0x67, 0x68,
0x74, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x14, 0x2e, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65,
0x76, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x67, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x30,
0x67, 0x2d, 0x64, 0x61, 0x74, 0x61, 0x2d, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x2f, 0x72, 0x75, 0x6e,
0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}

var (
file_light_light_proto_rawDescOnce sync.Once
file_light_light_proto_rawDescData = file_light_light_proto_rawDesc
)

func file_light_light_proto_rawDescGZIP() []byte {
file_light_light_proto_rawDescOnce.Do(func() {
file_light_light_proto_rawDescData = protoimpl.X.CompressGZIP(file_light_light_proto_rawDescData)
})
return file_light_light_proto_rawDescData
}

var file_light_light_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_light_light_proto_goTypes = []interface{}{
(*SampleRequest)(nil), // 0: light.SampleRequest
(*SampleReply)(nil), // 1: light.SampleReply
(*RetrieveRequest)(nil), // 2: light.RetrieveRequest
(*RetrieveReply)(nil), // 3: light.RetrieveReply
}
var file_light_light_proto_depIdxs = []int32{
0, // 0: light.Light.Sample:input_type -> light.SampleRequest
2, // 1: light.Light.Retrieve:input_type -> light.RetrieveRequest
1, // 2: light.Light.Sample:output_type -> light.SampleReply
3, // 3: light.Light.Retrieve:output_type -> light.RetrieveReply
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}

func init() { file_light_light_proto_init() }
func file_light_light_proto_init() {
if File_light_light_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_light_light_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SampleRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_light_light_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SampleReply); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_light_light_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RetrieveRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_light_light_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RetrieveReply); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_light_light_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_light_light_proto_goTypes,
DependencyIndexes: file_light_light_proto_depIdxs,
MessageInfos: file_light_light_proto_msgTypes,
}.Build()
File_light_light_proto = out.File
file_light_light_proto_rawDesc = nil
file_light_light_proto_goTypes = nil
file_light_light_proto_depIdxs = nil
}
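The generated message types above are ordinary protobuf-go messages, so they can be wire-encoded directly with google.golang.org/protobuf/proto. A small sketch, not from the repository — field values are placeholders and the import path is the pre-removal one:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	// removed by this commit; path as it existed before removal
	"github.com/0glabs/0g-chain/helper/da/light"
)

func main() {
	req := &light.SampleRequest{
		StreamId:        []byte{0x01, 0x02}, // placeholder stream ID
		BatchHeaderHash: []byte{0x03, 0x04}, // placeholder batch header hash
		BlobIndex:       0,
		Times:           1,
	}
	raw, err := proto.Marshal(req) // wire-encode the request
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded SampleRequest: %d bytes\n", len(raw))
}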
@@ -1,141 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v4.25.3
// source: light/light.proto

package light

import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// LightClient is the client API for Light service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type LightClient interface {
Sample(ctx context.Context, in *SampleRequest, opts ...grpc.CallOption) (*SampleReply, error)
Retrieve(ctx context.Context, in *RetrieveRequest, opts ...grpc.CallOption) (*RetrieveReply, error)
}

type lightClient struct {
cc grpc.ClientConnInterface
}

func NewLightClient(cc grpc.ClientConnInterface) LightClient {
return &lightClient{cc}
}

func (c *lightClient) Sample(ctx context.Context, in *SampleRequest, opts ...grpc.CallOption) (*SampleReply, error) {
out := new(SampleReply)
err := c.cc.Invoke(ctx, "/light.Light/Sample", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *lightClient) Retrieve(ctx context.Context, in *RetrieveRequest, opts ...grpc.CallOption) (*RetrieveReply, error) {
out := new(RetrieveReply)
err := c.cc.Invoke(ctx, "/light.Light/Retrieve", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

// LightServer is the server API for Light service.
// All implementations must embed UnimplementedLightServer
// for forward compatibility
type LightServer interface {
Sample(context.Context, *SampleRequest) (*SampleReply, error)
Retrieve(context.Context, *RetrieveRequest) (*RetrieveReply, error)
mustEmbedUnimplementedLightServer()
}

// UnimplementedLightServer must be embedded to have forward compatible implementations.
type UnimplementedLightServer struct {
}

func (UnimplementedLightServer) Sample(context.Context, *SampleRequest) (*SampleReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method Sample not implemented")
}
func (UnimplementedLightServer) Retrieve(context.Context, *RetrieveRequest) (*RetrieveReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method Retrieve not implemented")
}
func (UnimplementedLightServer) mustEmbedUnimplementedLightServer() {}

// UnsafeLightServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to LightServer will
// result in compilation errors.
type UnsafeLightServer interface {
mustEmbedUnimplementedLightServer()
}

func RegisterLightServer(s grpc.ServiceRegistrar, srv LightServer) {
s.RegisterService(&Light_ServiceDesc, srv)
}

func _Light_Sample_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SampleRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LightServer).Sample(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/light.Light/Sample",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LightServer).Sample(ctx, req.(*SampleRequest))
}
return interceptor(ctx, in, info, handler)
}

func _Light_Retrieve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RetrieveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LightServer).Retrieve(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/light.Light/Retrieve",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LightServer).Retrieve(ctx, req.(*RetrieveRequest))
}
return interceptor(ctx, in, info, handler)
}

// Light_ServiceDesc is the grpc.ServiceDesc for Light service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Light_ServiceDesc = grpc.ServiceDesc{
ServiceName: "light.Light",
HandlerType: (*LightServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Sample",
Handler: _Light_Sample_Handler,
},
{
MethodName: "Retrieve",
Handler: _Light_Retrieve_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "light/light.proto",
}
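Serving the Light service defined by the removed generated code follows the usual grpc-go pattern of embedding UnimplementedLightServer. A hedged sketch — toyServer, the listen address, and the always-true sampling result are illustrative and not part of the repository:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	// removed by this commit; path as it existed before removal
	"github.com/0glabs/0g-chain/helper/da/light"
)

// toyServer is a hypothetical implementation used only for illustration.
type toyServer struct {
	light.UnimplementedLightServer // Retrieve falls back to "not implemented"
}

func (toyServer) Sample(ctx context.Context, req *light.SampleRequest) (*light.SampleReply, error) {
	// Pretend every sampled blob is available.
	return &light.SampleReply{Success: true}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:34000") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	light.RegisterLightServer(s, toyServer{})
	log.Fatal(s.Serve(lis))
}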
@@ -1,89 +0,0 @@
package main

import (
"context"
"flag"
"fmt"
"io"
"log"
"net/url"
"os"
"os/signal"
"time"

"github.com/0glabs/0g-chain/helper/da/service"
"github.com/0glabs/0g-chain/helper/da/types"

"github.com/lesismal/nbio/nbhttp"
"github.com/lesismal/nbio/nbhttp/websocket"
)

const (
subscribeMsg = "{\"jsonrpc\":\"2.0\",\"method\":\"subscribe\",\"id\":1,\"params\":{\"query\":\"tm.event='Tx'\"}}"
)

var (
rpcAddress = flag.String("rpc-address", "34.214.2.28:32001", "address of da-light rpc server")
wsAddress = flag.String("ws-address", "127.0.0.1:26657", "address of emvos ws server")
relativePath = flag.String("relative-path", "", "relative path of evmosd")
account = flag.String("account", "", "account to run evmosd cli")
keyring = flag.String("keyring", "", "keyring to run evmosd cli")
homePath = flag.String("home", "", "home path of evmosd node")
)

func newUpgrader() *websocket.Upgrader {
u := websocket.NewUpgrader()
u.OnMessage(func(c *websocket.Conn, messageType websocket.MessageType, data []byte) {
log.Println("onEcho:", string(data))
ctx := context.WithValue(context.Background(), types.DA_RPC_ADDRESS, *rpcAddress)
ctx = context.WithValue(ctx, types.NODE_CLI_RELATIVE_PATH, *relativePath)
ctx = context.WithValue(ctx, types.NODE_CLI_EXEC_ACCOUNT, *account)
ctx = context.WithValue(ctx, types.NODE_CLI_EXEC_KEYRING, *keyring)
ctx = context.WithValue(ctx, types.NODE_HOME_PATH, *homePath)
go func() { service.OnMessage(ctx, c, messageType, data) }()
})

u.OnClose(func(c *websocket.Conn, err error) {
fmt.Println("OnClose:", c.RemoteAddr().String(), err)
service.OnClose()
})

return u
}

func main() {
flag.Parse()
engine := nbhttp.NewEngine(nbhttp.Config{})
err := engine.Start()
if err != nil {
fmt.Printf("nbio.Start failed: %v\n", err)
return
}

go func() {
u := url.URL{Scheme: "ws", Host: *wsAddress, Path: "/websocket"}
dialer := &websocket.Dialer{
Engine: engine,
Upgrader: newUpgrader(),
DialTimeout: time.Second * 3,
}
c, res, err := dialer.Dial(u.String(), nil)
if err != nil {
if res != nil && res.Body != nil {
bReason, _ := io.ReadAll(res.Body)
fmt.Printf("dial failed: %v, reason: %v\n", err, string(bReason))
} else {
fmt.Printf("dial failed: %v\n", err)
}
return
}
c.WriteMessage(websocket.TextMessage, []byte(subscribeMsg))
}()

interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
<-interrupt
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
engine.Shutdown(ctx)
}

@@ -1,33 +0,0 @@
syntax = "proto3";

package light;

option go_package = "proto/light";

service Light {
rpc Sample(SampleRequest) returns (SampleReply) {}
rpc Retrieve(RetrieveRequest) returns (RetrieveReply) {}
}

// SampleRequest contains the blob to sample (by batch and blob index) and required sample times
message SampleRequest {
bytes stream_id = 1;
bytes batch_header_hash = 2;
uint32 blob_index = 3;
uint32 times = 4;
}

// SampleReply contains the sample result
message SampleReply {
bool success = 1;
}

message RetrieveRequest {
bytes batch_header_hash = 1;
uint32 blob_index = 2;
}

message RetrieveReply {
bool status = 1;
bytes data = 2;
}

@@ -1,186 +0,0 @@
package service

import (
"context"
"encoding/hex"
"os"
"os/exec"
"strconv"
"strings"

"github.com/0glabs/0g-chain/helper/da/client"
"github.com/0glabs/0g-chain/helper/da/types"
"github.com/0glabs/0g-chain/helper/da/utils/sizedw8grp"

jsoniter "github.com/json-iterator/go"
"github.com/lesismal/nbio/nbhttp/websocket"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)

const (
defaultClientInstance = 10
)

var rpcClient client.DaLightRpcClient

func OnMessage(ctx context.Context, c *websocket.Conn, messageType websocket.MessageType, data []byte) {
if messageType == websocket.TextMessage {
rawMsg := unwrapJsonRpc(data)
if verifyQuery(rawMsg) {
eventStr := jsoniter.Get(rawMsg, "events").ToString()
events := map[string][]string{}
if err := jsoniter.UnmarshalFromString(eventStr, &events); err == nil {
dasRequestMap := make(map[string]string, 4)
for key, val := range events {
if strings.HasPrefix(key, "das_request.") {
dasRequestMap[strings.ReplaceAll(key, "das_request.", "")] = val[0]
}
}
if len(dasRequestMap) == 4 {
rid, _ := strconv.ParseUint(dasRequestMap["request_id"], 10, 64)
numBlobs, _ := strconv.ParseUint(dasRequestMap["num_blobs"], 10, 64)
req := types.DASRequest{
RequestId: rid,
StreamId: dasRequestMap["stream_id"],
BatchHeaderHash: dasRequestMap["batch_header_hash"],
NumBlobs: numBlobs,
}
err := handleDasRequest(ctx, req)

if err != nil {
log.Err(err).Msgf("failed to handle das request: %v, %v", req, err)
} else {
log.Info().Msgf("successfully handled das request: %v", req)
}
}
}
}
} else {
// TODO: handle other message
}
}

func OnClose() {
if rpcClient != nil {
rpcClient.Destroy()
rpcClient = nil
}
}

func unwrapJsonRpc(data []byte) []byte {
result := jsoniter.Get(data, "result")
if 0 < len(result.Keys()) {
return []byte(result.ToString())
}
return []byte{}
}

func verifyQuery(data []byte) bool {
if len(data) > 0 {
return jsoniter.Get(data, "query").ToString() == "tm.event='Tx'"
}
return false
}

func min(a, b int) int {
if a < b {
return a
}
return b
}

func handleDasRequest(ctx context.Context, request types.DASRequest) error {
if rpcClient == nil {
addrVal := ctx.Value(types.DA_RPC_ADDRESS)
if addrVal == nil {
return errors.New("da light service address not found in context")
}

limit := ctx.Value(types.INSTANCE_LIMIT)
if limit == nil {
limit = defaultClientInstance
}

rpcClient = client.NewDaLightClient(addrVal.(string), limit.(int))
}

streamID, err := hex.DecodeString(request.StreamId)
if err != nil {
return err
}

batchHeaderHash, err := hex.DecodeString(request.BatchHeaderHash)
if err != nil {
return err
}

result := make(chan bool, request.NumBlobs)
taskCnt := min(rpcClient.GetInstanceCount(), int(request.NumBlobs))
wg := sizedw8grp.New(taskCnt)

for i := uint64(0); i < request.NumBlobs; i++ {
wg.Add()
go func(idx uint64) {
defer wg.Done()
ret, err := rpcClient.Sample(ctx, streamID, batchHeaderHash, uint32(idx), 1)
if err != nil {
log.Err(err).Msgf("failed to sample data availability with blob index %d", idx)
result <- false
} else {
log.Info().Msgf("sample result for blob index %d: %v", idx, ret)
result <- ret
}
}(i)
}
wg.Wait()
close(result)

finalResult := true
for val := range result {
if !val {
finalResult = false
break
}
}

return runEvmosdCliReportDasResult(ctx, request.RequestId, finalResult)
}

func runEvmosdCliReportDasResult(ctx context.Context, requestId uint64, result bool) error {
relativePath := ctx.Value(types.NODE_CLI_RELATIVE_PATH)
if relativePath == nil {
return errors.New("relativePath not found in context")
}

account := ctx.Value(types.NODE_CLI_EXEC_ACCOUNT)
if account == nil {
return errors.New("account not found in context")
}

args := []string{
"tx",
"das",
"report-das-result",
strconv.FormatUint(requestId, 10),
strconv.FormatBool(result),
"--from", account.(string),
"--gas-prices", "7678500neuron", // TODO: use args to set gas prices
}

homePath := ctx.Value(types.NODE_HOME_PATH)
if len(homePath.(string)) > 0 {
args = append(args, "--home", homePath.(string))
}

keyring := ctx.Value(types.NODE_CLI_EXEC_KEYRING)
if len(keyring.(string)) > 0 {
args = append(args, "--keyring-backend", keyring.(string))
}

cmdStr := relativePath.(string) + "0gchaind"
cmd := exec.Command(cmdStr, append(args, "-y")...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}

@@ -1,8 +0,0 @@
package types

type DASRequest struct {
RequestId uint64 `json:"request_id"`
StreamId string `json:"stream_id"`
BatchHeaderHash string `json:"batch_header_hash"`
NumBlobs uint64 `json:"num_blobs"`
}

@@ -1,10 +0,0 @@
package types

const (
DA_RPC_ADDRESS = "rpc_address"
INSTANCE_LIMIT = "instance_limit"
NODE_CLI_RELATIVE_PATH = "relative_path"
NODE_CLI_EXEC_ACCOUNT = "node_exec_account"
NODE_CLI_EXEC_KEYRING = "node_exec_keyring"
NODE_HOME_PATH = "home_path"
)
@@ -1,51 +0,0 @@
package sizedw8grp

import (
"context"
"math"
"sync"
)

type SizedWaitGroup struct {
Size int

current chan struct{}
wg sync.WaitGroup
}

func New(limit int) SizedWaitGroup {
size := math.MaxInt32
if limit > 0 {
size = limit
}
return SizedWaitGroup{
Size: size,

current: make(chan struct{}, size),
wg: sync.WaitGroup{},
}
}

func (s *SizedWaitGroup) Add() {
_ = s.AddWithContext(context.Background())
}

func (s *SizedWaitGroup) AddWithContext(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case s.current <- struct{}{}:
break
}
s.wg.Add(1)
return nil
}

func (s *SizedWaitGroup) Done() {
<-s.current
s.wg.Done()
}

func (s *SizedWaitGroup) Wait() {
s.wg.Wait()
}
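SizedWaitGroup bounds the number of goroutines in flight: Add blocks once the limit is reached, which is how the sampling fan-out in the service code above uses it. A minimal standalone sketch, not from the repository — the limit and the task body are placeholders, and the import path is the pre-removal one:

package main

import (
	"fmt"

	// removed by this commit; path as it existed before removal
	"github.com/0glabs/0g-chain/helper/da/utils/sizedw8grp"
)

func main() {
	wg := sizedw8grp.New(3) // at most 3 tasks run concurrently
	for i := 0; i < 10; i++ {
		wg.Add() // blocks while 3 tasks are already in flight
		go func(n int) {
			defer wg.Done()
			fmt.Println("task", n) // placeholder work
		}(i)
	}
	wg.Wait()
}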
@@ -1,37 +0,0 @@
syntax = "proto3";
package zgc.das.v1;

import "cosmos_proto/cosmos.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/0glabs/0g-chain/x/das/v1/types";

message Params {}

// GenesisState defines the das module's genesis state.
message GenesisState {
option (gogoproto.goproto_getters) = false;

Params params = 1 [(gogoproto.nullable) = false];
uint64 next_request_id = 2 [(gogoproto.customname) = "NextRequestID"];
repeated DASRequest requests = 3 [(gogoproto.nullable) = false];
repeated DASResponse responses = 4 [(gogoproto.nullable) = false];
}

message DASRequest {
uint64 id = 1 [(gogoproto.customname) = "ID"];
bytes stream_id = 2 [(gogoproto.customname) = "StreamID"];
bytes batch_header_hash = 3;
uint32 num_blobs = 4;
}

message DASResponse {
uint64 id = 1 [(gogoproto.customname) = "ID"];
bytes sampler = 2 [
(cosmos_proto.scalar) = "cosmos.AddressBytes",
(gogoproto.casttype) = "github.com/cosmos/cosmos-sdk/types.ValAddress"
];
repeated bool results = 3;
}

@@ -1,24 +0,0 @@
syntax = "proto3";
package zgc.das.v1;

import "cosmos_proto/cosmos.proto";
import "gogoproto/gogo.proto";
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/0glabs/0g-chain/x/das/v1/types";
option (gogoproto.goproto_getters_all) = false;

// Query defines the gRPC querier service for the das module
service Query {
rpc NextRequestID(QueryNextRequestIDRequest) returns (QueryNextRequestIDResponse) {
option (google.api.http).get = "/0gchain/das/v1/next-request-id";
}
}

message QueryNextRequestIDRequest {}

message QueryNextRequestIDResponse {
uint64 next_request_id = 1 [(gogoproto.customname) = "NextRequestID"];
}

@@ -1,35 +0,0 @@
syntax = "proto3";
package zgc.das.v1;

import "cosmos_proto/cosmos.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "zgc/das/v1/genesis.proto";

option go_package = "github.com/0glabs/0g-chain/x/das/v1/types";
option (gogoproto.goproto_getters_all) = false;

// Msg defines the das Msg service
service Msg {
rpc RequestDAS(MsgRequestDAS) returns (MsgRequestDASResponse);
rpc ReportDASResult(MsgReportDASResult) returns (MsgReportDASResultResponse);
}

message MsgRequestDAS {
string requester = 1 [(gogoproto.moretags) = "Requester"];
string stream_id = 2 [(gogoproto.customname) = "StreamID"];
string batch_header_hash = 3;
uint32 num_blobs = 4;
}

message MsgRequestDASResponse {
uint64 request_id = 1 [(gogoproto.customname) = "RequestID"];
}

message MsgReportDASResult {
uint64 request_id = 1 [(gogoproto.customname) = "RequestID"];
string sampler = 2;
repeated bool results = 3;
}

message MsgReportDASResultResponse {}
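The MsgRequestDAS message defined above is what the removed das CLI (shown further below) wraps. A small construction sketch, assuming the generated types package and its NewMsgRequestDAS constructor as they existed before removal; the requester bytes, stream ID, batch header hash, and blob count are placeholder values:

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	// removed by this commit; path as it existed before removal
	dastypes "github.com/0glabs/0g-chain/x/das/v1/types"
)

func main() {
	requester := sdk.AccAddress([]byte("placeholder-addr-20b")) // 20 placeholder bytes
	msg := dastypes.NewMsgRequestDAS(requester, "00ff", "a1b2c3", 4)
	fmt.Println(msg.String()) // gogoproto messages implement String()
}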
@@ -3,7 +3,6 @@ package e2e_test
import (
"time"

"github.com/0glabs/0g-chain/chaincfg"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmtypes "github.com/cometbft/cometbft/types"
sdk "github.com/cosmos/cosmos-sdk/types"

@@ -48,13 +47,13 @@ func (suite *IntegrationTestSuite) TestUpgradeParams_SDK() {
"x/gov DepositParams max deposit period after upgrade should be 172800s",
)
suite.Assert().Equal(
[]sdk.Coin{{Denom: chaincfg.DisplayDenom, Amount: sdk.NewInt(10_000_000)}},
[]sdk.Coin{{Denom: "ua0gi", Amount: sdk.NewInt(10_000_000)}},
govParamsAfter.DepositParams.MinDeposit,
"x/gov DepositParams min deposit after upgrade should be 10_000_000 ukava",
)

expectedParams := govtypes.Params{
MinDeposit: sdk.NewCoins(sdk.NewCoin(chaincfg.DisplayDenom, sdk.NewInt(10_000_000))),
MinDeposit: sdk.NewCoins(sdk.NewCoin("ua0gi", sdk.NewInt(10_000_000))),
MaxDepositPeriod: mustParseDuration("172800s"),
VotingPeriod: mustParseDuration("30s"),
Quorum: "0.334000000000000000",

@@ -12,7 +12,6 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"

"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
"github.com/0glabs/0g-chain/x/bep3"
"github.com/0glabs/0g-chain/x/bep3/keeper"
"github.com/0glabs/0g-chain/x/bep3/types"

@@ -34,7 +33,7 @@ func (suite *ABCITestSuite) SetupTest() {

// Set up auth GenesisState
_, addrs := app.GeneratePrivKeyAddressPairs(12)
coins := sdk.NewCoins(c("bnb", 10000000000), c(chaincfg.DisplayDenom, 10000000000))
coins := sdk.NewCoins(c("bnb", 10000000000), c("ua0gi", 10000000000))
authGS := app.NewFundedGenStateWithSameCoins(tApp.AppCodec(), coins, addrs)
// Initialize test app
tApp.InitializeFromGenesisStates(authGS, NewBep3GenStateMulti(tApp.AppCodec(), addrs[11]))

@@ -12,7 +12,6 @@ import (
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"

"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
"github.com/0glabs/0g-chain/x/committee/keeper"
"github.com/0glabs/0g-chain/x/committee/types"
)

@@ -62,7 +61,7 @@ func (suite *MsgServerTestSuite) SetupTest() {
[]types.Proposal{},
[]types.Vote{},
)
suite.communityPoolAmt = sdk.NewCoins(sdk.NewCoin(chaincfg.DisplayDenom, sdkmath.NewInt(1000)))
suite.communityPoolAmt = sdk.NewCoins(sdk.NewCoin("ua0gi", sdkmath.NewInt(1000)))
suite.app.InitializeFromGenesisStates(
app.GenesisState{types.ModuleName: cdc.MustMarshalJSON(testGenesis)},
// TODO: not used?

@@ -53,7 +53,7 @@ func TestUnpackPermissions_Failure(t *testing.T) {
// "repay x/community cdp debt",
// "repays debt on a cdp position",
// "collateral-type",
// sdk.NewInt64Coin(chaincfg.DisplayDenom, 1e10),
// sdk.NewInt64Coin("ua0gi", 1e10),
// ),
// allowed: true,
// },

@@ -90,7 +90,7 @@ func TestUnpackPermissions_Failure(t *testing.T) {
// proposal: communitytypes.NewCommunityPoolLendWithdrawProposal(
// "withdraw lend position",
// "this fake proposal withdraws a lend position for the community pool",
// sdk.NewCoins(sdk.NewCoin(chaincfg.DisplayDenom, sdk.NewInt(1e10))),
// sdk.NewCoins(sdk.NewCoin("ua0gi", sdk.NewInt(1e10))),
// ),
// allowed: true,
// },

@@ -128,7 +128,7 @@ func TestUnpackPermissions_Failure(t *testing.T) {
// "withdraw x/community cdp collateral",
// "yes",
// "collateral-type",
// sdk.NewInt64Coin(chaincfg.DisplayDenom, 1e10),
// sdk.NewInt64Coin("ua0gi", 1e10),
// ),
// allowed: true,
// },
@@ -1,57 +0,0 @@
package cli

import (
"context"
"fmt"

"github.com/spf13/cobra"

"github.com/0glabs/0g-chain/x/das/v1/types"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
)

// GetQueryCmd returns the cli query commands for the inflation module.
func GetQueryCmd() *cobra.Command {
cmd := &cobra.Command{
Use: types.ModuleName,
Short: "Querying commands for the das module",
DisableFlagParsing: true,
SuggestionsMinimumDistance: 2,
RunE: client.ValidateCmd,
}

cmd.AddCommand(
GetNextRequestID(),
)

return cmd
}

func GetNextRequestID() *cobra.Command {
cmd := &cobra.Command{
Use: "next-request-id",
Short: "Query the next request ID",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}

queryClient := types.NewQueryClient(clientCtx)

params := &types.QueryNextRequestIDRequest{}
res, err := queryClient.NextRequestID(context.Background(), params)
if err != nil {
return err
}

return clientCtx.PrintString(fmt.Sprintf("%v\n", res.NextRequestID))
},
}

flags.AddQueryFlagsToCmd(cmd)

return cmd
}

@@ -1,103 +0,0 @@
package cli

import (
"encoding/hex"
"fmt"
"strconv"

"github.com/0glabs/0g-chain/x/das/v1/types"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/tx"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/spf13/cobra"
)

// GetTxCmd returns the transaction commands for this module
func GetTxCmd() *cobra.Command {
cmd := &cobra.Command{
Use: types.ModuleName,
Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName),
DisableFlagParsing: true,
SuggestionsMinimumDistance: 2,
RunE: client.ValidateCmd,
}
cmd.AddCommand(
NewRequestDASCmd(),
NewReportDASResultCmd(),
)
return cmd
}

func NewRequestDASCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "request-das steram-id batch-header-hash num-blobs",
Short: "Request data-availability-sampling",
Args: cobra.ExactArgs(3),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientTxContext(cmd)
if err != nil {
return err
}

numBlobs, err := strconv.Atoi(args[2])
if err != nil {
return err
}

msg := types.NewMsgRequestDAS(clientCtx.GetFromAddress(), args[0], args[1], uint32(numBlobs))
return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}

flags.AddTxFlagsToCmd(cmd)
return cmd

}

func NewReportDASResultCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "report-das-result request-id results",
Short: "Report data-availability-sampling result",
Args: cobra.MinimumNArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientTxContext(cmd)
if err != nil {
return err
}

requestID, err := strconv.ParseUint(args[0], 10, 64)
if err != nil {
return err
}

n := len(args) - 1
results := make([]bool, n)
for i := 0; i < n; i++ {
var err error
results[i], err = strconv.ParseBool(args[i+1])
if err != nil {
return err
}
}

// get account name by address
accAddr := clientCtx.GetFromAddress()

samplerAddr, err := sdk.ValAddressFromHex(hex.EncodeToString(accAddr.Bytes()))
if err != nil {
return err
}

msg := &types.MsgReportDASResult{
|
||||
RequestID: requestID,
|
||||
Sampler: samplerAddr.String(),
|
||||
Results: results,
|
||||
}
|
||||
return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
|
||||
},
|
||||
}
|
||||
|
||||
flags.AddTxFlagsToCmd(cmd)
|
||||
return cmd
|
||||
}
|
@ -1,39 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
|
||||
"github.com/0glabs/0g-chain/x/das/v1/keeper"
|
||||
"github.com/0glabs/0g-chain/x/das/v1/types"
|
||||
)
|
||||
|
||||
// InitGenesis initializes the store state from a genesis state.
|
||||
func InitGenesis(ctx sdk.Context, keeper keeper.Keeper, gs types.GenesisState) {
|
||||
if err := gs.Validate(); err != nil {
|
||||
panic(fmt.Sprintf("failed to validate %s genesis state: %s", types.ModuleName, err))
|
||||
}
|
||||
|
||||
keeper.SetNextRequestID(ctx, gs.NextRequestID)
|
||||
for _, req := range gs.Requests {
|
||||
keeper.SetDASRequest(ctx, req)
|
||||
}
|
||||
for _, resp := range gs.Responses {
|
||||
keeper.SetDASResponse(ctx, resp)
|
||||
}
|
||||
}
|
||||
|
||||
// ExportGenesis returns a GenesisState for a given context and keeper.
|
||||
func ExportGenesis(ctx sdk.Context, keeper keeper.Keeper) *types.GenesisState {
|
||||
nextRequestID, err := keeper.GetNextRequestID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return types.NewGenesisState(
|
||||
nextRequestID,
|
||||
keeper.GetDASRequests(ctx),
|
||||
keeper.GetDASResponses(ctx),
|
||||
)
|
||||
}
|
@ -1,22 +0,0 @@
|
||||
package keeper
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/0glabs/0g-chain/x/das/v1/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
var _ types.QueryServer = Keeper{}
|
||||
|
||||
func (k Keeper) NextRequestID(
|
||||
c context.Context,
|
||||
_ *types.QueryNextRequestIDRequest,
|
||||
) (*types.QueryNextRequestIDResponse, error) {
|
||||
ctx := sdk.UnwrapSDKContext(c)
|
||||
nextRequestID, err := k.GetNextRequestID(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &types.QueryNextRequestIDResponse{NextRequestID: nextRequestID}, nil
|
||||
}
|
@ -1,198 +0,0 @@
|
||||
package keeper
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"strconv"
|
||||
|
||||
errorsmod "cosmossdk.io/errors"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"github.com/cosmos/cosmos-sdk/store/prefix"
|
||||
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
|
||||
"github.com/0glabs/0g-chain/x/das/v1/types"
|
||||
)
|
||||
|
||||
type Keeper struct {
|
||||
storeKey storetypes.StoreKey
|
||||
cdc codec.BinaryCodec
|
||||
stakingKeeperRef types.StakingKeeperRef
|
||||
}
|
||||
|
||||
// NewKeeper creates a new das Keeper instance
|
||||
func NewKeeper(
|
||||
storeKey storetypes.StoreKey,
|
||||
cdc codec.BinaryCodec,
|
||||
stakingKeeper types.StakingKeeperRef,
|
||||
) Keeper {
|
||||
return Keeper{
|
||||
storeKey: storeKey,
|
||||
cdc: cdc,
|
||||
stakingKeeperRef: stakingKeeper,
|
||||
}
|
||||
}
|
||||
|
||||
// Logger returns a module-specific logger.
|
||||
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
|
||||
return ctx.Logger().With("module", "x/"+types.ModuleName)
|
||||
}
|
||||
|
||||
func (k Keeper) SetNextRequestID(ctx sdk.Context, id uint64) {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
store.Set(types.NextRequestIDKey, types.GetKeyFromID(id))
|
||||
}
|
||||
|
||||
func (k Keeper) GetNextRequestID(ctx sdk.Context) (uint64, error) {
|
||||
store := ctx.KVStore(k.storeKey)
|
||||
bz := store.Get(types.NextRequestIDKey)
|
||||
if bz == nil {
|
||||
return 0, errorsmod.Wrap(types.ErrInvalidGenesis, "next request ID not set at genesis")
|
||||
}
|
||||
return types.Uint64FromBytes(bz), nil
|
||||
}
|
||||
|
||||
func (k Keeper) IncrementNextRequestID(ctx sdk.Context) error {
|
||||
id, err := k.GetNextRequestID(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
k.SetNextRequestID(ctx, id+1)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k Keeper) GetDASRequest(ctx sdk.Context, requestID uint64) (types.DASRequest, bool) {
|
||||
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RequestKeyPrefix)
|
||||
bz := store.Get(types.GetKeyFromID(requestID))
|
||||
if bz == nil {
|
||||
return types.DASRequest{}, false
|
||||
}
|
||||
var req types.DASRequest
|
||||
k.cdc.MustUnmarshal(bz, &req)
|
||||
return req, true
|
||||
}
|
||||
|
||||
func (k Keeper) SetDASRequest(ctx sdk.Context, req types.DASRequest) {
|
||||
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RequestKeyPrefix)
|
||||
bz := k.cdc.MustMarshal(&req)
|
||||
store.Set(types.GetKeyFromID(req.ID), bz)
|
||||
}
|
||||
|
||||
func (k Keeper) IterateDASRequest(ctx sdk.Context, cb func(req types.DASRequest) (stop bool)) {
|
||||
iterator := sdk.KVStorePrefixIterator(ctx.KVStore(k.storeKey), types.RequestKeyPrefix)
|
||||
|
||||
defer iterator.Close()
|
||||
for ; iterator.Valid(); iterator.Next() {
|
||||
var req types.DASRequest
|
||||
k.cdc.MustUnmarshal(iterator.Value(), &req)
|
||||
if cb(req) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (k Keeper) GetDASRequests(ctx sdk.Context) []types.DASRequest {
|
||||
results := []types.DASRequest{}
|
||||
k.IterateDASRequest(ctx, func(req types.DASRequest) bool {
|
||||
results = append(results, req)
|
||||
return false
|
||||
})
|
||||
return results
|
||||
}
|
||||
|
||||
func (k Keeper) StoreNewDASRequest(
|
||||
ctx sdk.Context,
|
||||
streamIDHexStr string,
|
||||
batchHeaderHashHexStr string,
|
||||
numBlobs uint32) (uint64, error) {
|
||||
requestID, err := k.GetNextRequestID(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
streamID, err := hex.DecodeString(streamIDHexStr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
batchHeaderHash, err := hex.DecodeString(batchHeaderHashHexStr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
req := types.DASRequest{
|
||||
ID: requestID,
|
||||
StreamID: streamID,
|
||||
BatchHeaderHash: batchHeaderHash,
|
||||
NumBlobs: numBlobs,
|
||||
}
|
||||
k.SetDASRequest(ctx, req)
|
||||
|
||||
ctx.EventManager().EmitEvent(
|
||||
sdk.NewEvent(
|
||||
types.EventTypeDASRequest,
|
||||
sdk.NewAttribute(types.AttributeKeyRequestID, strconv.FormatUint(requestID, 10)),
|
||||
sdk.NewAttribute(types.AttributeKeyStreamID, streamIDHexStr),
|
||||
sdk.NewAttribute(types.AttributeKeyBatchHeaderHash, batchHeaderHashHexStr),
|
||||
sdk.NewAttribute(types.AttributeKeyNumBlobs, strconv.FormatUint(uint64(numBlobs), 10)),
|
||||
),
|
||||
)
|
||||
|
||||
return requestID, nil
|
||||
}
|
||||
|
||||
func (k Keeper) GetDASResponse(
|
||||
ctx sdk.Context, requestID uint64, sampler sdk.ValAddress,
|
||||
) (types.DASResponse, bool) {
|
||||
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ResponseKeyPrefix)
|
||||
bz := store.Get(types.GetResponseKey(requestID, sampler))
|
||||
if bz == nil {
|
||||
return types.DASResponse{}, false
|
||||
}
|
||||
var vote types.DASResponse
|
||||
k.cdc.MustUnmarshal(bz, &vote)
|
||||
return vote, true
|
||||
}
|
||||
|
||||
func (k Keeper) SetDASResponse(ctx sdk.Context, resp types.DASResponse) {
|
||||
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ResponseKeyPrefix)
|
||||
bz := k.cdc.MustMarshal(&resp)
|
||||
store.Set(types.GetResponseKey(resp.ID, resp.Sampler), bz)
|
||||
}
|
||||
|
||||
func (k Keeper) IterateDASResponse(ctx sdk.Context, cb func(resp types.DASResponse) (stop bool)) {
|
||||
iterator := sdk.KVStorePrefixIterator(ctx.KVStore(k.storeKey), types.ResponseKeyPrefix)
|
||||
|
||||
defer iterator.Close()
|
||||
for ; iterator.Valid(); iterator.Next() {
|
||||
var resp types.DASResponse
|
||||
k.cdc.MustUnmarshal(iterator.Value(), &resp)
|
||||
if cb(resp) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (k Keeper) GetDASResponses(ctx sdk.Context) []types.DASResponse {
|
||||
results := []types.DASResponse{}
|
||||
k.IterateDASResponse(ctx, func(resp types.DASResponse) bool {
|
||||
results = append(results, resp)
|
||||
return false
|
||||
})
|
||||
return results
|
||||
}
|
||||
|
||||
func (k Keeper) StoreNewDASResponse(
|
||||
ctx sdk.Context, requestID uint64, sampler sdk.ValAddress, results []bool) error {
|
||||
if _, found := k.GetDASRequest(ctx, requestID); !found {
|
||||
return errorsmod.Wrapf(types.ErrUnknownRequest, "%d", requestID)
|
||||
}
|
||||
|
||||
k.SetDASResponse(ctx, types.DASResponse{
|
||||
ID: requestID,
|
||||
Sampler: sampler,
|
||||
Results: results,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
@ -1,49 +0,0 @@
|
||||
package keeper
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/0glabs/0g-chain/x/das/v1/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
var _ types.MsgServer = &Keeper{}
|
||||
|
||||
// RequestDAS handles MsgRequestDAS messages
|
||||
func (k Keeper) RequestDAS(
|
||||
goCtx context.Context, msg *types.MsgRequestDAS,
|
||||
) (*types.MsgRequestDASResponse, error) {
|
||||
ctx := sdk.UnwrapSDKContext(goCtx)
|
||||
|
||||
requestID, err := k.StoreNewDASRequest(ctx, msg.StreamID, msg.BatchHeaderHash, msg.NumBlobs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.IncrementNextRequestID(ctx)
|
||||
return &types.MsgRequestDASResponse{
|
||||
RequestID: requestID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReportDASResult handles MsgReportDASResult messages
|
||||
func (k Keeper) ReportDASResult(
|
||||
goCtx context.Context, msg *types.MsgReportDASResult,
|
||||
) (*types.MsgReportDASResultResponse, error) {
|
||||
ctx := sdk.UnwrapSDKContext(goCtx)
|
||||
|
||||
sampler, err := sdk.ValAddressFromBech32(msg.Sampler)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, found := k.stakingKeeperRef.GetValidator(ctx, sampler); !found {
|
||||
return nil, stakingtypes.ErrNoValidatorFound
|
||||
}
|
||||
|
||||
if err := k.StoreNewDASResponse(ctx, msg.RequestID, sampler, msg.Results); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &types.MsgReportDASResultResponse{}, nil
|
||||
}
|
@ -1,169 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/module"
|
||||
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/0glabs/0g-chain/x/das/v1/client/cli"
|
||||
"github.com/0glabs/0g-chain/x/das/v1/keeper"
|
||||
"github.com/0glabs/0g-chain/x/das/v1/types"
|
||||
)
|
||||
|
||||
// consensusVersion defines the current x/council module consensus version.
|
||||
const consensusVersion = 1
|
||||
|
||||
// type check to ensure the interface is properly implemented
|
||||
var (
|
||||
_ module.AppModule = AppModule{}
|
||||
_ module.AppModuleBasic = AppModuleBasic{}
|
||||
// _ module.AppModuleSimulation = AppModule{}
|
||||
_ module.BeginBlockAppModule = AppModule{}
|
||||
_ module.EndBlockAppModule = AppModule{}
|
||||
)
|
||||
|
||||
// app module Basics object
|
||||
type AppModuleBasic struct{}
|
||||
|
||||
// Name returns the inflation module's name.
|
||||
func (AppModuleBasic) Name() string {
|
||||
return types.ModuleName
|
||||
}
|
||||
|
||||
// RegisterLegacyAminoCodec registers the inflation module's types on the given LegacyAmino codec.
|
||||
func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {}
|
||||
|
||||
// ConsensusVersion returns the consensus state-breaking version for the module.
|
||||
func (AppModuleBasic) ConsensusVersion() uint64 {
|
||||
return consensusVersion
|
||||
}
|
||||
|
||||
// RegisterInterfaces registers interfaces and implementations of the incentives
|
||||
// module.
|
||||
func (AppModuleBasic) RegisterInterfaces(interfaceRegistry codectypes.InterfaceRegistry) {
|
||||
types.RegisterInterfaces(interfaceRegistry)
|
||||
}
|
||||
|
||||
// DefaultGenesis returns default genesis state as raw bytes for the incentives
|
||||
// module.
|
||||
func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
|
||||
return cdc.MustMarshalJSON(types.DefaultGenesisState())
|
||||
}
|
||||
|
||||
// ValidateGenesis performs genesis state validation for the inflation module.
|
||||
func (b AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error {
|
||||
var genesisState types.GenesisState
|
||||
if err := cdc.UnmarshalJSON(bz, &genesisState); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
|
||||
}
|
||||
|
||||
return genesisState.Validate()
|
||||
}
|
||||
|
||||
// RegisterRESTRoutes performs a no-op as the inflation module doesn't expose REST
|
||||
// endpoints
|
||||
func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) {}
|
||||
|
||||
// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the inflation module.
|
||||
func (b AppModuleBasic) RegisterGRPCGatewayRoutes(c client.Context, serveMux *runtime.ServeMux) {
|
||||
if err := types.RegisterQueryHandlerClient(context.Background(), serveMux, types.NewQueryClient(c)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// GetTxCmd returns the root tx command for the inflation module.
|
||||
func (AppModuleBasic) GetTxCmd() *cobra.Command {
|
||||
return cli.GetTxCmd()
|
||||
}
|
||||
|
||||
// GetQueryCmd returns no root query command for the inflation module.
|
||||
func (AppModuleBasic) GetQueryCmd() *cobra.Command {
|
||||
return cli.GetQueryCmd()
|
||||
}
|
||||
|
||||
// ___________________________________________________________________________
|
||||
|
||||
// AppModule implements an application module for the inflation module.
|
||||
type AppModule struct {
|
||||
AppModuleBasic
|
||||
keeper keeper.Keeper
|
||||
}
|
||||
|
||||
// NewAppModule creates a new AppModule Object
|
||||
func NewAppModule(
|
||||
k keeper.Keeper,
|
||||
) AppModule {
|
||||
return AppModule{
|
||||
AppModuleBasic: AppModuleBasic{},
|
||||
keeper: k,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the inflation module's name.
|
||||
func (AppModule) Name() string {
|
||||
return types.ModuleName
|
||||
}
|
||||
|
||||
// RegisterInvariants registers the inflation module invariants.
|
||||
func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {}
|
||||
|
||||
// RegisterServices registers a gRPC query service to respond to the
|
||||
// module-specific gRPC queries.
|
||||
func (am AppModule) RegisterServices(cfg module.Configurator) {
|
||||
types.RegisterMsgServer(cfg.MsgServer(), am.keeper)
|
||||
types.RegisterQueryServer(cfg.QueryServer(), am.keeper)
|
||||
}
|
||||
|
||||
func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
|
||||
// am.keeper.BeginBlock(ctx, req)
|
||||
}
|
||||
|
||||
func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate {
|
||||
// am.keeper.EndBlock(ctx, req)
|
||||
return []abci.ValidatorUpdate{}
|
||||
}
|
||||
|
||||
// InitGenesis performs genesis initialization for the inflation module. It returns
|
||||
// no validator updates.
|
||||
func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
|
||||
var genesisState types.GenesisState
|
||||
|
||||
cdc.MustUnmarshalJSON(data, &genesisState)
|
||||
InitGenesis(ctx, am.keeper, genesisState)
|
||||
return []abci.ValidatorUpdate{}
|
||||
}
|
||||
|
||||
// ExportGenesis returns the exported genesis state as raw bytes for the inflation
|
||||
// module.
|
||||
func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
|
||||
gs := ExportGenesis(ctx, am.keeper)
|
||||
return cdc.MustMarshalJSON(gs)
|
||||
}
|
||||
|
||||
// ___________________________________________________________________________
|
||||
|
||||
// AppModuleSimulation functions
|
||||
|
||||
// GenerateGenesisState creates a randomized GenState of the inflation module.
|
||||
func (am AppModule) GenerateGenesisState(_ *module.SimulationState) {
|
||||
}
|
||||
|
||||
// RegisterStoreDecoder registers a decoder for inflation module's types.
|
||||
func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {
|
||||
}
|
||||
|
||||
// WeightedOperations doesn't return any inflation module operation.
|
||||
func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation {
|
||||
return []simtypes.WeightedOperation{}
|
||||
}
|
@ -1,47 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/msgservice"
|
||||
)
|
||||
|
||||
var (
|
||||
amino = codec.NewLegacyAmino()
|
||||
// ModuleCdc references the global evm module codec. Note, the codec should
|
||||
// ONLY be used in certain instances of tests and for JSON encoding.
|
||||
ModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
|
||||
|
||||
// AminoCdc is a amino codec created to support amino JSON compatible msgs.
|
||||
AminoCdc = codec.NewAminoCodec(amino)
|
||||
)
|
||||
|
||||
const (
|
||||
// Amino names
|
||||
requestDASName = "evmos/das/MsgRequestDAS"
|
||||
reportDASResultName = "evmos/das/MsgReportDASResult"
|
||||
)
|
||||
|
||||
// NOTE: This is required for the GetSignBytes function
|
||||
func init() {
|
||||
RegisterLegacyAminoCodec(amino)
|
||||
amino.Seal()
|
||||
}
|
||||
|
||||
// RegisterInterfaces register implementations
|
||||
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
|
||||
registry.RegisterImplementations(
|
||||
(*sdk.Msg)(nil),
|
||||
&MsgRequestDAS{},
|
||||
&MsgReportDASResult{},
|
||||
)
|
||||
|
||||
msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
|
||||
}
|
||||
|
||||
// RegisterLegacyAminoCodec required for EIP-712
|
||||
func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
|
||||
cdc.RegisterConcrete(&MsgRequestDAS{}, requestDASName, nil)
|
||||
cdc.RegisterConcrete(&MsgReportDASResult{}, reportDASResultName, nil)
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
package types
|
||||
|
||||
import errorsmod "cosmossdk.io/errors"
|
||||
|
||||
var (
|
||||
ErrUnknownRequest = errorsmod.Register(ModuleName, 0, "request not found")
|
||||
ErrInvalidGenesis = errorsmod.Register(ModuleName, 1, "invalid genesis")
|
||||
)
|
@ -1,11 +0,0 @@
|
||||
package types
|
||||
|
||||
// Module event types
|
||||
const (
|
||||
EventTypeDASRequest = "das_request"
|
||||
|
||||
AttributeKeyRequestID = "request_id"
|
||||
AttributeKeyStreamID = "stream_id"
|
||||
AttributeKeyBatchHeaderHash = "batch_header_hash"
|
||||
AttributeKeyNumBlobs = "num_blobs"
|
||||
)
|
@ -1,28 +0,0 @@
|
||||
package types
|
||||
|
||||
const (
|
||||
DefaultNextRequestID = 0
|
||||
)
|
||||
|
||||
// NewGenesisState returns a new genesis state object for the module.
|
||||
func NewGenesisState(nextRequestID uint64, requests []DASRequest, responses []DASResponse) *GenesisState {
|
||||
return &GenesisState{
|
||||
NextRequestID: nextRequestID,
|
||||
Requests: requests,
|
||||
Responses: responses,
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultGenesisState returns the default genesis state for the module.
|
||||
func DefaultGenesisState() *GenesisState {
|
||||
return NewGenesisState(
|
||||
DefaultNextRequestID,
|
||||
[]DASRequest{},
|
||||
[]DASResponse{},
|
||||
)
|
||||
}
|
||||
|
||||
// Validate performs basic validation of genesis data.
|
||||
func (gs GenesisState) Validate() error {
|
||||
return nil
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,10 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
type StakingKeeperRef interface {
|
||||
GetValidator(ctx sdk.Context, addr sdk.ValAddress) (validator stakingtypes.Validator, found bool)
|
||||
}
|
@ -1,44 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// ModuleName The name that will be used throughout the module
|
||||
ModuleName = "das"
|
||||
|
||||
// StoreKey Top level store key where all module items will be stored
|
||||
StoreKey = ModuleName
|
||||
)
|
||||
|
||||
// Key prefixes
|
||||
var (
|
||||
RequestKeyPrefix = []byte{0x00} // prefix for keys that store requests
|
||||
ResponseKeyPrefix = []byte{0x01} // prefix for keys that store responses
|
||||
|
||||
NextRequestIDKey = []byte{0x02}
|
||||
)
|
||||
|
||||
// GetKeyFromID returns the bytes to use as a key for a uint64 id
|
||||
func GetKeyFromID(id uint64) []byte {
|
||||
return Uint64ToBytes(id)
|
||||
}
|
||||
|
||||
func GetResponseKey(requestID uint64, sampler sdk.ValAddress) []byte {
|
||||
return append(GetKeyFromID(requestID), sampler.Bytes()...)
|
||||
}
|
||||
|
||||
// Uint64ToBytes converts a uint64 into fixed length bytes for use in store keys.
|
||||
func Uint64ToBytes(id uint64) []byte {
|
||||
bz := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(bz, uint64(id))
|
||||
return bz
|
||||
}
|
||||
|
||||
// Uint64FromBytes converts some fixed length bytes back into a uint64.
|
||||
func Uint64FromBytes(bz []byte) uint64 {
|
||||
return binary.BigEndian.Uint64(bz)
|
||||
}
|
@ -1,57 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
|
||||
errorsmod "cosmossdk.io/errors"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||
)
|
||||
|
||||
var _, _ sdk.Msg = &MsgRequestDAS{}, &MsgReportDASResult{}
|
||||
|
||||
func NewMsgRequestDAS(fromAddr sdk.AccAddress, streamID, hash string, numBlobs uint32) *MsgRequestDAS {
|
||||
return &MsgRequestDAS{
|
||||
Requester: fromAddr.String(),
|
||||
StreamID: streamID,
|
||||
BatchHeaderHash: hash,
|
||||
NumBlobs: numBlobs,
|
||||
}
|
||||
}
|
||||
|
||||
func (msg MsgRequestDAS) GetSigners() []sdk.AccAddress {
|
||||
from, err := sdk.AccAddressFromBech32(msg.Requester)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return []sdk.AccAddress{from}
|
||||
}
|
||||
|
||||
func (msg MsgRequestDAS) ValidateBasic() error {
|
||||
_, err := sdk.AccAddressFromBech32(msg.Requester)
|
||||
if err != nil {
|
||||
return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "Invalid requester account address (%s)", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (msg *MsgReportDASResult) GetSigners() []sdk.AccAddress {
|
||||
samplerValAddr, err := sdk.ValAddressFromBech32(msg.Sampler)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
accAddr, err := sdk.AccAddressFromHexUnsafe(hex.EncodeToString(samplerValAddr.Bytes()))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return []sdk.AccAddress{accAddr}
|
||||
}
|
||||
|
||||
func (msg *MsgReportDASResult) ValidateBasic() error {
|
||||
_, err := sdk.ValAddressFromBech32(msg.Sampler)
|
||||
if err != nil {
|
||||
return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "Invalid sampler validator address (%s)", err)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,511 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: zgc/das/v1/query.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
_ "github.com/cosmos/cosmos-proto"
|
||||
_ "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
_ "github.com/cosmos/gogoproto/gogoproto"
|
||||
grpc1 "github.com/cosmos/gogoproto/grpc"
|
||||
proto "github.com/cosmos/gogoproto/proto"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
_ "google.golang.org/protobuf/types/known/timestamppb"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type QueryNextRequestIDRequest struct {
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDRequest) Reset() { *m = QueryNextRequestIDRequest{} }
|
||||
func (m *QueryNextRequestIDRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryNextRequestIDRequest) ProtoMessage() {}
|
||||
func (*QueryNextRequestIDRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d404c1962bca645f, []int{0}
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_QueryNextRequestIDRequest.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryNextRequestIDRequest.Merge(m, src)
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryNextRequestIDRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryNextRequestIDRequest proto.InternalMessageInfo
|
||||
|
||||
type QueryNextRequestIDResponse struct {
|
||||
NextRequestID uint64 `protobuf:"varint,1,opt,name=next_request_id,json=nextRequestId,proto3" json:"next_request_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDResponse) Reset() { *m = QueryNextRequestIDResponse{} }
|
||||
func (m *QueryNextRequestIDResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*QueryNextRequestIDResponse) ProtoMessage() {}
|
||||
func (*QueryNextRequestIDResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d404c1962bca645f, []int{1}
|
||||
}
|
||||
func (m *QueryNextRequestIDResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *QueryNextRequestIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_QueryNextRequestIDResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *QueryNextRequestIDResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_QueryNextRequestIDResponse.Merge(m, src)
|
||||
}
|
||||
func (m *QueryNextRequestIDResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *QueryNextRequestIDResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_QueryNextRequestIDResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_QueryNextRequestIDResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*QueryNextRequestIDRequest)(nil), "zgc.das.v1.QueryNextRequestIDRequest")
|
||||
proto.RegisterType((*QueryNextRequestIDResponse)(nil), "zgc.das.v1.QueryNextRequestIDResponse")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("zgc/das/v1/query.proto", fileDescriptor_d404c1962bca645f) }
|
||||
|
||||
var fileDescriptor_d404c1962bca645f = []byte{
|
||||
// 334 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xbf, 0x4b, 0x03, 0x31,
|
||||
0x14, 0xc7, 0x2f, 0xa2, 0x0e, 0x81, 0x22, 0x1e, 0x22, 0xf6, 0x94, 0x54, 0x0b, 0xfe, 0x1a, 0x9a,
|
||||
0xb4, 0x3a, 0xb9, 0x16, 0x41, 0x5c, 0x04, 0x5d, 0x04, 0x97, 0x92, 0xbb, 0x8b, 0x69, 0xa0, 0x97,
|
||||
0x5c, 0x9b, 0x5c, 0x69, 0x3b, 0xba, 0xb8, 0x2a, 0xfe, 0x53, 0x1d, 0x0b, 0x2e, 0x4e, 0xa2, 0x57,
|
||||
0xff, 0x10, 0xe9, 0xe5, 0x0e, 0xad, 0x28, 0x6e, 0xef, 0xbd, 0xef, 0xf7, 0x7d, 0xf3, 0xe1, 0x05,
|
||||
0xae, 0x8f, 0x78, 0x40, 0x42, 0xaa, 0x49, 0xbf, 0x41, 0xba, 0x09, 0xeb, 0x0d, 0x71, 0xdc, 0x53,
|
||||
0x46, 0xb9, 0x70, 0xc4, 0x03, 0x1c, 0x52, 0x8d, 0xfb, 0x0d, 0xaf, 0x1c, 0x28, 0x1d, 0x29, 0xdd,
|
||||
0xca, 0x14, 0x62, 0x1b, 0x6b, 0xf3, 0xd6, 0xb8, 0xe2, 0xca, 0xce, 0x67, 0x55, 0x3e, 0xdd, 0xe2,
|
||||
0x4a, 0xf1, 0x0e, 0x23, 0x34, 0x16, 0x84, 0x4a, 0xa9, 0x0c, 0x35, 0x42, 0xc9, 0x62, 0xa7, 0x9c,
|
||||
0xab, 0x59, 0xe7, 0x27, 0xb7, 0x84, 0xca, 0xfc, 0x55, 0xaf, 0xf2, 0x53, 0x32, 0x22, 0x62, 0xda,
|
||||
0xd0, 0x28, 0xb6, 0x86, 0xea, 0x26, 0x2c, 0x5f, 0xce, 0x28, 0x2f, 0xd8, 0xc0, 0x5c, 0xb1, 0x6e,
|
||||
0xc2, 0xb4, 0x39, 0x3f, 0xcd, 0x8b, 0xea, 0x35, 0xf4, 0x7e, 0x13, 0x75, 0xac, 0xa4, 0x66, 0xee,
|
||||
0x09, 0x5c, 0x91, 0x6c, 0x60, 0x5a, 0x3d, 0xab, 0xb4, 0x44, 0xb8, 0x01, 0xb6, 0xc1, 0xc1, 0x62,
|
||||
0x73, 0x35, 0x7d, 0xad, 0x94, 0xe6, 0x77, 0x4a, 0xf2, 0x5b, 0x1b, 0x1e, 0x3d, 0x02, 0xb8, 0x94,
|
||||
0x25, 0xbb, 0xf7, 0x00, 0xce, 0x5b, 0xdd, 0x5d, 0xfc, 0x75, 0x29, 0xfc, 0x27, 0x9b, 0xb7, 0xf7,
|
||||
0x9f, 0xcd, 0x52, 0x56, 0xf7, 0xef, 0x9e, 0x3f, 0x9e, 0x16, 0x76, 0xdc, 0x0a, 0xa9, 0xf3, 0xa0,
|
||||
0x4d, 0x85, 0x2c, 0x3e, 0x67, 0x46, 0x54, 0xcb, 0xd9, 0x6b, 0x22, 0x6c, 0x9e, 0x8d, 0xdf, 0x91,
|
||||
0x33, 0x4e, 0x11, 0x98, 0xa4, 0x08, 0xbc, 0xa5, 0x08, 0x3c, 0x4c, 0x91, 0x33, 0x99, 0x22, 0xe7,
|
||||
0x65, 0x8a, 0x9c, 0x9b, 0x43, 0x2e, 0x4c, 0x3b, 0xf1, 0x71, 0xa0, 0x22, 0x52, 0xe7, 0x1d, 0xea,
|
||||
0x6b, 0x52, 0xe7, 0x35, 0x1b, 0x38, 0x28, 0x22, 0xcd, 0x30, 0x66, 0xda, 0x5f, 0xce, 0x2e, 0x7b,
|
||||
0xfc, 0x19, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x9e, 0xd6, 0x49, 0x0a, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// QueryClient is the client API for Query service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type QueryClient interface {
|
||||
NextRequestID(ctx context.Context, in *QueryNextRequestIDRequest, opts ...grpc.CallOption) (*QueryNextRequestIDResponse, error)
|
||||
}
|
||||
|
||||
type queryClient struct {
|
||||
cc grpc1.ClientConn
|
||||
}
|
||||
|
||||
func NewQueryClient(cc grpc1.ClientConn) QueryClient {
|
||||
return &queryClient{cc}
|
||||
}
|
||||
|
||||
func (c *queryClient) NextRequestID(ctx context.Context, in *QueryNextRequestIDRequest, opts ...grpc.CallOption) (*QueryNextRequestIDResponse, error) {
|
||||
out := new(QueryNextRequestIDResponse)
|
||||
err := c.cc.Invoke(ctx, "/zgc.das.v1.Query/NextRequestID", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// QueryServer is the server API for Query service.
|
||||
type QueryServer interface {
|
||||
NextRequestID(context.Context, *QueryNextRequestIDRequest) (*QueryNextRequestIDResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedQueryServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedQueryServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedQueryServer) NextRequestID(ctx context.Context, req *QueryNextRequestIDRequest) (*QueryNextRequestIDResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method NextRequestID not implemented")
|
||||
}
|
||||
|
||||
func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
|
||||
s.RegisterService(&_Query_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _Query_NextRequestID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(QueryNextRequestIDRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(QueryServer).NextRequestID(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/zgc.das.v1.Query/NextRequestID",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(QueryServer).NextRequestID(ctx, req.(*QueryNextRequestIDRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Query_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "zgc.das.v1.Query",
|
||||
HandlerType: (*QueryServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "NextRequestID",
|
||||
Handler: _Query_NextRequestID_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "zgc/das/v1/query.proto",
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDRequest) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.NextRequestID != 0 {
|
||||
i = encodeVarintQuery(dAtA, i, uint64(m.NextRequestID))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovQuery(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *QueryNextRequestIDResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.NextRequestID != 0 {
|
||||
n += 1 + sovQuery(uint64(m.NextRequestID))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovQuery(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozQuery(x uint64) (n int) {
|
||||
return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *QueryNextRequestIDRequest) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowQuery
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: QueryNextRequestIDRequest: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: QueryNextRequestIDRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipQuery(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthQuery
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *QueryNextRequestIDResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowQuery
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: QueryNextRequestIDResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: QueryNextRequestIDResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field NextRequestID", wireType)
|
||||
}
|
||||
m.NextRequestID = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowQuery
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.NextRequestID |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipQuery(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthQuery
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipQuery(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowQuery
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowQuery
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowQuery
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthQuery
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupQuery
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthQuery
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
@ -1,153 +0,0 @@
|
||||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
||||
// source: zgc/das/v1/query.proto
|
||||
|
||||
/*
|
||||
Package types is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package types
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = descriptor.ForMessage
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_Query_NextRequestID_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq QueryNextRequestIDRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := client.NextRequestID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Query_NextRequestID_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq QueryNextRequestIDRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := server.NextRequestID(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
|
||||
// UnaryRPC :call QueryServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
|
||||
func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
|
||||
|
||||
mux.Handle("GET", pattern_Query_NextRequestID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Query_NextRequestID_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Query_NextRequestID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterQueryHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterQueryHandler registers the http handlers for service Query to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
|
||||
}
|
||||
|
||||
// RegisterQueryHandlerClient registers the http handlers for service Query
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "QueryClient" to call the correct interceptors.
|
||||
func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
|
||||
|
||||
mux.Handle("GET", pattern_Query_NextRequestID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Query_NextRequestID_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Query_NextRequestID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Query_NextRequestID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"0gchain", "das", "v1", "next-request-id"}, "", runtime.AssumeColonVerbOpt(false)))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_Query_NextRequestID_0 = runtime.ForwardResponseMessage
|
||||
)
|
File diff suppressed because it is too large
Load Diff
@ -5,7 +5,6 @@ import (
|
||||
"time"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
"github.com/0glabs/0g-chain/x/validator-vesting/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
@ -26,7 +25,7 @@ func (s queryServer) CirculatingSupply(c context.Context, req *types.QueryCircul
|
||||
QueryCirculatingSupplyResponse, error) {
|
||||
ctx := sdk.UnwrapSDKContext(c)
|
||||
|
||||
totalSupply := s.bk.GetSupply(ctx, chaincfg.DisplayDenom).Amount
|
||||
totalSupply := s.bk.GetSupply(ctx, "ua0gi").Amount
|
||||
supplyInt := getCirculatingSupply(ctx.BlockTime(), totalSupply)
|
||||
return &types.QueryCirculatingSupplyResponse{
|
||||
Amount: supplyInt,
|
||||
@ -37,7 +36,7 @@ func (s queryServer) CirculatingSupply(c context.Context, req *types.QueryCircul
|
||||
func (s queryServer) TotalSupply(c context.Context, req *types.QueryTotalSupplyRequest) (*types.QueryTotalSupplyResponse, error) {
|
||||
ctx := sdk.UnwrapSDKContext(c)
|
||||
|
||||
totalSupply := s.bk.GetSupply(ctx, chaincfg.DisplayDenom).Amount
|
||||
totalSupply := s.bk.GetSupply(ctx, "ua0gi").Amount
|
||||
supplyInt := sdk.NewDecFromInt(totalSupply).Mul(sdk.MustNewDecFromStr("0.000001")).TruncateInt()
|
||||
return &types.QueryTotalSupplyResponse{
|
||||
Amount: supplyInt,
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
"github.com/0glabs/0g-chain/x/validator-vesting/keeper"
|
||||
"github.com/0glabs/0g-chain/x/validator-vesting/types"
|
||||
)
|
||||
@ -53,7 +52,7 @@ func TestGrpcQueryTestSuite(t *testing.T) {
|
||||
|
||||
func (suite *grpcQueryTestSuite) TestCirculatingSupply() {
|
||||
suite.Run("vesting period supply", func() {
|
||||
suite.bk.SetSupply(suite.ctx, chaincfg.DisplayDenom, sdkmath.NewInt(2_500_000_000_000))
|
||||
suite.bk.SetSupply(suite.ctx, "ua0gi", sdkmath.NewInt(2_500_000_000_000))
|
||||
lastVestingPeriod := time.Date(2022, 8, 5, 24, 0, 0, 0, time.UTC)
|
||||
queryClient := suite.queryClientWithBlockTime(lastVestingPeriod)
|
||||
res, err := queryClient.CirculatingSupply(context.Background(), &types.QueryCirculatingSupplyRequest{})
|
||||
@ -62,7 +61,7 @@ func (suite *grpcQueryTestSuite) TestCirculatingSupply() {
|
||||
})
|
||||
|
||||
suite.Run("supply after last vesting period", func() {
|
||||
suite.bk.SetSupply(suite.ctx, chaincfg.DisplayDenom, sdkmath.NewInt(100_000_000))
|
||||
suite.bk.SetSupply(suite.ctx, "ua0gi", sdkmath.NewInt(100_000_000))
|
||||
res, err := suite.queryClient.CirculatingSupply(context.Background(), &types.QueryCirculatingSupplyRequest{})
|
||||
suite.Require().NoError(err)
|
||||
suite.Require().Equal(sdkmath.NewInt(100), res.Amount)
|
||||
@ -70,7 +69,7 @@ func (suite *grpcQueryTestSuite) TestCirculatingSupply() {
|
||||
}
|
||||
|
||||
func (suite *grpcQueryTestSuite) TestTotalSupply() {
|
||||
suite.bk.SetSupply(suite.ctx, chaincfg.DisplayDenom, sdkmath.NewInt(100_000_000))
|
||||
suite.bk.SetSupply(suite.ctx, "ua0gi", sdkmath.NewInt(100_000_000))
|
||||
res, err := suite.queryClient.TotalSupply(context.Background(), &types.QueryTotalSupplyRequest{})
|
||||
suite.Require().NoError(err)
|
||||
suite.Require().Equal(sdkmath.NewInt(100), res.Amount)
|
||||
|
Loading…
Reference in New Issue
Block a user