Compare commits

...

221 Commits

Author SHA1 Message Date
0xroy
3681e1e1df
Merge branch 'dev' into dev 2024-11-20 06:08:05 -08:00
Solovyov1796
5051f75b52
Merge pull request #91 from Solovyov1796/dev2
fix unit test
2024-11-20 10:45:08 +08:00
Solovyov1796
5c8ced4b37 fix unit test 2 2024-11-19 18:21:52 +08:00
Solovyov1796
47b6341324 fix unit test 2024-11-19 18:20:43 +08:00
Solovyov1796
ae7d1ad2a7
Merge pull request #89 from Solovyov1796/dev2
add evm min gas prices decorator to eth ante handler chain
2024-11-18 13:30:04 +08:00
Solovyov1796
3e427e5bdd set min gas prices 2024-11-18 13:26:33 +08:00
Solovyov1796
9e1ec04010 add evm min gas prices decorator to eth ante handler chain 2024-11-18 12:11:18 +08:00
MiniFrenchBread
76eebc57c1
Merge pull request #84 from 0glabs/staking-precompile
feat: staking precompile
2024-11-11 13:42:13 +08:00
MiniFrenchBread
100bad3471 fix: solc evm version
2024-11-11 13:37:45 +08:00
MiniFrenchBread
605a71e826 test: tx
2024-11-04 19:01:53 +08:00
MiniFrenchBread
1602c96a40 test: query 2024-11-04 16:20:59 +08:00
MiniFrenchBread
759d08a6eb fix: app
2024-10-29 19:29:44 +08:00
Solovyov1796
f2abb98d6c
Merge pull request #85 from Solovyov1796/dev2
fix: bump cosmos-sdk to enable logger for store and pruning
2024-10-27 18:29:20 +08:00
Solovyov1796
77bfade203 fix: bump cosmos-sdk to enable logger for store and pruning 2024-10-27 02:03:26 +08:00
MiniFrenchBread
e308e44dd6 feat: staking precompile 2024-10-25 22:35:03 +08:00
0g-wh
58875c1bc9 update module version, clean github actions 2024-10-25 11:45:52 +08:00
Solovyov1796
c80874b0ea
Merge pull request #80 from Solovyov1796/new-local-dev
fix: bump ethermint to fix evm debug flag out of control
2024-10-23 22:40:15 +08:00
Solovyov1796
96e70d3cbd
Merge branch '0glabs:dev' into new-local-dev 2024-10-23 21:55:54 +08:00
Solovyov1796
f9ef0bac6e fix: bump ethermint to fix evm debug flag out of control 2024-10-23 21:55:03 +08:00
Solovyov1796
5f2089b4c5
Merge pull request #79 from Solovyov1796/new-local-dev
test: disable evm trace in localtestnet
2024-10-23 21:44:28 +08:00
Solovyov1796
a4b8d77411 test: disable evm trace in localtestnet 2024-10-23 20:18:42 +08:00
Solovyov1796
f5ce18dd5f
Merge pull request #77 from Solovyov1796/local-dev
bump ethermint
2024-10-23 17:48:35 +08:00
Solovyov1796
890e858558 disable full error trace in localtestnet 2024-10-23 14:40:37 +08:00
Solovyov1796
afacc89c8d bump ethermint 2024-10-23 14:24:03 +08:00
Solovyov1796
38764453a5
bump ethermint
2024-10-21 18:08:25 +08:00
Solovyov1796
a90bd43999
Merge pull request #76 from Solovyov1796/local-dev
update module versions
2024-10-16 23:35:52 +08:00
Solovyov1796
46d159a18c update module versions 2024-10-16 19:09:28 +08:00
0g-wh
840deea660 bump ethermint
2024-10-14 07:23:48 +00:00
0g-wh
6c3360f102 fix tests 2024-09-27 14:26:07 +00:00
0g-wh
80b2dacbc2 fix evm denom 2024-09-27 07:54:20 +00:00
0g-wh
1152537679 rename kava to 0g 2024-09-27 03:04:45 +00:00
0g-wh
5bd6ac39ee fix 2024-09-26 09:42:10 +00:00
0g-wh
e0fcd07a08 Merge branch 'dev' of github.com:0glabs/0g-chain into dev 2024-09-25 15:43:41 +00:00
0g-wh
70ac592012 fix 2024-09-25 15:35:37 +00:00
0g-wh
de22587a5b fix 2024-09-25 15:31:20 +00:00
MiniFrenchBread
0c02c27a9d feat: DASigners change params (#67)
* feat: use gov to manage dasigners params

* feat: evm precompile query func

* test: unit test

* feat: remove epoch and block height hard check

* feat: add params event
2024-09-25 15:23:46 +00:00
0g-wh
4409bfc996 upgrade to ethermint v3.0.3 2024-09-25 15:23:46 +00:00
0g-wh
723241f484 Dockerfile for node 2024-09-25 15:23:35 +00:00
Solovyov1796
84d1a89bec fix wrong path of proto 2024-09-25 15:23:35 +00:00
Solovyov1796
94ddf20305 fix test build break 2024-09-25 15:23:35 +00:00
Solovyov1796
4ebbb886bf add conuncil back 2024-09-25 15:22:54 +00:00
MiniFrenchBread
57943ec0e0 fix: missing designers (#62)
* fix: add dasigners back

* test: remove manually initialize genesis

* feat: generate all missing epochs on begin block; only panic on smaller block height

* chore: add logs, fix EpochBlocks
2024-09-25 15:22:42 +00:00
MiniFrenchBread
04ce67f6a9 fix: designers; test: designers, precompile (#59)
* test: dasigners test

* test: genesis

* fix: abci; test: abci

* test: types

* test: keeper test

* test: util

* test: dasigners precompile

* chore: remove log
2024-09-25 15:22:10 +00:00
0g-wh
500e66733d fix wasm static link (#57) 2024-09-25 15:21:53 +00:00
0g-wh
8b691e61f8 Update upload-release-assets.yml 2024-09-25 15:21:53 +00:00
0g-wh
a0bdd2a142 add ibcwasmtypes to upgrades.go 2024-09-25 15:21:53 +00:00
0g-wh
53dcea2867 clean code 2024-09-25 15:20:46 +00:00
aeryz
d31a599c60 feat: add 08-wasm module
Signed-off-by: aeryz <abdullaheryz@protonmail.com>
2024-09-25 15:20:46 +00:00
0g-wh
07cf4ad258 fix cmd/keys 2024-09-25 15:19:44 +00:00
0g-wh
cb4e6e006e fix review issues 2024-09-25 15:19:44 +00:00
0g-wh
0e37d518ec prepare upgrade 2024-09-25 15:18:37 +00:00
0g-wh
822e374be6 rebase to kava cosmos 0.47 upgrade
rename

rename

tidy

clean code
2024-09-25 15:18:12 +00:00
0g-wh
9ca8359202 add Upload Release Assets workflow (#49)
* Create upload-release-assets.yml
2024-09-25 15:15:00 +00:00
Solovyov1796
32bcc7f4e3 update gitignore 2024-09-25 15:15:00 +00:00
Solovyov1796
f50a429527 merge testnet script 2024-09-25 15:15:00 +00:00
0g-wh
8ff2277450 add cosmovisor init script 2024-09-25 15:14:49 +00:00
Solovyov1796
cdf029c87a use 0glabs' cometbft 2024-09-25 15:14:48 +00:00
0xsatoshi
5f9325c2a0 enable vesting msgs 2024-09-25 15:14:33 +00:00
0xsatoshi
5f4f1851cb fix 2024-09-25 15:14:33 +00:00
0xsatoshi
4c28427089 fix 2024-09-25 15:14:33 +00:00
MiniFrenchBread
0f40b721ee refactor: epoch quorum storage 2024-09-25 15:14:33 +00:00
MiniFrenchBread
ec3733a2c6 feat: getQuorumRow 2024-09-25 15:14:33 +00:00
Solovyov1796
8df7625ac1 keep the EthSecp256k1 from cosmos for compatible 2024-09-25 15:14:33 +00:00
Solovyov1796
31c96eeb93 recover "rename denoms" in 3 files 2024-09-25 15:14:33 +00:00
Solovyov1796
ac1af4ae92 use chaincfg.MakeCoinForGasDenom 2024-09-25 15:14:33 +00:00
Solovyov1796
0d54bb9202 custom inflation calculation function 2024-09-25 15:14:33 +00:00
MiniFrenchBread
73158cd738 chore: remove tmp output 2024-09-25 15:14:33 +00:00
MiniFrenchBread
73b7d800a3 fix: decimals 2024-09-25 15:14:33 +00:00
MiniFrenchBread
27ddc91956 refactor: delegator 2024-09-25 15:14:33 +00:00
MiniFrenchBread
9962b7b0db fix: localtestnet.sh 2024-09-25 15:14:33 +00:00
MiniFrenchBread
f415fb1332 tidy 2024-09-25 15:14:33 +00:00
MiniFrenchBread
28b9c07e02 merge testnet/v0.1.x 2024-09-25 15:14:13 +00:00
Solovyov1796
45b7920181 remove the EthSecp256k1 from cosmos 2024-09-25 15:12:33 +00:00
Solovyov1796
56d337df16 rename denoms 2024-09-25 15:12:33 +00:00
Solovyov1796
85059d734e fix unit test 2024-09-25 15:12:16 +00:00
Solovyov1796
6b4e8415da remove module's legacy code 2024-09-25 15:11:45 +00:00
Solovyov1796
77b817f9b8 recover go mod file 2024-09-25 15:11:45 +00:00
Solovyov1796
46378d6157 remove das module 2024-09-25 15:11:28 +00:00
MiniFrenchBread
d0721fd172 feat: add get functions 2024-09-25 15:11:03 +00:00
MiniFrenchBread
5e34f5b289 fix: da signers begin block 2024-09-25 15:11:03 +00:00
MiniFrenchBread
d6bca1b221 feat: max quorum num 2024-09-25 15:11:03 +00:00
MiniFrenchBread
8dc89ad08d fix: quorum 2024-09-25 15:11:03 +00:00
MiniFrenchBread
e4989f10cd feat: quorum 2024-09-25 15:11:03 +00:00
MiniFrenchBread
9839a244bf fix: defaultGenesis 2024-09-25 15:11:03 +00:00
MiniFrenchBread
c9043ca158 feat: update dasigners proto api 2024-09-25 15:11:03 +00:00
MiniFrenchBread
8d48dadb02 fix: dasigners module 2024-09-25 15:11:03 +00:00
MiniFrenchBread
c80be7bbf7 chore: dependency 2024-09-25 15:11:03 +00:00
MiniFrenchBread
17fa02b554 feat: precompile 2024-09-25 15:10:47 +00:00
Solovyov1796
e348bd3748 rename the app name showed in usage (#10) 2024-09-25 15:08:30 +00:00
Peter Zhang
f44d7cc94d update max validator count 2024-09-25 15:08:30 +00:00
Peter Zhang
0bfbd114c9 update checkout branch 2024-09-25 15:08:30 +00:00
Solovyov1796
483a939724 update init-genesis.sh for devnet and testnet 2024-09-25 15:08:30 +00:00
Solovyov1796
547b0057c7 fix unit test 2024-09-25 15:08:30 +00:00
0xsatoshi
1da9745903 fix 2024-09-25 15:08:11 +00:00
0xsatoshi
e952a4a705 fix 2024-09-25 15:08:11 +00:00
Solovyov1796
69a4a6298e update scripts 2024-09-25 15:08:01 +00:00
Solovyov1796
d05c2f9563 update env vars 2024-09-25 15:08:01 +00:00
Peter Zhang
82f54a1974 modify deploy script 2024-09-25 15:08:01 +00:00
Solovyov1796
3f1140dcd4 update 2024-09-25 15:08:01 +00:00
Solovyov1796
849c95d93e fix unit test for x 2024-09-25 15:08:01 +00:00
Solovyov1796
eee50a3f75 add scripts for devnet 2024-09-25 15:07:54 +00:00
Solovyov1796
1d2820a3b6 fix panic 2024-09-25 15:07:54 +00:00
Solovyov1796
950e4766d2 merge script from branch v0.1.0 2024-09-25 15:07:54 +00:00
Solovyov1796
91698d388f fix test 2024-09-25 15:07:54 +00:00
Solovyov1796
4cf57457a7 add 0g code 2024-09-25 15:07:54 +00:00
Solovyov1796
337f1c5cc8 rename kava 2024-09-25 15:07:33 +00:00
Solovyov1796
a437523ea2 add vrf 2024-09-25 15:06:53 +00:00
Solovyov1796
77ec52e16b revise file structure in cmd 2024-09-25 15:04:55 +00:00
Solovyov1796
b1365fb792 add chaincfg to save all configration of chain 2024-09-25 15:04:10 +00:00
Solovyov1796
d61f4e94fd update build file 2024-09-25 15:04:10 +00:00
Solovyov1796
8bc3b15c46 revise proto files 2024-09-25 15:03:21 +00:00
Solovyov1796
e8008c9a3a remove useless modules 2024-09-25 15:03:21 +00:00
Solovyov1796
28fa4b7993 rename go mod path 2024-09-25 15:00:59 +00:00
Peter Zhang
bd0acdbd4b add deploy scripts 2024-09-25 14:57:06 +00:00
Peter Zhang
7f62518464 add deploy scripts 2024-09-25 14:57:06 +00:00
Draco
0b4c5da294 Add v26 migrate docs (#1863)
* add migrate docs

* change date to TDB
2024-09-25 14:57:00 +00:00
Nick DeLuca
ad93042155 Use IAVL 0.20.x for v0.26.x release, update deps (#1862)
* revert back to iavl v1 to avoid hash changes on new modules like
we are seeing on the v0.24.x to v0.25.x upgrade block.  Also, add
replace statements for exp and rapid to match upstream cosmos-sdk

* fix sharding prune store logging and error return.

* add comment to clarify WithKeyTable usage
2024-09-25 14:56:40 +00:00
Draco
a7dd451e44 Add packet-forwarding store to upgrade (#1856) 2024-09-25 14:56:13 +00:00
Kevin Davis
c99879e9f7 add legacy rest removal notice (#1857) 2024-09-25 14:55:15 +00:00
Levi Schoen
820a676709 upgrade to iavl v1 (#1845) 2024-09-25 14:55:15 +00:00
drklee3
493ce0516f
feat: Add upgrade handler, fractional balances & reserve transfer (#1966)
Add upgrade handler
Migrates from x/evmutil to x/precisebank:
- Fractional balances
- Reserve funds
  - Mints or burns coins to ensure fractional balances are fully backed.

Initialize remainder if necessary to ensure valid state.
E2E test with fixed kvtool
2024-08-21 18:01:29 -07:00
drklee3
65d091d458
fix(x/precisebank): Avoid blocked addr error on SendCoinsFromAccountToModule (#2012) 2024-08-21 17:29:04 -07:00
Nick DeLuca
8023be0067
chore(nodejs): Update to active LTS v20 (#2011)
* chore(nodejs): Use active LTS v20 for nodejs

This updates nodejs to use the active LTS v20 from the maintenance LTS
v18.  This expands compatibility with packages, adds native support for
more features, etc.

In addition, the ci-seed-chain workflow was updated to use the
.tool-version file instead of hardcoded version.

* chore(hardhat): Update hardhat for nodejs 20 support
2024-08-20 11:50:03 -07:00
Draco
eaacd83de5
chore: update internal testnet kava version (#2010) 2024-08-19 14:11:55 -04:00
Draco
6862cde560
fix: revert protonet voting period to 10min and change internal testnet period to 7d (#2009) 2024-08-19 12:51:21 -04:00
Draco
b8e6e584b8
chore(ci): update internal testnet genesis and seed to support committee voting (#2008)
* chore(ci): update internal testnet genesis and seed to support committee voting

* chore(ci): update gov proposal voting period to 7 days

* chore: use auto gas calculation
2024-08-19 12:23:51 -04:00
Evgeniy Scherbina
27d63f157c
ci: dispatch run-rosetta-tests event to rosetta-kava (#2007) 2024-08-15 12:00:24 -04:00
Robert Pirtle
7aede3390d
ci: add semantic pull request title linting (#2006)
enforces following conventional commit standard in all PR titles
2024-08-14 10:22:46 -07:00
Robert Pirtle
49f7be8486
docs: update latest mainnet kava version (#2005) 2024-08-13 12:49:12 -07:00
Nick DeLuca
fbce24abef
chore(precisebank): Add queries to swagger (#2004)
This adds the precisebank protobuf generated swagger documentation to
the swagger combine configuration in order to be rendered in the
swagger.yaml file.
2024-08-13 12:32:18 -07:00
Nick DeLuca
7e50ce8142
chore: Add ethermint to swagger (#2002)
This adds the upstream ethermint swagger file to the proto-deps and adds
the swagger combine config to include it in the kava generated swagger.

Run `make proto-all` to update.
2024-08-13 07:34:59 -07:00
Nick DeLuca
ab3cf7c994
feat!(precompile): Add registry and genesis tests (#1999)
* feat!(precompile): Add registry and genesis tests

Based on evgeniy-scherbina's work, this adds a new precompile module
which defines a contract module with an example noop contract that
will be used for implementing test functions. In addition,
it defines a registry module that instantiates stateful precompile
contracts and associates them with an address in a global registry
defined in kava-labs/go-ethereum. See precompile/README.md for more
information.

The kava-labs/go-ethereum and kava-labs/ethermint replace statements
are updated to support these changes as well as an update to kvtool
which includes genesis state for the registry.NoopContractAddress and
initializes the contract's EthAccount with a non-zero sequence and
codehash set to keccak256(0x01), and sets the contract code to 0x01.
See tests/e2e/e2e_precompile_genesis_test.go for an overview of the
expected genesis state for an enabled precompile.

Co-authored-by: evgeniy-scherbina <evgeniy.shcherbina.es@gmail.com>

* chore: Precompile readme improvements

This fixes a typo (import -> important) and uses package terminology
instead of unclear module terminology.  This aligns best with golang
terminology where modules and packages are distinctly different and
modules are defined using go.mod.

* chore: Improve noop contract godoc

Add a more meaningful godoc where the noop contract is constructed.

* chore(e2e): Improve comments around query checks

Improve the clarity of comments around where the error is checked for
accounts and why it is not checked directly.

In addition, improve comment on why both grpc and rpc code is fetched
and where they are used.

---------

Co-authored-by: evgeniy-scherbina <evgeniy.shcherbina.es@gmail.com>
2024-08-09 09:55:31 -07:00
cuiweiyuan
33932e8ad6
chore: fix some function names (#1998)
Signed-off-by: cuiweiyuan <cuiweiyuan@aliyun.com.>
2024-08-08 06:38:35 -07:00
Nick DeLuca
ab10ce628c
chore(lint): Disable funlen for test functions (#1993)
This adds a regular expression that matches `func Test...` or
`func (suite *Suite) Test...` style functions and disables the length
check. An example from e2e tests that failed lint:

`func (suite *IntegrationTestSuite) TestEip712BasicMessageAuthorization()`
2024-08-07 13:25:18 -07:00
sesheffield
edf2935f31
chore(prs): add codeowners (#1995) 2024-08-07 15:50:24 -04:00
Nick DeLuca
a4583be44b
fix(docker): Ignore local build and lint cache (#1994)
These should not be replicated to docker contexts as they are local to
the build host.  In addition, the golangci-lint currently doesn't assume
the host user nor add other group read permissions when writing files,
so this causes permission errors when other docker processes attempt to
copy the files.
2024-08-07 11:20:17 -07:00
sesheffield
3c4d91a443
chore(linter): enable gosec on golangci linter and reformat config file (#1983)
add in gosec to the golangci.yml config file and reorder the linters-settings to be in alphabetical order
2024-08-07 12:24:27 -04:00
Nick DeLuca
774e2efce8
chore(lint): Update local make lint to match CI (#1991)
* chore(lint): Update local make lint to match CI

This updates the `make lint` behavior to match the command being
run in CI.

In addition, we refactor the make lint command to use docker in order to
to ease cross platform install, use a local build cache that integrates
with make clean, use the same version file, and encapsulate the logic in
its own make include.

We also remove the old lint logic as to not introduce a duplicate target
and avoid confusion from a difference in behavior.

While solutions like act for running github actions locally work, it is
not as straightforward, is slower, and uses the local git repository
instead of a clone (though I am not sure how the checkout step works
within act).

* fix(lint): Use shared timeout with .golangci.yml

Instead of using a local and different timeout in the lint makefile
target we can rely on golangci to load this configuration from
.golangci.yml instead and share this setting with CI.

* fix(lint): Fix golangci-lint cache mount path

This uses the correct cache dir default of ~/.cache enabling use
of cache between lint calls.

* fix(lint): Fix lint caching

This includes a couple of fixes: 1) it adds support for full caching of go
mod and go build, speeding up the lint process quite a bit.  And 2) does
not mix lint cache with make clean files -- the docker container creates
root owned files that cause make clean to error and we choose not to
require make clean to run with higher permissions.  The cache must be
deleted manually.
2024-08-05 10:13:17 -07:00
Nick DeLuca
272f82ec99
chore(lint): Enable localmodule for import linter (#1989)
We use three sections through-out the codebase -- standard, default, and
localmodule.  This change updates the linter to enforce this pattern as
files are added or modified.
2024-08-02 12:08:38 -07:00
Nick DeLuca
e198eeb3b4
fix(e2e): Use docker compose V2 for kvtool and Makefile (#1990)
* chore(Makefile): Migrate to docker compose v2

Use V2 `docker compose` instead of V1 `docker-compose`

* chore(kvtool): Update to latest master commit
2024-08-02 10:45:57 -07:00
drklee3
bbfaa54ddf
chore(deps): Bump cometbft to v0.37.9-kava.1 (#1988)
This resolves ASA-2024-008. Patched in 0.37.7 but that version has a breaking change which was reverted in 0.37.8.

The replace for golang.org/x/exp prevents a breaking change in the slices package from causing a compile error with gogoproto
2024-08-02 09:27:28 -07:00
dependabot[bot]
4e66a56208
chore(deps-dev): bump undici from 5.22.1 to 5.28.4 in /contracts (#1925)
Bumps [undici](https://github.com/nodejs/undici) from 5.22.1 to 5.28.4.
- [Release notes](https://github.com/nodejs/undici/releases)
- [Commits](https://github.com/nodejs/undici/compare/v5.22.1...v5.28.4)

---
updated-dependencies:
- dependency-name: undici
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-31 17:24:20 -07:00
dependabot[bot]
9c629ad113
chore(deps-dev): bump braces from 3.0.2 to 3.0.3 in /contracts (#1941)
Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3.
- [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md)
- [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3)

---
updated-dependencies:
- dependency-name: braces
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-31 17:17:25 -07:00
sesheffield
b0d737d354
chore(linter): update Golang linter configuration (#1977)
* update golinter + add go sec

* add golangci.yml
Co-authored-by: @faddat jacobgadikian@gmail.com

* update

* update

* fix release version

* remove sec, update from pr comments, cleanup golangci.yml to not break on master

* remove @faddat, not valid codeowner

* remove unnecessary make command

* remove incorrectly named golangci.yml file

* add --new-from-rev

* use master instead of main

* remove extra echo

* set the exports properly

* add setup go to work with act

* add some docs to golangci linter

* test new-from-rev

* enable more linters, but app.go back

* verify issues-exit-code being gone

* put it back

* enable more linters

* remove exclusions
2024-07-31 16:23:44 -04:00
dependabot[bot]
a8df31b31a
chore(deps): bump github.com/btcsuite/btcd from 0.23.4 to 0.24.0 (#1900)
* chore(deps): bump github.com/btcsuite/btcd from 0.23.4 to 0.24.0

Bumps [github.com/btcsuite/btcd](https://github.com/btcsuite/btcd) from 0.23.4 to 0.24.0.
- [Release notes](https://github.com/btcsuite/btcd/releases)
- [Changelog](https://github.com/btcsuite/btcd/blob/master/CHANGES)
- [Commits](https://github.com/btcsuite/btcd/compare/v0.23.4...v0.24.0)

---
updated-dependencies:
- dependency-name: github.com/btcsuite/btcd
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore: bump github.com/btcsuite/btcd in e2e-ibc

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: drklee3 <derrick@dlee.dev>
2024-07-31 12:52:55 -07:00
dependabot[bot]
6243944db6
chore(deps): bump github.com/hashicorp/go-getter from 1.7.1 to 1.7.5 (#1953)
* chore(deps): bump github.com/hashicorp/go-getter from 1.7.1 to 1.7.5

Bumps [github.com/hashicorp/go-getter](https://github.com/hashicorp/go-getter) from 1.7.1 to 1.7.5.
- [Release notes](https://github.com/hashicorp/go-getter/releases)
- [Changelog](https://github.com/hashicorp/go-getter/blob/main/.goreleaser.yml)
- [Commits](https://github.com/hashicorp/go-getter/compare/v1.7.1...v1.7.5)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/go-getter
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore: bump github.com/hashicorp/go-getter from 1.7.1 to 1.7.5 in e2e-ibc

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: drklee3 <derrick@dlee.dev>
2024-07-31 10:27:46 -07:00
sesheffield
7f339d20ca
fix(insolvency) kava lend insolvency check bug implementation (#1982)
* add additional tests that attempt to borrow funds from the insolvent market (should fail), and attempt to borrow funds from the non-insolvent market (it will fail, but shouldn't). The non-insolvent market should continue to process borrows

* remove unused code

* make tests less specific for string contains

* add new get total reserves for denoms functionality

* start utilizing GetTotalReservesForDenoms in ValidateBorrow

* update tests for Borrow to not fail when borrowing from an insolvent market

* use get total reserves in GetTotalReservesForDenoms for reusability

* refactor GetTotalReservesForDenoms to GetTotalReservesByCoinDenoms for more clarity

* change the structure for new and old tests and add more verbosity for other tests

* remove print

* remove unneeded code

* add paren

* adjust structure again after initial PR

* remove duplicate test case with invalid test name, and update to use error contains in places where it was validating if true for strings contains

* no need for keeper method
2024-07-30 13:08:48 -04:00
sesheffield
916ec6d30c
test(insolvency): add tests for Kava lend insolvency check (#1981)
* add additional tests that attempt to borrow funds from the insolvent market (should fail), and attempt to borrow funds from the non-insolvent market (it will fail, but shouldn't). The non-insolvent market should continue to process borrows

* remove unused code

* make tests less specific for string contains

* change the structure for new and old tests and add more verbosity for other tests

* remove print

* remove unneeded code

* add paren

* remove duplicate test case with invalid test name, and update to use error contains in places where it was validating if true for strings contains

---------

Co-authored-by: Sam Sheffield <sam.sheffield@kavalabs.io>
2024-07-29 20:51:08 -04:00
Nick DeLuca
b4c04656ab
docs(x/precisebank): Add spec for logic (#1969) 2024-07-29 09:42:17 -07:00
drklee3
837e57ec2e
docs(x/evmutil): Remove akava and evmbankkeeper from spec (#1968) 2024-07-26 14:01:53 -07:00
drklee3
5f802fcfbd
feat(x/precisebank): Emit coin_spent and coin_received events (#1978) 2024-07-26 13:05:49 -07:00
riyueguang
f229afce1a
chore: fix some comments (#1980)
Signed-off-by: riyueguang <rustruby@outlook.com>
2024-07-26 12:38:06 -07:00
drklee3
608f70b20a
feat: Add gRPC query for remainder and account fractional balance (#1971) 2024-07-25 13:36:36 -07:00
Evgeniy Scherbina
74f76d125c
Upgrade opendb (#1972) 2024-07-19 15:44:34 -04:00
drklee3
3853e276a6
feat(x/precisebank): Add query service with TotalFractionalBalances (#1970)
Add query service to precisebank, mostly for e2e test purposes in #1966
Also fix client grpc codec
2024-07-19 10:24:23 -07:00
Evgeniy Scherbina
7aef2f09e9
Upgrade ethermint and opendb (#1965) 2024-07-15 17:45:49 -04:00
Evgeniy Scherbina
58d7c89f8e
Replace opendb package from kava with generic opendb repo (#1959)
* Upgrade ethermint

* Remove opendb package from kava and add custom dbOpener function

* Open metadata.db with custom opendb function
2024-07-11 09:23:31 -04:00
drklee3
d2d661276e
feat: Use x/precisebank for x/evm keeper (#1960)
Replace x/evmutil EvmBankKeeper usage for x/evm
2024-07-10 14:20:12 -07:00
drklee3
9de9de671e
feat(x/precisebank): Display 0 reserve balance to module consumers (#1958)
The module reserve represents fractional balances, so it should be hidden from consumers to avoid a misleading total balance that double-counts the fractional balances. This modifies GetBalance() and SpendableCoin() to always return zero coins when fetching the reserve address balance for fractional amounts.
2024-07-10 11:14:17 -07:00
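A minimal sketch of the hiding behavior described in #1958 above, using hypothetical names (the real keeper wraps x/bank and works with sdk.Coin values rather than the plain integers used here):

```go
package main

import "fmt"

// getBalance sketches the behavior described above: any query for the
// reserve address in the extended denom reports zero, so the reserve's
// backing funds are never double-counted by module consumers.
// Names and types are illustrative assumptions, not the real keeper API.
func getBalance(balances map[string]map[string]int64, addr, denom string) int64 {
	const (
		reserveAddr   = "reserve-module-account" // hypothetical reserve account
		extendedDenom = "akava"
	)
	if addr == reserveAddr && denom == extendedDenom {
		return 0
	}
	return balances[addr][denom]
}

func main() {
	balances := map[string]map[string]int64{
		"reserve-module-account": {"akava": 1_500_000},
		"user":                   {"akava": 250_000},
	}
	fmt.Println(getBalance(balances, "reserve-module-account", "akava")) // 0
	fmt.Println(getBalance(balances, "user", "akava"))                   // 250000
}
```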
drklee3
ce6aac3a72
refactor(x/precisebank): Replace FractionalAmount wrapper with func (#1961)
Removal of unnecessary wrapper type, along with using conversionFactor-1 instead of maxFractionalAmount
2024-07-09 15:33:31 -07:00
drklee3
23ce7d8169
feat(x/precisebank): Return full balances in GetBalance(), add SpendableCoin method (#1957)
Change GetBalance() to return full balances instead of spendable to align behavior with x/bank. Add SpendableCoin() method with support for akava for use in x/evm.
2024-06-28 18:06:48 -07:00
drklee3
60a8073574
feat(x/precisebank): Emit events for send/mint/burn (#1955)
Emits the **total** akava amount for both ukava and akava send/mint/burns. If both akava and ukava are sent (not possible via x/evm or cosmos messages, but still an edge case), then the sum is emitted. No other denoms are emitted by x/precisebank as they will be emitted by the underlying x/bank.
2024-06-27 19:40:17 -07:00
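A sketch of the amount arithmetic described in #1955 above, assuming the usual kava convention of 10^12 akava per ukava; names are illustrative, not the module's API:

```go
package main

import (
	"fmt"
	"math/big"
)

// conversionFactor is the assumed number of akava per ukava (10^12).
var conversionFactor = big.NewInt(1_000_000_000_000)

// totalAkava returns the single combined akava amount that would be emitted
// for an operation touching both ukava and akava, per the behavior above.
func totalAkava(ukava, akava *big.Int) *big.Int {
	total := new(big.Int).Mul(ukava, conversionFactor)
	return total.Add(total, akava)
}

func main() {
	// 3 ukava + 450 akava -> 3000000000450 akava emitted in the event.
	fmt.Println(totalAkava(big.NewInt(3), big.NewInt(450)))
}
```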
Robert Pirtle
2d07988994
e2e-ibc: add ERC20 convert to coin & IBC test (#1950)
* generate erc20 golang interface

* write interchain test that deploys ERC20

* enable deployed erc20 as a conversion pair

* convert erc20 to sdk coin!

* refactor: move RandomMnemonic() to util

* erc20 -> cosmos coin -> ibc e2e test

* add NewEvmSignerFromMnemonic to util

* ci: update ibc-test cache dependency list

* fix ci dependencies
2024-06-24 14:55:40 -07:00
Paul Downing
6a9eda8634
bump deploy version for internal-testnet (#1952) 2024-06-24 11:33:37 -06:00
Paul Downing
4788c064bf
ci: add native wbtc to internal-testnet evm params proposal (#1951)
* add native wbtc to internal-testnet evm params proposal

* Update seed-internal-testnet.sh

remove leading whitespace
2024-06-24 11:12:18 -06:00
drklee3
1743cf5275
fix(x/precisebank): Ensure exact reserve balance on integer carry when minting (#1932)
Fix reserve minting an extra coin when the recipient module both carries fractional over to integer balance AND remainder is insufficient. Adjusts fractional carry to simply send from reserve, instead of doing an additional mint. Add invariant to ensure reserve matches exactly with fractional balances + remainder, failing on both insufficient and excess funds.
2024-06-20 15:20:13 -07:00
rustco
9aef8e4971
chore: fix mismatched method comments (#1949)
Signed-off-by: rustco <ruster@111.com>
2024-06-20 15:09:21 -07:00
drklee3
38230d35e3
feat(x/precisebank): Implement BurnCoins (#1934)
Implement & test BurnCoins method
2024-06-20 15:02:23 -07:00
Paul Downing
af5eea690b
update internal-testnet verison to latest commit (#1948) 2024-06-18 13:57:22 -06:00
Paul Downing
1c1db357f5
update internal testnet wbtc contract config (#1947) 2024-06-18 13:33:33 -06:00
drklee3
409841c79c
feat(x/precisebank): Implement SendCoins (#1923)
Implements methods SendCoins, SendCoinsFromModuleToAccount, SendCoinsFromAccountToModule
2024-06-17 10:53:41 -07:00
Robert Pirtle
4c3f6533a0 ci: bump internal testnet version 2024-06-14 14:04:38 -07:00
Robert Pirtle
e1bd6ffa2f
ci: prefer checkout action to manual pull (#1945)
for internal testnet deployment, record the desired deployment version
as an action variable that can be used by the checkout action instead of
using manual pull & checkout commands
2024-06-14 13:15:45 -07:00
Paul Downing
5b0e7c8c58
bump internal testnet version (#1944) 2024-06-13 18:58:59 -06:00
Paul Downing
8d85c1ae1e
Update genesis.json (#1943)
* Update genesis.json

* align native wbtc naming conventions

* fix testnet native wbtc naming in genesis

* alphabetically order denoms for internal-testnet genesis
2024-06-13 17:54:39 -06:00
todaymoon
80f2370d68
chore: make function comments match function names (#1935)
Signed-off-by: todaymoon <csgcgl@foxmail.com>
2024-06-13 12:35:22 -07:00
Paul Downing
16233d6031
Update KAVA.VERSION internal-testnet (#1942) 2024-06-12 14:54:31 -06:00
Nick DeLuca
828f17897e
use step output directly instead of fetching more than once (#1940)
Fix issue in finding ref
2024-06-12 12:45:29 -07:00
Paul Downing
a79d852d1c
Update KAVA.VERSION on internal-testnet (#1938)
- use most recent commit to deploy to internal-testnet
2024-06-12 12:24:02 -06:00
Paul Downing
0306bec0ae
bump internal-testnet VERSION and genesis file for wbtc config (#1937)
* bump internal-testnet VERSION and genesis file for wbtc config

* Fix EOF on validate genesis by adding missing modules; fix validation by fixing gov params; update total escrow to default

* bump version for latest genesis

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2024-06-12 10:43:15 -06:00
Paul Downing
5c51530b8e
add new native wbtc contract and seeds to internal-testnet (#1933)
## What Changes
- add a native `wbtc` contract to internal-testnet for testing
- seed the dev wallet and some e2e test wallets with funds for this new contract
2024-06-07 13:26:24 -06:00
Robert Pirtle
21dc0e21b3
ci: don't lint on release tag push (#1930)
the release tag CI is run when semantic versioned tags are pushed.
it is presumed that the commit and/or PR to the release branch being
tagged has already passed the lints.

this gets around Github Actions CI running check-proto-breaking-remote
which compares the pushed commit against _master_ (not the previous release)
2024-06-04 13:02:13 -07:00
zoupingshi
8d07d9cb3b
chore: fix some function names (#1929)
Signed-off-by: zoupingshi <hellocatty@tom.com>
2024-05-31 07:15:15 -07:00
Robert Pirtle
e7cc89a642
deps: use cosmos-sdk v0.47.10-iavl-v1-kava.1 (#1926)
previously, v0.47.10-kava.2 used iavl v1, but this version will be
retracted because that branch & tag should only be used for iavl v0.

this sdk version is the same as v0.47.10-kava.2, but also includes a bug
fix to the initial iavl version used when adding new modules
see https://github.com/Kava-Labs/cosmos-sdk/pull/545
2024-05-28 16:03:47 -07:00
Robert Pirtle
2e8c7ce337
feat(cli): add iavlviewer command (#1922)
* port iavlviewer to kava v0.26.x to debug app hash

* add hash subcommand to iavlviewer

additionally, use better error handling

* update changelog

* separate iavlviewer command into subcommands

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2024-05-28 11:00:28 -07:00
drklee3
110adcab2c
feat(x/precisebank): Implement MintCoins (#1920)
Implement MintCoins method that matches x/bank MintCoins validation behavior
2024-05-24 12:03:09 -07:00
Ruaridh
3d5f5902b8
chore(docs): update security contact (#1921)
* update readme

* update comment in ERC20 contract

* Revert "update comment in ERC20 contract"

This reverts commit c50a80d83d936ade7df2c82482717432d6c83db8.
2024-05-24 09:38:36 -07:00
drklee3
4cf41d18c2
feat(x/precisebank): Implement GetBalance (#1916)
Implement GetBalance for extended balances which passes through to `x/bank` for non-extended denoms. This diverges from `x/evmutil` behavior which will panic on non-"akava" calls.

Add bank / account keeper mocks for testing, with mockery config for [mockery package setup](https://vektra.github.io/mockery/latest/migrating_to_packages/)
2024-05-21 14:11:13 -07:00
drklee3
dbc3ad7fd2
feat(x/precisebank): Implement ExportGenesis (#1915) 2024-05-20 09:50:31 -07:00
Uditya Kumar
7990021431
Update README.md (#1919) 2024-05-17 11:54:09 -07:00
Uditya Kumar
fa33947496
docs: Fix README link to Run Validator Node docs 2024-05-17 09:46:36 -07:00
drklee3
4ff43eb270
feat(x/precisebank): Add keeper methods for store (#1912)
- Add store methods to get/set/delete/etc account fractional balances & remainder amount
- Add invariants to ensure stored state is correct
2024-05-16 15:30:31 -07:00
largemouth
d66b7d2705
chore: fix some typos (#1913)
Signed-off-by: largemouth <largemouth@aliyun.com>
2024-05-16 13:27:48 -07:00
drklee3
025b7b2cdb
feat(x/precisebank): Add remainder amount to genesis (#1911)
- Validate total fractional amounts in genesis type
- Validate against fractional balances such that `(sum(balances) + remainder) % conversionFactor == 0`
- Add new utility type `SplitBalance` for splitting up full balances into each
2024-05-15 14:07:24 -07:00
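The divisibility invariant quoted in #1911 above can be written out directly; a minimal sketch, assuming a conversion factor of 10^12 (akava per ukava) and hypothetical helper names:

```go
package main

import (
	"fmt"
	"math/big"
)

// conversionFactor is the assumed fractional-units-per-integer-unit ratio.
var conversionFactor = big.NewInt(1_000_000_000_000)

// validateGenesisRemainder checks the invariant from the PR description:
// (sum(fractional balances) + remainder) % conversionFactor == 0.
func validateGenesisRemainder(balances []*big.Int, remainder *big.Int) error {
	sum := new(big.Int).Set(remainder)
	for _, b := range balances {
		sum.Add(sum, b)
	}
	if new(big.Int).Mod(sum, conversionFactor).Sign() != 0 {
		return fmt.Errorf("fractional balances + remainder = %s is not a multiple of %s", sum, conversionFactor)
	}
	return nil
}

func main() {
	balances := []*big.Int{big.NewInt(700_000_000_000), big.NewInt(500_000_000_000)}
	fmt.Println(validateGenesisRemainder(balances, big.NewInt(800_000_000_000))) // <nil>
	fmt.Println(validateGenesisRemainder(balances, big.NewInt(1)))               // error
}
```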
drklee3
94914d4ca1
feat(x/precisebank): Add FractionalBalance types (#1907)
- Add necessary types to track account fractional balances.
- Add FractionalBalance type to genesis
2024-05-13 14:16:05 -07:00
drklee3
3c53e72220
feat: Add x/precisebank module basic setup (#1906)
- Add initial setup and empty genesis type for x/precisebank
- Basic tests with mostly empty values, to be filled out with additional implementation
2024-05-10 09:30:28 -07:00
Robert Pirtle
871e26670c
chore(rocksdb): bump deps for rocksdb v8.10.0 (#1903) 2024-05-09 09:56:18 -07:00
Robert Pirtle
da2f835bf7
docs: update README for v0.26.0 (#1904) 2024-05-08 12:59:10 -07:00
Robert Pirtle
6a7fd4c8bd
test(e2e-ibc): downgrade to ibc v7 for ibc tests (#1901)
* downgrade to ibc v7 for ibc tests

* add conformance test (does not pass consistently)

* limit number of nodes for more consistent passing

* update to upstream v7 branch of interchaintest

also, remove unnecessary go.mod replace statements

* better names for int pointers
2024-05-07 13:15:38 -07:00
Robert Pirtle
f72b628b71
ci: extract separate rocksdb base image (#1898)
* docker: separate rocksdb base image from build

* ci: inject go build cache for docker img builds
2024-05-02 10:36:49 -07:00
Robert Pirtle
3e877aca88
test: expose evm and add query to EVM (#1892) 2024-04-29 10:13:28 -07:00
forcedebug
360f21f9f8
Fix mismatched method names in comments (#1890)
Signed-off-by: forcedebug <forcedebug@outlook.com>
2024-04-23 14:44:04 -07:00
Robert Pirtle
d981070ede
test: add packet-forwarding middleware e2e test (#1883)
* setup interchaintest IBC test

* e2e test of packet forwarding middleware

* rename interchain -> e2e-ibc & add make cmd

* add CI step that runs e2e-ibc tests

* use current branch for docker image in e2e-ibc
2024-04-19 12:35:13 -07:00
careworry
346f4be683
chore: fix some typos in comments (#1881)
Signed-off-by: careworry <worrycare@outlook.com>
2024-04-18 06:56:52 -07:00
CoolCu
1b6f1468ec
Fix some typos in comments (#1878)
Signed-off-by: CoolCu <coolcui@qq.com>
2024-04-16 11:54:09 -07:00
Robert Pirtle
72e8641c8d
build: inject brew deps for MacOS rocksdb build (#1812)
* build: inject brew deps for MacOS rocksdb build

* configure make build-rocksdb-brew
2024-04-08 17:00:07 -07:00
dependabot[bot]
ac2e46f91e
chore(deps-dev): bump follow-redirects in /contracts (#1850)
Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.15.2 to 1.15.6.
- [Release notes](https://github.com/follow-redirects/follow-redirects/releases)
- [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.15.2...v1.15.6)

---
updated-dependencies:
- dependency-name: follow-redirects
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-08 16:59:09 -07:00
Evgeniy Scherbina
4686a2a3e9
Fix typo in seed-internal-testnet script (#1875) 2024-04-05 13:30:56 -04:00
Nick DeLuca
543417c01f
bump deps (#1870) 2024-04-05 07:15:13 -07:00
frameflare
41b79e44af
chore: remove repetitive words (#1869)
Signed-off-by: frameflare <yangzenghua@outlook.com>
2024-04-05 07:13:40 -07:00
alex
0ea92335de
fix:paramaters->paramaters (#1796)
Fix misspellings across docs & comments
2024-04-05 07:02:52 -07:00
Adam Robert Turman
2a93c41fcc
Internal testnet: include EVM contracts & funds for remaining bep3 denoms (#1868)
* include remaining bep3 denoms

* typos
2024-04-04 12:21:05 -05:00
Robert Pirtle
3033529d9f
ci: start all internal testnet regardless of state (#1866)
even if the nodes are not in standby, target them for the start job in
the internal testnet deployment CI
2024-03-29 11:38:06 -07:00
Adam Robert Turman
198b620cb4
Add "axlBNB" to internal testnet (#1860)
* - add contract address for axlBNB
- seed EVM wallets with axlBNB

* update proposal to include new contract address

* Feedback

Co-authored-by: Ruaridh <rhuairahrighairidh@users.noreply.github.com>

* reorder coins in alphabetical order

* clean up

---------

Co-authored-by: Ruaridh <rhuairahrighairidh@users.noreply.github.com>
2024-03-28 10:56:38 -05:00
Draco
d3233d65d5
bep3 conversion msg server tests (#1859) 2024-03-27 16:52:15 -07:00
Nick DeLuca
6ea518960a
Optimize CDP Begin Blocker (#1822)
* optimize cdp begin blocker by removing unnecessary checks, reusing data
and prefix stores in loops, and reducing number of repeated calculations

* fix panic for new cdp types if both previous accrual time and global
interest factor are not set

* do not touch global interest factor if no CDP's exist; revert to panic
if global interest factor is not found since this is an unreachable
state by normal keeper operation -- it can only be reached if store
is modified outside of public interface and normal operation
2024-03-26 13:06:26 -07:00
Nick DeLuca
673790465d
Optimize Pricefeed EndBlocker (#1851)
* optimize pricefeed endblocker to iterate all markets only once to remove
overhead of opening and closing iterator for each market individually.
In addition, extend tests to cover 100% of abci and price updating
behavior.

* use test cases that can't be confused with mean to ensure median is
always used
2024-03-26 13:05:52 -07:00
Draco
3afb656d1f
Implement bep3 evm native conversion logic (#1848)
* Implement bep3 evm native conversion logic

* Update changelog

* Fix indentation

* Add bep3 conversion keeper tests

* make DefaultBEP3ConversionDenoms private

* refactor bep3 conversion

* update bep3 tests to cover all bep3 assets

* minor refactor
2024-03-25 13:43:31 -04:00
Draco
969614d555
Bump cosmos-sdk to v0.47.10-kava.2 with iavl v1 support (#1846)
* Bump to cosmos-sdk to v0.47.10-kava.2

* Update go version in dockerfile to 1.21

* Fix shard logic for iavl v1

* Update changelog
2024-03-22 09:40:18 -04:00
Nick DeLuca
7866ee2f74
update deps and add mergify config for v0.26.x release branch (#1853) 2024-03-21 08:45:10 -07:00
Robert Pirtle
66e41733e7
target all internal testnet instances on update (#1844)
(not just those in standby)
2024-03-12 13:31:33 -07:00
187 changed files with 36639 additions and 6451 deletions


@@ -11,5 +11,10 @@ docs/
networks/
scratch/
# Ignore build cache directories to avoid
# errors when addings these to docker images
build/.cache
build/.golangci-lint
go.work
go.work.sum

.github/CODEOWNERS (new file)

@@ -0,0 +1,3 @@
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# Global rule:
* @rhuairahrighairidh @karzak @pirtleshell @drklee3 @nddeluca @DracoLi @evgeniy-scherbina @sesheffield @boodyvo @lbayas


@@ -83,6 +83,31 @@ TETHER_USDT_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NA
TETHER_USDT_CONTRACT_ADDRESS=${TETHER_USDT_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000
# deploy and fund axlBNB ERC20 contract
AXL_BNB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBNB" axlBNB 18)
AXL_BNB_CONTRACT_ADDRESS=${AXL_BNB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlBUSD ERC20 contract
AXL_BUSD_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBUSD" axlBUSD 18)
AXL_BUSD_CONTRACT_ADDRESS=${AXL_BUSD_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlXRPB ERC20 contract
AXL_XRPB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlXRPB" axlXRPB 18)
AXL_XRPB_CONTRACT_ADDRESS=${AXL_XRPB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlBTC ERC20 contract
AXL_BTCB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBTCB" axlBTCB 18)
AXL_BTCB_CONTRACT_ADDRESS=${AXL_BTCB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund native wBTC ERC20 contract
WBTC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "wBTC" wBTC 8)
WBTC_CONTRACT_ADDRESS=${WBTC_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 100000000000000000
# seed some evm wallets
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
@@ -91,6 +116,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
# seed webapp E2E whale account
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 100000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
@@ -99,6 +129,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$WBTC_CONTRACT_ADDRESS" 10000000000000
# give dev-wallet enough delegation power to pass proposals by itself
@@ -138,7 +173,7 @@ PARAM_CHANGE_PROP_TEMPLATE=$(
{
"subspace": "evmutil",
"key": "EnabledConversionPairs",
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"}]"
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"},{\"kava_erc20_address\":\"AXL_BNB_CONTRACT_ADDRESS\",\"denom\":\"bnb\"},{\"kava_erc20_address\":\"AXL_BUSD_CONTRACT_ADDRESS\",\"denom\":\"busd\"},{\"kava_erc20_address\":\"AXL_BTCB_CONTRACT_ADDRESS\",\"denom\":\"btcb\"},{\"kava_erc20_address\":\"AXL_XRPB_CONTRACT_ADDRESS\",\"denom\":\"xrpb\"},{\"kava_erc20_address\":\"WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/bitgo/wbtc\"}]"
}
]
}
@@ -155,6 +190,11 @@ finalProposal="${finalProposal/AXL_USDC_CONTRACT_ADDRESS/$AXL_USDC_CONTRACT_ADDR
finalProposal="${finalProposal/AXL_WBTC_CONTRACT_ADDRESS/$AXL_WBTC_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/wETH_CONTRACT_ADDRESS/$wETH_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/TETHER_USDT_CONTRACT_ADDRESS/$TETHER_USDT_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BNB_CONTRACT_ADDRESS/$AXL_BNB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BUSD_CONTRACT_ADDRESS/$AXL_BUSD_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BTCB_CONTRACT_ADDRESS/$AXL_BTCB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_XRPB_CONTRACT_ADDRESS/$AXL_XRPB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/WBTC_CONTRACT_ADDRESS/$WBTC_CONTRACT_ADDRESS}"
# create unique proposal filename
proposalFileName="$(date +%s)-proposal.json"
@@ -185,6 +225,21 @@ sleep $AVG_SECONDS_BETWEEN_BLOCKS
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"
# submit a kava token committee proposal
COMMITTEE_PROP_TEMPLATE=$(
cat <<'END_HEREDOC'
{
"@type": "/cosmos.gov.v1beta1.TextProposal",
"title": "The next big thing signaling proposal.",
"description": "The purpose of this proposal is to signal support/opposition to the next big thing"
}
END_HEREDOC
)
committeeProposalFileName="$(date +%s)-committee-proposal.json"
echo "$COMMITTEE_PROP_TEMPLATE" >$committeeProposalFileName
tokenCommitteeId=4
kava tx committee submit-proposal "$tokenCommitteeId" "$committeeProposalFileName" --gas auto --gas-adjustment 1.5 --gas-prices 0.01ukava --from god -y
# if adding more cosmos coins -> er20s, ensure that the deployment order below remains the same.
# convert 1 HARD to an erc20. doing this ensures the contract is deployed.
kava tx evmutil convert-cosmos-coin-to-erc20 \


@@ -1,54 +0,0 @@
name: Manual Deployment (Internal Testnet)
# allow to be triggered manually
on: workflow_dispatch
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-internal-testnet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.internal.testnet.us-east.production.kava.io:443
chain-id: kava_2221-17000
seed-script-filename: seed-internal-testnet.sh
erc20-deployer-network-name: internal_testnet
genesis_validator_addresses: "kavavaloper1xcgtffvv2yeqmgs3yz4gv29kgjrj8usxrnrlwp kavavaloper1w66m9hdzwgd6uc8g93zqkcumgwzrpcw958sh3s"
kava_version_filepath: ./ci/env/kava-internal-testnet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we metric failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.internal
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]


@@ -1,79 +0,0 @@
name: Continuous Deployment (Internal Testnet)
# run after every successful CI job of new commits to the master branch
# if deploy version or config has changed
on:
workflow_run:
workflows: [Continuous Integration (Kava Master)]
types:
- completed
jobs:
changed_files:
runs-on: ubuntu-latest
# define output for first job forwarding output of changedInternalTestnetConfig job
outputs:
changedInternalTestnetConfig: ${{ steps.changed-internal-testnet-config.outputs.any_changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # OR "2" -> To retrieve the preceding commit.
- name: Get all changed internal testnet files
id: changed-internal-testnet-config
uses: tj-actions/changed-files@v42
with:
# Avoid using single or double quotes for multiline patterns
files: |
ci/env/kava-internal-testnet/**
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
needs: [changed_files]
# only start cd pipeline if last ci run was successful
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.changed_files.outputs.changedInternalTestnetConfig == 'true' }}
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-internal-testnet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.internal.testnet.us-east.production.kava.io:443
chain-id: kava_2221-17000
seed-script-filename: seed-internal-testnet.sh
erc20-deployer-network-name: internal_testnet
genesis_validator_addresses: "kavavaloper1xcgtffvv2yeqmgs3yz4gv29kgjrj8usxrnrlwp kavavaloper1w66m9hdzwgd6uc8g93zqkcumgwzrpcw958sh3s"
kava_version_filepath: ./ci/env/kava-internal-testnet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we metric failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.internal
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]


@@ -1,54 +0,0 @@
name: Manual Deployment (Protonet)
# allow to be triggered manually
on: workflow_dispatch
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-protonet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
chain-id: proto_2221-17000
seed-script-filename: seed-protonet.sh
erc20-deployer-network-name: protonet
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we metric failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.proto
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]


@ -1,60 +0,0 @@
name: Continuous Deployment (Protonet)
# run after every successful CI job of new commits to the master branch
on:
workflow_run:
workflows: [Continuous Integration (Kava Master)]
types:
- completed
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
# only start cd pipeline if last ci run was successful
if: ${{ github.event.workflow_run.conclusion == 'success' }}
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-protonet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
chain-id: proto_2221-17000
seed-script-filename: seed-protonet.sh
erc20-deployer-network-name: protonet
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.proto
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]


@ -1,80 +0,0 @@
name: Reset Internal Testnet
on:
workflow_call:
inputs:
chain-id:
required: true
type: string
aws-region:
required: true
type: string
ssm-document-name:
required: true
type: string
playbook-name:
required: true
type: string
playbook-infrastructure-branch:
required: true
type: string
secrets:
CI_AWS_KEY_ID:
required: true
CI_AWS_KEY_SECRET:
required: true
KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
required: true
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# download updated binary and genesis
# reset application database state (only done on internal testnet)
jobs:
place-chain-nodes-on-standby:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: take the chain offline
run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
env:
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
- name: checkout infrastructure repo
uses: actions/checkout@v4
with:
repository: Kava-Labs/infrastructure
token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
path: infrastructure
ref: master
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava node updater
run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
- name: run reset playbook on all chain nodes
run: |
kava-node-updater \
--debug \
--max-retries=2 \
--aws-ssm-document-name=$SSM_DOCUMENT_NAME \
--infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--node-states=Standby \
--wait-for-node-sync-after-upgrade=false
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
PLAYBOOK_NAME: ${{ inputs.playbook-name }}
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
AWS_SDK_LOAD_CONFIG: 1
PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}


@ -1,94 +0,0 @@
name: Seed Chain
on:
workflow_call:
inputs:
chain-api-url:
required: true
type: string
chain-id:
required: true
type: string
seed-script-filename:
required: true
type: string
erc20-deployer-network-name:
required: true
type: string
genesis_validator_addresses:
required: true
type: string
kava_version_filepath:
required: true
type: string
secrets:
DEV_WALLET_MNEMONIC:
required: true
KAVA_TESTNET_GOD_MNEMONIC:
required: true
jobs:
seed-chain-state:
runs-on: ubuntu-latest
steps:
- name: checkout repo from master
uses: actions/checkout@v4
with:
ref: master
- name: checkout version of kava used by network
run: |
git pull -p
git checkout $(cat ${KAVA_VERSION_FILEPATH})
env:
KAVA_VERSION_FILEPATH: ${{ inputs.kava_version_filepath }}
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava binary
run: make install
- name: checkout go evm tools repo
uses: actions/checkout@v4
with:
repository: ethereum/go-ethereum
path: go-ethereum
ref: v1.10.26
- name: install go evm tools
run: |
make
make devtools
working-directory: go-ethereum
- name: checkout kava bridge repo for deploying evm contracts
uses: actions/checkout@v4
with:
repository: Kava-Labs/kava-bridge
path: kava-bridge
ref: main
- name: install nodeJS
uses: actions/setup-node@v3
with:
cache: npm
node-version: 18
cache-dependency-path: kava-bridge/contract/package.json
- name: "install ERC20 contract deployment dependencies"
run: "npm install"
working-directory: kava-bridge/contract
- name: compile default erc20 contracts
run: make compile-contracts
working-directory: kava-bridge
- name: download seed script from current commit
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/${GITHUB_SHA}/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
working-directory: kava-bridge/contract
env:
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
- name: run seed scripts
run: bash ./${SEED_SCRIPT_FILENAME}
working-directory: kava-bridge/contract
env:
CHAIN_API_URL: ${{ inputs.chain-api-url }}
CHAIN_ID: ${{ inputs.chain-id }}
DEV_WALLET_MNEMONIC: ${{ secrets.DEV_WALLET_MNEMONIC }}
KAVA_TESTNET_GOD_MNEMONIC: ${{ secrets.KAVA_TESTNET_GOD_MNEMONIC }}
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
ERC20_DEPLOYER_NETWORK_NAME: ${{ inputs.erc20-deployer-network-name }}
GENESIS_VALIDATOR_ADDRESSES: ${{ inputs.genesis_validator_addresses }}


@ -1,78 +0,0 @@
name: Start Chain
on:
workflow_call:
inputs:
chain-id:
required: true
type: string
aws-region:
required: true
type: string
ssm-document-name:
required: true
type: string
playbook-name:
required: true
type: string
playbook-infrastructure-branch:
required: true
type: string
secrets:
CI_AWS_KEY_ID:
required: true
CI_AWS_KEY_SECRET:
required: true
KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
required: true
jobs:
# start kava, allow nodes to start processing requests from users once they are synced to live
serve-traffic:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: take the chain offline
run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
env:
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
- name: checkout infrastructure repo
uses: actions/checkout@v4
with:
repository: Kava-Labs/infrastructure
token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
path: infrastructure
ref: master
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava node updater
run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
- name: run start-chain playbook on all chain nodes
run: |
kava-node-updater \
--debug \
--max-retries=2 \
--aws-ssm-document-name=$SSM_DOCUMENT_NAME \
--infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--node-states=Standby \
--wait-for-node-sync-after-upgrade=true
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
PLAYBOOK_NAME: ${{ inputs.playbook-name }}
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
AWS_SDK_LOAD_CONFIG: 1
PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}
- name: bring the chain online
run: bash ${GITHUB_WORKSPACE}/.github/scripts/exit-standby-all-chain-nodes.sh


@ -1,7 +0,0 @@
name: Continuous Integration (Commit)
on:
push:
# run per commit ci checks against this commit
jobs:
lint:
uses: ./.github/workflows/ci-lint.yml


@ -1,79 +0,0 @@
name: Continuous Integration (Default Checks)
on:
workflow_call:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
tests/e2e/kvtool/go.sum
- name: build application
run: make build
test:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
tests/e2e/kvtool/go.sum
- name: run unit tests
run: make test
- name: run e2e tests
run: make docker-build test-e2e
validate-internal-testnet-genesis:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: save version of kava that will be deployed if this pr is merged
id: kava-version
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
- name: checkout repo from master
uses: actions/checkout@v4
with:
ref: master
- name: checkout version of kava that will be deployed if this pr is merged
run: |
git pull -p
git checkout $KAVA_VERSION
env:
KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava cli
run: make install
- name: checkout repo from current commit to validate current branch's genesis
uses: actions/checkout@v4
- name: validate testnet genesis
run: kava validate-genesis ci/env/kava-internal-testnet/genesis.json
validate-protonet-genesis:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava cli
run: make install
- name: validate protonet genesis
run: kava validate-genesis ci/env/kava-protonet/genesis.json


@ -1,102 +0,0 @@
name: Build & Publish Docker Images
on:
workflow_call:
inputs:
dockerhub-username:
required: true
type: string
# this workflow publishes a rocksdb & goleveldb docker images with these tags:
# - <commit-hash>-goleveldb
# - <extra-image-tag>-goleveldb
# - <commit-hash>-rocksdb
# - <extra-image-tag>-rocksdb
extra-image-tag:
required: true
type: string
secrets:
CI_DOCKERHUB_TOKEN:
required: true
# runs in ci-master after successful checks
# you can use images built by this action in future jobs.
# https://docs.docker.com/build/ci/github-actions/examples/#share-built-image-between-jobs
jobs:
docker-goleveldb:
# https://github.com/marketplace/actions/build-and-push-docker-images
runs-on: ubuntu-latest
steps:
# ensure working with latest code
- name: Checkout
uses: actions/checkout@v4
# generate a git commit hash to be used as image tag
- name: Generate short hash
id: commit-hash
run: echo "short=$( git rev-parse --short $GITHUB_SHA )" >> $GITHUB_OUTPUT
# qemu is used to emulate different platform architectures
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# cross-platform build of the image
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# authenticate for publish to docker hub
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
# publish to docker hub, tag with short git hash
- name: Build and push (goleveldb)
uses: docker/build-push-action@v5
with:
context: .
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
push: true
tags: kava/kava:${{ steps.commit-hash.outputs.short }}-goleveldb,kava/kava:${{ inputs.extra-image-tag }}-goleveldb
docker-rocksdb:
# https://github.com/marketplace/actions/build-and-push-docker-images
runs-on: ubuntu-latest
steps:
# ensure working with latest code
- name: Checkout
uses: actions/checkout@v4
# generate a git commit hash to be used as image tag
- name: Generate short hash
id: commit-hash
run: echo "short=$( git rev-parse --short $GITHUB_SHA )" >> $GITHUB_OUTPUT
# qemu is used to emulate different platform architectures
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# cross-platform build of the image
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# authenticate for publish to docker hub
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
# publish to docker hub, tag with short git hash
- name: Build and push (rocksdb)
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile-rocksdb
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
push: true
tags: kava/kava:${{ steps.commit-hash.outputs.short }}-rocksdb,kava/kava:${{ inputs.extra-image-tag }}-rocksdb


@ -1,17 +0,0 @@
name: Lint Checks
on:
workflow_call:
# run per commit ci checks against this commit
jobs:
proto-lint:
uses: ./.github/workflows/proto.yml
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: golangci-lint
uses: reviewdog/action-golangci-lint@v2
with:
github_token: ${{ secrets.github_token }}
reporter: github-pr-review
golangci_lint_flags: --timeout 10m


@ -1,56 +0,0 @@
name: Continuous Integration (Kava Master)
on:
push:
# run CI on any push to the master branch
branches:
- master
jobs:
# run per commit ci checks against master branch
lint-checks:
uses: ./.github/workflows/ci-lint.yml
# run default ci checks against master branch
default-checks:
uses: ./.github/workflows/ci-default.yml
# build and upload versions of kava for use on internal infrastructure
# configurations for databases, cpu architectures and operating systems
publish-internal:
# only run if all checks pass
needs: [lint-checks, default-checks]
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: set build tag
run: echo "BUILD_TAG=$(date +%s)-$(git rev-parse HEAD | cut -c 1-8)" >> $GITHUB_ENV
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
env:
ROCKSDB_VERSION: v8.10.0
- name: Build and upload release artifacts
run: bash ${GITHUB_WORKSPACE}/.github/scripts/publish-internal-release-artifacts.sh
env:
BUILD_TAG: ${{ env.BUILD_TAG }}
AWS_REGION: us-east-1
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
docker:
# only run if all checks pass
needs: [lint-checks, default-checks]
uses: ./.github/workflows/ci-docker.yml
with:
dockerhub-username: kavaops
extra-image-tag: master
secrets: inherit
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.releases.merge
namespace: Kava/ContinuousIntegration
secrets: inherit
needs: [publish-internal]


@ -1,23 +0,0 @@
name: Continuous Integration (PR)
on:
pull_request:
# run CI on pull requests to master or a release branch
branches:
- master
- 'release/**'
- 'releases/**'
# run default ci checks against current PR
jobs:
default:
uses: ./.github/workflows/ci-default.yml
rocksdb:
uses: ./.github/workflows/ci-rocksdb-build.yml
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.releases.pr
namespace: Kava/ContinuousIntegration
secrets: inherit
needs: [default]


@ -1,35 +0,0 @@
name: Continuous Integration (Release)
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+*"
jobs:
# run per commit ci checks against released version
lint-checks:
uses: ./.github/workflows/ci-lint.yml
# run default ci checks against released version
default-checks:
uses: ./.github/workflows/ci-default.yml
# get the version tag that triggered this workflow
get-version-tag:
# prep version release only if all checks pass
needs: [lint-checks, default-checks]
runs-on: ubuntu-latest
outputs:
git-tag: ${{ steps.git-tag.outputs.tag }}
steps:
- uses: actions/checkout@v4
- id: git-tag
run: echo "tag=$(git describe --always --tags --match='v*')" >> $GITHUB_OUTPUT
# build and upload versions of kava for use on internal infrastructure
# configurations for databases, cpu architectures and operating systems
docker:
# only run if all checks pass
needs: get-version-tag
uses: ./.github/workflows/ci-docker.yml
with:
dockerhub-username: kavaops
extra-image-tag: ${{ needs.get-version-tag.outputs.git-tag }}
secrets: inherit


@ -1,43 +0,0 @@
name: Continuous Integration (Rocksdb Build)
env:
ROCKSDB_VERSION: v8.10.0
on:
workflow_call:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
- name: build application
run: make build COSMOS_BUILD_OPTIONS=rocksdb
test:
runs-on: ubuntu-latest
steps:
- name: install RocksDB dependencies
run: sudo apt-get update
&& sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
- name: install RocksDB as shared library
run: git clone https://github.com/facebook/rocksdb.git
&& cd rocksdb
&& git checkout $ROCKSDB_VERSION
&& sudo make -j$(nproc) install-shared
&& sudo ldconfig
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: run unit tests
run: make test-rocksdb


@ -1,45 +0,0 @@
name: Metric Pipeline
on:
workflow_call:
inputs:
aws-region:
required: true
type: string
metric-name:
required: true
type: string
namespace:
required: true
type: string
secrets:
CI_AWS_KEY_ID:
required: true
CI_AWS_KEY_SECRET:
required: true
jobs:
metric-pipeline-result:
runs-on: ubuntu-latest
if: always() # always run to capture workflow success or failure
steps:
# Make sure the secrets are stored in your repo settings
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.CI_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CI_AWS_KEY_SECRET }}
aws-region: ${{ inputs.aws-region }}
- name: Calculate Pipeline Success
# run this action to get the workflow conclusion
# You can get the conclusion via env (env.WORKFLOW_CONCLUSION)
# values: neutral, success, skipped, cancelled, timed_out,
# action_required, failure
uses: technote-space/workflow-conclusion-action@v3
- name: Metric Pipeline Success
# replace TAG by the latest tag in the repository
uses: ros-tooling/action-cloudwatch-metrics@0.0.5
with:
metric-value: ${{ env.WORKFLOW_CONCLUSION == 'success' }}
metric-name: ${{ inputs.metric-name }}
namespace: ${{ inputs.namespace }}


@ -1,26 +0,0 @@
name: Protobuf Checks
on:
workflow_call:
jobs:
check-proto:
name: "Check Proto"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- run: go mod download
- run: make install-build-deps
- run: make check-proto-deps
- run: make check-proto-lint
- run: make check-proto-format
- run: make check-proto-breaking-remote
- run: BUF_CHECK_BREAKING_AGAINST_REMOTE="branch=$GITHUB_BASE_REF" make check-proto-breaking-remote
if: github.event_name == 'pull_request'
- run: make check-proto-gen
- run: make check-proto-gen-doc
- run: make check-proto-gen-swagger

.gitignore vendored

@ -31,6 +31,9 @@ out
# Ignore build cache dir
build/.cache
# Ignore make lint cache
build/.golangci-lint
# Ignore installed binaries
build/bin
@ -46,3 +49,8 @@ go.work.sum
# runtime
run
# contracts
precompiles/interfaces/build
precompiles/interfaces/node_modules
precompiles/interfaces/abis

.golangci-version Normal file

@ -0,0 +1 @@
v1.59

.golangci.yml Normal file

@ -0,0 +1,130 @@
run:
timeout: 20m # set maximum time allowed for the linter to run. If the linting process exceeds this duration, it will be terminated
modules-download-mode: readonly # Ensures that modules are not modified during the linting process
allow-parallel-runners: true # enables parallel execution of linters to speed up linting process
linters:
disable-all: true
enable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- containedctx
- contextcheck
- decorder
- dogsled
# - dupl
# - dupword
- durationcheck
- errcheck
- errchkjson
- errname
- errorlint
# - exhaustive
- exportloopref
- funlen
- gci
- ginkgolinter
- gocheckcompilerdirectives
# - gochecknoglobals
# - gochecknoinits
- goconst
- gocritic
- godox
- gofmt
# - gofumpt
- goheader
- goimports
- mnd
# - gomodguard
- goprintffuncname
- gosec
- gosimple
- govet
- grouper
- importas
- ineffassign
# - interfacebloat
- lll
- loggercheck
- makezero
- mirror
- misspell
- musttag
# - nakedret
# - nestif
- nilerr
# - nilnil
# - noctx
- nolintlint
# - nonamedreturns
- nosprintfhostport
- prealloc
- predeclared
- promlinter
# - reassign
- revive
- rowserrcheck
- staticcheck
# - stylecheck
- tagalign
# - testpackage
# - thelper
# - tparallel
- typecheck
# - unconvert
- unparam
- unused
# - usestdlibvars
- wastedassign
# - whitespace
- wrapcheck
issues:
exclude-rules:
# Disable funlen for "func Test..." or func (suite *Suite) Test..." type functions
# These functions tend to be descriptive and exceed length limits.
- source: "^func (\\(.*\\) )?Test"
linters:
- funlen
linters-settings:
errcheck:
check-blank: true # check for assignments to the blank identifier '_' when errors are returned
check-type-assertions: false # check type assertion
errorlint:
check-generated: false # disables linting of generated files
default-signifies-exhaustive: false # exhaustive handling of error types
exhaustive:
default-signifies-exhaustive: false # exhaustive handling of error types
gci:
sections: # defines the order of import sections
- standard
- default
- localmodule
goconst:
min-len: 3 # min length for string constants to be checked
min-occurrences: 3 # min occurrences of the same constant before it's flagged
godox:
keywords: # specific keywords to flag for further action
- BUG
- FIXME
- HACK
gosec:
exclude-generated: true
lll:
line-length: 120
misspell:
locale: US
ignore-words: expect
nolintlint:
allow-leading-space: false
require-explanation: true
require-specific: true
prealloc:
simple: true # enables simple preallocation checks
range-loops: true # enables preallocation checks in range loops
for-loops: false # disables preallocation checks in for loops
unparam:
check-exported: true # checks exported functions and methods for unused params

.mockery.yaml Normal file

@ -0,0 +1,16 @@
# Generate EXPECT() methods, type-safe methods to generate call expectations
with-expecter: true
# Generate mocks in a mocks directory adjacent to the interfaces
dir: "{{.InterfaceDir}}/mocks"
mockname: "Mock{{.InterfaceName}}"
outpkg: "mocks"
filename: "Mock{{.InterfaceName}}.go"
packages:
github.com/0glabs/0g-chain/x/precisebank/types:
# package-specific config
config:
interfaces:
AccountKeeper:
BankKeeper:
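
For illustration only (not part of this diff), a minimal sketch of how a mock generated by the config above might be used in a Go test. It assumes mockery v2's with-expecter output for the BankKeeper interface; the GetBalance method, its signature, and the ua0gi amount are placeholders chosen for the example:

package precisebank_test

import (
	"testing"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/mock"

	"github.com/0glabs/0g-chain/x/precisebank/types/mocks"
)

func TestKeeperWithMockedBankKeeper(t *testing.T) {
	// NewMockBankKeeper and the EXPECT() builder follow the mockname/outpkg
	// settings above; GetBalance is an assumed interface method.
	bk := mocks.NewMockBankKeeper(t)
	bk.EXPECT().
		GetBalance(mock.Anything, mock.Anything, "ua0gi").
		Return(sdk.NewInt64Coin("ua0gi", 1_000_000)).
		Maybe() // optional expectation so the sketch passes without a caller

	// exercise code under test that depends on bk here
}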


@ -1,2 +1,2 @@
golang 1.21
nodejs 18.16.0
golang 1.21.9
nodejs 20.16.0


@ -38,6 +38,16 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [v0.26.0]
### Features
- (precisebank) [#1906] Add new `x/precisebank` module with bank decimal extension for EVM usage.
- (cli) [#1922] Add `iavlviewer` CLI command for low-level iavl db debugging.
### Improvements
- (rocksdb) [#1903] Bump cometbft-db dependency for use with rocksdb v8.10.0
- (deps) [#1988] Bump cometbft to v0.37.9-kava.1
## [v0.26.0]
### Features
- (cli) [#1785] Add `shard` CLI command to support creating partitions of data for standalone nodes
@ -330,6 +340,10 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
- [#257](https://github.com/Kava-Labs/kava/pulls/257) Include scripts to run
large-scale simulations remotely using aws-batch
[#1988]: https://github.com/Kava-Labs/kava/pull/1988
[#1922]: https://github.com/Kava-Labs/kava/pull/1922
[#1906]: https://github.com/Kava-Labs/kava/pull/1906
[#1903]: https://github.com/Kava-Labs/kava/pull/1903
[#1846]: https://github.com/Kava-Labs/kava/pull/1846
[#1848]: https://github.com/Kava-Labs/kava/pull/1848
[#1839]: https://github.com/Kava-Labs/kava/pull/1839
@ -392,8 +406,9 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
[#750]: https://github.com/Kava-Labs/kava/pull/750
[#751]: https://github.com/Kava-Labs/kava/pull/751
[#780]: https://github.com/Kava-Labs/kava/pull/780
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.25.0...HEAD
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.24.3
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.26.0...HEAD
[v0.26.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.26.0
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.25.0
[v0.24.3]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.24.1
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.24.0
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.23.2


@ -1,23 +1,6 @@
FROM golang:1.20-bullseye AS chain-builder
FROM kava/rocksdb:v8.10.1-go1.21 AS kava-builder
# Set up dependencies
RUN apt-get update \
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
&& rm -rf /var/lib/apt/lists/*
# Set working directory for the build
WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v8.10.0
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \
&& cd rocksdb \
&& git checkout $ROCKSDB_VERSION \
&& make -j$(nproc) install-shared \
&& ldconfig
RUN apt-get update
WORKDIR /root/0gchain
# Copy dependency files first to facilitate dependency caching

Dockerfile-rocksdb-base Normal file

@ -0,0 +1,22 @@
# published to https://hub.docker.com/repository/docker/kava/rocksdb/tags
# docker buildx build --platform linux/amd64,linux/arm64 -t kava/rocksdb:v8.10.1-go1.21 -f Dockerfile-rocksdb-base . --push
FROM golang:1.21-bullseye
# Set up dependencies
RUN apt-get update \
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
&& rm -rf /var/lib/apt/lists/*
# Set working directory for the build
WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v8.10.0
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \
&& cd rocksdb \
&& git checkout $ROCKSDB_VERSION \
&& make -j$(nproc) install-shared \
&& ldconfig


@ -105,6 +105,8 @@ include $(BUILD_DIR)/deps.mk
include $(BUILD_DIR)/proto.mk
include $(BUILD_DIR)/proto-deps.mk
include $(BUILD_DIR)/lint.mk
#export GO111MODULE = on
# process build tags
build_tags = netgo
@ -208,6 +210,14 @@ build-release: go.sum
build-linux: go.sum
LEDGER_ENABLED=false GOOS=linux GOARCH=amd64 $(MAKE) build
# build on rocksdb-backed kava on macOS with shared libs from brew
# this assumes you are on macOS & these deps have been installed with brew:
# rocksdb, snappy, lz4, and zstd
# use like `make build-rocksdb-brew COSMOS_BUILD_OPTIONS=rocksdb`
build-rocksdb-brew:
export CGO_CFLAGS := -I$(shell brew --prefix rocksdb)/include
export CGO_LDFLAGS := -L$(shell brew --prefix rocksdb)/lib -lrocksdb -lstdc++ -lm -lz -L$(shell brew --prefix snappy)/lib -L$(shell brew --prefix lz4)/lib -L$(shell brew --prefix zstd)/lib
install: go.sum
$(GO_BIN) install -mod=readonly $(BUILD_FLAGS) $(MAIN_ENTRY)
@ -234,13 +244,6 @@ link-check:
# TODO: replace kava in following line with project name
liche -r . --exclude "^http://127.*|^https://riot.im/app*|^http://kava-testnet*|^https://testnet-dex*|^https://kava3.data.kava.io*|^https://ipfs.io*|^https://apps.apple.com*|^https://kava.quicksync.io*"
lint:
golangci-lint run
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" | xargs gofmt -d -s
$(GO_BIN) mod verify
.PHONY: lint
format:
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs gofmt -w -s
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs misspell -w
@ -265,11 +268,11 @@ build-docker-local-0gchain:
# Run a 4-node testnet locally
localnet-start: build-linux localnet-stop
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z $(DOCKER_IMAGE_NAME)-node testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
docker-compose up -d
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z kava/kavanode testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
$(DOCKER) compose up -d
localnet-stop:
docker-compose down
$(DOCKER) compose down
# Launch a new single validator chain
start:
@ -311,12 +314,14 @@ test-basic: test
test-e2e: docker-build
$(GO_BIN) test -failfast -count=1 -v ./tests/e2e/...
# run interchaintest tests (./tests/e2e-ibc)
test-ibc: docker-build
cd tests/e2e-ibc && KAVA_TAG=local $(GO_BIN) test -timeout 10m .
.PHONY: test-ibc
test:
@$(GO_BIN) test $$($(GO_BIN) list ./... | grep -v 'contrib' | grep -v 'tests/e2e')
test-rocksdb:
@go test -tags=rocksdb $(MAIN_ENTRY)/opendb
# Run cli integration tests
# `-p 4` to use 4 cores, `-tags cli_test` to tell $(GO_BIN) not to ignore the cli package
# These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set.
@ -327,6 +332,18 @@ test-cli: build
test-migrate:
@$(GO_BIN) test -v -count=1 ./migrate/...
# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169
ifeq ($(OS_FAMILY),Darwin)
FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic
endif
test-fuzz:
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzMintCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzBurnCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzSendCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_NonZeroRemainder ./x/precisebank/types
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_ZeroRemainder ./x/precisebank/types
# Kick start lots of sims on an AWS cluster.
# This submits an AWS Batch job to run a lot of sims, each within a docker image. Results are uploaded to S3
start-remote-sims:
@ -337,14 +354,14 @@ start-remote-sims:
# submit an array job on AWS Batch, using 1000 seeds, spot instances
aws batch submit-job \
--job-name "master-$(VERSION)" \
--job-queue “simulation-1-queue-spot" \
--job-queue "simulation-1-queue-spot" \
--array-properties size=1000 \
--job-definition $(BINARY_NAME)-sim-master \
--container-override environment=[{SIM_NAME=master-$(VERSION)}]
update-kvtool:
git submodule init || true
git submodule update
git submodule update --remote
cd tests/e2e/kvtool && make install
.PHONY: all build-linux install clean build test test-cli test-all test-rest test-basic start-remote-sims
.PHONY: all build-linux install build test test-cli test-all test-rest test-basic test-fuzz start-remote-sims


@ -12,7 +12,7 @@ Zero Gravity (0G) is the foundational infrastructure for high-performance dapps
It efficiently orchestrates utilization of hardware resources such as storage and compute and software assets such as data and models to handle the scale and complexity of AI workloads.
Continue reading [here](https://0g-doc-new.vercel.app/intro) if you want to learn more about 0G dAIOS and how its various layers enable limitless scalability.
Continue reading [here](https://docs.0g.ai/intro) if you want to learn more about 0G dAIOS and how its various layers enable limitless scalability.
## 0G Product Suite
- DA: ultra high-performance data availability layer with KZG and quorum-based DAS
@ -21,17 +21,15 @@ Continue reading [here](https://0g-doc-new.vercel.app/intro) if you want to lear
- Network: high-performance, low-latency, and decentralized network
## Documentation
- If you want to build with 0G's network, DA layer, inference serving, or storage SDK, please refer to the [Build with 0G Documentation](https://0g-doc-new.vercel.app/).
- If you want to build with 0G's network, DA layer, inference serving, or storage SDK, please refer to the [Build with 0G Documentation](https://docs.0g.ai/build-with-0g/contracts).
- If you want to run a validator node, DA node, or storage node, please refer to the [Run a Node Documentation](https://0g-doc-new.vercel.app/run-a-node/overview).
- If you want to run a validator node, DA node, or storage node, please refer to the [Run a Node Documentation](https://docs.0g.ai/run-a-node/overview).
## Support and Additional Resources
We want to do everything we can to help you be successful while working on your contribution and projects. Here you'll find various resources and communities that may help you complete a project or contribute to 0G.
### Communities
- [0G Telegram](https://t.me/web3_0glabs)
- [0G Discord](https://discord.com/invite/0glabs)
- [0G Discord](https://discord.com/invite/0glabs)


@ -168,6 +168,7 @@ func newEthAnteHandler(options HandlerOptions) sdk.AnteHandler {
evmante.NewEthSetUpContextDecorator(options.EvmKeeper), // outermost AnteDecorator. SetUpContext must be called first
evmante.NewEthMempoolFeeDecorator(options.EvmKeeper), // Check eth effective gas price against minimal-gas-prices
evmante.NewEthValidateBasicDecorator(options.EvmKeeper),
evmante.NewEvmMinGasPriceDecorator(options.EvmKeeper),
evmante.NewEthSigVerificationDecorator(options.EvmKeeper),
evmante.NewEthAccountVerificationDecorator(options.AccountKeeper, options.EvmKeeper),
evmante.NewCanTransferDecorator(options.EvmKeeper),


@ -405,7 +405,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
// usdxToMintAmt: 99,
// },
{
name: "fails when convertion more erc20 usdc than balance",
name: "fails when conversion more erc20 usdc than balance",
usdcDepositAmt: 51_000,
usdxToMintAmt: 100,
errMsg: "transfer amount exceeds balance",


@ -95,6 +95,7 @@ import (
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
"github.com/ethereum/go-ethereum/core/vm"
evmante "github.com/evmos/ethermint/app/ante"
ethermintconfig "github.com/evmos/ethermint/server/config"
"github.com/evmos/ethermint/x/evm"
@ -109,6 +110,7 @@ import (
chainparams "github.com/0glabs/0g-chain/app/params"
"github.com/0glabs/0g-chain/chaincfg"
dasignersprecompile "github.com/0glabs/0g-chain/precompiles/dasigners"
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
"github.com/0glabs/0g-chain/x/bep3"
bep3keeper "github.com/0glabs/0g-chain/x/bep3/keeper"
@ -129,6 +131,9 @@ import (
issuance "github.com/0glabs/0g-chain/x/issuance"
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
"github.com/0glabs/0g-chain/x/precisebank"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
pricefeed "github.com/0glabs/0g-chain/x/pricefeed"
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
@ -136,7 +141,6 @@ import (
validatorvestingrest "github.com/0glabs/0g-chain/x/validator-vesting/client/rest"
validatorvestingtypes "github.com/0glabs/0g-chain/x/validator-vesting/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
)
var (
@ -178,6 +182,7 @@ var (
validatorvesting.AppModuleBasic{},
evmutil.AppModuleBasic{},
mint.AppModuleBasic{},
precisebank.AppModuleBasic{},
council.AppModuleBasic{},
dasigners.AppModuleBasic{},
consensus.AppModuleBasic{},
@ -199,6 +204,7 @@ var (
issuancetypes.ModuleAccountName: {authtypes.Minter, authtypes.Burner},
bep3types.ModuleName: {authtypes.Burner, authtypes.Minter},
minttypes.ModuleName: {authtypes.Minter},
precisebanktypes.ModuleName: {authtypes.Minter, authtypes.Burner}, // used for reserve account to back fractional amounts
}
)
@ -268,6 +274,7 @@ type App struct {
mintKeeper mintkeeper.Keeper
dasignersKeeper dasignerskeeper.Keeper
consensusParamsKeeper consensusparamkeeper.Keeper
precisebankKeeper precisebankkeeper.Keeper
// make scoped keepers public for test purposes
ScopedIBCKeeper capabilitykeeper.ScopedKeeper
@ -318,7 +325,7 @@ func NewApp(
counciltypes.StoreKey,
dasignerstypes.StoreKey,
vestingtypes.StoreKey,
consensusparamtypes.StoreKey, crisistypes.StoreKey,
consensusparamtypes.StoreKey, crisistypes.StoreKey, precisebanktypes.StoreKey,
ibcwasmtypes.StoreKey,
)
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey)
@ -482,26 +489,41 @@ func NewApp(
app.accountKeeper,
)
evmBankKeeper := evmutilkeeper.NewEvmBankKeeper(app.evmutilKeeper, app.bankKeeper, app.accountKeeper)
app.precisebankKeeper = precisebankkeeper.NewKeeper(
app.appCodec,
keys[precisebanktypes.StoreKey],
app.bankKeeper,
app.accountKeeper,
)
// dasigners keeper
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper, govAuthAddrStr)
// precompiles
precompiles := make(map[common.Address]vm.PrecompiledContract)
// dasigners
daSignersPrecompile, err := dasignersprecompile.NewDASignersPrecompile(app.dasignersKeeper)
if err != nil {
panic("initialize precompile failed")
panic(fmt.Sprintf("initialize dasigners precompile failed: %v", err))
}
precompiles[daSignersPrecompile.Address()] = daSignersPrecompile
// evm keeper
// staking
stakingPrecompile, err := stakingprecompile.NewStakingPrecompile(app.stakingKeeper)
if err != nil {
panic(fmt.Sprintf("initialize staking precompile failed: %v", err))
}
precompiles[stakingPrecompile.Address()] = stakingPrecompile
app.evmKeeper = evmkeeper.NewKeeper(
appCodec, keys[evmtypes.StoreKey], tkeys[evmtypes.TransientKey],
govAuthAddr,
app.accountKeeper, evmBankKeeper, app.stakingKeeper, app.feeMarketKeeper,
app.accountKeeper,
app.precisebankKeeper, // x/precisebank in place of x/bank
app.stakingKeeper,
app.feeMarketKeeper,
options.EVMTrace,
evmSubspace,
precompiles,
)
app.evmutilKeeper.SetEvmKeeper(app.evmKeeper)
// It's important to note that the PFM Keeper must be initialized before the Transfer Keeper
@ -669,6 +691,7 @@ func NewApp(
evmutil.NewAppModule(app.evmutilKeeper, app.bankKeeper, app.accountKeeper),
// nil InflationCalculationFn, use SDK's default inflation function
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, nil, mintSubspace),
precisebank.NewAppModule(app.precisebankKeeper, app.bankKeeper, app.accountKeeper),
council.NewAppModule(app.CouncilKeeper),
ibcwasm.NewAppModule(app.ibcWasmClientKeeper),
dasigners.NewAppModule(app.dasignersKeeper, *app.stakingKeeper),
@ -716,6 +739,7 @@ func NewApp(
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName,
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
@ -752,6 +776,7 @@ func NewApp(
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName,
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
@ -786,7 +811,8 @@ func NewApp(
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
precisebanktypes.ModuleName, // Must be run after x/bank to verify reserve balance
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)


@ -3,9 +3,9 @@ Package params defines the simulation parameters for the 0gChain app.
It contains the default weights used for each transaction used on the module's
simulation. These weights define the chance for a transaction to be simulated at
any gived operation.
any given operation.
You can repace the default values for the weights by providing a params.json
You can replace the default values for the weights by providing a params.json
file with the weights defined for each of the transaction operations:
{


@ -130,7 +130,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
})
suite.Run("VetoedFails", func() {
suite.SetupTest()
@ -145,7 +145,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
})
suite.Run("UnvetoedAndYesAboveThresholdPasses", func() {
suite.SetupTest()
@ -162,7 +162,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Truef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
suite.Run("UnvetoedAndYesBelowThresholdFails", func() {
suite.SetupTest()
@ -179,7 +179,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
suite.Run("NotEnoughStakeFails", func() {
suite.SetupTest()
@ -191,7 +191,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
suite.Run("UnvetoedAndAllAbstainedFails", func() {
suite.SetupTest()
@ -204,7 +204,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
}


@ -47,6 +47,7 @@ import (
committeekeeper "github.com/0glabs/0g-chain/x/committee/keeper"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
)
@ -99,28 +100,33 @@ func NewTestAppFromSealed() TestApp {
}
// nolint
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
func (tApp TestApp) GetPrecisebankKeeper() precisebankkeeper.Keeper { return tApp.precisebankKeeper }
func (tApp TestApp) GetKVStoreKey(key string) *storetypes.KVStoreKey {
return tApp.keys[key]
}
func (tApp TestApp) GetBlockedMaccAddrs() map[string]bool {
return tApp.loadBlockedMaccAddrs()
}
// LegacyAmino returns the app's amino codec.
func (app *App) LegacyAmino() *codec.LegacyAmino {
return app.legacyAmino
@ -465,7 +471,7 @@ func (tApp TestApp) SetInflation(ctx sdk.Context, value sdk.Dec) {
mk.SetParams(ctx, mintParams)
}
// GeneratePrivKeyAddressPairsFromRand generates (deterministically) a total of n private keys and addresses.
// GeneratePrivKeyAddressPairs generates (deterministically) a total of n private keys and addresses.
func GeneratePrivKeyAddressPairs(n int) (keys []cryptotypes.PrivKey, addrs []sdk.AccAddress) {
r := rand.New(rand.NewSource(12345)) // make the generation deterministic
keys = make([]cryptotypes.PrivKey, n)


@ -3,13 +3,21 @@ package app
import (
"fmt"
sdkmath "cosmossdk.io/math"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const (
UpgradeName_Testnet = "v0.3.1"
UpgradeName_Testnet = "v0.4.0"
)
// RegisterUpgradeHandlers registers the upgrade handlers for the app.
@ -18,6 +26,24 @@ func (app App) RegisterUpgradeHandlers() {
UpgradeName_Testnet,
upgradeHandler(app, UpgradeName_Testnet),
)
upgradeInfo, err := app.upgradeKeeper.ReadUpgradeInfoFromDisk()
if err != nil {
panic(err)
}
doUpgrade := upgradeInfo.Name == UpgradeName_Testnet
if doUpgrade && !app.upgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
storeUpgrades := storetypes.StoreUpgrades{
Added: []string{
precisebanktypes.ModuleName,
},
}
// configure store loader that checks if version == upgradeHeight and applies store upgrades
app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
}
}
// upgradeHandler returns an UpgradeHandler for the given upgrade parameters.
@ -30,13 +56,213 @@ func upgradeHandler(
plan upgradetypes.Plan,
fromVM module.VersionMap,
) (module.VersionMap, error) {
app.Logger().Info(fmt.Sprintf("running %s upgrade handler", name))
logger := app.Logger()
logger.Info(fmt.Sprintf("running %s upgrade handler", name))
params := app.mintKeeper.GetParams(ctx)
params.MintDenom = "ua0gi"
app.mintKeeper.SetParams(ctx, params)
// Run migrations for all modules and return new consensus version map.
versionMap, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
if err != nil {
return nil, err
}
// run migrations for all modules and return new consensus version map
return app.mm.RunMigrations(ctx, app.configurator, fromVM)
logger.Info("completed store migrations")
// Migration of fractional balances from x/evmutil to x/precisebank
if err := MigrateEvmutilToPrecisebank(
ctx,
app.accountKeeper,
app.bankKeeper,
app.evmutilKeeper,
app.precisebankKeeper,
); err != nil {
return nil, err
}
logger.Info("completed x/evmutil to x/precisebank migration")
return versionMap, nil
}
}
// MigrateEvmutilToPrecisebank migrates all required state from x/evmutil to
// x/precisebank and ensures the resulting state is correct.
// This migrates the following state:
// - Fractional balances
// - Fractional balance reserve
// Initializes the following state in x/precisebank:
// - Remainder amount
func MigrateEvmutilToPrecisebank(
ctx sdk.Context,
accountKeeper evmutiltypes.AccountKeeper,
bankKeeper bankkeeper.Keeper,
evmutilKeeper evmutilkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) error {
logger := ctx.Logger()
aggregateSum, err := TransferFractionalBalances(
ctx,
evmutilKeeper,
precisebankKeeper,
)
if err != nil {
return fmt.Errorf("fractional balances transfer: %w", err)
}
logger.Info(
"fractional balances transferred from x/evmutil to x/precisebank",
"aggregate sum", aggregateSum,
)
remainder := InitializeRemainder(ctx, precisebankKeeper, aggregateSum)
logger.Info("remainder amount initialized in x/precisebank", "remainder", remainder)
// Migrate the fractional balance reserve and ensure it fully backs all
// fractional balances.
if err := TransferFractionalBalanceReserve(
ctx,
accountKeeper,
bankKeeper,
precisebankKeeper,
); err != nil {
return fmt.Errorf("reserve transfer: %w", err)
}
return nil
}
// TransferFractionalBalances migrates fractional balances from x/evmutil to
// x/precisebank. It sets the fractional balance in x/precisebank and deletes
// the account from x/evmutil. Returns the aggregate sum of all fractional
// balances.
func TransferFractionalBalances(
ctx sdk.Context,
evmutilKeeper evmutilkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) (sdkmath.Int, error) {
aggregateSum := sdkmath.ZeroInt()
var iterErr error
evmutilKeeper.IterateAllAccounts(ctx, func(acc evmutiltypes.Account) bool {
// Set account balance in x/precisebank
precisebankKeeper.SetFractionalBalance(ctx, acc.Address, acc.Balance)
// Delete account from x/evmutil (assign to the outer iterErr so the error is returned)
iterErr = evmutilKeeper.SetAccount(ctx, evmutiltypes.Account{
Address: acc.Address,
// Set balance to 0 to delete it
Balance: sdkmath.ZeroInt(),
})
// Halt iteration if there was an error
if iterErr != nil {
return true
}
// Aggregate sum of all fractional balances
aggregateSum = aggregateSum.Add(acc.Balance)
// Continue iterating
return false
})
return aggregateSum, iterErr
}
// InitializeRemainder initializes the remainder amount in x/precisebank. It
// calculates the remainder amount that is needed to ensure that the sum of all
// fractional balances is a multiple of the conversion factor. The remainder
// amount is stored in the store and returned.
func InitializeRemainder(
ctx sdk.Context,
precisebankKeeper precisebankkeeper.Keeper,
aggregateSum sdkmath.Int,
) sdkmath.Int {
// Extra fractional coins that exceed the conversion factor.
// This extra + remainder should equal the conversion factor to ensure
// (sum(fBalances) + remainder) % conversionFactor = 0
extraFractionalAmount := aggregateSum.Mod(precisebanktypes.ConversionFactor())
remainder := precisebanktypes.ConversionFactor().
Sub(extraFractionalAmount).
// Mod conversion factor to ensure remainder is valid.
// If extraFractionalAmount is a multiple of conversion factor, the
// remainder is 0.
Mod(precisebanktypes.ConversionFactor())
// Panics if the remainder is invalid. In a correct chain state and only
// mint/burns due to transfers, this would be 0.
precisebankKeeper.SetRemainderAmount(ctx, remainder)
return remainder
}
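// Worked example (illustrative sketch, not part of the original file): assuming
// a conversion factor of 1e12, an aggregate fractional sum of 1.5e12 exceeds a
// whole integer unit by 0.5e12, so the stored remainder is 1e12 - 0.5e12 =
// 0.5e12 and (sum + remainder) % conversionFactor == 0. The same arithmetic
// with sdkmath:
//
// cf := sdkmath.NewInt(1_000_000_000_000) // assumed conversion factor
// sum := cf.MulRaw(3).QuoRaw(2)           // 1.5 integer units
// extra := sum.Mod(cf)                    // 0.5 * cf
// remainder := cf.Sub(extra).Mod(cf)      // 0.5 * cf
// sum.Add(remainder).Mod(cf).IsZero()     // true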
// TransferFractionalBalanceReserve migrates the fractional balance reserve from
// x/evmutil to x/precisebank. It transfers the reserve balance from x/evmutil
// to x/precisebank and ensures that the reserve fully backs all fractional
// balances. It mints or burns coins to back the fractional balances exactly.
func TransferFractionalBalanceReserve(
ctx sdk.Context,
accountKeeper evmutiltypes.AccountKeeper,
bankKeeper bankkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) error {
logger := ctx.Logger()
// Transfer x/evmutil reserve to x/precisebank.
evmutilAddr := accountKeeper.GetModuleAddress(evmutiltypes.ModuleName)
reserveBalance := bankKeeper.GetBalance(ctx, evmutilAddr, precisebanktypes.IntegerCoinDenom)
if err := bankKeeper.SendCoinsFromModuleToModule(
ctx,
evmutiltypes.ModuleName, // from x/evmutil
precisebanktypes.ModuleName, // to x/precisebank
sdk.NewCoins(reserveBalance),
); err != nil {
return fmt.Errorf("failed to transfer reserve from x/evmutil to x/precisebank: %w", err)
}
logger.Info(fmt.Sprintf("transferred reserve balance: %s", reserveBalance))
// Ensure x/precisebank reserve fully backs all fractional balances.
totalFractionalBalances := precisebankKeeper.GetTotalSumFractionalBalances(ctx)
// NOTE: this does not verify state correctness. The total of fractional
// balances should be a multiple of the conversion factor once the remainder
// is included, but that is not checked here; the remainder itself is
// initialized by InitializeRemainder.
// Determine how much the reserve is off by, e.g. unbacked amount
expectedReserveBalance := totalFractionalBalances.Quo(precisebanktypes.ConversionFactor())
// If there is a remainder (totalFractionalBalances % conversionFactor != 0),
// then expectedReserveBalance is rounded up to the nearest integer.
if totalFractionalBalances.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
expectedReserveBalance = expectedReserveBalance.Add(sdkmath.OneInt())
}
unbackedAmount := expectedReserveBalance.Sub(reserveBalance.Amount)
logger.Info(fmt.Sprintf("total account fractional balances: %s", totalFractionalBalances))
// Three possible cases:
// 1. Reserve is not enough, mint coins to back the fractional balances
// 2. Reserve is too much, burn coins to back the fractional balances exactly
// 3. Reserve is exactly enough, no action needed
if unbackedAmount.IsPositive() {
coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount))
if err := bankKeeper.MintCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
return fmt.Errorf("failed to mint extra reserve coins: %w", err)
}
logger.Info(fmt.Sprintf("unbacked amount minted to reserve: %s", unbackedAmount))
} else if unbackedAmount.IsNegative() {
coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount.Neg()))
if err := bankKeeper.BurnCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
return fmt.Errorf("failed to burn extra reserve coins: %w", err)
}
logger.Info(fmt.Sprintf("extra reserve amount burned: %s", unbackedAmount.Neg()))
} else {
logger.Info("reserve exactly backs fractional balances, no mint/burn needed")
}
return nil
}
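In formula form (reconstructed from the rounding logic above, with illustrative numbers):

expectedReserve = \left\lceil \frac{\sum \text{fractionalBalances}}{\text{conversionFactor}} \right\rceil

so balances totalling 2.5 integer units with 1 unit already in reserve give expectedReserve = 3 and 2 units are minted, while 1.5 units of balances with 3 units in reserve give expectedReserve = 2 and 1 unit is burned.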

app/upgrades_test.go (new file, 434 lines)
@ -0,0 +1,434 @@
package app_test
import (
"strconv"
"testing"
"time"
sdkmath "cosmossdk.io/math"
"github.com/0glabs/0g-chain/app"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/stretchr/testify/require"
)
func TestMigrateEvmutilToPrecisebank(t *testing.T) {
// Full test case with all components together
tests := []struct {
name string
initialReserve sdkmath.Int
fractionalBalances []sdkmath.Int
}{
{
"no fractional balances",
sdkmath.NewInt(0),
[]sdkmath.Int{},
},
{
"sufficient reserve, 0 remainder",
// Accounts adding up to 2 int units, same as reserve
sdkmath.NewInt(2),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, 0 remainder",
// Accounts adding up to 2 int units, but only 1 int unit in reserve
sdkmath.NewInt(1),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"excess reserve, 0 remainder",
// Accounts adding up to 2 int units, but 3 int units in reserve
sdkmath.NewInt(3),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"sufficient reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, requiring a 2 unit reserve (same as the initial reserve)
sdkmath.NewInt(2),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, more than the 1 unit reserve.
// Reserve should be 2 and remainder 0.5 after migration.
sdkmath.NewInt(1),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"excess reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, 3 int units in reserve
sdkmath.NewInt(3),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
ak := tApp.GetAccountKeeper()
bk := tApp.GetBankKeeper()
evmuk := tApp.GetEvmutilKeeper()
pbk := tApp.GetPrecisebankKeeper()
reserveCoin := sdk.NewCoin(precisebanktypes.IntegerCoinDenom, tt.initialReserve)
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(reserveCoin))
require.NoError(t, err)
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
// Double check balances
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
require.Equal(t, tt.initialReserve, oldReserveBalance.Amount, "initial x/evmutil reserve balance")
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
// Set accounts
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
err := evmuk.SetBalance(ctx, addr, balance)
require.NoError(t, err)
}
// Run full x/evmutil -> x/precisebank migration
err = app.MigrateEvmutilToPrecisebank(
ctx,
ak,
bk,
evmuk,
pbk,
)
require.NoError(t, err)
// Check old reserve is empty
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
// Check new reserve fully backs fractional balances
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
remainder := pbk.GetRemainderAmount(ctx)
expectedReserveBal := fractionalBalanceTotal.Add(remainder)
require.Equal(
t,
expectedReserveBal,
newReserveBalanceAfter.Amount.Mul(precisebanktypes.ConversionFactor()),
"new reserve should equal total fractional balances",
)
// Check balances are deleted in evmutil and migrated to precisebank
for i := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
acc := evmuk.GetAccount(ctx, addr)
require.Nil(t, acc, "account should be deleted")
balance := pbk.GetFractionalBalance(ctx, addr)
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
}
// Check that balances and the remainder are valid via module invariants
res, stop := precisebankkeeper.AllInvariants(pbk)(ctx)
require.Falsef(t, stop, "invariants should pass: %s", res)
})
}
}
func TestTransferFractionalBalances(t *testing.T) {
tests := []struct {
name string
fractionalBalances []sdkmath.Int
}{
{
"no fractional balances",
[]sdkmath.Int{},
},
{
"balanced fractional balances",
[]sdkmath.Int{
// 4 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"unbalanced balances",
[]sdkmath.Int{
// 3 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
evmutilk := tApp.GetEvmutilKeeper()
pbk := tApp.GetPrecisebankKeeper()
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
err := evmutilk.SetBalance(ctx, addr, balance)
require.NoError(t, err)
}
// Run balance transfer
aggregateSum, err := app.TransferFractionalBalances(
ctx,
evmutilk,
pbk,
)
require.NoError(t, err)
// Check balances are deleted in evmutil and migrated to precisebank
sum := sdkmath.ZeroInt()
for i := range tt.fractionalBalances {
sum = sum.Add(tt.fractionalBalances[i])
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
acc := evmutilk.GetAccount(ctx, addr)
require.Nil(t, acc, "account should be deleted")
balance := pbk.GetFractionalBalance(ctx, addr)
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
}
require.Equal(t, sum, aggregateSum, "aggregate sum should be correct")
})
}
}
func TestInitializeRemainder(t *testing.T) {
tests := []struct {
name string
giveAggregateSum sdkmath.Int
wantRemainder sdkmath.Int
}{
{
"0 remainder, 1ukava",
precisebanktypes.ConversionFactor(),
sdkmath.NewInt(0),
},
{
"0 remainder, multiple ukava",
precisebanktypes.ConversionFactor().MulRaw(5),
sdkmath.NewInt(0),
},
{
"non-zero remainder, min",
precisebanktypes.ConversionFactor().SubRaw(1),
sdkmath.NewInt(1),
},
{
"non-zero remainder, max",
sdkmath.NewInt(1),
precisebanktypes.ConversionFactor().SubRaw(1),
},
{
"non-zero remainder, half",
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
pbk := tApp.GetPrecisebankKeeper()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
remainder := app.InitializeRemainder(
ctx,
tApp.GetPrecisebankKeeper(),
tt.giveAggregateSum,
)
require.Equal(t, tt.wantRemainder, remainder)
// Check actual state
remainderAfter := pbk.GetRemainderAmount(ctx)
require.Equal(t, tt.wantRemainder, remainderAfter)
// Not checking invariants here since it requires actual balance state
aggregateSumWithRemainder := tt.giveAggregateSum.Add(remainder)
require.True(
t,
aggregateSumWithRemainder.
Mod(precisebanktypes.ConversionFactor()).
IsZero(),
"remainder + aggregate sum should be a multiple of the conversion factor",
)
})
}
}
func TestTransferFractionalBalanceReserve(t *testing.T) {
tests := []struct {
name string
initialReserve sdk.Coin
fractionalBalances []sdkmath.Int
}{
{
"balanced reserve, no remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 2 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 4 accounts, total 2 int units
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"extra reserve funds",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(2)),
[]sdkmath.Int{
// 2 accounts, total 1 int unit
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, with remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 5 accounts, total 2.5 int units
// Expected 3 int units in reserve, 0.5 remainder
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"extra reserve funds, with remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(3)),
[]sdkmath.Int{
// 3 accounts, total 1.5 int units.
// Expected 2 int units in reserve, 0.5 remainder
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
bk := tApp.GetBankKeeper()
pbk := tApp.GetPrecisebankKeeper()
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(tt.initialReserve))
require.NoError(t, err)
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
// Double check balances
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
require.Equal(t, tt.initialReserve, oldReserveBalance)
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte{byte(i)})
require.NotPanics(t, func() {
pbk.SetFractionalBalance(ctx, addr, balance)
}, "given fractional balances should be valid")
}
// Run reserve migration
err = app.TransferFractionalBalanceReserve(
ctx,
tApp.GetAccountKeeper(),
bk,
tApp.GetPrecisebankKeeper(),
)
require.NoError(t, err)
// Check old reserve is empty
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
// Check new reserve fully backs fractional balances
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
expectedReserveBal := fractionalBalanceTotal.
Quo(precisebanktypes.ConversionFactor())
// Check if there's a remainder
if fractionalBalanceTotal.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
expectedReserveBal = expectedReserveBal.Add(sdkmath.OneInt())
}
require.Equal(
t,
expectedReserveBal,
newReserveBalanceAfter.Amount,
"new reserve should equal total fractional balances + remainder",
)
})
}
}

build/lint.mk (new file, 45 lines)
@ -0,0 +1,45 @@
################################################################################
### Required Variables ###
################################################################################
ifndef DOCKER
$(error DOCKER not set)
endif
ifndef BUILD_DIR
$(error BUILD_DIR not set)
endif
################################################################################
### Lint Settings ###
################################################################################
LINT_FROM_REV ?= $(shell git merge-base origin/master HEAD)
GOLANGCI_VERSION ?= $(shell cat .golangci-version)
GOLANGCI_IMAGE_TAG ?= golangci/golangci-lint:$(GOLANGCI_VERSION)
GOLANGCI_DIR ?= $(CURDIR)/$(BUILD_DIR)/.golangci-lint
GOLANGCI_CACHE_DIR ?= $(GOLANGCI_DIR)/$(GOLANGCI_VERSION)-cache
GOLANGCI_MOD_CACHE_DIR ?= $(GOLANGCI_DIR)/go-mod
################################################################################
### Lint Target ###
################################################################################
.PHONY: lint
lint: $(GOLANGCI_CACHE_DIR) $(GOLANGCI_MOD_CACHE_DIR)
@echo "Running lint from rev $(LINT_FROM_REV), use LINT_FROM_REV var to override."
$(DOCKER) run -t --rm \
-v $(GOLANGCI_CACHE_DIR):/root/.cache \
-v $(GOLANGCI_MOD_CACHE_DIR):/go/pkg/mod \
-v $(CURDIR):/app \
-w /app \
$(GOLANGCI_IMAGE_TAG) \
golangci-lint run -v --new-from-rev $(LINT_FROM_REV)
$(GOLANGCI_CACHE_DIR):
@mkdir -p $@
$(GOLANGCI_MOD_CACHE_DIR):
@mkdir -p $@


@ -56,6 +56,7 @@ proto-update-deps: check-rsync ## Update all third party proto files
@mkdir -p client/docs
@cp -f $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
@cp -f $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
@cp -f $(ETHERMINT_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/ethermint-swagger.yml
@mkdir -p $(COSMOS_PROTO_TYPES)
@cp -f $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto


@ -1,76 +0,0 @@
package chaincfg
import (
"github.com/shopspring/decimal"
sdk "github.com/cosmos/cosmos-sdk/types"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
)
var (
Xmax, _ = sdk.NewDecFromStr("1.0") // upper limit on staked supply (as % of circ supply)
Ymin, _ = sdk.NewDecFromStr("0.05") // target APY at upper limit
Xmin, _ = sdk.NewDecFromStr("0.2") // lower limit on staked supply (as % of circ supply)
Ymax, _ = sdk.NewDecFromStr("0.15") // target APY at lower limit
decayRate, _ = sdk.NewDecFromStr("10")
)
func decExp(x sdk.Dec) sdk.Dec {
xDec := decimal.NewFromBigInt(x.BigInt(), -18)
expDec, _ := xDec.ExpTaylor(18)
expInt := expDec.Shift(18).BigInt()
return sdk.NewDecFromBigIntWithPrec(expInt, 18)
}
func NextInflationRate(ctx sdk.Context, minter minttypes.Minter, params minttypes.Params, bondedRatio sdk.Dec, circulatingRatio sdk.Dec) sdk.Dec {
X := bondedRatio.Quo(circulatingRatio)
var apy sdk.Dec
if X.LT(Xmin) {
apy = Ymax
} else {
exp := decayRate.Neg().Mul(Xmax.Sub(Xmin))
c := decExp(exp)
d := Ymin.Sub(Ymax.Mul(c)).Quo(sdk.OneDec().Sub(c))
expBonded := decayRate.Neg().Mul(X.Sub(Xmin))
cBonded := decExp(expBonded)
e := Ymax.Sub(d).Mul(cBonded)
apy = d.Add(e)
}
inflation := apy.Mul(bondedRatio)
// // The target annual inflation rate is recalculated for each provisions cycle. The
// // inflation is also subject to a rate change (positive or negative) depending on
// // the distance from the desired ratio (67%). The maximum rate change possible is
// // defined to be 13% per year, however the annual inflation is capped as between
// // 7% and 20%.
// // (1 - bondedRatio/GoalBonded) * InflationRateChange
// inflationRateChangePerYear := sdk.OneDec().
// Sub(bondedRatio.Quo(params.GoalBonded)).
// Mul(params.InflationRateChange)
// inflationRateChange := inflationRateChangePerYear.Quo(sdk.NewDec(int64(params.BlocksPerYear)))
// // adjust the new annual inflation for this next cycle
// inflation := minter.Inflation.Add(inflationRateChange) // note inflationRateChange may be negative
// if inflation.GT(params.InflationMax) {
// inflation = params.InflationMax
// }
// if inflation.LT(params.InflationMin) {
// inflation = params.InflationMin
// }
ctx.Logger().Info(
"nextInflationRate",
"bondedRatio", bondedRatio,
"circulatingRatio", circulatingRatio,
"apy", apy,
"inflation", inflation,
"params", params,
"minter", minter,
)
return inflation
}
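For reference, the interpolation implemented by this deleted function can be written compactly (reconstructed from the code above, with \lambda = decayRate and X = bondedRatio / circulatingRatio):

c = e^{-\lambda (X_{\max} - X_{\min})}, \qquad d = \frac{Y_{\min} - Y_{\max}\, c}{1 - c}

\text{apy}(X) =
\begin{cases}
Y_{\max}, & X < X_{\min} \\
d + (Y_{\max} - d)\, e^{-\lambda (X - X_{\min})}, & X \ge X_{\min}
\end{cases}
\qquad
\text{inflation} = \text{apy}(X) \cdot \text{bondedRatio}

so apy(X_min) = Y_max and apy(X_max) = Y_min, decaying exponentially in between.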


@ -1 +1 @@
a967d2fdda299ec8e1e3b99fb55bd06ecfdb0469
6862cde560c70cb82f7908e6cef22ca223465bd2


@ -22,6 +22,8 @@
},
"app_hash": "",
"app_state": {
"06-solomachine": null,
"07-tendermint": null,
"auction": {
"next_auction_id": "1",
"params": {
@ -505,6 +507,10 @@
{
"address": "kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq",
"coins": [
{
"denom": "bnb",
"amount": "500000000"
},
{
"denom": "btcb",
"amount": "200000000"
@ -525,6 +531,10 @@
"denom": "erc20/axelar/wbtc",
"amount": "1000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"amount": "200000000"
},
{
"denom": "erc20/multichain/usdc",
"amount": "1000000000000000000"
@ -556,12 +566,20 @@
{
"denom": "usdx",
"amount": "103000000000"
},
{
"denom": "xrpb",
"amount": "1000000000000000"
}
]
},
{
"address": "kava1krh7k30pc9rteejpl2zycj0vau58y8c69xkzws",
"coins": [
{
"denom": "bnb",
"amount": "100000000000000000"
},
{
"denom": "btcb",
"amount": "200000000"
@ -582,6 +600,10 @@
"denom": "erc20/axelar/wbtc",
"amount": "1000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"amount": "200000000"
},
{
"denom": "erc20/tether/usdt",
"amount": "100000000000"
@ -601,6 +623,10 @@
{
"denom": "usdx",
"amount": "103000000000"
},
{
"denom": "xrpb",
"amount": "103000000000"
}
]
},
@ -822,6 +848,7 @@
"gov_denom": "ukava",
"params": {
"circuit_breaker": false,
"liquidation_block_interval": 500,
"collateral_params": [
{
"denom": "bnb",
@ -993,8 +1020,7 @@
"check_collateralization_index_count": "10",
"conversion_factor": "6"
}
]
,
],
"debt_auction_lot": "10000000000",
"debt_auction_threshold": "100000000000",
"debt_param": {
@ -1241,7 +1267,15 @@
"votes": []
},
"community": {
"params": {}
"params": {
"upgrade_time_disable_inflation": "2023-11-01T00:00:00Z",
"upgrade_time_set_staking_rewards_per_second": "744191",
"staking_rewards_per_second": "0"
},
"staking_rewards_state": {
"last_accumulation_time": "0001-01-01T00:00:00Z",
"last_truncation_error": "0"
}
},
"crisis": {
"constant_fee": {
@ -2067,6 +2101,25 @@
}
],
"nested_types": []
},
{
"msg_type_url": "/kava.committee.v1beta1.MsgVote",
"msg_value_type_name": "MsgValueCommitteeVote",
"value_types": [
{
"name": "proposal_id",
"type": "uint64"
},
{
"name": "voter",
"type": "string"
},
{
"name": "vote_type",
"type": "int32"
}
],
"nested_types": []
}
],
"allow_unprotected_txs": false
@ -2229,22 +2282,27 @@
"deposits": [],
"votes": [],
"proposals": [],
"deposit_params": {
"deposit_params": null,
"voting_params": {
"voting_period": "604800s"
},
"tally_params": null,
"params": {
"min_deposit": [
{
"denom": "ukava",
"amount": "10000000"
}
],
"max_deposit_period": "172800s"
},
"voting_params": {
"voting_period": "600s"
},
"tally_params": {
"max_deposit_period": "172800s",
"voting_period": "604800s",
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000"
"veto_threshold": "0.334000000000000000",
"min_initial_deposit_ratio": "0.000000000000000000",
"burn_vote_quorum": false,
"burn_proposal_deposit_prevote": false,
"burn_vote_veto": true
}
},
"hard": {
@ -2519,6 +2577,24 @@
},
"reserve_factor": "0.025000000000000000",
"keeper_reward_percentage": "0.020000000000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"borrow_limit": {
"has_max_limit": true,
"maximum_limit": "0.000000000000000000",
"loan_to_value": "0.000000000000000000"
},
"spot_market_id": "btc:usd:30",
"conversion_factor": "100000000",
"interest_rate_model": {
"base_rate_apy": "0.000000000000000000",
"base_multiplier": "0.050000000000000000",
"kink": "0.800000000000000000",
"jump_multiplier": "5.000000000000000000"
},
"reserve_factor": "0.025000000000000000",
"keeper_reward_percentage": "0.020000000000000000"
}
],
"minimum_borrow_usd_value": "10.000000000000000000"
@ -2734,6 +2810,18 @@
"amount": "787"
}
]
},
{
"active": true,
"collateral_type": "erc20/bitgo/wbtc",
"start": "2022-11-11T15:00:00Z",
"end": "2025-11-11T15:00:00Z",
"rewards_per_second": [
{
"denom": "ukava",
"amount": "787"
}
]
}
],
"hard_borrow_reward_periods": [],
@ -3170,6 +3258,16 @@
}
},
"params": null,
"packetfowardmiddleware": {
"params": {
"fee_percentage": "0.000000000000000000"
},
"in_flight_packets": {}
},
"precisebank": {
"balances": [],
"remainder": "0"
},
"pricefeed": {
"params": {
"markets": [
@ -3643,6 +3741,7 @@
}
]
},
"router": {},
"savings": {
"params": {
"supported_denoms": [
@ -3814,7 +3913,8 @@
"params": {
"send_enabled": true,
"receive_enabled": true
}
},
"total_escrowed": []
},
"upgrade": {},
"validatorvesting": null,


@ -3006,6 +3006,9 @@
},
"in_flight_packets": {}
},
"precisebank": {
"remainder": "0"
},
"pricefeed": {
"params": {
"markets": [


@ -182,6 +182,23 @@
]
}
},
{
"url": "./out/swagger/kava/precisebank/v1/query.swagger.json",
"tags": {
"rename": {
"Query": "Precisebank"
}
},
"operationIds": {
"rename": [
{
"type": "regex",
"from": "(.*)",
"to": "Precisebank$1"
}
]
}
},
{
"url": "./out/swagger/kava/pricefeed/v1beta1/query.swagger.json",
"tags": {
@ -295,6 +312,30 @@
]
}
},
{
"url": "./client/docs/ethermint-swagger.yml",
"dereference": {
"circular": "ignore"
},
"tags": {
"rename": {
"Query": "Ethermint"
}
},
"operationIds": {
"rename": [
{
"type": "regex",
"from": "(.*)",
"to": "Ethermint$1"
}
]
},
"paths": {
"exclude": [
]
}
},
{
"url": "./client/docs/legacy-swagger.yml",
"dereference": {

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,381 @@
[
{
"inputs": [
{
"internalType": "string",
"name": "name",
"type": "string"
},
{
"internalType": "string",
"name": "symbol",
"type": "string"
},
{
"internalType": "uint8",
"name": "decimals_",
"type": "uint8"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "burn",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "subtractedValue",
"type": "uint256"
}
],
"name": "decreaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "addedValue",
"type": "uint256"
}
],
"name": "increaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "mint",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
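As a hedged illustration of how a client might consume this ABI (the file path and address are hypothetical; the actual client lives in client/erc20/main.go, whose diff is suppressed below), the go-ethereum abi package can parse it and pack calldata for an eth_call:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	raw, err := os.ReadFile("erc20.abi.json") // hypothetical path to the JSON above
	if err != nil {
		panic(err)
	}
	parsed, err := abi.JSON(strings.NewReader(string(raw)))
	if err != nil {
		panic(err)
	}
	// Pack calldata for balanceOf(address); the resulting bytes would go into an eth_call.
	data, err := parsed.Pack("balanceOf", common.HexToAddress("0x0000000000000000000000000000000000000001"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: 0x%x\n", data)
}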

File diff suppressed because one or more lines are too long

client/erc20/main.go (new file, 1069 lines)
File diff suppressed because one or more lines are too long


@ -6,6 +6,8 @@ import (
"fmt"
"net/url"
"github.com/0glabs/0g-chain/app"
"github.com/cosmos/cosmos-sdk/codec"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
@ -28,8 +30,20 @@ func newGrpcConnection(ctx context.Context, endpoint string) (*grpc.ClientConn,
return nil, fmt.Errorf("unknown grpc url scheme: %s", grpcUrl.Scheme)
}
// Ensure the encoding config is set up correctly with the query client
// otherwise it will produce panics like:
// invalid Go type math.Int for field ...
encodingConfig := app.MakeEncodingConfig()
protoCodec := codec.NewProtoCodec(encodingConfig.InterfaceRegistry)
grpcCodec := protoCodec.GRPCCodec()
secureOpt := grpc.WithTransportCredentials(creds)
grpcConn, err := grpc.DialContext(ctx, grpcUrl.Host, secureOpt)
grpcConn, err := grpc.DialContext(
ctx,
grpcUrl.Host,
secureOpt,
grpc.WithDefaultCallOptions(grpc.ForceCodec(grpcCodec)),
)
if err != nil {
return nil, err
}
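A hedged usage sketch of the connection helper above (the endpoint and the bank query are illustrative, not taken from this diff; banktypes is "github.com/cosmos/cosmos-sdk/x/bank/types"):

func exampleTotalSupply(ctx context.Context) error {
	conn, err := newGrpcConnection(ctx, "https://grpc.example.com:443") // hypothetical endpoint
	if err != nil {
		return err
	}
	defer conn.Close()
	// Any generated query client can reuse the connection; the ForceCodec option above
	// lets custom types such as math.Int decode without panics.
	bankClient := banktypes.NewQueryClient(conn)
	res, err := bankClient.TotalSupply(ctx, &banktypes.QueryTotalSupplyRequest{})
	if err != nil {
		return err
	}
	fmt.Println(res.Supply)
	return nil
}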


@ -28,6 +28,7 @@ import (
committeetypes "github.com/0glabs/0g-chain/x/committee/types"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
)
@ -60,11 +61,12 @@ type QueryClient struct {
// kava module query clients
Bep3 bep3types.QueryClient
Committee committeetypes.QueryClient
Evmutil evmutiltypes.QueryClient
Issuance issuancetypes.QueryClient
Pricefeed pricefeedtypes.QueryClient
Bep3 bep3types.QueryClient
Committee committeetypes.QueryClient
Evmutil evmutiltypes.QueryClient
Issuance issuancetypes.QueryClient
Pricefeed pricefeedtypes.QueryClient
Precisebank precisebanktypes.QueryClient
}
// NewQueryClient creates a new QueryClient and initializes all the module query clients
@ -95,11 +97,12 @@ func NewQueryClient(grpcEndpoint string) (*QueryClient, error) {
IbcClient: ibcclienttypes.NewQueryClient(conn),
IbcTransfer: ibctransfertypes.NewQueryClient(conn),
Bep3: bep3types.NewQueryClient(conn),
Committee: committeetypes.NewQueryClient(conn),
Evmutil: evmutiltypes.NewQueryClient(conn),
Issuance: issuancetypes.NewQueryClient(conn),
Pricefeed: pricefeedtypes.NewQueryClient(conn),
Bep3: bep3types.NewQueryClient(conn),
Committee: committeetypes.NewQueryClient(conn),
Evmutil: evmutiltypes.NewQueryClient(conn),
Issuance: issuancetypes.NewQueryClient(conn),
Pricefeed: pricefeedtypes.NewQueryClient(conn),
Precisebank: precisebanktypes.NewQueryClient(conn),
}
return client, nil
}


@ -7,6 +7,7 @@ import (
"path/filepath"
"strings"
"github.com/Kava-Labs/opendb"
cometbftdb "github.com/cometbft/cometbft-db"
"github.com/cometbft/cometbft/libs/log"
tmtypes "github.com/cometbft/cometbft/types"
@ -63,7 +64,7 @@ func (ac appCreator) newApp(
homeDir := cast.ToString(appOpts.Get(flags.FlagHome))
snapshotDir := filepath.Join(homeDir, "data", "snapshots") // TODO can these directory names be imported from somewhere?
snapshotDB, err := cometbftdb.NewDB("metadata", server.GetAppDBBackend(appOpts), snapshotDir)
snapshotDB, err := opendb.OpenDB(appOpts, snapshotDir, "metadata", server.GetAppDBBackend(appOpts))
if err != nil {
panic(err)
}


@ -31,10 +31,7 @@ func newDataCmd(opts ethermintserver.StartOptions) *cobra.Command {
}
printKeys(tree)
hash, err := tree.Hash()
if err != nil {
return err
}
hash := tree.Hash()
fmt.Printf("Hash: %X\n", hash)
fmt.Printf("Size: %X\n", tree.Size())


@ -28,11 +28,7 @@ func newHashCmd(opts ethermintserver.StartOptions) *cobra.Command {
return err
}
hash, err := tree.Hash()
if err != nil {
return err
}
fmt.Printf("Hash: %X\n", hash)
fmt.Printf("Hash: %X\n", tree.Hash())
return nil
},


@ -2,15 +2,19 @@ package iavlviewer
import (
"fmt"
"os"
"strconv"
dbm "github.com/cometbft/cometbft-db"
"cosmossdk.io/log"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/store/wrapper"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
"github.com/cosmos/iavl"
iavldb "github.com/cosmos/iavl/db"
)
const (
@ -54,7 +58,9 @@ func openPrefixTree(opts ethermintserver.StartOptions, cmd *cobra.Command, prefi
}
}()
tree, err := readTree(db, version, []byte(prefix))
cosmosdb := wrapper.NewCosmosDB(db)
tree, err := readTree(cosmosdb, version, []byte(prefix))
if err != nil {
return nil, fmt.Errorf("failed to read tree with prefix %s: %s", prefix, err)
}
@ -69,10 +75,7 @@ func readTree(db dbm.DB, version int, prefix []byte) (*iavl.MutableTree, error)
db = dbm.NewPrefixDB(db, prefix)
}
tree, err := iavl.NewMutableTree(db, DefaultCacheSize, false)
if err != nil {
return nil, err
}
tree := iavl.NewMutableTree(iavldb.NewWrapper(db), DefaultCacheSize, false, log.NewLogger(os.Stdout))
ver, err := tree.LoadVersion(int64(version))
if err != nil {
return nil, err


@ -14,14 +14,14 @@ import (
"syscall"
"time"
"github.com/0glabs/0g-chain/cmd/opendb"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/server"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"github.com/cometbft/cometbft/libs/log"
"github.com/Kava-Labs/opendb"
)
const (


@ -3,16 +3,18 @@ package main
import (
"fmt"
"os"
"path/filepath"
dbm "github.com/cometbft/cometbft-db"
tmcfg "github.com/cometbft/cometbft/config"
tmcli "github.com/cometbft/cometbft/libs/cli"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/config"
"github.com/cosmos/cosmos-sdk/client/debug"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
"github.com/cosmos/cosmos-sdk/server"
tmcfg "github.com/cometbft/cometbft/config"
tmcli "github.com/cometbft/cometbft/libs/cli"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/cosmos/cosmos-sdk/x/genutil"
@ -29,8 +31,8 @@ import (
"github.com/0glabs/0g-chain/chaincfg"
"github.com/0glabs/0g-chain/cmd/0gchaind/iavlviewer"
"github.com/0glabs/0g-chain/cmd/0gchaind/rocksdb"
"github.com/0glabs/0g-chain/cmd/opendb"
"github.com/0glabs/0g-chain/crypto/vrf"
"github.com/Kava-Labs/opendb"
)
func customKeyringOptions() keyring.Option {
@ -52,7 +54,7 @@ func NewRootCmd() *cobra.Command {
WithAccountRetriever(types.AccountRetriever{}).
WithBroadcastMode(flags.FlagBroadcastMode).
WithHomeDir(chaincfg.DefaultNodeHome).
WithKeyringOptions(customKeyringOptions()).
WithKeyringOptions(hd.EthSecp256k1Option()).
WithViper(chaincfg.EnvPrefix)
rootCmd := &cobra.Command{
Use: chaincfg.AppName,
@ -90,7 +92,14 @@ func NewRootCmd() *cobra.Command {
return rootCmd
}
// addSubCmds registers all the sub commands used by 0g-chain.
// dbOpener is a function to open `application.db`, potentially with customized options.
// dbOpener sets dataDir to "data", dbName to "application" and calls the generic OpenDB function.
func dbOpener(opts servertypes.AppOptions, rootDir string, backend dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(rootDir, "data")
return opendb.OpenDB(opts, dataDir, "application", backend)
}
// addSubCmds registers all the sub commands used by kava.
func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, defaultNodeHome string) {
gentxModule, ok := app.ModuleBasics[genutiltypes.ModuleName].(genutil.AppModuleBasic)
if !ok {
@ -120,7 +129,7 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
opts := ethermintserver.StartOptions{
AppCreator: ac.newApp,
DefaultNodeHome: chaincfg.DefaultNodeHome,
DBOpener: opendb.OpenDB,
DBOpener: dbOpener,
}
// ethermintserver adds additional flags to start the JSON-RPC server for evm support
ethermintserver.AddCommands(


@ -219,9 +219,12 @@ func shardApplicationDb(multistore *rootmulti.Store, startBlock, endBlock int64)
}
if len(pruneHeights) > 0 {
// prune application state
fmt.Printf("pruning application state to height %d\n", startBlock)
if err := multistore.PruneStores(true, pruneHeights); err != nil {
return fmt.Errorf("failed to prune application state: %s", err)
for _, pruneHeight := range pruneHeights {
if err := multistore.PruneStores(pruneHeight); err != nil {
return fmt.Errorf("failed to prune application state: %s", err)
}
}
}


@ -1,499 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
)
// rocksdbMetrics will be initialized in registerMetrics() if enableRocksdbMetrics flag set to true
var rocksdbMetrics *Metrics
// Metrics contains all rocksdb metrics which will be reported to prometheus
type Metrics struct {
// Keys
NumberKeysWritten metrics.Gauge
NumberKeysRead metrics.Gauge
NumberKeysUpdated metrics.Gauge
EstimateNumKeys metrics.Gauge
// Files
NumberFileOpens metrics.Gauge
NumberFileErrors metrics.Gauge
// Memory
BlockCacheUsage metrics.Gauge
EstimateTableReadersMem metrics.Gauge
CurSizeAllMemTables metrics.Gauge
BlockCachePinnedUsage metrics.Gauge
// Cache
BlockCacheMiss metrics.Gauge
BlockCacheHit metrics.Gauge
BlockCacheAdd metrics.Gauge
BlockCacheAddFailures metrics.Gauge
// Detailed Cache
BlockCacheIndexMiss metrics.Gauge
BlockCacheIndexHit metrics.Gauge
BlockCacheIndexBytesInsert metrics.Gauge
BlockCacheFilterMiss metrics.Gauge
BlockCacheFilterHit metrics.Gauge
BlockCacheFilterBytesInsert metrics.Gauge
BlockCacheDataMiss metrics.Gauge
BlockCacheDataHit metrics.Gauge
BlockCacheDataBytesInsert metrics.Gauge
// Latency
DBGetMicrosP50 metrics.Gauge
DBGetMicrosP95 metrics.Gauge
DBGetMicrosP99 metrics.Gauge
DBGetMicrosP100 metrics.Gauge
DBGetMicrosCount metrics.Gauge
DBWriteMicrosP50 metrics.Gauge
DBWriteMicrosP95 metrics.Gauge
DBWriteMicrosP99 metrics.Gauge
DBWriteMicrosP100 metrics.Gauge
DBWriteMicrosCount metrics.Gauge
// Write Stall
StallMicros metrics.Gauge
DBWriteStallP50 metrics.Gauge
DBWriteStallP95 metrics.Gauge
DBWriteStallP99 metrics.Gauge
DBWriteStallP100 metrics.Gauge
DBWriteStallCount metrics.Gauge
DBWriteStallSum metrics.Gauge
// Bloom Filter
BloomFilterUseful metrics.Gauge
BloomFilterFullPositive metrics.Gauge
BloomFilterFullTruePositive metrics.Gauge
// LSM Tree Stats
LastLevelReadBytes metrics.Gauge
LastLevelReadCount metrics.Gauge
NonLastLevelReadBytes metrics.Gauge
NonLastLevelReadCount metrics.Gauge
GetHitL0 metrics.Gauge
GetHitL1 metrics.Gauge
GetHitL2AndUp metrics.Gauge
}
// registerMetrics registers metrics in prometheus and initializes rocksdbMetrics variable
func registerMetrics() {
if rocksdbMetrics != nil {
// metrics already registered
return
}
labels := make([]string, 0)
rocksdbMetrics = &Metrics{
// Keys
NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_written",
Help: "",
}, labels),
NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_read",
Help: "",
}, labels),
NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_updated",
Help: "",
}, labels),
EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "estimate_num_keys",
Help: "estimated number of total keys in the active and unflushed immutable memtables and storage",
}, labels),
// Files
NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_opens",
Help: "",
}, labels),
NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_errors",
Help: "",
}, labels),
// Memory
BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_usage",
Help: "memory size for the entries residing in block cache",
}, labels),
EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "estimate_table_readers_mem",
Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
}, labels),
CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "cur_size_all_mem_tables",
Help: "approximate size of active and unflushed immutable memtables (bytes)",
}, labels),
BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_pinned_usage",
Help: "returns the memory size for the entries being pinned",
}, labels),
// Cache
BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_miss",
Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
}, labels),
BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_hit",
Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
}, labels),
BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add",
Help: "number of blocks added to block cache",
}, labels),
BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add_failures",
Help: "number of failures when adding blocks to block cache",
}, labels),
// Detailed Cache
BlockCacheIndexMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_miss",
Help: "",
}, labels),
BlockCacheIndexHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_hit",
Help: "",
}, labels),
BlockCacheIndexBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_bytes_insert",
Help: "",
}, labels),
BlockCacheFilterMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_miss",
Help: "",
}, labels),
BlockCacheFilterHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_hit",
Help: "",
}, labels),
BlockCacheFilterBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_bytes_insert",
Help: "",
}, labels),
BlockCacheDataMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_miss",
Help: "",
}, labels),
BlockCacheDataHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_hit",
Help: "",
}, labels),
BlockCacheDataBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_bytes_insert",
Help: "",
}, labels),
// Latency
DBGetMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p50",
Help: "",
}, labels),
DBGetMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p95",
Help: "",
}, labels),
DBGetMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p99",
Help: "",
}, labels),
DBGetMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p100",
Help: "",
}, labels),
DBGetMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_count",
Help: "",
}, labels),
DBWriteMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p50",
Help: "",
}, labels),
DBWriteMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p95",
Help: "",
}, labels),
DBWriteMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p99",
Help: "",
}, labels),
DBWriteMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p100",
Help: "",
}, labels),
DBWriteMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_count",
Help: "",
}, labels),
// Write Stall
StallMicros: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "stall_micros",
Help: "Writer has to wait for compaction or flush to finish.",
}, labels),
DBWriteStallP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p50",
Help: "",
}, labels),
DBWriteStallP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p95",
Help: "",
}, labels),
DBWriteStallP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p99",
Help: "",
}, labels),
DBWriteStallP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p100",
Help: "",
}, labels),
DBWriteStallCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_count",
Help: "",
}, labels),
DBWriteStallSum: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_sum",
Help: "",
}, labels),
// Bloom Filter
BloomFilterUseful: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_useful",
Help: "number of times bloom filter has avoided file reads, i.e., negatives.",
}, labels),
BloomFilterFullPositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_positive",
Help: "number of times bloom FullFilter has not avoided the reads.",
}, labels),
BloomFilterFullTruePositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_true_positive",
Help: "number of times bloom FullFilter has not avoided the reads and data actually exist.",
}, labels),
// LSM Tree Stats
LastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_bytes",
Help: "",
}, labels),
LastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_count",
Help: "",
}, labels),
NonLastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_bytes",
Help: "",
}, labels),
NonLastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_count",
Help: "",
}, labels),
GetHitL0: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l0",
Help: "number of Get() queries served by L0",
}, labels),
GetHitL1: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l1",
Help: "number of Get() queries served by L1",
}, labels),
GetHitL2AndUp: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l2_and_up",
Help: "number of Get() queries served by L2 and up",
}, labels),
}
}
// report reports metrics to prometheus based on rocksdb props and stats
func (m *Metrics) report(props *properties, stats *stats) {
// Keys
m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))
// Files
m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
m.NumberFileErrors.Set(float64(stats.NumberFileErrors))
// Memory
m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))
// Cache
m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
// Detailed Cache
m.BlockCacheIndexMiss.Set(float64(stats.BlockCacheIndexMiss))
m.BlockCacheIndexHit.Set(float64(stats.BlockCacheIndexHit))
m.BlockCacheIndexBytesInsert.Set(float64(stats.BlockCacheIndexBytesInsert))
m.BlockCacheFilterMiss.Set(float64(stats.BlockCacheFilterMiss))
m.BlockCacheFilterHit.Set(float64(stats.BlockCacheFilterHit))
m.BlockCacheFilterBytesInsert.Set(float64(stats.BlockCacheFilterBytesInsert))
m.BlockCacheDataMiss.Set(float64(stats.BlockCacheDataMiss))
m.BlockCacheDataHit.Set(float64(stats.BlockCacheDataHit))
m.BlockCacheDataBytesInsert.Set(float64(stats.BlockCacheDataBytesInsert))
// Latency
m.DBGetMicrosP50.Set(stats.DBGetMicros.P50)
m.DBGetMicrosP95.Set(stats.DBGetMicros.P95)
m.DBGetMicrosP99.Set(stats.DBGetMicros.P99)
m.DBGetMicrosP100.Set(stats.DBGetMicros.P100)
m.DBGetMicrosCount.Set(stats.DBGetMicros.Count)
m.DBWriteMicrosP50.Set(stats.DBWriteMicros.P50)
m.DBWriteMicrosP95.Set(stats.DBWriteMicros.P95)
m.DBWriteMicrosP99.Set(stats.DBWriteMicros.P99)
m.DBWriteMicrosP100.Set(stats.DBWriteMicros.P100)
m.DBWriteMicrosCount.Set(stats.DBWriteMicros.Count)
// Write Stall
m.StallMicros.Set(float64(stats.StallMicros))
m.DBWriteStallP50.Set(stats.DBWriteStallHistogram.P50)
m.DBWriteStallP95.Set(stats.DBWriteStallHistogram.P95)
m.DBWriteStallP99.Set(stats.DBWriteStallHistogram.P99)
m.DBWriteStallP100.Set(stats.DBWriteStallHistogram.P100)
m.DBWriteStallCount.Set(stats.DBWriteStallHistogram.Count)
m.DBWriteStallSum.Set(stats.DBWriteStallHistogram.Sum)
// Bloom Filter
m.BloomFilterUseful.Set(float64(stats.BloomFilterUseful))
m.BloomFilterFullPositive.Set(float64(stats.BloomFilterFullPositive))
m.BloomFilterFullTruePositive.Set(float64(stats.BloomFilterFullTruePositive))
// LSM Tree Stats
m.LastLevelReadBytes.Set(float64(stats.LastLevelReadBytes))
m.LastLevelReadCount.Set(float64(stats.LastLevelReadCount))
m.NonLastLevelReadBytes.Set(float64(stats.NonLastLevelReadBytes))
m.NonLastLevelReadCount.Set(float64(stats.NonLastLevelReadCount))
m.GetHitL0.Set(float64(stats.GetHitL0))
m.GetHitL1.Set(float64(stats.GetHitL1))
m.GetHitL2AndUp.Set(float64(stats.GetHitL2AndUp))
}


@ -1,18 +0,0 @@
//go:build !rocksdb
// +build !rocksdb
package opendb
import (
"path/filepath"
dbm "github.com/cometbft/cometbft-db"
"github.com/cosmos/cosmos-sdk/server/types"
)
// OpenDB is a copy of the default DBOpener function used by ethermint; see for details:
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647
func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(home, "data")
return dbm.NewDB("application", backendType, dataDir)
}


@ -1,398 +0,0 @@
//go:build rocksdb
// +build rocksdb
// Copyright 2023 Kava Labs, Inc.
// Copyright 2023 Cronos Labs, Inc.
//
// Derived from https://github.com/crypto-org-chain/cronos@496ce7e
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opendb
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
dbm "github.com/cometbft/cometbft-db"
"github.com/cosmos/cosmos-sdk/server/types"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cast"
)
var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default")
const (
// default tm-db block cache size for RocksDB
defaultBlockCacheSize = 1 << 30
DefaultColumnFamilyName = "default"
enableMetricsOptName = "rocksdb.enable-metrics"
reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
defaultReportMetricsIntervalSecs = 15
maxOpenFilesDBOptName = "rocksdb.max-open-files"
maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads"
tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
allowMMAPWritesDBOptName = "rocksdb.allow_mmap_writes"
allowMMAPReadsDBOptName = "rocksdb.allow_mmap_reads"
useFsyncDBOptName = "rocksdb.use_fsync"
useAdaptiveMutexDBOptName = "rocksdb.use_adaptive_mutex"
bytesPerSyncDBOptName = "rocksdb.bytes_per_sync"
maxBackgroundJobsDBOptName = "rocksdb.max-background-jobs"
writeBufferSizeCFOptName = "rocksdb.write-buffer-size"
numLevelsCFOptName = "rocksdb.num-levels"
maxWriteBufferNumberCFOptName = "rocksdb.max_write_buffer_number"
minWriteBufferNumberToMergeCFOptName = "rocksdb.min_write_buffer_number_to_merge"
maxBytesForLevelBaseCFOptName = "rocksdb.max_bytes_for_level_base"
maxBytesForLevelMultiplierCFOptName = "rocksdb.max_bytes_for_level_multiplier"
targetFileSizeBaseCFOptName = "rocksdb.target_file_size_base"
targetFileSizeMultiplierCFOptName = "rocksdb.target_file_size_multiplier"
level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
level0SlowdownWritesTriggerCFOptName = "rocksdb.level0_slowdown_writes_trigger"
blockCacheSizeBBTOOptName = "rocksdb.block_cache_size"
bitsPerKeyBBTOOptName = "rocksdb.bits_per_key"
blockSizeBBTOOptName = "rocksdb.block_size"
cacheIndexAndFilterBlocksBBTOOptName = "rocksdb.cache_index_and_filter_blocks"
pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
formatVersionBBTOOptName = "rocksdb.format_version"
asyncIOReadOptName = "rocksdb.read-async-io"
)
func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(home, "data")
if backendType == dbm.RocksDBBackend {
return openRocksdb(dataDir, appOpts)
}
return dbm.NewDB("application", backendType, dataDir)
}
// openRocksdb loads the existing options, overrides some of them with appOpts, and opens the database.
// An option is overridden only if it is explicitly specified in appOpts.
func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
optionsPath := filepath.Join(dir, "application.db")
dbOpts, cfOpts, err := LoadLatestOptions(optionsPath)
if err != nil {
return nil, err
}
// customize rocksdb options
bbtoOpts := bbtoFromAppOpts(appOpts)
dbOpts.SetBlockBasedTableFactory(bbtoOpts)
cfOpts.SetBlockBasedTableFactory(bbtoOpts)
dbOpts = overrideDBOpts(dbOpts, appOpts)
cfOpts = overrideCFOpts(cfOpts, appOpts)
readOpts := readOptsFromAppOpts(appOpts)
enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
reportMetricsIntervalSecs := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
if reportMetricsIntervalSecs == 0 {
reportMetricsIntervalSecs = defaultReportMetricsIntervalSecs
}
return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, readOpts, enableMetrics, reportMetricsIntervalSecs)
}
// LoadLatestOptions loads and returns the database and column family options.
// If the options file is not found, the database hasn't been created yet; in that case the default tm-db options are returned.
// If the database exists, it should have only one column family, named default.
func LoadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
if err != nil && strings.HasPrefix(err.Error(), "NotFound: ") {
return newDefaultOptions(), newDefaultOptions(), nil
}
if err != nil {
return nil, nil, err
}
cfNames := latestOpts.ColumnFamilyNames()
cfOpts := latestOpts.ColumnFamilyOpts()
// db should have only one column family named default
ok := len(cfNames) == 1 && cfNames[0] == DefaultColumnFamilyName
if !ok {
return nil, nil, ErrUnexpectedConfiguration
}
// return db and cf opts
return latestOpts.Options(), &cfOpts[0], nil
}
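// NOTE: grocksdb.LoadLatestOptions reads the most recent OPTIONS-* file that RocksDB
// persists inside the database directory, so settings applied on a previous run are
// picked up again on restart unless explicitly overridden via appOpts.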
// overrideDBOpts merges dbOpts and appOpts; appOpts takes precedence
func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
maxOpenFiles := appOpts.Get(maxOpenFilesDBOptName)
if maxOpenFiles != nil {
dbOpts.SetMaxOpenFiles(cast.ToInt(maxOpenFiles))
}
maxFileOpeningThreads := appOpts.Get(maxFileOpeningThreadsDBOptName)
if maxFileOpeningThreads != nil {
dbOpts.SetMaxFileOpeningThreads(cast.ToInt(maxFileOpeningThreads))
}
tableCacheNumshardbits := appOpts.Get(tableCacheNumshardbitsDBOptName)
if tableCacheNumshardbits != nil {
dbOpts.SetTableCacheNumshardbits(cast.ToInt(tableCacheNumshardbits))
}
allowMMAPWrites := appOpts.Get(allowMMAPWritesDBOptName)
if allowMMAPWrites != nil {
dbOpts.SetAllowMmapWrites(cast.ToBool(allowMMAPWrites))
}
allowMMAPReads := appOpts.Get(allowMMAPReadsDBOptName)
if allowMMAPReads != nil {
dbOpts.SetAllowMmapReads(cast.ToBool(allowMMAPReads))
}
useFsync := appOpts.Get(useFsyncDBOptName)
if useFsync != nil {
dbOpts.SetUseFsync(cast.ToBool(useFsync))
}
useAdaptiveMutex := appOpts.Get(useAdaptiveMutexDBOptName)
if useAdaptiveMutex != nil {
dbOpts.SetUseAdaptiveMutex(cast.ToBool(useAdaptiveMutex))
}
bytesPerSync := appOpts.Get(bytesPerSyncDBOptName)
if bytesPerSync != nil {
dbOpts.SetBytesPerSync(cast.ToUint64(bytesPerSync))
}
maxBackgroundJobs := appOpts.Get(maxBackgroundJobsDBOptName)
if maxBackgroundJobs != nil {
dbOpts.SetMaxBackgroundJobs(cast.ToInt(maxBackgroundJobs))
}
return dbOpts
}
// overrideCFOpts merges cfOpts and appOpts; appOpts takes precedence
func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
writeBufferSize := appOpts.Get(writeBufferSizeCFOptName)
if writeBufferSize != nil {
cfOpts.SetWriteBufferSize(cast.ToUint64(writeBufferSize))
}
numLevels := appOpts.Get(numLevelsCFOptName)
if numLevels != nil {
cfOpts.SetNumLevels(cast.ToInt(numLevels))
}
maxWriteBufferNumber := appOpts.Get(maxWriteBufferNumberCFOptName)
if maxWriteBufferNumber != nil {
cfOpts.SetMaxWriteBufferNumber(cast.ToInt(maxWriteBufferNumber))
}
minWriteBufferNumberToMerge := appOpts.Get(minWriteBufferNumberToMergeCFOptName)
if minWriteBufferNumberToMerge != nil {
cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(minWriteBufferNumberToMerge))
}
maxBytesForLevelBase := appOpts.Get(maxBytesForLevelBaseCFOptName)
if maxBytesForLevelBase != nil {
cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(maxBytesForLevelBase))
}
maxBytesForLevelMultiplier := appOpts.Get(maxBytesForLevelMultiplierCFOptName)
if maxBytesForLevelMultiplier != nil {
cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(maxBytesForLevelMultiplier))
}
targetFileSizeBase := appOpts.Get(targetFileSizeBaseCFOptName)
if targetFileSizeBase != nil {
cfOpts.SetTargetFileSizeBase(cast.ToUint64(targetFileSizeBase))
}
targetFileSizeMultiplier := appOpts.Get(targetFileSizeMultiplierCFOptName)
if targetFileSizeMultiplier != nil {
cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(targetFileSizeMultiplier))
}
level0FileNumCompactionTrigger := appOpts.Get(level0FileNumCompactionTriggerCFOptName)
if level0FileNumCompactionTrigger != nil {
cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(level0FileNumCompactionTrigger))
}
level0SlowdownWritesTrigger := appOpts.Get(level0SlowdownWritesTriggerCFOptName)
if level0SlowdownWritesTrigger != nil {
cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(level0SlowdownWritesTrigger))
}
return cfOpts
}
func readOptsFromAppOpts(appOpts types.AppOptions) *grocksdb.ReadOptions {
ro := grocksdb.NewDefaultReadOptions()
asyncIO := appOpts.Get(asyncIOReadOptName)
if asyncIO != nil {
ro.SetAsyncIO(cast.ToBool(asyncIO))
}
return ro
}
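// NOTE: SetAsyncIO enables asynchronous prefetching of data blocks during iteration;
// it only has an effect on RocksDB versions that support the async_io read option.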
func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions {
bbto := defaultBBTO()
blockCacheSize := appOpts.Get(blockCacheSizeBBTOOptName)
if blockCacheSize != nil {
cache := grocksdb.NewLRUCache(cast.ToUint64(blockCacheSize))
bbto.SetBlockCache(cache)
}
bitsPerKey := appOpts.Get(bitsPerKeyBBTOOptName)
if bitsPerKey != nil {
filter := grocksdb.NewBloomFilter(cast.ToFloat64(bitsPerKey))
bbto.SetFilterPolicy(filter)
}
blockSize := appOpts.Get(blockSizeBBTOOptName)
if blockSize != nil {
bbto.SetBlockSize(cast.ToInt(blockSize))
}
cacheIndexAndFilterBlocks := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName)
if cacheIndexAndFilterBlocks != nil {
bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(cacheIndexAndFilterBlocks))
}
pinL0FilterAndIndexBlocksInCache := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName)
if pinL0FilterAndIndexBlocksInCache != nil {
bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(pinL0FilterAndIndexBlocksInCache))
}
formatVersion := appOpts.Get(formatVersionBBTOOptName)
if formatVersion != nil {
bbto.SetFormatVersion(cast.ToInt(formatVersion))
}
return bbto
}
// newRocksDBWithOptions opens RocksDB with the provided database and column family options.
// It expects the database to have exactly one column family, named default.
func newRocksDBWithOptions(
name string,
dir string,
dbOpts *grocksdb.Options,
cfOpts *grocksdb.Options,
readOpts *grocksdb.ReadOptions,
enableMetrics bool,
reportMetricsIntervalSecs int64,
) (*dbm.RocksDB, error) {
dbPath := filepath.Join(dir, name+".db")
// Ensure path exists
if err := os.MkdirAll(dbPath, 0755); err != nil {
return nil, fmt.Errorf("failed to create db path: %w", err)
}
// EnableStatistics adds overhead, so it shouldn't be enabled in production
if enableMetrics {
dbOpts.EnableStatistics()
}
db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{DefaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
if err != nil {
return nil, err
}
if enableMetrics {
registerMetrics()
go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs))
}
wo := grocksdb.NewDefaultWriteOptions()
woSync := grocksdb.NewDefaultWriteOptions()
woSync.SetSync(true)
return dbm.NewRocksDBWithRawDB(db, readOpts, wo, woSync), nil
}
// newDefaultOptions returns the default tm-db options for RocksDB; for details see:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func newDefaultOptions() *grocksdb.Options {
// Default RocksDB options, good enough for most cases, including heavy workloads.
// 1GB table cache, 512MB write buffer (may use 50% more on heavy workloads).
// Compression: snappy by default; linking with -lsnappy is required to enable it.
bbto := defaultBBTO()
opts := grocksdb.NewDefaultOptions()
opts.SetBlockBasedTableFactory(bbto)
// SetMaxOpenFiles to 4096 seems to provide a reliable performance boost
opts.SetMaxOpenFiles(4096)
opts.SetCreateIfMissing(true)
opts.IncreaseParallelism(runtime.NumCPU())
// 1.5GB maximum memory use for writebuffer.
opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
return opts
}
// defaultBBTO returns the default tm-db block-based table options for RocksDB; for details see:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func defaultBBTO() *grocksdb.BlockBasedTableOptions {
bbto := grocksdb.NewDefaultBlockBasedTableOptions()
bbto.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize))
bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10))
return bbto
}
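// A bloom filter with 10 bits per key (as set above) gives roughly a 1% false-positive
// rate, which is the commonly recommended default for RocksDB point lookups.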
// reportMetrics periodically requests stats from RocksDB and reports them to Prometheus.
// NOTE: it should be launched as a goroutine.
func reportMetrics(db *grocksdb.DB, interval time.Duration) {
ticker := time.NewTicker(interval)
for range ticker.C {
props, stats, err := getPropsAndStats(db)
if err != nil {
continue
}
rocksdbMetrics.report(props, stats)
}
}
// getPropsAndStats gets statistics from rocksdb
func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
propsLoader := newPropsLoader(db)
props, err := propsLoader.load()
if err != nil {
return nil, nil, err
}
statMap, err := parseSerializedStats(props.OptionsStatistics)
if err != nil {
return nil, nil, err
}
statLoader := newStatLoader(statMap)
stats, err := statLoader.load()
if err != nil {
return nil, nil, err
}
return props, stats, nil
}
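// getPropsAndStats therefore combines two sources: point-in-time DB properties fetched
// via GetProperty/GetIntProperty, and cumulative counters/histograms parsed out of the
// serialized rocksdb.options-statistics property.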

@ -1,384 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"os"
"path/filepath"
"testing"
"github.com/linxGnu/grocksdb"
"github.com/stretchr/testify/require"
)
type mockAppOptions struct {
opts map[string]interface{}
}
func newMockAppOptions(opts map[string]interface{}) *mockAppOptions {
return &mockAppOptions{
opts: opts,
}
}
func (m *mockAppOptions) Get(key string) interface{} {
return m.opts[key]
}
func TestOpenRocksdb(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
maxOpenFiles int
maxFileOpeningThreads int
writeBufferSize uint64
numLevels int
}{
{
desc: "default options",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 2 options",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
writeBufferSizeCFOptName: 999_999,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 4 options",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
maxFileOpeningThreadsDBOptName: 9,
writeBufferSizeCFOptName: 999_999,
numLevelsCFOptName: 9,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
db, err := openRocksdb(dir, tc.mockAppOptions)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
})
t.Run("db doesn't exist yet", func(t *testing.T) {
defaultOpts := newDefaultOptions()
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
mockAppOpts := newMockAppOptions(map[string]interface{}{})
db, err := openRocksdb(dir, mockAppOpts)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
})
}
func TestLoadLatestOptions(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
const testCasesNum = 3
dbOptsList := make([]*grocksdb.Options, testCasesNum)
cfOptsList := make([]*grocksdb.Options, testCasesNum)
dbOptsList[0] = newDefaultOptions()
cfOptsList[0] = newDefaultOptions()
dbOptsList[1] = newDefaultOptions()
dbOptsList[1].SetMaxOpenFiles(999)
cfOptsList[1] = newDefaultOptions()
cfOptsList[1].SetWriteBufferSize(999_999)
dbOptsList[2] = newDefaultOptions()
dbOptsList[2].SetMaxOpenFiles(999)
dbOptsList[2].SetMaxFileOpeningThreads(9)
cfOptsList[2] = newDefaultOptions()
cfOptsList[2].SetWriteBufferSize(999_999)
cfOptsList[2].SetNumLevels(9)
for _, tc := range []struct {
desc string
dbOpts *grocksdb.Options
cfOpts *grocksdb.Options
maxOpenFiles int
maxFileOpeningThreads int
writeBufferSize uint64
numLevels int
}{
{
desc: "default options",
dbOpts: dbOptsList[0],
cfOpts: cfOptsList[0],
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 2 options",
dbOpts: dbOptsList[1],
cfOpts: cfOptsList[1],
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 4 options",
dbOpts: dbOptsList[2],
cfOpts: cfOptsList[2],
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
name := "application"
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
})
t.Run("db doesn't exist yet", func(t *testing.T) {
defaultOpts := newDefaultOptions()
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
})
}
func TestOverrideDBOpts(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
maxOpenFiles int
maxFileOpeningThreads int
}{
{
desc: "override nothing",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
},
{
desc: "override max-open-files",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
},
{
desc: "override max-file-opening-threads",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxFileOpeningThreadsDBOptName: 9,
}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: 9,
},
{
desc: "override max-open-files and max-file-opening-threads",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
maxFileOpeningThreadsDBOptName: 9,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
dbOpts := newDefaultOptions()
dbOpts = overrideDBOpts(dbOpts, tc.mockAppOptions)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
})
}
}
func TestOverrideCFOpts(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
writeBufferSize uint64
numLevels int
}{
{
desc: "override nothing",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "override write-buffer-size",
mockAppOptions: newMockAppOptions(map[string]interface{}{
writeBufferSizeCFOptName: 999_999,
}),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "override num-levels",
mockAppOptions: newMockAppOptions(map[string]interface{}{
numLevelsCFOptName: 9,
}),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: 9,
},
{
desc: "override write-buffer-size and num-levels",
mockAppOptions: newMockAppOptions(map[string]interface{}{
writeBufferSizeCFOptName: 999_999,
numLevelsCFOptName: 9,
}),
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
cfOpts := newDefaultOptions()
cfOpts = overrideCFOpts(cfOpts, tc.mockAppOptions)
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
}
func TestReadOptsFromAppOpts(t *testing.T) {
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
asyncIO bool
}{
{
desc: "default options",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
asyncIO: false,
},
{
desc: "set asyncIO option to true",
mockAppOptions: newMockAppOptions(map[string]interface{}{
asyncIOReadOptName: true,
}),
asyncIO: true,
},
} {
t.Run(tc.desc, func(t *testing.T) {
readOpts := readOptsFromAppOpts(tc.mockAppOptions)
require.Equal(t, tc.asyncIO, readOpts.IsAsyncIO())
})
}
}
func TestNewRocksDBWithOptions(t *testing.T) {
defaultOpts := newDefaultOptions()
name := "application"
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
dbOpts := newDefaultOptions()
dbOpts.SetMaxOpenFiles(999)
cfOpts := newDefaultOptions()
cfOpts.SetWriteBufferSize(999_999)
db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err = LoadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, 999, dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), dbOpts.GetNumLevels())
}
func TestNewDefaultOptions(t *testing.T) {
defaultOpts := newDefaultOptions()
maxOpenFiles := defaultOpts.GetMaxOpenFiles()
require.Equal(t, 4096, maxOpenFiles)
}

@ -1,87 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strings"
"errors"
)
type propsGetter interface {
GetProperty(propName string) (value string)
GetIntProperty(propName string) (value uint64, success bool)
}
type propsLoader struct {
db propsGetter
errorMsgs []string
}
func newPropsLoader(db propsGetter) *propsLoader {
return &propsLoader{
db: db,
errorMsgs: make([]string, 0),
}
}
func (l *propsLoader) load() (*properties, error) {
props := &properties{
BaseLevel: l.getIntProperty("rocksdb.base-level"),
BlockCacheCapacity: l.getIntProperty("rocksdb.block-cache-capacity"),
BlockCachePinnedUsage: l.getIntProperty("rocksdb.block-cache-pinned-usage"),
BlockCacheUsage: l.getIntProperty("rocksdb.block-cache-usage"),
CurSizeActiveMemTable: l.getIntProperty("rocksdb.cur-size-active-mem-table"),
CurSizeAllMemTables: l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
EstimateLiveDataSize: l.getIntProperty("rocksdb.estimate-live-data-size"),
EstimateNumKeys: l.getIntProperty("rocksdb.estimate-num-keys"),
EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
LiveSSTFilesSize: l.getIntProperty("rocksdb.live-sst-files-size"),
SizeAllMemTables: l.getIntProperty("rocksdb.size-all-mem-tables"),
OptionsStatistics: l.getProperty("rocksdb.options-statistics"),
}
if len(l.errorMsgs) != 0 {
errorMsg := strings.Join(l.errorMsgs, ";")
return nil, errors.New(errorMsg)
}
return props, nil
}
func (l *propsLoader) getProperty(propName string) string {
value := l.db.GetProperty(propName)
if value == "" {
l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
return ""
}
return value
}
func (l *propsLoader) getIntProperty(propName string) uint64 {
value, ok := l.db.GetIntProperty(propName)
if !ok {
l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
return 0
}
return value
}
type properties struct {
BaseLevel uint64
BlockCacheCapacity uint64
BlockCachePinnedUsage uint64
BlockCacheUsage uint64
CurSizeActiveMemTable uint64
CurSizeAllMemTables uint64
EstimateLiveDataSize uint64
EstimateNumKeys uint64
EstimateTableReadersMem uint64
LiveSSTFilesSize uint64
SizeAllMemTables uint64
OptionsStatistics string
}

@ -1,112 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"testing"
"github.com/stretchr/testify/require"
)
type mockPropsGetter struct {
props map[string]string
intProps map[string]uint64
}
func newMockPropsGetter(
props map[string]string,
intProps map[string]uint64,
) *mockPropsGetter {
return &mockPropsGetter{
props: props,
intProps: intProps,
}
}
func (m *mockPropsGetter) GetProperty(propName string) string {
return m.props[propName]
}
func (m *mockPropsGetter) GetIntProperty(propName string) (uint64, bool) {
prop, ok := m.intProps[propName]
return prop, ok
}
func TestPropsLoader(t *testing.T) {
defaultProps := map[string]string{
"rocksdb.options-statistics": "1",
}
defaultIntProps := map[string]uint64{
"rocksdb.base-level": 1,
"rocksdb.block-cache-capacity": 2,
"rocksdb.block-cache-pinned-usage": 3,
"rocksdb.block-cache-usage": 4,
"rocksdb.cur-size-active-mem-table": 5,
"rocksdb.cur-size-all-mem-tables": 6,
"rocksdb.estimate-live-data-size": 7,
"rocksdb.estimate-num-keys": 8,
"rocksdb.estimate-table-readers-mem": 9,
"rocksdb.live-sst-files-size": 10,
"rocksdb.size-all-mem-tables": 11,
}
missingProps := make(map[string]string)
missingIntProps := make(map[string]uint64)
defaultExpectedProps := properties{
BaseLevel: 1,
BlockCacheCapacity: 2,
BlockCachePinnedUsage: 3,
BlockCacheUsage: 4,
CurSizeActiveMemTable: 5,
CurSizeAllMemTables: 6,
EstimateLiveDataSize: 7,
EstimateNumKeys: 8,
EstimateTableReadersMem: 9,
LiveSSTFilesSize: 10,
SizeAllMemTables: 11,
OptionsStatistics: "1",
}
for _, tc := range []struct {
desc string
props map[string]string
intProps map[string]uint64
expectedProps *properties
success bool
}{
{
desc: "success case",
props: defaultProps,
intProps: defaultIntProps,
expectedProps: &defaultExpectedProps,
success: true,
},
{
desc: "missing props",
props: missingProps,
intProps: defaultIntProps,
expectedProps: nil,
success: false,
},
{
desc: "missing integer props",
props: defaultProps,
intProps: missingIntProps,
expectedProps: nil,
success: false,
},
} {
t.Run(tc.desc, func(t *testing.T) {
mockPropsGetter := newMockPropsGetter(tc.props, tc.intProps)
propsLoader := newPropsLoader(mockPropsGetter)
actualProps, err := propsLoader.load()
if tc.success {
require.NoError(t, err)
} else {
require.Error(t, err)
}
require.Equal(t, tc.expectedProps, actualProps)
})
}
}

@ -1,111 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strings"
"errors"
)
// stat represents one line of rocksdb statistics data; a stat may have one or more properties.
// Examples:
// - rocksdb.block.cache.miss COUNT : 5
// - rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
// `rocksdb.compaction.times.micros` is the stat name; P50, COUNT, SUM, etc. are its properties.
type stat struct {
name string
props map[string]string
}
// parseSerializedStats parses serializedStats into a map of stat objects.
// Example of serializedStats:
// rocksdb.block.cache.miss COUNT : 5
// rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
func parseSerializedStats(serializedStats string) (map[string]*stat, error) {
stats := make(map[string]*stat, 0)
serializedStatList := strings.Split(serializedStats, "\n")
if len(serializedStatList) == 0 {
return nil, errors.New("serializedStats is empty")
}
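// the serialized statistics string ends with a trailing newline, so the final element
// produced by strings.Split is an empty string and is dropped here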
serializedStatList = serializedStatList[:len(serializedStatList)-1]
// iterate over stats line by line
for _, serializedStat := range serializedStatList {
stat, err := parseSerializedStat(serializedStat)
if err != nil {
return nil, err
}
stats[stat.name] = stat
}
return stats, nil
}
// parseSerializedStat parses serializedStat into a stat object.
// Example of serializedStat:
// rocksdb.block.cache.miss COUNT : 5
func parseSerializedStat(serializedStat string) (*stat, error) {
tokens := strings.Split(serializedStat, " ")
tokensNum := len(tokens)
if err := validateTokens(tokens); err != nil {
return nil, fmt.Errorf("tokens are invalid: %v", err)
}
props := make(map[string]string)
for idx := 1; idx < tokensNum; idx += 3 {
// should never happen, but double-check to avoid an unexpected panic
if idx+2 >= tokensNum {
break
}
key := tokens[idx]
sep := tokens[idx+1]
value := tokens[idx+2]
if err := validateStatProperty(key, value, sep); err != nil {
return nil, fmt.Errorf("invalid stat property: %v", err)
}
props[key] = value
}
return &stat{
name: tokens[0],
props: props,
}, nil
}
// validateTokens validates that tokens contain a stat name followed by N (key, sep, value) triples
func validateTokens(tokens []string) error {
tokensNum := len(tokens)
if tokensNum < 4 {
return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens)
}
if (tokensNum-1)%3 != 0 {
return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens)
}
if tokens[0] == "" {
return fmt.Errorf("stat name shouldn't be empty")
}
return nil
}
// validateStatProperty validates that key and value are separated by the expected separator and that neither is empty
func validateStatProperty(key, value, sep string) error {
if key == "" {
return fmt.Errorf("key shouldn't be empty")
}
if sep != ":" {
return fmt.Errorf("separator should be :")
}
if value == "" {
return fmt.Errorf("value shouldn't be empty")
}
return nil
}

@ -1,208 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestParseSerializedStats(t *testing.T) {
defaultSerializedStats := `rocksdb.block.cache.miss COUNT : 1
rocksdb.block.cache.hit COUNT : 2
rocksdb.block.cache.add COUNT : 3
rocksdb.block.cache.add.failures COUNT : 4
rocksdb.compaction.times.micros P50 : 1 P95 : 2 P99 : 3 P100 : 4 COUNT : 5 SUM : 6
rocksdb.compaction.times.cpu_micros P50 : 7 P95 : 8 P99 : 9 P100 : 10 COUNT : 11 SUM : 12
`
defaultExpectedStatMap := map[string]*stat{
"rocksdb.block.cache.miss": {
name: "rocksdb.block.cache.miss",
props: map[string]string{
"COUNT": "1",
},
},
"rocksdb.block.cache.hit": {
name: "rocksdb.block.cache.hit",
props: map[string]string{
"COUNT": "2",
},
},
"rocksdb.block.cache.add": {
name: "rocksdb.block.cache.add",
props: map[string]string{
"COUNT": "3",
},
},
"rocksdb.block.cache.add.failures": {
name: "rocksdb.block.cache.add.failures",
props: map[string]string{
"COUNT": "4",
},
},
"rocksdb.compaction.times.micros": {
name: "rocksdb.compaction.times.micros",
props: map[string]string{
"P50": "1",
"P95": "2",
"P99": "3",
"P100": "4",
"COUNT": "5",
"SUM": "6",
},
},
"rocksdb.compaction.times.cpu_micros": {
name: "rocksdb.compaction.times.cpu_micros",
props: map[string]string{
"P50": "7",
"P95": "8",
"P99": "9",
"P100": "10",
"COUNT": "11",
"SUM": "12",
},
},
}
for _, tc := range []struct {
desc string
serializedStats string
expectedStatMap map[string]*stat
errMsg string
}{
{
desc: "success case",
serializedStats: defaultSerializedStats,
expectedStatMap: defaultExpectedStatMap,
errMsg: "",
},
{
desc: "missing value #1",
serializedStats: `rocksdb.block.cache.miss COUNT :
`,
expectedStatMap: nil,
errMsg: "invalid number of tokens",
},
{
desc: "missing value #2",
serializedStats: `rocksdb.compaction.times.micros P50 : 1 P95 :
`,
expectedStatMap: nil,
errMsg: "invalid number of tokens",
},
{
desc: "missing stat name",
serializedStats: ` COUNT : 1
`,
expectedStatMap: nil,
errMsg: "stat name shouldn't be empty",
},
{
desc: "empty stat",
serializedStats: ``,
expectedStatMap: make(map[string]*stat),
errMsg: "",
},
} {
t.Run(tc.desc, func(t *testing.T) {
actualStatMap, err := parseSerializedStats(tc.serializedStats)
if tc.errMsg == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.errMsg)
}
require.Equal(t, tc.expectedStatMap, actualStatMap)
})
}
}
func TestValidateTokens(t *testing.T) {
for _, tc := range []struct {
desc string
tokens []string
errMsg string
}{
{
desc: "success case",
tokens: []string{"name", "key", ":", "value"},
errMsg: "",
},
{
desc: "missing value #1",
tokens: []string{"name", "key", ":"},
errMsg: "invalid number of tokens",
},
{
desc: "missing value #2",
tokens: []string{"name", "key", ":", "value", "key2", ":"},
errMsg: "invalid number of tokens",
},
{
desc: "empty stat name",
tokens: []string{"", "key", ":", "value"},
errMsg: "stat name shouldn't be empty",
},
} {
t.Run(tc.desc, func(t *testing.T) {
err := validateTokens(tc.tokens)
if tc.errMsg == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.errMsg)
}
})
}
}
func TestValidateStatProperty(t *testing.T) {
for _, tc := range []struct {
desc string
key string
value string
sep string
errMsg string
}{
{
desc: "success case",
key: "key",
value: "value",
sep: ":",
errMsg: "",
},
{
desc: "missing key",
key: "",
value: "value",
sep: ":",
errMsg: "key shouldn't be empty",
},
{
desc: "missing value",
key: "key",
value: "",
sep: ":",
errMsg: "value shouldn't be empty",
},
{
desc: "invalid separator",
key: "key",
value: "value",
sep: "#",
errMsg: "separator should be :",
},
} {
t.Run(tc.desc, func(t *testing.T) {
err := validateStatProperty(tc.key, tc.value, tc.sep)
if tc.errMsg == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.errMsg)
}
})
}
}

@ -1,284 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strconv"
)
const (
sum = "SUM"
count = "COUNT"
p50 = "P50"
p95 = "P95"
p99 = "P99"
p100 = "P100"
)
type statLoader struct {
// statMap contains the map of stat objects returned by the parseSerializedStats function.
// Examples of stats:
// #1: rocksdb.block.cache.miss COUNT : 5
// #2: rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
// Case #1 is converted into an int64, case #2 into a float64Histogram.
statMap map[string]*stat
// NOTE: some methods accumulate errors instead of returning them; these methods are private and not intended for use outside this type.
errors []error
}
func newStatLoader(statMap map[string]*stat) *statLoader {
return &statLoader{
statMap: statMap,
errors: make([]error, 0),
}
}
type stats struct {
NumberKeysWritten int64
NumberKeysRead int64
NumberKeysUpdated int64
// total block cache misses
// BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
// BLOCK_CACHE_FILTER_MISS +
// BLOCK_CACHE_DATA_MISS;
// BLOCK_CACHE_INDEX_MISS: # of times cache miss when accessing index block from block cache.
// BLOCK_CACHE_FILTER_MISS: # of times cache miss when accessing filter block from block cache.
// BLOCK_CACHE_DATA_MISS: # of times cache miss when accessing data block from block cache.
BlockCacheMiss int64
// total block cache hit
// BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
// BLOCK_CACHE_FILTER_HIT +
// BLOCK_CACHE_DATA_HIT;
// BLOCK_CACHE_INDEX_HIT: # of times cache hit when accessing index block from block cache.
// BLOCK_CACHE_FILTER_HIT: # of times cache hit when accessing filter block from block cache.
// BLOCK_CACHE_DATA_HIT: # of times cache hit when accessing data block from block cache.
BlockCacheHit int64
// # of blocks added to block cache.
BlockCacheAdd int64
// # of failures when adding blocks to block cache.
BlockCacheAddFailures int64
BlockCacheIndexMiss int64
BlockCacheIndexHit int64
BlockCacheIndexBytesInsert int64
BlockCacheFilterMiss int64
BlockCacheFilterHit int64
BlockCacheFilterBytesInsert int64
BlockCacheDataMiss int64
BlockCacheDataHit int64
BlockCacheDataBytesInsert int64
CompactReadBytes int64 // Bytes read during compaction
CompactWriteBytes int64 // Bytes written during compaction
CompactionTimesMicros *float64Histogram
CompactionTimesCPUMicros *float64Histogram
NumFilesInSingleCompaction *float64Histogram
// Read amplification statistics.
// Read amplification can be calculated using this formula
// (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
//
// REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
// TODO(yevhenii): seems not working?
ReadAmpEstimateUsefulBytes int64 // Estimate of total bytes actually used.
ReadAmpTotalReadBytes int64 // Total size of loaded data blocks.
NumberFileOpens int64
NumberFileErrors int64
// # of times bloom filter has avoided file reads, i.e., negatives.
BloomFilterUseful int64
// # of times bloom FullFilter has not avoided the reads.
BloomFilterFullPositive int64
// # of times bloom FullFilter has not avoided the reads and data actually
// exist.
BloomFilterFullTruePositive int64
// # of memtable hits.
MemtableHit int64
// # of memtable misses.
MemtableMiss int64
// # of Get() queries served by L0
GetHitL0 int64
// # of Get() queries served by L1
GetHitL1 int64
// # of Get() queries served by L2 and up
GetHitL2AndUp int64
// The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
// DB::Merge(), and DB::Write().
BytesWritten int64
// The number of uncompressed bytes read from DB::Get(). It could be
// either from memtables, cache, or table files.
// For the number of logical bytes read from DB::MultiGet(),
// please use NUMBER_MULTIGET_BYTES_READ.
BytesRead int64
// Writer has to wait for compaction or flush to finish.
StallMicros int64
DBWriteStallHistogram *float64Histogram
// Last level and non-last level read statistics
LastLevelReadBytes int64
LastLevelReadCount int64
NonLastLevelReadBytes int64
NonLastLevelReadCount int64
DBGetMicros *float64Histogram
DBWriteMicros *float64Histogram
// Value size distribution in each operation
BytesPerRead *float64Histogram
BytesPerWrite *float64Histogram
BytesPerMultiget *float64Histogram
// Time spent flushing memtable to disk
FlushMicros *float64Histogram
}
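// NOTE: RocksDB ticker statistics are cumulative counters for the lifetime of the DB
// handle, so consumers of these values typically derive rates downstream (for example
// with Prometheus rate()) rather than expecting per-interval deltas here.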
type float64Histogram struct {
Sum float64
Count float64
P50 float64
P95 float64
P99 float64
P100 float64
}
func (l *statLoader) error() error {
if len(l.errors) != 0 {
return fmt.Errorf("%v", l.errors)
}
return nil
}
func (l *statLoader) load() (*stats, error) {
stats := &stats{
NumberKeysWritten: l.getInt64StatValue("rocksdb.number.keys.written", count),
NumberKeysRead: l.getInt64StatValue("rocksdb.number.keys.read", count),
NumberKeysUpdated: l.getInt64StatValue("rocksdb.number.keys.updated", count),
BlockCacheMiss: l.getInt64StatValue("rocksdb.block.cache.miss", count),
BlockCacheHit: l.getInt64StatValue("rocksdb.block.cache.hit", count),
BlockCacheAdd: l.getInt64StatValue("rocksdb.block.cache.add", count),
BlockCacheAddFailures: l.getInt64StatValue("rocksdb.block.cache.add.failures", count),
BlockCacheIndexMiss: l.getInt64StatValue("rocksdb.block.cache.index.miss", count),
BlockCacheIndexHit: l.getInt64StatValue("rocksdb.block.cache.index.hit", count),
BlockCacheIndexBytesInsert: l.getInt64StatValue("rocksdb.block.cache.index.bytes.insert", count),
BlockCacheFilterMiss: l.getInt64StatValue("rocksdb.block.cache.filter.miss", count),
BlockCacheFilterHit: l.getInt64StatValue("rocksdb.block.cache.filter.hit", count),
BlockCacheFilterBytesInsert: l.getInt64StatValue("rocksdb.block.cache.filter.bytes.insert", count),
BlockCacheDataMiss: l.getInt64StatValue("rocksdb.block.cache.data.miss", count),
BlockCacheDataHit: l.getInt64StatValue("rocksdb.block.cache.data.hit", count),
BlockCacheDataBytesInsert: l.getInt64StatValue("rocksdb.block.cache.data.bytes.insert", count),
CompactReadBytes: l.getInt64StatValue("rocksdb.compact.read.bytes", count),
CompactWriteBytes: l.getInt64StatValue("rocksdb.compact.write.bytes", count),
CompactionTimesMicros: l.getFloat64HistogramStatValue("rocksdb.compaction.times.micros"),
CompactionTimesCPUMicros: l.getFloat64HistogramStatValue("rocksdb.compaction.times.cpu_micros"),
NumFilesInSingleCompaction: l.getFloat64HistogramStatValue("rocksdb.numfiles.in.singlecompaction"),
ReadAmpEstimateUsefulBytes: l.getInt64StatValue("rocksdb.read.amp.estimate.useful.bytes", count),
ReadAmpTotalReadBytes: l.getInt64StatValue("rocksdb.read.amp.total.read.bytes", count),
NumberFileOpens: l.getInt64StatValue("rocksdb.no.file.opens", count),
NumberFileErrors: l.getInt64StatValue("rocksdb.no.file.errors", count),
BloomFilterUseful: l.getInt64StatValue("rocksdb.bloom.filter.useful", count),
BloomFilterFullPositive: l.getInt64StatValue("rocksdb.bloom.filter.full.positive", count),
BloomFilterFullTruePositive: l.getInt64StatValue("rocksdb.bloom.filter.full.true.positive", count),
MemtableHit: l.getInt64StatValue("rocksdb.memtable.hit", count),
MemtableMiss: l.getInt64StatValue("rocksdb.memtable.miss", count),
GetHitL0: l.getInt64StatValue("rocksdb.l0.hit", count),
GetHitL1: l.getInt64StatValue("rocksdb.l1.hit", count),
GetHitL2AndUp: l.getInt64StatValue("rocksdb.l2andup.hit", count),
BytesWritten: l.getInt64StatValue("rocksdb.bytes.written", count),
BytesRead: l.getInt64StatValue("rocksdb.bytes.read", count),
StallMicros: l.getInt64StatValue("rocksdb.stall.micros", count),
DBWriteStallHistogram: l.getFloat64HistogramStatValue("rocksdb.db.write.stall"),
LastLevelReadBytes: l.getInt64StatValue("rocksdb.last.level.read.bytes", count),
LastLevelReadCount: l.getInt64StatValue("rocksdb.last.level.read.count", count),
NonLastLevelReadBytes: l.getInt64StatValue("rocksdb.non.last.level.read.bytes", count),
NonLastLevelReadCount: l.getInt64StatValue("rocksdb.non.last.level.read.count", count),
DBGetMicros: l.getFloat64HistogramStatValue("rocksdb.db.get.micros"),
DBWriteMicros: l.getFloat64HistogramStatValue("rocksdb.db.write.micros"),
BytesPerRead: l.getFloat64HistogramStatValue("rocksdb.bytes.per.read"),
BytesPerWrite: l.getFloat64HistogramStatValue("rocksdb.bytes.per.write"),
BytesPerMultiget: l.getFloat64HistogramStatValue("rocksdb.bytes.per.multiget"),
FlushMicros: l.getFloat64HistogramStatValue("rocksdb.db.flush.micros"),
}
err := l.error()
if err != nil {
return nil, err
}
return stats, nil
}
// getFloat64HistogramStatValue converts stat object into float64Histogram
func (l *statLoader) getFloat64HistogramStatValue(statName string) *float64Histogram {
return &float64Histogram{
Sum: l.getFloat64StatValue(statName, sum),
Count: l.getFloat64StatValue(statName, count),
P50: l.getFloat64StatValue(statName, p50),
P95: l.getFloat64StatValue(statName, p95),
P99: l.getFloat64StatValue(statName, p99),
P100: l.getFloat64StatValue(statName, p100),
}
}
// getInt64StatValue converts property of stat object into int64
func (l *statLoader) getInt64StatValue(statName, propName string) int64 {
stringVal := l.getStatValue(statName, propName)
if stringVal == "" {
l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
return 0
}
intVal, err := strconv.ParseInt(stringVal, 10, 64)
if err != nil {
l.errors = append(l.errors, fmt.Errorf("can't parse int: %v", err))
return 0
}
return intVal
}
// getFloat64StatValue converts property of stat object into float64
func (l *statLoader) getFloat64StatValue(statName, propName string) float64 {
stringVal := l.getStatValue(statName, propName)
if stringVal == "" {
l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
return 0
}
floatVal, err := strconv.ParseFloat(stringVal, 64)
if err != nil {
l.errors = append(l.errors, fmt.Errorf("can't parse float: %v", err))
return 0
}
return floatVal
}
// getStatValue gets property of stat object
func (l *statLoader) getStatValue(statName, propName string) string {
stat, ok := l.statMap[statName]
if !ok {
l.errors = append(l.errors, fmt.Errorf("stat %v doesn't exist", statName))
return ""
}
prop, ok := stat.props[propName]
if !ok {
l.errors = append(l.errors, fmt.Errorf("stat %v doesn't have %v property", statName, propName))
return ""
}
return prop
}

@ -1,90 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestStatsLoader(t *testing.T) {
defaultStat := stat{
props: map[string]string{
"COUNT": "1",
},
}
defaultHistogramStat := stat{
props: map[string]string{
"P50": "1",
"P95": "2",
"P99": "3",
"P100": "4",
"COUNT": "5",
"SUM": "6",
},
}
defaultStatMap := map[string]*stat{
"rocksdb.number.keys.written": &defaultStat,
"rocksdb.number.keys.read": &defaultStat,
"rocksdb.number.keys.updated": &defaultStat,
"rocksdb.block.cache.miss": &defaultStat,
"rocksdb.block.cache.hit": &defaultStat,
"rocksdb.block.cache.add": &defaultStat,
"rocksdb.block.cache.add.failures": &defaultStat,
"rocksdb.block.cache.index.miss": &defaultStat,
"rocksdb.block.cache.index.hit": &defaultStat,
"rocksdb.block.cache.index.bytes.insert": &defaultStat,
"rocksdb.block.cache.filter.miss": &defaultStat,
"rocksdb.block.cache.filter.hit": &defaultStat,
"rocksdb.block.cache.filter.bytes.insert": &defaultStat,
"rocksdb.block.cache.data.miss": &defaultStat,
"rocksdb.block.cache.data.hit": &defaultStat,
"rocksdb.block.cache.data.bytes.insert": &defaultStat,
"rocksdb.compact.read.bytes": &defaultStat,
"rocksdb.compact.write.bytes": &defaultStat,
"rocksdb.compaction.times.micros": &defaultHistogramStat,
"rocksdb.compaction.times.cpu_micros": &defaultHistogramStat,
"rocksdb.numfiles.in.singlecompaction": &defaultHistogramStat,
"rocksdb.read.amp.estimate.useful.bytes": &defaultStat,
"rocksdb.read.amp.total.read.bytes": &defaultStat,
"rocksdb.no.file.opens": &defaultStat,
"rocksdb.no.file.errors": &defaultStat,
"rocksdb.bloom.filter.useful": &defaultStat,
"rocksdb.bloom.filter.full.positive": &defaultStat,
"rocksdb.bloom.filter.full.true.positive": &defaultStat,
"rocksdb.memtable.hit": &defaultStat,
"rocksdb.memtable.miss": &defaultStat,
"rocksdb.l0.hit": &defaultStat,
"rocksdb.l1.hit": &defaultStat,
"rocksdb.l2andup.hit": &defaultStat,
"rocksdb.bytes.written": &defaultStat,
"rocksdb.bytes.read": &defaultStat,
"rocksdb.stall.micros": &defaultStat,
"rocksdb.db.write.stall": &defaultHistogramStat,
"rocksdb.last.level.read.bytes": &defaultStat,
"rocksdb.last.level.read.count": &defaultStat,
"rocksdb.non.last.level.read.bytes": &defaultStat,
"rocksdb.non.last.level.read.count": &defaultStat,
"rocksdb.db.get.micros": &defaultHistogramStat,
"rocksdb.db.write.micros": &defaultHistogramStat,
"rocksdb.bytes.per.read": &defaultHistogramStat,
"rocksdb.bytes.per.write": &defaultHistogramStat,
"rocksdb.bytes.per.multiget": &defaultHistogramStat,
"rocksdb.db.flush.micros": &defaultHistogramStat,
}
statLoader := newStatLoader(defaultStatMap)
stats, err := statLoader.load()
require.NoError(t, err)
require.Equal(t, stats.NumberKeysWritten, int64(1))
require.Equal(t, stats.NumberKeysRead, int64(1))
require.Equal(t, stats.CompactionTimesMicros.P50, float64(1))
require.Equal(t, stats.CompactionTimesMicros.P95, float64(2))
require.Equal(t, stats.CompactionTimesMicros.P99, float64(3))
require.Equal(t, stats.CompactionTimesMicros.P100, float64(4))
require.Equal(t, stats.CompactionTimesMicros.Count, float64(5))
require.Equal(t, stats.CompactionTimesMicros.Sum, float64(6))
}

File diff suppressed because it is too large

@ -5,7 +5,7 @@
"private": true,
"description": "Solidity contracts for 0g Blockchain",
"engines": {
"node": ">=18.0.0"
"node": ">=20.0.0"
},
"scripts": {
"build": "npm run clean && npm run compile && npm run ethermint-json",
@ -23,14 +23,14 @@
"test": "hardhat test"
},
"devDependencies": {
"@nomicfoundation/hardhat-toolbox": "^2.0.2",
"@nomicfoundation/hardhat-toolbox": "^5.0.0",
"@openzeppelin/contracts": "4.8.3",
"@typescript-eslint/eslint-plugin": "^5.59.6",
"@typescript-eslint/parser": "^5.59.6",
"eslint": "^8.40.0",
"eslint-config-prettier": "8.8.0",
"eslint-plugin-prettier": "^4.2.1",
"hardhat": "^2.14.0",
"hardhat": "^2.22.8",
"prettier": "2.8.8",
"prettier-plugin-solidity": "^1.1.3",
"solhint": "^3.4.1",

@ -26,7 +26,7 @@ rm -rf $DATA
BINARY=kava
# Create new data directory, overwriting any that alread existed
# Create new data directory, overwriting any that already existed
chainID="kavalocalnet_8888-1"
$BINARY init validator --chain-id $chainID

File diff suppressed because it is too large

go.mod (123 changed lines)

@ -4,61 +4,62 @@ go 1.21
require (
cosmossdk.io/errors v1.0.1
cosmossdk.io/log v1.3.1
cosmossdk.io/math v1.3.0
cosmossdk.io/simapp v0.0.0-20231127212628-044ff4d8c015
github.com/Kava-Labs/opendb v0.0.0-20240719173129-a2f11f6d7e51
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cometbft/cometbft v0.37.4
github.com/cometbft/cometbft v0.37.9
github.com/cometbft/cometbft-db v0.9.1
github.com/coniks-sys/coniks-go v0.0.0-20180722014011-11acf4819b71
github.com/consensys/gnark-crypto v0.12.1
github.com/cosmos/cosmos-proto v1.0.0-beta.4
github.com/cosmos/cosmos-db v1.0.2
github.com/cosmos/cosmos-proto v1.0.0-beta.5
github.com/cosmos/cosmos-sdk v0.47.10
github.com/cosmos/go-bip39 v1.0.0
github.com/cosmos/gogoproto v1.4.10
github.com/cosmos/iavl v0.20.1
github.com/cosmos/iavl v1.2.0
github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.1.3
github.com/cosmos/ibc-go/modules/light-clients/08-wasm v0.1.1-ibc-go-v7.3-wasmvm-v1.5
github.com/cosmos/ibc-go/v7 v7.4.0
github.com/ethereum/go-ethereum v1.10.26
github.com/evmos/ethermint v0.21.0
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.3
github.com/golang/protobuf v1.5.4
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/linxGnu/grocksdb v1.8.6
github.com/linxGnu/grocksdb v1.8.13
github.com/pelletier/go-toml/v2 v2.1.0
github.com/prometheus/client_golang v1.14.0
github.com/shopspring/decimal v1.4.0
github.com/spf13/cast v1.6.0
github.com/spf13/cobra v1.7.0
github.com/spf13/viper v1.16.0
github.com/stretchr/testify v1.8.4
github.com/spf13/cobra v1.8.0
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.9.0
github.com/subosito/gotenv v1.6.0
golang.org/x/crypto v0.24.0
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0
google.golang.org/grpc v1.60.1
google.golang.org/protobuf v1.32.0
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
sigs.k8s.io/yaml v1.4.0
)
require (
cloud.google.com/go v0.111.0 // indirect
cloud.google.com/go/compute v1.23.3 // indirect
cloud.google.com/go v0.112.0 // indirect
cloud.google.com/go/compute v1.24.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
cloud.google.com/go/storage v1.35.1 // indirect
cloud.google.com/go/iam v1.1.6 // indirect
cloud.google.com/go/storage v1.36.0 // indirect
cosmossdk.io/api v0.3.1 // indirect
cosmossdk.io/core v0.6.1 // indirect
cosmossdk.io/depinject v1.0.0-alpha.4 // indirect
cosmossdk.io/log v1.3.1 // indirect
cosmossdk.io/tools/rosetta v0.2.1 // indirect
filippo.io/edwards25519 v1.0.0 // indirect
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
github.com/99designs/keyring v1.2.1 // indirect
github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect
github.com/CosmWasm/wasmvm v1.5.2 // indirect
github.com/DataDog/zstd v1.5.5 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
@ -67,30 +68,32 @@ require (
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/btcsuite/btcd v0.23.4 // indirect
github.com/btcsuite/btcd v0.24.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/btcutil v1.1.3 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
github.com/btcsuite/btcd/btcutil v1.1.5 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/cockroachdb/apd/v2 v2.0.2 // indirect
github.com/cockroachdb/errors v1.10.0 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v1.1.0 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect
github.com/confio/ics23/go v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/cosmos/btcutil v1.0.5 // indirect
github.com/cosmos/gogogateway v1.2.0 // indirect
github.com/cosmos/ics23/go v0.10.0 // indirect
github.com/cosmos/ledger-cosmos-go v0.13.1 // indirect
github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect
github.com/cosmos/rosetta-sdk-go v0.10.0 // indirect
github.com/creachadair/taskgroup v0.4.2 // indirect
github.com/danieljoos/wincred v1.1.2 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
github.com/dgraph-io/ristretto v0.1.1 // indirect
@ -100,20 +103,22 @@ require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
github.com/emicklei/dot v1.6.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.23.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-kit/kit v0.13.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/golang/glog v1.1.2 // indirect
github.com/golang/glog v1.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
@ -121,7 +126,7 @@ require (
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/orderedcode v0.0.1 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/handlers v1.5.1 // indirect
@ -131,11 +136,12 @@ require (
github.com/gtank/merlin v0.1.1 // indirect
github.com/gtank/ristretto255 v0.1.2 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-getter v1.7.1 // indirect
github.com/hashicorp/go-getter v1.7.5 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
@ -148,11 +154,10 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/klauspost/compress v1.17.0 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@ -167,28 +172,33 @@ require (
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mtibben/percent v0.2.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/procfs v0.13.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rakyll/statik v0.1.7 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/rs/cors v1.8.3 // indirect
github.com/rs/zerolog v1.32.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sasha-s/go-deadlock v0.3.1 // indirect
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tendermint/go-amino v0.16.0 // indirect
github.com/tidwall/btree v1.6.0 // indirect
github.com/tidwall/btree v1.7.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
@ -197,20 +207,24 @@ require (
github.com/zondax/ledger-go v0.14.3 // indirect
go.etcd.io/bbolt v1.3.8 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/oauth2 v0.15.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
go.opentelemetry.io/otel v1.22.0 // indirect
go.opentelemetry.io/otel/metric v1.22.0 // indirect
go.opentelemetry.io/otel/trace v1.22.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/api v0.153.0 // indirect
google.golang.org/api v0.162.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
@ -224,20 +238,23 @@ replace (
// Use the cosmos keyring code
github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0
// Use cometbft fork of tendermint
github.com/cometbft/cometbft => github.com/kava-labs/cometbft v0.37.4-kava.1
github.com/cometbft/cometbft-db => github.com/kava-labs/cometbft-db v0.9.1-kava.1
github.com/cometbft/cometbft => github.com/0glabs/cometbft v0.37.9-0glabs.1
github.com/cometbft/cometbft-db => github.com/kava-labs/cometbft-db v0.9.1-kava.2
// Use cosmos-sdk fork with backported fix for unsafe-reset-all, staking transfer events, and custom tally handler support
// github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.46.11-kava.3
github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.47.10-0glabs.3
github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.47.10-0glabs.7
github.com/cosmos/iavl => github.com/kava-labs/iavl v1.2.0-kava.1
// See https://github.com/cosmos/cosmos-sdk/pull/13093
github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.2
// Use go-ethereum fork with precompiles
// Tracking kava-labs/go-ethereum kava/release/v1.10 branch
// TODO: Tag before release
github.com/ethereum/go-ethereum => github.com/evmos/go-ethereum v1.10.26-evmos-rc2
// Use ethermint fork that respects min-gas-price with NoBaseFee true and london enabled, and includes eip712 support
github.com/evmos/ethermint => github.com/0glabs/ethermint v0.21.0-0g.v3.0.3
github.com/evmos/ethermint => github.com/0glabs/ethermint v0.21.0-0g.v3.1.7
// See https://github.com/cosmos/cosmos-sdk/pull/10401, https://github.com/cosmos/cosmos-sdk/commit/0592ba6158cd0bf49d894be1cef4faeec59e8320
github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.9.0
// Downgraded to avoid bugs in following commits which cause "version does not exist" errors
github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
// Avoid change in slices.SortFunc, see https://github.com/cosmos/cosmos-sdk/issues/20159
golang.org/x/exp => golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb
)
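The replace block above is what pins the build to the 0glabs and kava-labs forks of cometbft, cosmos-sdk, ethermint and related modules. When reviewing a bump like this one, the redirects can be listed programmatically; a minimal sketch using golang.org/x/mod (an assumed tooling dependency, not necessarily part of this module) follows:

package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	// Parse go.mod and print every replace directive, i.e. every dependency
	// that is redirected to a fork (0glabs/cosmos-sdk, 0glabs/ethermint, ...).
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}

	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}

	for _, r := range f.Replace {
		fmt.Printf("%s => %s %s\n", r.Old.Path, r.New.Path, r.New.Version)
	}
}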

go.sum

@ -32,8 +32,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
@ -71,8 +71,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
@ -112,8 +112,8 @@ cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y97
cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
@ -174,8 +174,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
@ -209,10 +209,12 @@ filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw=
git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.3 h1:Wx3tVMTuFaaHDeJT/OzT7QLfAIpeaZsG9R6XoTOyKCw=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.3/go.mod h1:BWo24B8cApWcO2/widWYIdt3CPxbh+HCSypCPpjTjog=
github.com/0glabs/ethermint v0.21.0-0g.v3.0.3 h1:QNrXBQV5L/9FvYRUzJRXMV745xBmJhIP0aEdo0u8x+8=
github.com/0glabs/ethermint v0.21.0-0g.v3.0.3/go.mod h1:HYQUhvcZBIG71H3xlxQSk0XyQEjeaHsduOj6O2QImrE=
github.com/0glabs/cometbft v0.37.9-0glabs.1 h1:KQJG17Y21suKP3QNICLto4b5Ak73XbSmKxeLbg0ZM68=
github.com/0glabs/cometbft v0.37.9-0glabs.1/go.mod h1:j0Q3RqrCd+cztWCugs3obbzC4NyHGBPZZjtm/fWV00I=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.7 h1:6+JquK9BaZZdIA3gx1AXhPBAdYCG+FQ94Y7FN35CvB4=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.7/go.mod h1:KskIVnhXTFqrw7CDccMvx7To5KzUsOomIsQV7sPGOog=
github.com/0glabs/ethermint v0.21.0-0g.v3.1.7 h1:wzr6z/LTsbjoAaBf0JkMtIDl/+B5KLd0GrU5brSZCY0=
github.com/0glabs/ethermint v0.21.0-0g.v3.1.7/go.mod h1:S1Ahmqpzo1XUsfmmpGT7ok0hu5Fekz/pD6EDtXaBg9Q=
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs=
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
@ -229,9 +231,13 @@ github.com/CosmWasm/wasmvm v1.5.2/go.mod h1:Q0bSEtlktzh7W2hhEaifrFp1Erx11ckQZmjq
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Kava-Labs/opendb v0.0.0-20240719173129-a2f11f6d7e51 h1:tMTENCeSPIJO8yCpEQbT15XYXt4EFNQUx3s334uxVts=
github.com/Kava-Labs/opendb v0.0.0-20240719173129-a2f11f6d7e51/go.mod h1:LbPsJiWvj90NT3Y9YV8EFPkWfvp8A15Tp88qqKa3LxA=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
@ -303,9 +309,9 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P
github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y=
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
github.com/btcsuite/btcd v0.23.4 h1:IzV6qqkfwbItOS/sg/aDfPDsjPP8twrCOE2R93hxMlQ=
github.com/btcsuite/btcd v0.23.4/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
@ -313,11 +319,12 @@ github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@ -349,8 +356,8 @@ github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
@ -378,15 +385,23 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E=
github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU=
github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI=
github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA=
@ -413,8 +428,10 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk=
github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis=
github.com/cosmos/cosmos-proto v1.0.0-beta.4 h1:aEL7tU/rLOmxZQ9z4i7mzxcLbSCY48OdY7lIWTLG7oU=
github.com/cosmos/cosmos-proto v1.0.0-beta.4/go.mod h1:oeB+FyVzG3XrQJbJng0EnV8Vljfk9XvTIpGILNU/9Co=
github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs=
github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA=
github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
@ -423,8 +440,6 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
github.com/cosmos/gogoproto v1.4.10 h1:QH/yT8X+c0F4ZDacDv3z+xE3WU1P1Z3wQoLMBRJoKuI=
github.com/cosmos/gogoproto v1.4.10/go.mod h1:3aAZzeRWpAwr+SS/LLkICX2/kDFyaYVzckBDzygIxek=
github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg=
github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A=
github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.1.3 h1:MZGDMETv72suFpTAD6VPGqSIm1FJcChtk2HmVh9D+Bo=
github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.1.3/go.mod h1:UvDmcGIWJPIytq+Q78/ff5NTOsuX/7IrNgEugTW5i0s=
github.com/cosmos/ibc-go/modules/light-clients/08-wasm v0.1.1-ibc-go-v7.3-wasmvm-v1.5 h1:sMoHjep+KInjMrppNCEutMVm1p8nI9WhKCuMQ+EcUHw=
@ -435,15 +450,16 @@ github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZD
github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0=
github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo=
github.com/cosmos/keyring v1.2.0/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA=
github.com/cosmos/ledger-cosmos-go v0.13.1 h1:12ac9+GwBb9BjP7X5ygpFk09Itwzjzfmg6A2CWFjoVs=
github.com/cosmos/ledger-cosmos-go v0.13.1/go.mod h1:5tv2RVJEd2+Y38TIQN4CRjJeQGyqOEiKJDfqhk5UjqE=
github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5nvue4rK+yM=
github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8=
github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM=
github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creachadair/taskgroup v0.4.2 h1:jsBLdAJE42asreGss2xZGZ8fJra7WtwnHWeJFxv2Li8=
github.com/creachadair/taskgroup v0.4.2/go.mod h1:qiXUOSrbwAY3u0JPGTzObbE3yf9hcXHDKBZ2ZjpCbgM=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@ -459,11 +475,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@ -504,6 +521,8 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI=
github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@ -515,13 +534,15 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/evmos/go-ethereum v1.10.26-evmos-rc2 h1:tYghk1ZZ8X4/OQ4YI9hvtm8aSN8OSqO0g9vo/sCMdBo=
github.com/evmos/go-ethereum v1.10.26-evmos-rc2/go.mod h1:/6CsT5Ceen2WPLI/oCA3xMcZ5sWMF/D46SjM/ayY0Oo=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@ -541,8 +562,8 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqG
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE=
github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
@ -556,8 +577,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@ -566,8 +587,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
@ -620,8 +641,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -656,8 +677,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
@ -723,8 +744,8 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
@ -780,8 +801,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY=
github.com/hashicorp/go-getter v1.7.1/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4=
github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@ -802,8 +823,11 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@ -880,10 +904,10 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kava-labs/cometbft v0.37.4-kava.1 h1:QRuyBieWdUBpe4pcXgzu1SdMH2lkTaqXr/JPIeqdiHE=
github.com/kava-labs/cometbft v0.37.4-kava.1/go.mod h1:Cmg5Hp4sNpapm7j+x0xRyt2g0juQfmB752ous+pA0G8=
github.com/kava-labs/cometbft-db v0.9.1-kava.1 h1:0KmSPdXYdRp6TsgKuMxRnMZCMEGC5ysIVjuJddYr4tw=
github.com/kava-labs/cometbft-db v0.9.1-kava.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U=
github.com/kava-labs/cometbft-db v0.9.1-kava.2 h1:ZQaio886ifvml9XtJB4IYHhlArgA3+/a5Zwidg7H2J8=
github.com/kava-labs/cometbft-db v0.9.1-kava.2/go.mod h1:PvUZbx7zeR7I4CAvtKBoii/5ia5gXskKjDjIVpt7gDw=
github.com/kava-labs/iavl v1.2.0-kava.1 h1:HPme3nVrR25XshEFDckMg6fp0tVfpAjTi32/5Iiyuzk=
github.com/kava-labs/iavl v1.2.0-kava.1/go.mod h1:HidWWLVAtODJqFD6Hbne2Y0q3SdxByJepHUOeoH4LiI=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@ -894,8 +918,8 @@ github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
@ -925,12 +949,10 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linxGnu/grocksdb v1.8.6 h1:O7I6SIGPrypf3f/gmrrLUBQDKfO8uOoYdWf4gLS06tc=
github.com/linxGnu/grocksdb v1.8.6/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY=
github.com/linxGnu/grocksdb v1.8.13 h1:X3Id7Obhf8qLY9WPc4LmmtIyabmdDf810XSFDnLlW7E=
github.com/linxGnu/grocksdb v1.8.13/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@ -1067,6 +1089,8 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@ -1077,8 +1101,8 @@ github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU=
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@ -1109,8 +1133,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@ -1127,8 +1151,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
@ -1145,8 +1169,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo=
github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
@ -1159,6 +1183,10 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
@ -1182,6 +1210,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@ -1193,18 +1223,16 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
@ -1214,8 +1242,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@ -1227,8 +1256,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
@ -1236,8 +1266,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME=
github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
@ -1308,22 +1338,30 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU=
go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@ -1452,8 +1490,8 @@ golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfS
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1479,8 +1517,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1765,8 +1803,8 @@ google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ
google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/api v0.162.0 h1:Vhs54HkaEpkMBdgGdOT2P6F0csGG/vxDS0hWHJzmmps=
google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1885,12 +1923,12 @@ google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqw
google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg=
google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0=
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o=
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1932,8 +1970,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1950,8 +1988,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -1,52 +0,0 @@
run:
tests: false
# # timeout for analysis, e.g. 30s, 5m, default is 1m
# timeout: 5m
linters:
disable-all: true
enable:
- bodyclose
- deadcode
- depguard
- dogsled
# - errcheck
- goconst
- gocritic
- gofmt
- goimports
- golint
- gosec
- gosimple
- govet
- ineffassign
- interfacer
- maligned
- misspell
- nakedret
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
- typecheck
- unconvert
- unused
- misspell
- wsl
issues:
exclude-rules:
- text: "Use of weak random number generator"
linters:
- gosec
- text: "comment on exported var"
linters:
- golint
linters-settings:
dogsled:
max-blank-identifiers: 3
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true


@ -36,11 +36,14 @@ $BINARY init validator --chain-id $chainID
sed -in-place='' 's/enable = false/enable = true/g' $DATA/config/app.toml
# Set evm tracer to json
sed -in-place='' 's/tracer = ""/tracer = "json"/g' $DATA/config/app.toml
sed -in-place='' 's/tracer = ""/tracer = ""/g' $DATA/config/app.toml
# Enable full error trace to be returned on tx failure
# Disable full error trace
sed -in-place='' '/iavl-cache-size/a\
trace = true' $DATA/config/app.toml
trace = false' $DATA/config/app.toml
# Set min gas prices
sed -in-place='' 's/minimum-gas-prices = "0ua0gi"/minimum-gas-prices = "0.01ua0gi,100000neuron"/g' $DATA/config/app.toml
# Set client chain id
sed -in-place='' 's/chain-id = ""/chain-id = "zgchain_8888-1"/g' $DATA/config/client.toml


@ -3,4 +3,5 @@ package common
const (
ErrGetStateDB = "get EVM StateDB failed"
ErrInvalidNumberOfArgs = "invalid number of arguments; expected %d; got: %d"
ErrSenderNotOrigin = "msg.sender is not from tx origin"
)
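ErrSenderNotOrigin backs a new guard, added to each state-changing dasigners method later in this diff, that rejects calls routed through an intermediate contract. A minimal sketch of the check, assuming the go-ethereum vm types used elsewhere in this changeset (the helper name is illustrative; the real handlers inline the comparison):
// requireDirectCall is an illustrative helper: it fails when the immediate
// caller (msg.sender) is not the transaction origin, i.e. when the precompile
// is reached through another contract rather than called directly by the EOA.
func requireDirectCall(contract *vm.Contract, evm *vm.EVM) error {
	if contract.CallerAddress != evm.Origin {
		return fmt.Errorf(ErrSenderNotOrigin)
	}
	return nil
}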


@ -0,0 +1,18 @@
package common
import (
"math/big"
"strings"
"cosmossdk.io/math"
"github.com/ethereum/go-ethereum/common"
)
func ToLowerHexWithoutPrefix(addr common.Address) string {
return strings.ToLower(addr.Hex()[2:])
}
// BigIntToLegacyDec converts a uint number (18 decimals) to math.LegacyDec (18 decimals)
func BigIntToLegacyDec(x *big.Int) math.LegacyDec {
return math.LegacyNewDecFromBigIntWithPrec(x, math.LegacyPrecision)
}
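A short usage sketch of these two helpers; the address and amount below are illustrative values, not taken from the chain:
// exampleCommonHelpers shows the intended use of the helpers above.
func exampleCommonHelpers() (string, math.LegacyDec) {
	addr := common.HexToAddress("0x00000000000000000000000000000000000010A0")
	account := ToLowerHexWithoutPrefix(addr) // "00000000000000000000000000000000000010a0"
	// 1.5 tokens expressed with 18 decimals becomes the LegacyDec 1.5.
	wei := new(big.Int).Mul(big.NewInt(15), new(big.Int).Exp(big.NewInt(10), big.NewInt(17), nil))
	return account, BigIntToLegacyDec(wei)
}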


@ -139,11 +139,11 @@ func (d *DASignersPrecompile) Run(evm *vm.EVM, contract *vm.Contract, readonly b
bz, err = d.RegisteredEpoch(ctx, evm, method, args)
// txs
case DASignersFunctionRegisterSigner:
bz, err = d.RegisterSigner(ctx, evm, stateDB, method, args)
bz, err = d.RegisterSigner(ctx, evm, stateDB, contract, method, args)
case DASignersFunctionRegisterNextEpoch:
bz, err = d.RegisterNextEpoch(ctx, evm, stateDB, method, args)
bz, err = d.RegisterNextEpoch(ctx, evm, stateDB, contract, method, args)
case DASignersFunctionUpdateSocket:
bz, err = d.UpdateSocket(ctx, evm, stateDB, method, args)
bz, err = d.UpdateSocket(ctx, evm, stateDB, contract, method, args)
}
if err != nil {


@ -14,6 +14,7 @@ import (
"github.com/0glabs/0g-chain/x/dasigners/v1/types"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/consensys/gnark-crypto/ecc/bn254"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
@ -23,7 +24,6 @@ import (
"cosmossdk.io/math"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
)
type DASignersTestSuite struct {
@ -44,8 +44,7 @@ func (suite *DASignersTestSuite) AddDelegation(from string, to string, amount ma
suite.Require().NoError(err)
validator, found := suite.StakingKeeper.GetValidator(suite.Ctx, valAddr)
if !found {
consPriv, err := ethsecp256k1.GenerateKey()
suite.Require().NoError(err)
consPriv := ed25519.GenPrivKey()
newValidator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
suite.Require().NoError(err)
validator = newValidator
@ -73,8 +72,8 @@ func (suite *DASignersTestSuite) SetupTest() {
suite.Assert().EqualValues(ok, true)
suite.dasigners = precompile.(*dasignersprecompile.DASignersPrecompile)
suite.signerOne = testutil.GenSigner()
suite.signerTwo = testutil.GenSigner()
suite.signerOne = suite.GenSigner()
suite.signerTwo = suite.GenSigner()
abi, err := abi.JSON(strings.NewReader(dasignersprecompile.DASignersABI))
suite.Assert().NoError(err)
suite.abi = abi


@ -62,7 +62,7 @@ func (d *DASignersPrecompile) IsSigner(ctx sdk.Context, _ *vm.EVM, method *abi.M
if len(args) != 1 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
}
account := ToLowerHexWithoutPrefix(args[0].(common.Address))
account := precopmiles_common.ToLowerHexWithoutPrefix(args[0].(common.Address))
_, found, err := d.dasignersKeeper.GetSigner(ctx, account)
if err != nil {
return nil, err
@ -74,7 +74,7 @@ func (d *DASignersPrecompile) RegisteredEpoch(ctx sdk.Context, _ *vm.EVM, method
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
account := ToLowerHexWithoutPrefix(args[0].(common.Address))
account := precopmiles_common.ToLowerHexWithoutPrefix(args[0].(common.Address))
epoch := args[1].(*big.Int).Uint64()
_, found, err := d.dasignersKeeper.GetRegistration(ctx, epoch, account)
if err != nil {


@ -7,18 +7,30 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/evmos/ethermint/x/evm/statedb"
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
)
func (d *DASignersPrecompile) RegisterSigner(ctx sdk.Context, evm *vm.EVM, stateDB *statedb.StateDB, method *abi.Method, args []interface{}) ([]byte, error) {
func (d *DASignersPrecompile) RegisterSigner(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgRegisterSigner(args)
if err != nil {
return nil, err
}
// validation
sender := ToLowerHexWithoutPrefix(evm.Origin)
sender := precopmiles_common.ToLowerHexWithoutPrefix(evm.Origin)
if sender != msg.Signer.Account {
return nil, fmt.Errorf(ErrInvalidSender, sender, msg.Signer.Account)
}
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = d.dasignersKeeper.RegisterSigner(sdk.WrapSDKContext(ctx), msg)
if err != nil {
@ -32,11 +44,22 @@ func (d *DASignersPrecompile) RegisterSigner(ctx sdk.Context, evm *vm.EVM, state
return method.Outputs.Pack()
}
func (d *DASignersPrecompile) RegisterNextEpoch(ctx sdk.Context, evm *vm.EVM, stateDB *statedb.StateDB, method *abi.Method, args []interface{}) ([]byte, error) {
msg, err := NewMsgRegisterNextEpoch(args, ToLowerHexWithoutPrefix(evm.Origin))
func (d *DASignersPrecompile) RegisterNextEpoch(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgRegisterNextEpoch(args, precopmiles_common.ToLowerHexWithoutPrefix(evm.Origin))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = d.dasignersKeeper.RegisterNextEpoch(sdk.WrapSDKContext(ctx), msg)
if err != nil {
@ -45,11 +68,22 @@ func (d *DASignersPrecompile) RegisterNextEpoch(ctx sdk.Context, evm *vm.EVM, st
return method.Outputs.Pack()
}
func (d *DASignersPrecompile) UpdateSocket(ctx sdk.Context, evm *vm.EVM, stateDB *statedb.StateDB, method *abi.Method, args []interface{}) ([]byte, error) {
msg, err := NewMsgUpdateSocket(args, ToLowerHexWithoutPrefix(evm.Origin))
func (d *DASignersPrecompile) UpdateSocket(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgUpdateSocket(args, precopmiles_common.ToLowerHexWithoutPrefix(evm.Origin))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = d.dasignersKeeper.UpdateSocket(sdk.WrapSDKContext(ctx), msg)
if err != nil {

View File

@ -3,7 +3,6 @@ package dasigners
import (
"fmt"
"math/big"
"strings"
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
dasignerstypes "github.com/0glabs/0g-chain/x/dasigners/v1/types"
@ -90,7 +89,7 @@ func NewQuerySignerRequest(args []interface{}) (*dasignerstypes.QuerySignerReque
Accounts: make([]string, len(accounts)),
}
for i, account := range accounts {
req.Accounts[i] = ToLowerHexWithoutPrefix(account)
req.Accounts[i] = precopmiles_common.ToLowerHexWithoutPrefix(account)
}
return &req, nil
}
@ -139,10 +138,6 @@ func NewIDASignersSignerDetail(signer *dasignerstypes.Signer) IDASignersSignerDe
}
}
func ToLowerHexWithoutPrefix(addr common.Address) string {
return strings.ToLower(addr.Hex()[2:])
}
func NewMsgRegisterSigner(args []interface{}) (*dasignerstypes.MsgRegisterSigner, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
@ -151,7 +146,7 @@ func NewMsgRegisterSigner(args []interface{}) (*dasignerstypes.MsgRegisterSigner
signer := args[0].(IDASignersSignerDetail)
return &dasignerstypes.MsgRegisterSigner{
Signer: &dasignerstypes.Signer{
Account: ToLowerHexWithoutPrefix(signer.Signer),
Account: precopmiles_common.ToLowerHexWithoutPrefix(signer.Signer),
Socket: signer.Socket,
PubkeyG1: SerializeG1(signer.PkG1),
PubkeyG2: SerializeG2(signer.PkG2),


@ -0,0 +1,18 @@
{
"extends": "solhint:recommended",
"plugins": ["prettier"],
"rules": {
"avoid-low-level-calls": "off",
"compiler-version": "off",
"gas-custom-errors": "off",
"explicit-types": ["warn", "implicit"],
"func-visibility": ["warn", { "ignoreConstructors": true }],
"max-states-count": "off",
"no-empty-blocks": "off",
"no-global-import": "off",
"no-inline-assembly": "off",
"not-rely-on-time": "off",
"prettier/prettier": "error",
"reason-string": "off"
}
}


@ -0,0 +1,88 @@
// SPDX-License-Identifier: LGPL-3.0-only
pragma solidity >=0.8.0;
library BN254 {
struct G1Point {
uint X;
uint Y;
}
// Encoding of field elements is: X[1] * i + X[0]
struct G2Point {
uint[2] X;
uint[2] Y;
}
}
interface IDASigners {
/*=== struct ===*/
struct SignerDetail {
address signer;
string socket;
BN254.G1Point pkG1;
BN254.G2Point pkG2;
}
struct Params {
uint tokensPerVote;
uint maxVotesPerSigner;
uint maxQuorums;
uint epochBlocks;
uint encodedSlices;
}
/*=== event ===*/
event NewSigner(
address indexed signer,
BN254.G1Point pkG1,
BN254.G2Point pkG2
);
event SocketUpdated(address indexed signer, string socket);
/*=== function ===*/
function params() external view returns (Params memory);
function epochNumber() external view returns (uint);
function quorumCount(uint _epoch) external view returns (uint);
function isSigner(address _account) external view returns (bool);
function getSigner(
address[] memory _account
) external view returns (SignerDetail[] memory);
function getQuorum(
uint _epoch,
uint _quorumId
) external view returns (address[] memory);
function getQuorumRow(
uint _epoch,
uint _quorumId,
uint32 _rowIndex
) external view returns (address);
function registerSigner(
SignerDetail memory _signer,
BN254.G1Point memory _signature
) external;
function updateSocket(string memory _socket) external;
function registeredEpoch(
address _account,
uint _epoch
) external view returns (bool);
function registerNextEpoch(BN254.G1Point memory _signature) external;
function getAggPkG1(
uint _epoch,
uint _quorumId,
bytes memory _quorumBitmap
)
external
view
returns (BN254.G1Point memory aggPkG1, uint total, uint hit);
}


@ -0,0 +1,415 @@
// SPDX-License-Identifier: LGPL-3.0-only
pragma solidity >=0.8.0;
/**
* @dev Description defines a validator description
*/
struct Description {
string moniker;
string identity;
string website;
string securityContact;
string details;
}
/**
* @dev CommissionRates defines the initial commission rates to be used for creating
* a validator.
*/
struct CommissionRates {
uint rate; // 18 decimals
uint maxRate; // 18 decimals
uint maxChangeRate; // 18 decimals
}
/**
* @dev Commission defines the commission parameters.
*/
struct Commission {
CommissionRates commissionRates;
uint updateTime;
}
/**
* @dev Validator defines a validator.
*/
struct Validator {
string operatorAddress;
string consensusPubkey;
bool jailed;
BondStatus status;
uint tokens;
uint delegatorShares; // 18 decimals
Description description;
int64 unbondingHeight;
int64 unbondingTime;
Commission commission;
uint minSelfDelegation;
int64 unbondingOnHoldRefCount;
uint64[] unbondingIds;
}
/**
* @dev Delegation represents the bond with tokens held by an account.
*/
struct Delegation {
string delegatorAddress;
string validatorAddress;
uint shares; // 18 decimals
}
/**
* @dev DelegationResponse is equivalent to a Delegation except that it contains
* a balance in addition to shares, which is more suitable for client
* responses.
*/
struct DelegationResponse {
Delegation delegation;
uint balance;
}
/**
* @dev UnbondingDelegationEntry defines an unbonding object with relevant metadata.
*/
struct UnbondingDelegationEntry {
int64 creationHeight;
int64 completionTime;
uint initialBalance;
uint balance;
uint64 unbondingId;
int64 unbondingOnHoldRefCount;
}
/**
* @dev UnbondingDelegation stores all of a single delegator's unbonding bonds
* for a single validator in a time-ordered list.
*/
struct UnbondingDelegation {
string delegatorAddress;
string validatorAddress;
UnbondingDelegationEntry[] entries;
}
/**
* @dev RedelegationResponse is equivalent to a Redelegation except that its entries
* contain a balance in addition to shares which is more suitable for client
* responses.
*/
struct RedelegationResponse {
Redelegation redelegation;
RedelegationEntryResponse[] entries;
}
/**
* @dev Redelegation contains the list of a particular delegator's redelegating bonds
* from a particular source validator to a particular destination validator.
*/
struct Redelegation {
string delegatorAddress;
string validatorSrcAddress;
string validatorDstAddress;
RedelegationEntry[] entries;
}
/**
* @dev RedelegationEntry defines a redelegation object with relevant metadata.
*/
struct RedelegationEntry {
int64 creationHeight;
int64 completionTime;
uint initialBalance;
uint sharesDst; // 18 decimals
uint64 unbondingId;
int64 unbondingOnHoldRefCount;
}
/**
* @dev RedelegationEntryResponse is equivalent to a RedelegationEntry except that it
* contains a balance in addition to shares which is more suitable for client
* responses.
*/
struct RedelegationEntryResponse {
RedelegationEntry redelegationEntry;
uint balance;
}
/**
* @dev Params defines the parameters for the x/staking module.
*/
struct Params {
int64 unbondingTime;
uint32 maxValidators;
uint32 maxEntries;
uint32 historicalEntries;
string bondDenom;
uint minCommissionRate; // 18 decimals
}
/**
* @dev BondStatus is the status of a validator.
*/
enum BondStatus {
Unspecified,
Unbonded,
Unbonding,
Bonded
}
struct NullableUint {
bool isNull;
uint value;
}
struct PageRequest {
bytes key;
uint64 offset;
uint64 limit;
bool countTotal;
bool reverse;
}
struct PageResponse {
bytes nextKey;
uint64 total;
}
interface IStaking {
/*=== cosmos tx ===*/
/**
* @dev CreateValidator defines a method for creating a new validator for tx sender.
* cosmos grpc: rpc CreateValidator(MsgCreateValidator) returns (MsgCreateValidatorResponse);
*/
function createValidator(
Description memory description,
CommissionRates memory commission,
uint minSelfDelegation,
string memory pubkey, // 0gchaind tendermint show-validator
uint value
) external;
/**
* @dev EditValidator defines a method for editing an existing validator (tx sender).
* cosmos grpc: rpc EditValidator(MsgEditValidator) returns (MsgEditValidatorResponse);
*/
function editValidator(
Description memory description,
NullableUint memory commissionRate,
NullableUint memory minSelfDelegation
) external;
/**
* @dev Delegate defines a method for performing a delegation of coins from a delegator to a validator.
* The delegator is tx sender.
* cosmos grpc: rpc Delegate(MsgDelegate) returns (MsgDelegateResponse);
*/
function delegate(
string memory validatorAddress,
uint amount // in bond denom
) external;
/**
* @dev BeginRedelegate defines a method for performing a redelegation
* of coins from a delegator and source validator to a destination validator.
* The delegator is tx sender.
* cosmos grpc: rpc BeginRedelegate(MsgBeginRedelegate) returns (MsgBeginRedelegateResponse);
*/
function beginRedelegate(
string memory validatorSrcAddress,
string memory validatorDstAddress,
uint amount // in bond denom
) external returns (uint completionTime);
/**
* @dev Undelegate defines a method for performing an undelegation from a
* delegator and a validator.
* The delegator is tx sender.
* cosmos grpc: rpc Undelegate(MsgUndelegate) returns (MsgUndelegateResponse);
*/
function undelegate(
string memory validatorAddress,
uint amount // in bond denom
) external returns (uint completionTime);
/**
* @dev CancelUnbondingDelegation defines a method for performing canceling the unbonding delegation
* and delegate back to previous validator.
* The delegator is tx sender.
* Since: cosmos-sdk 0.46
* cosmos grpc: rpc CancelUnbondingDelegation(MsgCancelUnbondingDelegation) returns (MsgCancelUnbondingDelegationResponse);
*/
function cancelUnbondingDelegation(
string memory validatorAddress,
uint amount, // in bond denom
uint creationHeight
) external;
/**
* @dev UpdateParams defines an operation for updating the x/staking module parameters.
* Since: cosmos-sdk 0.47
* grpc: rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse);
*/
// Skipped. This function is controlled by governance module.
/*=== cosmos query ===*/
/**
* @dev Validators queries all validators that match the given status.
* cosmos grpc: rpc Validators(QueryValidatorsRequest) returns (QueryValidatorsResponse);
*/
function validators(
string memory status,
PageRequest memory pagination
)
external
view
returns (
Validator[] memory validators,
PageResponse memory paginationResult
);
/**
* @dev Validator queries validator info for given validator address.
* cosmos grpc: rpc Validator(QueryValidatorRequest) returns (QueryValidatorResponse);
*/
function validator(
string memory validatorAddress
) external view returns (Validator memory validator);
/**
* @dev ValidatorDelegations queries delegate info for given validator.
* cosmos grpc: rpc ValidatorDelegations(QueryValidatorDelegationsRequest) returns (QueryValidatorDelegationsResponse);
*/
function validatorDelegations(
string memory validatorAddr,
PageRequest memory pagination
)
external
view
returns (
DelegationResponse[] memory delegationResponses,
PageResponse memory paginationResult
);
/**
* @dev ValidatorUnbondingDelegations queries unbonding delegations of a validator.
* cosmos grpc: rpc ValidatorUnbondingDelegations(QueryValidatorUnbondingDelegationsRequest) returns (QueryValidatorUnbondingDelegationsResponse);
*/
//
function validatorUnbondingDelegations(
string memory validatorAddr,
PageRequest memory pagination
)
external
view
returns (
UnbondingDelegation[] memory unbondingResponses,
PageResponse memory paginationResult
);
/**
* @dev Delegation queries delegate info for given validator delegator pair.
* cosmos grpc: rpc Delegation(QueryDelegationRequest) returns (QueryDelegationResponse);
*/
function delegation(
string memory delegatorAddr,
string memory validatorAddr
) external view returns (Delegation memory delegation, uint balance);
/**
* @dev UnbondingDelegation queries unbonding info for given validator delegator pair.
* cosmos grpc: rpc UnbondingDelegation(QueryUnbondingDelegationRequest) returns (QueryUnbondingDelegationResponse);
*/
function unbondingDelegation(
string memory delegatorAddr,
string memory validatorAddr
) external view returns (UnbondingDelegation memory unbond);
/**
* @dev DelegatorDelegations queries all delegations of a given delegator address.
*
* cosmos grpc: rpc DelegatorDelegations(QueryDelegatorDelegationsRequest) returns (QueryDelegatorDelegationsResponse);
*/
function delegatorDelegations(
string memory delegatorAddr,
PageRequest memory pagination
)
external
view
returns (
DelegationResponse[] memory delegationResponses,
PageResponse memory paginationResult
);
/**
* @dev DelegatorUnbondingDelegations queries all unbonding delegations of a given delegator address.
* cosmos grpc: rpc DelegatorUnbondingDelegations(QueryDelegatorUnbondingDelegationsRequest)
*/
function delegatorUnbondingDelegations(
string memory delegatorAddr,
PageRequest memory pagination
)
external
view
returns (
UnbondingDelegation[] memory unbondingResponses,
PageResponse memory paginationResult
);
/**
* @dev Redelegations queries redelegations of given address.
*
* grpc: rpc Redelegations(QueryRedelegationsRequest) returns (QueryRedelegationsResponse);
*/
function redelegations(
string memory delegatorAddress,
string memory srcValidatorAddress,
string memory dstValidatorAddress,
PageRequest calldata pageRequest
)
external
view
returns (
RedelegationResponse[] calldata redelegationResponses,
PageResponse calldata paginationResult
);
/**
* @dev DelegatorValidators queries all validators info for given delegator address.
* cosmos grpc: rpc DelegatorValidators(QueryDelegatorValidatorsRequest) returns (QueryDelegatorValidatorsResponse);
*/
function delegatorValidators(
string memory delegatorAddr,
PageRequest memory pagination
)
external
view
returns (
Validator[] memory validators,
PageResponse memory paginationResult
);
/**
* @dev DelegatorValidator queries validator info for given delegator validator pair.
* cosmos grpc: rpc DelegatorValidator(QueryDelegatorValidatorRequest) returns (QueryDelegatorValidatorResponse);
*/
function delegatorValidator(
string memory delegatorAddr,
string memory validatorAddr
) external view returns (Validator memory validator);
/**
* @dev Pool queries the pool info.
* cosmos grpc: rpc Pool(QueryPoolRequest) returns (QueryPoolResponse);
*/
function pool()
external
view
returns (uint notBondedTokens, uint bondedTokens);
/**
* @dev Parameters queries the staking parameters.
* cosmos grpc: rpc Params(QueryParamsRequest) returns (QueryParamsResponse);
*/
function params() external view returns (Params memory params);
}
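The Go sketch below shows one way a client could build calldata for this interface against the staking precompile, which is registered later in this diff at 0x0000000000000000000000000000000000001001. The inline ABI fragment is an assumption trimmed to the delegation query; only the inputs matter for encoding, and the bech32 strings are placeholders:
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// Minimal ABI fragment for IStaking.delegation; outputs are omitted because
// they are not needed to encode the call.
const delegationABI = `[{"type":"function","name":"delegation","stateMutability":"view","inputs":[{"name":"delegatorAddr","type":"string"},{"name":"validatorAddr","type":"string"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(delegationABI))
	if err != nil {
		panic(err)
	}
	delegator := "bech32-delegator-address" // placeholder
	validator := "bech32-validator-address" // placeholder
	input, err := parsed.Pack("delegation", delegator, validator)
	if err != nil {
		panic(err)
	}
	precompile := common.HexToAddress("0x0000000000000000000000000000000000001001")
	fmt.Printf("eth_call target %s, calldata %d bytes\n", precompile, len(input))
}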


@ -0,0 +1,33 @@
import "hardhat-abi-exporter";
import { HardhatUserConfig } from "hardhat/types";
const config: HardhatUserConfig = {
paths: {
artifacts: "build/artifacts",
cache: "build/cache",
sources: "contracts",
},
solidity: {
compilers: [
{
version: "0.8.20",
settings: {
evmVersion: "istanbul",
optimizer: {
enabled: true,
runs: 200,
},
},
},
],
},
abiExporter: {
path: "./abis",
runOnCompile: true,
clear: true,
flat: true,
format: "json",
},
};
export default config;


@ -0,0 +1,27 @@
{
"name": "precompile-contracts",
"version": "1.0.0",
"license": "MIT",
"scripts": {
"build": "hardhat compile",
"fmt:sol": "prettier 'contracts/**/*.sol' -w"
},
"devDependencies": {
"@nomicfoundation/hardhat-ethers": "^3.0.5",
"@typescript-eslint/eslint-plugin": "6.21.0",
"@typescript-eslint/parser": "6.21.0",
"eslint": "8.34.0",
"eslint-config-prettier": "8.6.0",
"eslint-plugin-no-only-tests": "3.1.0",
"eslint-plugin-prettier": "4.2.1",
"hardhat": "^2.22.2",
"hardhat-abi-exporter": "^2.10.1",
"prettier": "2.8.4",
"prettier-plugin-organize-imports": "3.2.4",
"prettier-plugin-solidity": "1.1.2",
"solhint": "^4.5.4",
"solhint-plugin-prettier": "0.0.5",
"ts-node": "^10.9.2",
"typescript": "4.9.5"
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@ -0,0 +1,5 @@
package staking
const (
ErrPubKeyInvalidLength = "public key with invalid length"
)


@ -0,0 +1,223 @@
package staking
import (
sdk "github.com/cosmos/cosmos-sdk/types"
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/core/vm"
)
func (s *StakingPrecompile) Validators(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryValidatorsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.Validators(ctx, req)
if err != nil {
return nil, err
}
validators := make([]Validator, len(response.Validators))
for i, v := range response.Validators {
validators[i] = convertValidator(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(validators, paginationResult)
}
func (s *StakingPrecompile) Validator(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryValidatorRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.Validator(ctx, req)
if err != nil {
return nil, err
}
return method.Outputs.Pack(convertValidator(response.Validator))
}
func (s *StakingPrecompile) ValidatorDelegations(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryValidatorDelegationsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.ValidatorDelegations(ctx, req)
if err != nil {
return nil, err
}
delegationResponses := make([]DelegationResponse, len(response.DelegationResponses))
for i, v := range response.DelegationResponses {
delegationResponses[i] = convertDelegationResponse(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(delegationResponses, paginationResult)
}
func (s *StakingPrecompile) ValidatorUnbondingDelegations(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryValidatorUnbondingDelegationsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.ValidatorUnbondingDelegations(ctx, req)
if err != nil {
return nil, err
}
unbondingResponses := make([]UnbondingDelegation, len(response.UnbondingResponses))
for i, v := range response.UnbondingResponses {
unbondingResponses[i] = convertUnbondingDelegation(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(unbondingResponses, paginationResult)
}
func (s *StakingPrecompile) Delegation(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryDelegationRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.Delegation(ctx, req)
if err != nil {
return nil, err
}
delegation := convertDelegation(response.DelegationResponse.Delegation)
balance := response.DelegationResponse.Balance.Amount.BigInt()
return method.Outputs.Pack(delegation, balance)
}
func (s *StakingPrecompile) UnbondingDelegation(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryUnbondingDelegationRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.UnbondingDelegation(ctx, req)
if err != nil {
return nil, err
}
return method.Outputs.Pack(convertUnbondingDelegation(response.Unbond))
}
func (s *StakingPrecompile) DelegatorDelegations(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryDelegatorDelegationsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.DelegatorDelegations(ctx, req)
if err != nil {
return nil, err
}
delegationResponses := make([]DelegationResponse, len(response.DelegationResponses))
for i, v := range response.DelegationResponses {
delegationResponses[i] = convertDelegationResponse(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(delegationResponses, paginationResult)
}
func (s *StakingPrecompile) DelegatorUnbondingDelegations(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryDelegatorUnbondingDelegationsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.DelegatorUnbondingDelegations(ctx, req)
if err != nil {
return nil, err
}
unbondingResponses := make([]UnbondingDelegation, len(response.UnbondingResponses))
for i, v := range response.UnbondingResponses {
unbondingResponses[i] = convertUnbondingDelegation(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(unbondingResponses, paginationResult)
}
func (s *StakingPrecompile) Redelegations(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryRedelegationsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.Redelegations(ctx, req)
if err != nil {
return nil, err
}
redelegationResponses := make([]RedelegationResponse, len(response.RedelegationResponses))
for i, v := range response.RedelegationResponses {
redelegationResponses[i] = convertRedelegationResponse(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(redelegationResponses, paginationResult)
}
func (s *StakingPrecompile) DelegatorValidators(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryDelegatorValidatorsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.DelegatorValidators(ctx, req)
if err != nil {
return nil, err
}
validators := make([]Validator, len(response.Validators))
for i, v := range response.Validators {
validators[i] = convertValidator(v)
}
paginationResult := convertPageResponse(response.Pagination)
return method.Outputs.Pack(validators, paginationResult)
}
func (s *StakingPrecompile) DelegatorValidator(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryDelegatorValidatorRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.DelegatorValidator(ctx, req)
if err != nil {
return nil, err
}
return method.Outputs.Pack(convertValidator(response.Validator))
}
func (s *StakingPrecompile) Pool(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryPoolRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.Pool(ctx, req)
if err != nil {
return nil, err
}
notBondedTokens := response.Pool.NotBondedTokens.BigInt()
bondedTokens := response.Pool.BondedTokens.BigInt()
return method.Outputs.Pack(notBondedTokens, bondedTokens)
}
func (s *StakingPrecompile) Params(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
req, err := NewQueryParamsRequest(args)
if err != nil {
return nil, err
}
response, err := stakingkeeper.Querier{Keeper: s.stakingKeeper}.Params(ctx, req)
if err != nil {
return nil, err
}
return method.Outputs.Pack(convertParams(response.Params))
}
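The convert* helpers referenced above (convertValidator, convertDelegationResponse, convertPageResponse, and so on) are defined in a file not shown in this diff. As an assumption about their shape, a pagination conversion consistent with the PageResponse struct in IStaking.sol and the tests below could look like this sketch, where query is github.com/cosmos/cosmos-sdk/types/query:
// Sketch only: the real convertPageResponse lives elsewhere in the staking
// precompile package and may differ in detail.
func convertPageResponse(p *query.PageResponse) PageResponse {
	if p == nil {
		// Keeper queries may return a nil pagination result.
		return PageResponse{NextKey: []byte{}, Total: 0}
	}
	return PageResponse{NextKey: p.NextKey, Total: p.Total}
}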


@ -0,0 +1,801 @@
package staking_test
import (
"math/big"
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
sdk "github.com/cosmos/cosmos-sdk/types"
query "github.com/cosmos/cosmos-sdk/types/query"
"github.com/ethereum/go-ethereum/common"
)
func (s *StakingTestSuite) TestValidators() {
method := stakingprecompile.StakingFunctionValidators
testCases := []struct {
name string
malleate func() []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
"",
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
validators := out[0].([]stakingprecompile.Validator)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(3, len(validators))
s.Assert().EqualValues(3, paginationResult.Total)
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
s.AddDelegation(s.signerOne.HexAddr, s.signerTwo.HexAddr, sdk.NewIntFromUint64(1000000))
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestValidator() {
method := stakingprecompile.StakingFunctionValidator
testCases := []struct {
name string
malleate func(operatorAddress string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(operatorAddress string) []byte {
input, err := s.abi.Pack(
method,
operatorAddress,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
validator := out[0].(stakingprecompile.Validator)
s.Require().EqualValues(common.HexToAddress(validator.OperatorAddress), common.BytesToAddress(operatorAddress.Bytes()))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestValidatorDelegations() {
method := stakingprecompile.StakingFunctionValidatorDelegations
testCases := []struct {
name string
malleate func(operatorAddress string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(operatorAddress string) []byte {
input, err := s.abi.Pack(
method,
operatorAddress,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
delegations := out[0].([]stakingprecompile.DelegationResponse)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
s.Require().EqualValues(len(delegations), len(d))
// jsonData, _ := json.MarshalIndent(delegations, "", " ")
// fmt.Printf("delegations: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestValidatorUnbondingDelegations() {
method := stakingprecompile.StakingFunctionValidatorUnbondingDelegations
testCases := []struct {
name string
malleate func(operatorAddress string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(operatorAddress string) []byte {
input, err := s.abi.Pack(
method,
operatorAddress,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
unbonding := out[0].([]stakingprecompile.UnbondingDelegation)
s.Require().EqualValues(len(unbonding), 1)
// jsonData, _ := json.MarshalIndent(unbonding, "", " ")
// fmt.Printf("delegations: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
_, err = s.stakingKeeper.Undelegate(s.Ctx, delAddr, operatorAddress, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegation() {
method := stakingprecompile.StakingFunctionDelegation
testCases := []struct {
name string
malleate func(delAddr, valAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, valAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
valAddr,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].(stakingprecompile.Delegation)
b := out[1].(*big.Int)
_ = d
_ = b
/*
jsonData, _ := json.MarshalIndent(d, "", " ")
fmt.Printf("delegation: %s\n", string(jsonData))
fmt.Printf("balance: %v\n", b)
*/
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestUnbondingDelegation() {
method := stakingprecompile.StakingFunctionUnbondingDelegation
testCases := []struct {
name string
malleate func(delAddr, valAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, valAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
valAddr,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
u := out[0].(stakingprecompile.UnbondingDelegation)
_ = u
// jsonData, _ := json.MarshalIndent(u, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
_, err = s.stakingKeeper.Undelegate(s.Ctx, delAddr, operatorAddress, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorDelegations() {
method := stakingprecompile.StakingFunctionDelegatorDelegations
testCases := []struct {
name string
malleate func(delAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].([]stakingprecompile.DelegationResponse)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(d))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorUnbondingDelegations() {
method := stakingprecompile.StakingFunctionDelegatorUnbondingDelegations
testCases := []struct {
name string
malleate func(delAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].([]stakingprecompile.UnbondingDelegation)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(d))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
_, err = s.stakingKeeper.Undelegate(s.Ctx, delAddr, operatorAddress, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestRedelegations() {
method := stakingprecompile.StakingFunctionRedelegations
testCases := []struct {
name string
malleate func(delAddr, srcValAddr, dstValAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, srcValAddr, dstValAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
srcValAddr,
dstValAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].([]stakingprecompile.RedelegationResponse)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(d))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegations: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
// setup redelegations
s.setupValidator(s.signerOne)
_, err = s.stakingKeeper.BeginRedelegation(s.Ctx, delAddr, operatorAddress, s.signerOne.ValAddr, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String(), s.signerOne.ValAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorValidators() {
method := stakingprecompile.StakingFunctionDelegatorValidators
testCases := []struct {
name string
malleate func(delAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
v := out[0].([]stakingprecompile.Validator)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(v))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(v, "", " ")
// fmt.Printf("validators: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorValidator() {
method := stakingprecompile.StakingFunctionDelegatorValidator
testCases := []struct {
name string
malleate func(delAddr, valAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, valAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
valAddr,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
v := out[0].(stakingprecompile.Validator)
_ = v
// jsonData, _ := json.MarshalIndent(v, "", " ")
// fmt.Printf("validators: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestPool() {
method := stakingprecompile.StakingFunctionPool
testCases := []struct {
name string
malleate func() []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
notBonded := out[0].(*big.Int)
bonded := out[1].(*big.Int)
s.Assert().Equal(notBonded.Int64(), int64(0))
s.Assert().Equal(bonded.Int64(), int64(0))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestParams() {
method := stakingprecompile.StakingFunctionParams
testCases := []struct {
name string
malleate func() []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
params := out[0].(stakingprecompile.Params)
_ = params
// jsonData, _ := json.MarshalIndent(params, "", " ")
// fmt.Printf("params: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}


@ -0,0 +1,148 @@
package staking
import (
"fmt"
"strings"
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
"github.com/cosmos/cosmos-sdk/store/types"
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/evmos/ethermint/x/evm/statedb"
)
const (
PrecompileAddress = "0x0000000000000000000000000000000000001001"
// txs
StakingFunctionCreateValidator = "createValidator"
StakingFunctionEditValidator = "editValidator"
StakingFunctionDelegate = "delegate"
StakingFunctionBeginRedelegate = "beginRedelegate"
StakingFunctionUndelegate = "undelegate"
StakingFunctionCancelUnbondingDelegation = "cancelUnbondingDelegation"
// queries
StakingFunctionValidators = "validators"
StakingFunctionValidator = "validator"
StakingFunctionValidatorDelegations = "validatorDelegations"
StakingFunctionValidatorUnbondingDelegations = "validatorUnbondingDelegations"
StakingFunctionDelegation = "delegation"
StakingFunctionUnbondingDelegation = "unbondingDelegation"
StakingFunctionDelegatorDelegations = "delegatorDelegations"
StakingFunctionDelegatorUnbondingDelegations = "delegatorUnbondingDelegations"
StakingFunctionRedelegations = "redelegations"
StakingFunctionDelegatorValidators = "delegatorValidators"
StakingFunctionDelegatorValidator = "delegatorValidator"
StakingFunctionPool = "pool"
StakingFunctionParams = "params"
)
var _ vm.PrecompiledContract = &StakingPrecompile{}
type StakingPrecompile struct {
abi abi.ABI
stakingKeeper *stakingkeeper.Keeper
}
func NewStakingPrecompile(stakingKeeper *stakingkeeper.Keeper) (*StakingPrecompile, error) {
abi, err := abi.JSON(strings.NewReader(StakingABI))
if err != nil {
return nil, err
}
return &StakingPrecompile{
abi: abi,
stakingKeeper: stakingKeeper,
}, nil
}
// Address implements vm.PrecompiledContract.
func (s *StakingPrecompile) Address() common.Address {
return common.HexToAddress(PrecompileAddress)
}
// RequiredGas implements vm.PrecompiledContract.
func (s *StakingPrecompile) RequiredGas(input []byte) uint64 {
return 0
}
// Run implements vm.PrecompiledContract.
func (s *StakingPrecompile) Run(evm *vm.EVM, contract *vm.Contract, readonly bool) ([]byte, error) {
// parse input
if len(contract.Input) < 4 {
return nil, vm.ErrExecutionReverted
}
method, err := s.abi.MethodById(contract.Input[:4])
if err != nil {
return nil, vm.ErrExecutionReverted
}
args, err := method.Inputs.Unpack(contract.Input[4:])
if err != nil {
return nil, err
}
// get state db and context
stateDB, ok := evm.StateDB.(*statedb.StateDB)
if !ok {
return nil, fmt.Errorf(precopmiles_common.ErrGetStateDB)
}
ctx := stateDB.GetContext()
// reset gas config
ctx = ctx.WithKVGasConfig(types.KVGasConfig())
initialGas := ctx.GasMeter().GasConsumed()
var bz []byte
switch method.Name {
// queries
case StakingFunctionValidators:
bz, err = s.Validators(ctx, evm, method, args)
case StakingFunctionValidator:
bz, err = s.Validator(ctx, evm, method, args)
case StakingFunctionValidatorDelegations:
bz, err = s.ValidatorDelegations(ctx, evm, method, args)
case StakingFunctionValidatorUnbondingDelegations:
bz, err = s.ValidatorUnbondingDelegations(ctx, evm, method, args)
case StakingFunctionDelegation:
bz, err = s.Delegation(ctx, evm, method, args)
case StakingFunctionUnbondingDelegation:
bz, err = s.UnbondingDelegation(ctx, evm, method, args)
case StakingFunctionDelegatorDelegations:
bz, err = s.DelegatorDelegations(ctx, evm, method, args)
case StakingFunctionDelegatorUnbondingDelegations:
bz, err = s.DelegatorUnbondingDelegations(ctx, evm, method, args)
case StakingFunctionRedelegations:
bz, err = s.Redelegations(ctx, evm, method, args)
case StakingFunctionDelegatorValidators:
bz, err = s.DelegatorValidators(ctx, evm, method, args)
case StakingFunctionDelegatorValidator:
bz, err = s.DelegatorValidator(ctx, evm, method, args)
case StakingFunctionPool:
bz, err = s.Pool(ctx, evm, method, args)
case StakingFunctionParams:
bz, err = s.Params(ctx, evm, method, args)
// txs
case StakingFunctionCreateValidator:
bz, err = s.CreateValidator(ctx, evm, stateDB, contract, method, args)
case StakingFunctionEditValidator:
bz, err = s.EditValidator(ctx, evm, stateDB, contract, method, args)
case StakingFunctionDelegate:
bz, err = s.Delegate(ctx, evm, stateDB, contract, method, args)
case StakingFunctionBeginRedelegate:
bz, err = s.BeginRedelegate(ctx, evm, stateDB, contract, method, args)
case StakingFunctionUndelegate:
bz, err = s.Undelegate(ctx, evm, stateDB, contract, method, args)
case StakingFunctionCancelUnbondingDelegation:
bz, err = s.CancelUnbondingDelegation(ctx, evm, stateDB, contract, method, args)
}
if err != nil {
return nil, err
}
cost := ctx.GasMeter().GasConsumed() - initialGas
if !contract.UseGas(cost) {
return nil, vm.ErrOutOfGas
}
return bz, nil
}


@ -0,0 +1,145 @@
package staking_test
import (
"errors"
"math/big"
"strings"
"testing"
"cosmossdk.io/math"
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
"github.com/0glabs/0g-chain/precompiles/testutil"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
sdk "github.com/cosmos/cosmos-sdk/types"
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/suite"
)
type StakingTestSuite struct {
testutil.PrecompileTestSuite
abi abi.ABI
addr common.Address
staking *stakingprecompile.StakingPrecompile
stakingKeeper *stakingkeeper.Keeper
signerOne *testutil.TestSigner
signerTwo *testutil.TestSigner
}
func (suite *StakingTestSuite) SetupTest() {
suite.PrecompileTestSuite.SetupTest()
suite.stakingKeeper = suite.App.GetStakingKeeper()
suite.addr = common.HexToAddress(stakingprecompile.PrecompileAddress)
precompiles := suite.EvmKeeper.GetPrecompiles()
precompile, ok := precompiles[suite.addr]
suite.Assert().EqualValues(ok, true)
suite.staking = precompile.(*stakingprecompile.StakingPrecompile)
suite.signerOne = suite.GenSigner()
suite.signerTwo = suite.GenSigner()
abi, err := abi.JSON(strings.NewReader(stakingprecompile.StakingABI))
suite.Assert().NoError(err)
suite.abi = abi
}
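// AddDelegation credits tokens and delegator shares to a validator (creating it with a fresh ed25519 consensus key if it does not exist) and stores a matching delegation in the staking keeper.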
func (suite *StakingTestSuite) AddDelegation(from string, to string, amount math.Int) {
accAddr, err := sdk.AccAddressFromHexUnsafe(from)
suite.Require().NoError(err)
valAddr, err := sdk.ValAddressFromHex(to)
suite.Require().NoError(err)
validator, found := suite.StakingKeeper.GetValidator(suite.Ctx, valAddr)
if !found {
consPriv := ed25519.GenPrivKey()
newValidator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
suite.Require().NoError(err)
validator = newValidator
}
validator.Tokens = validator.Tokens.Add(amount)
validator.DelegatorShares = validator.DelegatorShares.Add(amount.ToLegacyDec())
suite.StakingKeeper.SetValidator(suite.Ctx, validator)
bonded := suite.stakingKeeper.GetDelegatorBonded(suite.Ctx, accAddr)
suite.StakingKeeper.SetDelegation(suite.Ctx, stakingtypes.Delegation{
DelegatorAddress: accAddr.String(),
ValidatorAddress: valAddr.String(),
Shares: bonded.Add(amount).ToLegacyDec(),
})
}
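// setupValidator registers the signer as a validator through the precompile's createValidator method and applies the pending validator set updates.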
func (suite *StakingTestSuite) setupValidator(signer *testutil.TestSigner) {
method := stakingprecompile.StakingFunctionCreateValidator
description := stakingprecompile.Description{
Moniker: "test node",
Identity: "test node identity",
Website: "http://test.node.com",
SecurityContact: "test node security contact",
Details: "test node details",
}
commission := stakingprecompile.CommissionRates{
Rate: math.LegacyOneDec().BigInt(),
MaxRate: math.LegacyOneDec().BigInt(),
MaxChangeRate: math.LegacyOneDec().BigInt(),
}
minSelfDelegation := big.NewInt(1)
pubkey := "eh/aR8BGUBIYI/Ust0NVBxZafLDAm7344F9dKzZU+7g="
value := big.NewInt(100000000)
input, err := suite.abi.Pack(
method,
description,
commission,
minSelfDelegation,
pubkey,
value,
)
suite.Assert().NoError(err)
_, err = suite.runTx(input, signer, 10000000)
suite.Assert().NoError(err)
_, err = suite.stakingKeeper.ApplyAndReturnValidatorSetUpdates(suite.Ctx)
suite.Assert().NoError(err)
}
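// firstBondedValidator returns the operator address of the first bonded validator in the staking keeper, or an error if none is bonded.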
func (suite *StakingTestSuite) firstBondedValidator() (sdk.ValAddress, error) {
validators := suite.stakingKeeper.GetValidators(suite.Ctx, 10)
for _, v := range validators {
if v.IsBonded() {
return sdk.ValAddressFromBech32(v.OperatorAddress)
}
}
return nil, errors.New("no bonded validator")
}
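// runTx packs the input into a precompile contract call from the signer, signs a matching Ethereum tx, builds an EVM with the registered precompiles, and invokes the staking precompile's Run entry point directly.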
func (suite *StakingTestSuite) runTx(input []byte, signer *testutil.TestSigner, gas uint64) ([]byte, error) {
contract := vm.NewPrecompile(vm.AccountRef(signer.Addr), vm.AccountRef(suite.addr), big.NewInt(0), gas)
contract.Input = input
msgEthereumTx := evmtypes.NewTx(suite.EvmKeeper.ChainID(), 0, &suite.addr, big.NewInt(0), gas, big.NewInt(0), big.NewInt(0), big.NewInt(0), input, nil)
msgEthereumTx.From = signer.HexAddr
err := msgEthereumTx.Sign(suite.EthSigner, signer.Signer)
suite.Assert().NoError(err, "failed to sign Ethereum message")
proposerAddress := suite.Ctx.BlockHeader().ProposerAddress
cfg, err := suite.EvmKeeper.EVMConfig(suite.Ctx, proposerAddress, suite.EvmKeeper.ChainID())
suite.Assert().NoError(err, "failed to instantiate EVM config")
msg, err := msgEthereumTx.AsMessage(suite.EthSigner, big.NewInt(0))
suite.Assert().NoError(err, "failed to instantiate Ethereum message")
evm := suite.EvmKeeper.NewEVM(suite.Ctx, msg, cfg, nil, suite.Statedb)
precompiles := suite.EvmKeeper.GetPrecompiles()
evm.WithPrecompiles(precompiles, []common.Address{suite.addr})
return suite.staking.Run(evm, contract, false)
}
func TestKeeperSuite(t *testing.T) {
suite.Run(t, new(StakingTestSuite))
}

precompiles/staking/tx.go

@@ -0,0 +1,163 @@
package staking
import (
"fmt"
"math/big"
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
sdk "github.com/cosmos/cosmos-sdk/types"
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/evmos/ethermint/x/evm/statedb"
)
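// CreateValidator decodes the ABI args into a MsgCreateValidator and dispatches it to the staking msg server; only direct calls where the contract caller is the tx origin are accepted.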
func (s *StakingPrecompile) CreateValidator(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgCreateValidator(args, evm.Origin, s.stakingKeeper.BondDenom(ctx))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = stakingkeeper.NewMsgServerImpl(s.stakingKeeper).CreateValidator(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack()
}
func (s *StakingPrecompile) EditValidator(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgEditValidator(args, evm.Origin)
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = stakingkeeper.NewMsgServerImpl(s.stakingKeeper).EditValidator(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack()
}
func (s *StakingPrecompile) Delegate(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgDelegate(args, evm.Origin, s.stakingKeeper.BondDenom(ctx))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = stakingkeeper.NewMsgServerImpl(s.stakingKeeper).Delegate(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack()
}
func (s *StakingPrecompile) BeginRedelegate(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgBeginRedelegate(args, evm.Origin, s.stakingKeeper.BondDenom(ctx))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
response, err := stakingkeeper.NewMsgServerImpl(s.stakingKeeper).BeginRedelegate(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack(big.NewInt(response.CompletionTime.UTC().Unix()))
}
func (s *StakingPrecompile) Undelegate(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgUndelegate(args, evm.Origin, s.stakingKeeper.BondDenom(ctx))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
response, err := stakingkeeper.NewMsgServerImpl(s.stakingKeeper).Undelegate(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack(big.NewInt(response.CompletionTime.UTC().Unix()))
}
func (s *StakingPrecompile) CancelUnbondingDelegation(
ctx sdk.Context,
evm *vm.EVM,
stateDB *statedb.StateDB,
contract *vm.Contract,
method *abi.Method,
args []interface{},
) ([]byte, error) {
msg, err := NewMsgCancelUnbondingDelegation(args, evm.Origin, s.stakingKeeper.BondDenom(ctx))
if err != nil {
return nil, err
}
// validation
if contract.CallerAddress != evm.Origin {
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = stakingkeeper.NewMsgServerImpl(s.stakingKeeper).CancelUnbondingDelegation(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack()
}


@@ -0,0 +1,485 @@
package staking_test
import (
"encoding/base64"
"math/big"
"time"
"cosmossdk.io/math"
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/ethereum/go-ethereum/common"
"github.com/evmos/ethermint/x/evm/statedb"
)
func (s *StakingTestSuite) TestCreateValidator() {
method := stakingprecompile.StakingFunctionCreateValidator
description := stakingprecompile.Description{
Moniker: "test node",
Identity: "test node identity",
Website: "http://test.node.com",
SecurityContact: "test node security contact",
Details: "test node details",
}
commission := stakingprecompile.CommissionRates{
Rate: math.LegacyOneDec().BigInt(),
MaxRate: math.LegacyOneDec().BigInt(),
MaxChangeRate: math.LegacyOneDec().BigInt(),
}
minSelfDelegation := big.NewInt(1)
pubkey := "eh/aR8BGUBIYI/Ust0NVBxZafLDAm7344F9dKzZU+7g="
value := big.NewInt(100000000)
testCases := []struct {
name string
malleate func() []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte)
expError bool
errContains string
}{
{
"fail - ErrPubKeyInvalidLength",
func() []byte {
input, err := s.abi.Pack(
method,
description,
commission,
minSelfDelegation,
s.signerOne.HexAddr,
value,
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func([]byte) {},
true,
stakingprecompile.ErrPubKeyInvalidLength,
},
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
description,
commission,
minSelfDelegation,
pubkey,
value,
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte) {},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
s.stakingKeeper.ApplyAndReturnValidatorSetUpdates(s.Ctx)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
// query the validator in the staking keeper
validator := s.StakingKeeper.Validator(s.Ctx, s.signerOne.ValAddr)
s.Require().NoError(err)
s.Require().NotNil(validator, "expected validator not to be nil")
tc.postCheck(bz)
isBonded := validator.IsBonded()
s.Require().Equal(true, isBonded, "expected validator bonded to be %t; got %t", true, isBonded)
consPubKey, err := validator.ConsPubKey()
s.Require().NoError(err)
consPubKeyBase64 := base64.StdEncoding.EncodeToString(consPubKey.Bytes())
s.Require().Equal(pubkey, consPubKeyBase64, "expected validator pubkey to be %s; got %s", pubkey, consPubKeyBase64)
operator := validator.GetOperator()
s.Require().Equal(s.signerOne.ValAddr, operator, "expected validator operator to be %s; got %s", s.signerOne.ValAddr, operator)
commissionRate := validator.GetCommission()
s.Require().Equal(commission.Rate.String(), commissionRate.BigInt().String(), "expected validator commission rate to be %s; got %s", commission.Rate.String(), commissionRate.String())
valMinSelfDelegation := validator.GetMinSelfDelegation()
s.Require().Equal(minSelfDelegation.String(), valMinSelfDelegation.String(), "expected validator min self delegation to be %s; got %s", minSelfDelegation.String(), valMinSelfDelegation.String())
moniker := validator.GetMoniker()
s.Require().Equal(description.Moniker, moniker, "expected validator moniker to be %s; got %s", description.Moniker, moniker)
jailed := validator.IsJailed()
s.Require().Equal(false, jailed, "expected validator jailed to be %t; got %t", false, jailed)
}
})
}
}
func (s *StakingTestSuite) TestEditValidator() {
method := stakingprecompile.StakingFunctionEditValidator
description := stakingprecompile.Description{
Moniker: "test node",
Identity: "test node identity",
Website: "http://test.node.com",
SecurityContact: "test node security contact",
Details: "test node details",
}
newRate := math.LegacyOneDec().BigInt()
newRate.Div(newRate, big.NewInt(2))
minSelfDelegation := big.NewInt(2)
testCases := []struct {
name string
malleate func() []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte)
expError bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
description,
stakingprecompile.NullableUint{
IsNull: false,
Value: newRate,
},
stakingprecompile.NullableUint{
IsNull: true,
Value: math.LegacyOneDec().BigInt(),
},
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte) {},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
s.setupValidator(s.signerOne)
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
// query the validator in the staking keeper
validator := s.StakingKeeper.Validator(s.Ctx, s.signerOne.ValAddr)
s.Require().NoError(err)
s.Require().NotNil(validator, "expected validator not to be nil")
tc.postCheck(bz)
isBonded := validator.IsBonded()
s.Require().Equal(true, isBonded, "expected validator bonded to be %t; got %t", true, isBonded)
operator := validator.GetOperator()
s.Require().Equal(s.signerOne.ValAddr, operator, "expected validator operator to be %s; got %s", s.signerOne.ValAddr, operator)
commissionRate := validator.GetCommission()
s.Require().Equal(newRate.String(), commissionRate.BigInt().String(), "expected validator commission rate to be %s; got %s", newRate.String(), commissionRate.String())
valMinSelfDelegation := validator.GetMinSelfDelegation()
s.Require().Equal(big.NewInt(1).String(), valMinSelfDelegation.String(), "expected validator min self delegation to be %s; got %s", big.NewInt(1).String(), valMinSelfDelegation.String())
moniker := validator.GetMoniker()
s.Require().Equal(description.Moniker, moniker, "expected validator moniker to be %s; got %s", description.Moniker, moniker)
jailed := validator.IsJailed()
s.Require().Equal(false, jailed, "expected validator jailed to be %t; got %t", false, jailed)
}
})
}
}
func (s *StakingTestSuite) TestDelegate() {
method := stakingprecompile.StakingFunctionDelegate
testCases := []struct {
name string
malleate func(valAddr string) []byte
gas uint64
callerAddress *common.Address
postCheck func(valAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(valAddr string) []byte {
input, err := s.abi.Pack(
method,
valAddr,
big.NewInt(1000000),
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(valAddr sdk.ValAddress) {
d, found := s.stakingKeeper.GetDelegation(s.Ctx, s.signerOne.AccAddr, valAddr)
s.Assert().EqualValues(found, true)
s.Assert().EqualValues(d.ValidatorAddress, valAddr.String())
s.Assert().EqualValues(d.DelegatorAddress, s.signerOne.AccAddr.String())
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(operatorAddress)
}
})
}
}
func (s *StakingTestSuite) TestBeginRedelegate() {
method := stakingprecompile.StakingFunctionBeginRedelegate
testCases := []struct {
name string
malleate func(srcAddr, dstAddr string) []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte, srcAddr, dstAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(srcAddr, dstAddr string) []byte {
input, err := s.abi.Pack(
method,
srcAddr,
dstAddr,
big.NewInt(1000000),
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte, srcAddr, dstAddr sdk.ValAddress) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d, found := s.stakingKeeper.GetRedelegation(s.Ctx, s.signerOne.AccAddr, srcAddr, dstAddr)
s.Assert().EqualValues(found, true)
s.Assert().EqualValues(d.DelegatorAddress, s.signerOne.AccAddr.String())
s.Assert().EqualValues(d.ValidatorSrcAddress, srcAddr.String())
s.Assert().EqualValues(d.ValidatorDstAddress, dstAddr.String())
completionTime := out[0].(*big.Int)
params := s.stakingKeeper.GetParams(s.Ctx)
s.Assert().EqualValues(completionTime.Int64(), s.Ctx.BlockHeader().Time.Add(params.UnbondingTime).UTC().Unix())
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
s.setupValidator(s.signerOne)
bz, err := s.runTx(tc.malleate(s.signerOne.ValAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(bz, s.signerOne.ValAddr, operatorAddress)
}
})
}
}
func (s *StakingTestSuite) TestUndelegate() {
method := stakingprecompile.StakingFunctionUndelegate
testCases := []struct {
name string
malleate func(valAddr string) []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte, valAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(valAddr string) []byte {
input, err := s.abi.Pack(
method,
valAddr,
big.NewInt(1000000),
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte, valAddr sdk.ValAddress) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d, found := s.stakingKeeper.GetUnbondingDelegation(s.Ctx, s.signerOne.AccAddr, valAddr)
s.Assert().EqualValues(found, true)
s.Assert().EqualValues(d.DelegatorAddress, s.signerOne.AccAddr.String())
s.Assert().EqualValues(d.ValidatorAddress, valAddr.String())
completionTime := out[0].(*big.Int)
params := s.stakingKeeper.GetParams(s.Ctx)
s.Assert().EqualValues(completionTime.Int64(), s.Ctx.BlockHeader().Time.Add(params.UnbondingTime).UTC().Unix())
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
s.setupValidator(s.signerOne)
bz, err := s.runTx(tc.malleate(s.signerOne.ValAddr.String()), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(bz, s.signerOne.ValAddr)
}
})
}
}
func (s *StakingTestSuite) TestCancelUnbondingDelegation() {
method := stakingprecompile.StakingFunctionCancelUnbondingDelegation
testCases := []struct {
name string
malleate func(valAddr string, height *big.Int) []byte
gas uint64
callerAddress *common.Address
postCheck func(valAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(valAddr string, height *big.Int) []byte {
input, err := s.abi.Pack(
method,
valAddr,
big.NewInt(1),
height,
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(valAddr sdk.ValAddress) {
_, found := s.stakingKeeper.GetUnbondingDelegation(s.Ctx, s.signerOne.AccAddr, valAddr)
s.Assert().EqualValues(found, false)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
s.setupValidator(s.signerOne)
// unbond
_, err := s.stakingKeeper.Undelegate(s.Ctx, s.signerOne.AccAddr, s.signerOne.ValAddr, sdk.NewDec(1))
s.Require().NoError(err)
u, _ := s.stakingKeeper.GetUnbondingDelegation(s.Ctx, s.signerOne.AccAddr, s.signerOne.ValAddr)
height := u.Entries[0].CreationHeight
bz, err := s.runTx(tc.malleate(s.signerOne.ValAddr.String(), big.NewInt(height)), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(s.signerOne.ValAddr)
}
})
}
}


@@ -0,0 +1,602 @@
package staking
import (
"encoding/base64"
"errors"
"fmt"
"math/big"
"cosmossdk.io/math"
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdk "github.com/cosmos/cosmos-sdk/types"
query "github.com/cosmos/cosmos-sdk/types/query"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/common"
)
type Commission = struct {
CommissionRates CommissionRates `json:"commissionRates"`
UpdateTime *big.Int `json:"updateTime"`
}
type CommissionRates = struct {
Rate *big.Int `json:"rate"`
MaxRate *big.Int `json:"maxRate"`
MaxChangeRate *big.Int `json:"maxChangeRate"`
}
type Delegation = struct {
DelegatorAddress string `json:"delegatorAddress"`
ValidatorAddress string `json:"validatorAddress"`
Shares *big.Int `json:"shares"`
}
type DelegationResponse = struct {
Delegation Delegation `json:"delegation"`
Balance *big.Int `json:"balance"`
}
type Description = struct {
Moniker string `json:"moniker"`
Identity string `json:"identity"`
Website string `json:"website"`
SecurityContact string `json:"securityContact"`
Details string `json:"details"`
}
type NullableUint = struct {
IsNull bool `json:"isNull"`
Value *big.Int `json:"value"`
}
type PageRequest = struct {
Key []byte `json:"key"`
Offset uint64 `json:"offset"`
Limit uint64 `json:"limit"`
CountTotal bool `json:"countTotal"`
Reverse bool `json:"reverse"`
}
type PageResponse = struct {
NextKey []byte `json:"nextKey"`
Total uint64 `json:"total"`
}
type Params = struct {
UnbondingTime int64 `json:"unbondingTime"`
MaxValidators uint32 `json:"maxValidators"`
MaxEntries uint32 `json:"maxEntries"`
HistoricalEntries uint32 `json:"historicalEntries"`
BondDenom string `json:"bondDenom"`
MinCommissionRate *big.Int `json:"minCommissionRate"`
}
type Redelegation = struct {
DelegatorAddress string `json:"delegatorAddress"`
ValidatorSrcAddress string `json:"validatorSrcAddress"`
ValidatorDstAddress string `json:"validatorDstAddress"`
Entries []RedelegationEntry `json:"entries"`
}
type RedelegationEntry = struct {
CreationHeight int64 `json:"creationHeight"`
CompletionTime int64 `json:"completionTime"`
InitialBalance *big.Int `json:"initialBalance"`
SharesDst *big.Int `json:"sharesDst"`
UnbondingId uint64 `json:"unbondingId"`
UnbondingOnHoldRefCount int64 `json:"unbondingOnHoldRefCount"`
}
type RedelegationEntryResponse = struct {
RedelegationEntry RedelegationEntry `json:"redelegationEntry"`
Balance *big.Int `json:"balance"`
}
type RedelegationResponse = struct {
Redelegation Redelegation `json:"redelegation"`
Entries []RedelegationEntryResponse `json:"entries"`
}
type UnbondingDelegation = struct {
DelegatorAddress string `json:"delegatorAddress"`
ValidatorAddress string `json:"validatorAddress"`
Entries []UnbondingDelegationEntry `json:"entries"`
}
type UnbondingDelegationEntry = struct {
CreationHeight int64 `json:"creationHeight"`
CompletionTime int64 `json:"completionTime"`
InitialBalance *big.Int `json:"initialBalance"`
Balance *big.Int `json:"balance"`
UnbondingId uint64 `json:"unbondingId"`
UnbondingOnHoldRefCount int64 `json:"unbondingOnHoldRefCount"`
}
type Validator = struct {
OperatorAddress string `json:"operatorAddress"`
ConsensusPubkey string `json:"consensusPubkey"`
Jailed bool `json:"jailed"`
Status uint8 `json:"status"`
Tokens *big.Int `json:"tokens"`
DelegatorShares *big.Int `json:"delegatorShares"`
Description Description `json:"description"`
UnbondingHeight int64 `json:"unbondingHeight"`
UnbondingTime int64 `json:"unbondingTime"`
Commission Commission `json:"commission"`
MinSelfDelegation *big.Int `json:"minSelfDelegation"`
UnbondingOnHoldRefCount int64 `json:"unbondingOnHoldRefCount"`
UnbondingIds []uint64 `json:"unbondingIds"`
}
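// convertValidator maps an SDK validator to the ABI Validator struct, encoding the operator as an EVM address and the consensus pubkey as base64 when they can be decoded, and falling back to the raw strings otherwise.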
func convertValidator(v stakingtypes.Validator) Validator {
validator := Validator{}
operatorAddress, err := sdk.ValAddressFromBech32(v.OperatorAddress)
if err != nil {
validator.OperatorAddress = v.OperatorAddress
} else {
validator.OperatorAddress = common.BytesToAddress(operatorAddress.Bytes()).String()
}
ed25519pk, ok := v.ConsensusPubkey.GetCachedValue().(cryptotypes.PubKey)
if !ok {
validator.ConsensusPubkey = v.ConsensusPubkey.String()
} else {
validator.ConsensusPubkey = base64.StdEncoding.EncodeToString(ed25519pk.Bytes())
}
validator.Jailed = v.Jailed
validator.Status = uint8(v.Status)
validator.Tokens = v.Tokens.BigInt()
validator.DelegatorShares = v.DelegatorShares.BigInt()
validator.Description = Description{
Moniker: v.Description.Moniker,
Identity: v.Description.Identity,
Website: v.Description.Website,
SecurityContact: v.Description.SecurityContact,
Details: v.Description.Details,
}
validator.UnbondingHeight = v.UnbondingHeight
validator.UnbondingTime = v.UnbondingTime.UTC().Unix()
validator.Commission = Commission{
CommissionRates: convertCommissionRates(v.Commission.CommissionRates),
UpdateTime: big.NewInt(v.Commission.UpdateTime.UTC().Unix()),
}
validator.MinSelfDelegation = v.MinSelfDelegation.BigInt()
validator.UnbondingOnHoldRefCount = v.UnbondingOnHoldRefCount
validator.UnbondingIds = v.UnbondingIds
return validator
}
func convertQueryPageRequest(pagination PageRequest) *query.PageRequest {
return &query.PageRequest{
Key: pagination.Key,
Offset: pagination.Offset,
Limit: pagination.Limit,
CountTotal: pagination.CountTotal,
Reverse: pagination.Reverse,
}
}
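// convertPageResponse maps the SDK page response to the ABI struct; a nil response is normalized to an empty next key with Total set to 1.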
func convertPageResponse(pagination *query.PageResponse) PageResponse {
if pagination == nil {
return PageResponse{
NextKey: make([]byte, 0),
Total: 1,
}
}
return PageResponse{
NextKey: pagination.NextKey,
Total: pagination.Total,
}
}
func convertStakingDescription(description Description) stakingtypes.Description {
return stakingtypes.Description{
Moniker: description.Moniker,
Identity: description.Identity,
Website: description.Website,
SecurityContact: description.SecurityContact,
Details: description.Details,
}
}
func convertStakingCommissionRates(commission CommissionRates) stakingtypes.CommissionRates {
return stakingtypes.CommissionRates{
Rate: precopmiles_common.BigIntToLegacyDec(commission.Rate),
MaxRate: precopmiles_common.BigIntToLegacyDec(commission.MaxRate),
MaxChangeRate: precopmiles_common.BigIntToLegacyDec(commission.MaxChangeRate),
}
}
func convertCommissionRates(commission stakingtypes.CommissionRates) CommissionRates {
return CommissionRates{
Rate: commission.Rate.BigInt(),
MaxRate: commission.MaxRate.BigInt(),
MaxChangeRate: commission.MaxChangeRate.BigInt(),
}
}
func convertDelegation(delegation stakingtypes.Delegation) Delegation {
return Delegation{
DelegatorAddress: delegation.DelegatorAddress,
ValidatorAddress: delegation.ValidatorAddress,
Shares: delegation.Shares.BigInt(),
}
}
func convertDelegationResponse(response stakingtypes.DelegationResponse) DelegationResponse {
return DelegationResponse{
Delegation: convertDelegation(response.Delegation),
Balance: response.Balance.Amount.BigInt(),
}
}
func convertUnbondingDelegationEntry(entry stakingtypes.UnbondingDelegationEntry) UnbondingDelegationEntry {
return UnbondingDelegationEntry{
CreationHeight: entry.CreationHeight,
CompletionTime: entry.CompletionTime.UTC().Unix(),
InitialBalance: entry.InitialBalance.BigInt(),
Balance: entry.Balance.BigInt(),
UnbondingId: entry.UnbondingId,
UnbondingOnHoldRefCount: entry.UnbondingOnHoldRefCount,
}
}
func convertUnbondingDelegation(response stakingtypes.UnbondingDelegation) UnbondingDelegation {
entries := make([]UnbondingDelegationEntry, len(response.Entries))
for i, v := range response.Entries {
entries[i] = convertUnbondingDelegationEntry(v)
}
return UnbondingDelegation{
DelegatorAddress: response.DelegatorAddress,
ValidatorAddress: response.ValidatorAddress,
Entries: entries,
}
}
func convertRedelegationEntry(entry stakingtypes.RedelegationEntry) RedelegationEntry {
return RedelegationEntry{
CreationHeight: entry.CreationHeight,
CompletionTime: entry.CompletionTime.UTC().Unix(),
InitialBalance: entry.InitialBalance.BigInt(),
SharesDst: entry.SharesDst.BigInt(),
UnbondingId: entry.UnbondingId,
UnbondingOnHoldRefCount: entry.UnbondingOnHoldRefCount,
}
}
func convertRedelegation(redelegation stakingtypes.Redelegation) Redelegation {
entries := make([]RedelegationEntry, len(redelegation.Entries))
for i, v := range redelegation.Entries {
entries[i] = convertRedelegationEntry(v)
}
return Redelegation{
DelegatorAddress: redelegation.DelegatorAddress,
ValidatorSrcAddress: redelegation.ValidatorSrcAddress,
ValidatorDstAddress: redelegation.ValidatorDstAddress,
Entries: entries,
}
}
func convertRedelegationEntryResponse(response stakingtypes.RedelegationEntryResponse) RedelegationEntryResponse {
return RedelegationEntryResponse{
RedelegationEntry: convertRedelegationEntry(response.RedelegationEntry),
Balance: response.Balance.BigInt(),
}
}
func convertRedelegationResponse(response stakingtypes.RedelegationResponse) RedelegationResponse {
entries := make([]RedelegationEntryResponse, len(response.Entries))
for i, v := range response.Entries {
entries[i] = convertRedelegationEntryResponse(v)
}
return RedelegationResponse{
Redelegation: convertRedelegation(response.Redelegation),
Entries: entries,
}
}
func convertParams(params stakingtypes.Params) Params {
return Params{
UnbondingTime: int64(params.UnbondingTime.Seconds()),
MaxValidators: params.MaxValidators,
MaxEntries: params.MaxEntries,
HistoricalEntries: params.HistoricalEntries,
BondDenom: params.BondDenom,
MinCommissionRate: params.MinCommissionRate.BigInt(),
}
}
func NewMsgCreateValidator(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgCreateValidator, error) {
if len(args) != 5 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 5, len(args))
}
description := args[0].(Description)
commission := args[1].(CommissionRates)
minSelfDelegation := args[2].(*big.Int)
pkstr := args[3].(string)
bz, err := base64.StdEncoding.DecodeString(pkstr)
if err != nil {
return nil, err
}
var pk cryptotypes.PubKey
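// only ed25519 consensus public keys (32 bytes) are supported; other lengths are rejected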
if len(bz) == ed25519.PubKeySize {
pk = &ed25519.PubKey{Key: bz}
} else {
return nil, errors.New(ErrPubKeyInvalidLength)
}
pkAny, err := codectypes.NewAnyWithValue(pk)
if err != nil {
return nil, err
}
value := args[4].(*big.Int)
msg := &stakingtypes.MsgCreateValidator{
Description: convertStakingDescription(description),
Commission: convertStakingCommissionRates(commission),
MinSelfDelegation: math.NewIntFromBigInt(minSelfDelegation),
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: sdk.ValAddress(sender.Bytes()).String(),
Pubkey: pkAny,
Value: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(value)},
}
return msg, msg.ValidateBasic()
}
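// NewMsgEditValidator builds a MsgEditValidator from the ABI args; the commission rate and min self delegation are only set when their NullableUint arguments are non-null.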
func NewMsgEditValidator(args []interface{}, sender common.Address) (*stakingtypes.MsgEditValidator, error) {
if len(args) != 3 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 3, len(args))
}
description := args[0].(Description)
commissionRateNullable := args[1].(NullableUint)
var commissionRate *sdk.Dec
if !commissionRateNullable.IsNull {
value := precopmiles_common.BigIntToLegacyDec(commissionRateNullable.Value)
commissionRate = &value
}
minSelfDelegationNullable := args[2].(NullableUint)
var minSelfDelegation *sdk.Int
if !minSelfDelegationNullable.IsNull {
value := math.NewIntFromBigInt(minSelfDelegationNullable.Value)
minSelfDelegation = &value
}
msg := &stakingtypes.MsgEditValidator{
Description: convertStakingDescription(description),
CommissionRate: commissionRate,
ValidatorAddress: sdk.ValAddress(sender.Bytes()).String(),
MinSelfDelegation: minSelfDelegation,
}
return msg, msg.ValidateBasic()
}
func NewMsgDelegate(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgDelegate, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
validatorAddress := args[0].(string)
amount := args[1].(*big.Int)
msg := &stakingtypes.MsgDelegate{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: validatorAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
}
return msg, msg.ValidateBasic()
}
func NewMsgBeginRedelegate(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgBeginRedelegate, error) {
if len(args) != 3 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 3, len(args))
}
validatorSrcAddress := args[0].(string)
validatorDstAddress := args[1].(string)
amount := args[2].(*big.Int)
msg := &stakingtypes.MsgBeginRedelegate{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorSrcAddress: validatorSrcAddress,
ValidatorDstAddress: validatorDstAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
}
return msg, msg.ValidateBasic()
}
func NewMsgUndelegate(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgUndelegate, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
validatorAddress := args[0].(string)
amount := args[1].(*big.Int)
msg := &stakingtypes.MsgUndelegate{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: validatorAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
}
return msg, msg.ValidateBasic()
}
func NewMsgCancelUnbondingDelegation(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgCancelUnbondingDelegation, error) {
if len(args) != 3 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 3, len(args))
}
validatorAddress := args[0].(string)
amount := args[1].(*big.Int)
creationHeight := args[2].(*big.Int)
msg := &stakingtypes.MsgCancelUnbondingDelegation{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: validatorAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
CreationHeight: creationHeight.Int64(),
}
return msg, msg.ValidateBasic()
}
func NewQueryValidatorsRequest(args []interface{}) (*stakingtypes.QueryValidatorsRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
status := args[0].(string)
pagination := args[1].(PageRequest)
return &stakingtypes.QueryValidatorsRequest{
Status: status,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryValidatorRequest(args []interface{}) (*stakingtypes.QueryValidatorRequest, error) {
if len(args) != 1 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
}
validatorAddress := args[0].(string)
return &stakingtypes.QueryValidatorRequest{
ValidatorAddr: validatorAddress,
}, nil
}
func NewQueryValidatorDelegationsRequest(args []interface{}) (*stakingtypes.QueryValidatorDelegationsRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
validatorAddr := args[0].(string)
pagination := args[1].(PageRequest)
return &stakingtypes.QueryValidatorDelegationsRequest{
ValidatorAddr: validatorAddr,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryValidatorUnbondingDelegationsRequest(args []interface{}) (*stakingtypes.QueryValidatorUnbondingDelegationsRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
validatorAddr := args[0].(string)
pagination := args[1].(PageRequest)
return &stakingtypes.QueryValidatorUnbondingDelegationsRequest{
ValidatorAddr: validatorAddr,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryDelegationRequest(args []interface{}) (*stakingtypes.QueryDelegationRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
delegatorAddr := args[0].(string)
validatorAddr := args[1].(string)
return &stakingtypes.QueryDelegationRequest{
DelegatorAddr: delegatorAddr,
ValidatorAddr: validatorAddr,
}, nil
}
func NewQueryUnbondingDelegationRequest(args []interface{}) (*stakingtypes.QueryUnbondingDelegationRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
delegatorAddr := args[0].(string)
validatorAddr := args[1].(string)
return &stakingtypes.QueryUnbondingDelegationRequest{
DelegatorAddr: delegatorAddr,
ValidatorAddr: validatorAddr,
}, nil
}
func NewQueryDelegatorDelegationsRequest(args []interface{}) (*stakingtypes.QueryDelegatorDelegationsRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
delegatorAddr := args[0].(string)
pagination := args[1].(PageRequest)
return &stakingtypes.QueryDelegatorDelegationsRequest{
DelegatorAddr: delegatorAddr,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryDelegatorUnbondingDelegationsRequest(args []interface{}) (*stakingtypes.QueryDelegatorUnbondingDelegationsRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
delegatorAddr := args[0].(string)
pagination := args[1].(PageRequest)
return &stakingtypes.QueryDelegatorUnbondingDelegationsRequest{
DelegatorAddr: delegatorAddr,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryRedelegationsRequest(args []interface{}) (*stakingtypes.QueryRedelegationsRequest, error) {
if len(args) != 4 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 4, len(args))
}
delegatorAddress := args[0].(string)
validatorSrcAddress := args[1].(string)
validatorDstAddress := args[2].(string)
pagination := args[3].(PageRequest)
return &stakingtypes.QueryRedelegationsRequest{
DelegatorAddr: delegatorAddress,
SrcValidatorAddr: validatorSrcAddress,
DstValidatorAddr: validatorDstAddress,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryDelegatorValidatorsRequest(args []interface{}) (*stakingtypes.QueryDelegatorValidatorsRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
delegatorAddr := args[0].(string)
pagination := args[1].(PageRequest)
return &stakingtypes.QueryDelegatorValidatorsRequest{
DelegatorAddr: delegatorAddr,
Pagination: convertQueryPageRequest(pagination),
}, nil
}
func NewQueryDelegatorValidatorRequest(args []interface{}) (*stakingtypes.QueryDelegatorValidatorRequest, error) {
if len(args) != 2 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
}
delegatorAddr := args[0].(string)
validatorAddr := args[1].(string)
return &stakingtypes.QueryDelegatorValidatorRequest{
DelegatorAddr: delegatorAddr,
ValidatorAddr: validatorAddr,
}, nil
}
func NewQueryPoolRequest(args []interface{}) (*stakingtypes.QueryPoolRequest, error) {
if len(args) != 0 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 0, len(args))
}
return &stakingtypes.QueryPoolRequest{}, nil
}
func NewQueryParamsRequest(args []interface{}) (*stakingtypes.QueryParamsRequest, error) {
if len(args) != 0 {
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 0, len(args))
}
return &stakingtypes.QueryParamsRequest{}, nil
}

Some files were not shown because too many files have changed in this diff.