diff --git a/README.md b/README.md index 48dda12869..f42fbd98b7 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,7 @@ We use the following modules from [Osmosis](https://github.com/osmosis-labs/osmo ``` x/epochs x/mint +x/ratelimit ``` We use the following module (marketed as public infra) from [Quicksilver](https://github.com/ingenuity-build/quicksilver) provided under [this License](https://github.com/ingenuity-build/quicksilver/blob/main/LICENSE): ``` diff --git a/app/app.go b/app/app.go index 8510bfaba4..0569ab3b63 100644 --- a/app/app.go +++ b/app/app.go @@ -83,7 +83,6 @@ import ( ibcclient "github.com/cosmos/ibc-go/v5/modules/core/02-client" ibcclientclient "github.com/cosmos/ibc-go/v5/modules/core/02-client/client" ibcclienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" - ibcporttypes "github.com/cosmos/ibc-go/v5/modules/core/05-port/types" ibchost "github.com/cosmos/ibc-go/v5/modules/core/24-host" ibckeeper "github.com/cosmos/ibc-go/v5/modules/core/keeper" ibctesting "github.com/cosmos/ibc-go/v5/testing" @@ -120,6 +119,10 @@ import ( icacallbacksmodule "github.com/Stride-Labs/stride/v5/x/icacallbacks" icacallbacksmodulekeeper "github.com/Stride-Labs/stride/v5/x/icacallbacks/keeper" icacallbacksmoduletypes "github.com/Stride-Labs/stride/v5/x/icacallbacks/types" + ratelimitmodule "github.com/Stride-Labs/stride/v5/x/ratelimit" + ratelimitclient "github.com/Stride-Labs/stride/v5/x/ratelimit/client" + ratelimitmodulekeeper "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" + ratelimitmoduletypes "github.com/Stride-Labs/stride/v5/x/ratelimit/types" recordsmodule "github.com/Stride-Labs/stride/v5/x/records" recordsmodulekeeper "github.com/Stride-Labs/stride/v5/x/records/keeper" recordsmoduletypes "github.com/Stride-Labs/stride/v5/x/records/types" @@ -155,6 +158,10 @@ func getGovProposalHandlers() []govclient.ProposalHandler { ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler, stakeibcclient.AddValidatorProposalHandler, + ratelimitclient.AddRateLimitProposalHandler, + ratelimitclient.UpdateRateLimitProposalHandler, + ratelimitclient.RemoveRateLimitProposalHandler, + ratelimitclient.ResetRateLimitProposalHandler, // this line is used by starport scaffolding # stargate/app/govProposalHandler ) @@ -193,6 +200,7 @@ var ( interchainquery.AppModuleBasic{}, ica.AppModuleBasic{}, recordsmodule.AppModuleBasic{}, + ratelimitmodule.AppModuleBasic{}, icacallbacksmodule.AppModuleBasic{}, claim.AppModuleBasic{}, // this line is used by starport scaffolding # stargate/app/moduleBasic @@ -283,6 +291,8 @@ type StrideApp struct { RecordsKeeper recordsmodulekeeper.Keeper ScopedIcacallbacksKeeper capabilitykeeper.ScopedKeeper IcacallbacksKeeper icacallbacksmodulekeeper.Keeper + ScopedratelimitKeeper capabilitykeeper.ScopedKeeper + RatelimitKeeper ratelimitmodulekeeper.Keeper ClaimKeeper claimkeeper.Keeper // this line is used by starport scaffolding # stargate/app/keeperDeclaration @@ -324,6 +334,7 @@ func NewStrideApp( interchainquerytypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, recordsmoduletypes.StoreKey, + ratelimitmoduletypes.StoreKey, icacallbacksmoduletypes.StoreKey, claimtypes.StoreKey, // this line is used by starport scaffolding # stargate/app/storeKey @@ -405,11 +416,30 @@ func NewStrideApp( appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper, ) + // Create Ratelimit Keeper + scopedratelimitKeeper := 
app.CapabilityKeeper.ScopeToModule(ratelimitmoduletypes.ModuleName) + app.ScopedratelimitKeeper = scopedratelimitKeeper + app.RatelimitKeeper = *ratelimitmodulekeeper.NewKeeper( + appCodec, + keys[ratelimitmoduletypes.StoreKey], + app.GetSubspace(ratelimitmoduletypes.ModuleName), + app.BankKeeper, + app.IBCKeeper.ChannelKeeper, + // TODO: Implement ICS4Wrapper in Records and pass records keeper here + app.IBCKeeper.ChannelKeeper, // ICS4Wrapper + ) + ratelimitModule := ratelimitmodule.NewAppModule(appCodec, app.RatelimitKeeper) + // Create Transfer Keepers app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, keys[ibctransfertypes.StoreKey], app.GetSubspace(ibctransfertypes.ModuleName), - app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper, - app.AccountKeeper, app.BankKeeper, scopedTransferKeeper, + appCodec, keys[ibctransfertypes.StoreKey], + app.GetSubspace(ibctransfertypes.ModuleName), + app.RatelimitKeeper, // ICS4Wrapper + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.AccountKeeper, + app.BankKeeper, + scopedTransferKeeper, ) transferModule := transfer.NewAppModule(app.TransferKeeper) transferIBCModule := transfer.NewIBCModule(app.TransferKeeper) @@ -508,7 +538,8 @@ func NewStrideApp( AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.DistrKeeper)). AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper)). AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper)). - AddRoute(stakeibcmoduletypes.RouterKey, stakeibcmodule.NewStakeibcProposalHandler(app.StakeibcKeeper)) + AddRoute(stakeibcmoduletypes.RouterKey, stakeibcmodule.NewStakeibcProposalHandler(app.StakeibcKeeper)). + AddRoute(ratelimitmoduletypes.RouterKey, ratelimitmodule.NewRateLimitProposalHandler(app.RatelimitKeeper, app.IBCKeeper.ChannelKeeper)) app.GovKeeper = govkeeper.NewKeeper( appCodec, keys[govtypes.StoreKey], app.GetSubspace(govtypes.ModuleName), app.AccountKeeper, app.BankKeeper, @@ -526,6 +557,7 @@ func NewStrideApp( app.StakeibcKeeper.Hooks(), app.MintKeeper.Hooks(), app.ClaimKeeper.Hooks(), + app.RatelimitKeeper.Hooks(), ), ) epochsModule := epochsmodule.NewAppModule(appCodec, app.EpochsKeeper) @@ -577,9 +609,11 @@ func NewStrideApp( // Stack three contains // - IBC // - records + // - ratelimit // - transfer // - base app var transferStack porttypes.IBCModule = transferIBCModule + transferStack = ratelimitmodule.NewIBCMiddleware(app.RatelimitKeeper, transferStack) transferStack = recordsmodule.NewIBCModule(app.RecordsKeeper, transferStack) // Create static IBC router, add transfer route, then set and seal it @@ -591,7 +625,7 @@ func NewStrideApp( // 4. In stakeibc's OnChanOpenInit, the stakeibc module steals the portId from the icacontroller module // 5. Now in OnChanOpenAck and any other subsequent IBC callback, the message server will identify // the portID owner as stakeibc and route to the same stakeibcStack, this time using the "stakeibc" route instead - ibcRouter := ibcporttypes.NewRouter() + ibcRouter := porttypes.NewRouter() ibcRouter. // ICAHost Stack AddRoute(icahosttypes.SubModuleName, icaHostIBCModule). 
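The app.go wiring above is the core of the change: the transfer keeper now receives `app.RatelimitKeeper` as its ICS4Wrapper, so outgoing transfer packets pass through the ratelimit module before reaching the core channel keeper, and `ratelimitmodule.NewIBCMiddleware` is layered over the transfer module on the receive path (with the records middleware on top). A minimal sketch of that ICS4Wrapper pattern, with hypothetical names and assuming the ibc-go v5 `SendPacket` signature (illustrative only, not the module's actual code):

```go
package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
	ibcexported "github.com/cosmos/ibc-go/v5/modules/core/exported"
)

// wrappedICS4 is whatever sits beneath the middleware (here, the core channel keeper).
type wrappedICS4 interface {
	SendPacket(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI) error
}

// rateLimitedICS4Wrapper sketches how a keeper acting as an ICS4Wrapper can veto sends.
type rateLimitedICS4Wrapper struct {
	ics4       wrappedICS4                                  // next hop: app.IBCKeeper.ChannelKeeper in app.go
	checkQuota func(sdk.Context, ibcexported.PacketI) error // hypothetical quota check against the stored Flow/Quota
}

func (w rateLimitedICS4Wrapper) SendPacket(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI) error {
	// Reject the packet if it would exceed the configured send quota for its denom/channel.
	if err := w.checkQuota(ctx, packet); err != nil {
		return err
	}
	// Otherwise pass it straight through to the underlying channel keeper.
	return w.ics4.SendPacket(ctx, chanCap, packet)
}
```

Per the TODO above, the ratelimit keeper currently wraps the channel keeper directly; once the records module implements ICS4Wrapper, the records keeper would slot in between instead.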
@@ -642,6 +676,7 @@ func NewStrideApp( interchainQueryModule, icaModule, recordsModule, + ratelimitModule, icacallbacksModule, // this line is used by starport scaffolding # stargate/app/appModule ) @@ -675,6 +710,7 @@ func NewStrideApp( epochsmoduletypes.ModuleName, interchainquerytypes.ModuleName, recordsmoduletypes.ModuleName, + ratelimitmoduletypes.ModuleName, icacallbacksmoduletypes.ModuleName, claimtypes.ModuleName, // this line is used by starport scaffolding # stargate/app/beginBlockers @@ -705,6 +741,7 @@ func NewStrideApp( epochsmoduletypes.ModuleName, interchainquerytypes.ModuleName, recordsmoduletypes.ModuleName, + ratelimitmoduletypes.ModuleName, icacallbacksmoduletypes.ModuleName, claimtypes.ModuleName, // this line is used by starport scaffolding # stargate/app/endBlockers @@ -740,6 +777,7 @@ func NewStrideApp( epochsmoduletypes.ModuleName, interchainquerytypes.ModuleName, recordsmoduletypes.ModuleName, + ratelimitmoduletypes.ModuleName, icacallbacksmoduletypes.ModuleName, claimtypes.ModuleName, // this line is used by starport scaffolding # stargate/app/initGenesis @@ -1003,6 +1041,7 @@ func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino paramsKeeper.Subspace(icacontrollertypes.SubModuleName) paramsKeeper.Subspace(icahosttypes.SubModuleName) paramsKeeper.Subspace(recordsmoduletypes.ModuleName) + paramsKeeper.Subspace(ratelimitmoduletypes.ModuleName) paramsKeeper.Subspace(icacallbacksmoduletypes.ModuleName) // this line is used by starport scaffolding # stargate/app/paramSubspace diff --git a/app/apptesting/test_helpers.go b/app/apptesting/test_helpers.go index 867a514440..c5116dbda0 100644 --- a/app/apptesting/test_helpers.go +++ b/app/apptesting/test_helpers.go @@ -38,6 +38,11 @@ var ( })) ) +type SuitelessAppTestHelper struct { + App *app.StrideApp + Ctx sdk.Context +} + type AppTestHelper struct { suite.Suite @@ -69,6 +74,16 @@ func (s *AppTestHelper) Setup() { s.IcaAddresses = make(map[string]string) } +// Instantiates an TestHelper without the test suite +// This is for testing scenarios where we simply need the setup function to run, +// and need access to the TestHelper attributes and keepers (e.g. genesis tests) +func SetupSuitelessTestHelper() SuitelessAppTestHelper { + s := SuitelessAppTestHelper{} + s.App = app.InitStrideTestApp(true) + s.Ctx = s.App.BaseApp.NewContext(false, tmtypes.Header{Height: 1, ChainID: StrideChainID}) + return s +} + // Mints coins directly to a module account func (s *AppTestHelper) FundModuleAccount(moduleName string, amount sdk.Coin) { err := s.App.BankKeeper.MintCoins(s.Ctx, moduleName, sdk.NewCoins(amount)) diff --git a/app/upgrades.go b/app/upgrades.go index 119c64c07a..ba0ffe61ea 100644 --- a/app/upgrades.go +++ b/app/upgrades.go @@ -74,6 +74,14 @@ func (app *StrideApp) setupUpgradeHandlers() { Deleted: []string{authz.ModuleName}, } } + // TODO: RATE LIMIT UPGRADE + // 1. Add ratelimit store key when module is added + // storeUpgrades = &storetypes.StoreUpgrades{ + // Added: []string{ratelimittypes.StoreKey}, + // } + // + // 2. Add hour epoch to store + // 3. 
Add rate limits for existing denoms if storeUpgrades != nil { app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, storeUpgrades)) diff --git a/dockernet/config.sh b/dockernet/config.sh index 24641cd783..7f6e0702a1 100644 --- a/dockernet/config.sh +++ b/dockernet/config.sh @@ -91,6 +91,7 @@ IBC_STARS_DENOM=$IBC_STARS_CHANNEL_3_DENOM # CHAIN PARAMS BLOCK_TIME='1s' +STRIDE_HOUR_EPOCH_DURATION="90s" STRIDE_DAY_EPOCH_DURATION="100s" STRIDE_EPOCH_EPOCH_DURATION="40s" HOST_DAY_EPOCH_DURATION="60s" diff --git a/dockernet/config/relayer_config_juno_osmo.yaml b/dockernet/config/relayer_config_juno_osmo.yaml new file mode 100644 index 0000000000..3c101fc160 --- /dev/null +++ b/dockernet/config/relayer_config_juno_osmo.yaml @@ -0,0 +1,45 @@ +# Sometimes it's beneficial to test a channel that is not connected with Stride +# For this case, we can use juno <> osmo +global: + api-listen-addr: :5183 + timeout: 10s + memo: "" + light-cache-size: 20 +chains: + juno: + type: cosmos + value: + key: juno-osmo-rly1 + chain-id: JUNO + rpc-addr: http://juno1:26657 + account-prefix: juno + keyring-backend: test + gas-adjustment: 1.2 + gas-prices: 0.01ujuno + debug: false + timeout: 20s + output-format: json + sign-mode: direct + osmo: + type: cosmos + value: + key: juno-osmo-rly2 + chain-id: OSMO + rpc-addr: http://osmo1:26657 + account-prefix: osmo + keyring-backend: test + gas-adjustment: 1.2 + gas-prices: 0.01uosmo + debug: false + timeout: 20s + output-format: json + sign-mode: direct +paths: + juno-osmo: + src: + chain-id: JUNO + dst: + chain-id: OSMO + src-channel-filter: + rule: "" + channel-list: [] diff --git a/dockernet/docker-compose.yml b/dockernet/docker-compose.yml index addc6f318d..4f74244708 100644 --- a/dockernet/docker-compose.yml +++ b/dockernet/docker-compose.yml @@ -214,3 +214,10 @@ services: - ./state/relayer-host:/home/relayer/.relayer restart: always command: [ "bash", "start.sh", "stride-host" ] + + relayer-juno-osmo: + image: stridezone:relayer + volumes: + - ./state/relayer-juno-osmo:/home/relayer/.relayer + restart: always + command: [ "bash", "start.sh", "juno-osmo" ] diff --git a/dockernet/scripts/ratelimit/README.md b/dockernet/scripts/ratelimit/README.md new file mode 100644 index 0000000000..e4ae65a6b9 --- /dev/null +++ b/dockernet/scripts/ratelimit/README.md @@ -0,0 +1,13 @@ +## Rate Limit Integration Tests +* **These tests are not intended to be run with normal CI, they were meant as a comprehensive sanity check before deploying the module and are redundant with the unit tests.** +* **WARNING**: `STRIDE_HOUR_EPOCH_DURATION` must be at least '90s' in `config.sh` +* `HOST_CHAINS` should be set to `(GAIA JUNO OSMO)` in `config.sh` +* Start dockernet +``` +make start-docker +``` +* Run the integration tests +``` +bash dockernet/scripts/ratelimit/run_all_tests.sh +``` +* Each test will print a checkmark or X depending on the status - if there are no X's, the tests passed. 
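These tests lean on percentage-based quotas: the rate limits below are added with 10% send/receive quotas against an `INITIAL_CHANNEL_VALUE` of 1000000000, so two 40000000 transfers fit inside one window and a third is rejected, and the bidirectional test only passes if the check is against net flow. A rough sketch of that arithmetic, with field names mirroring the `Quota`/`Flow` protos later in this diff (a hypothetical helper, not the module's code):

```go
package example

import (
	"errors"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

// Quota and Flow mirror the stride.ratelimit proto messages defined below.
type Quota struct {
	MaxPercentSend sdk.Int
	MaxPercentRecv sdk.Int
	DurationHours  uint64
}

type Flow struct {
	Inflow       sdk.Int
	Outflow      sdk.Int
	ChannelValue sdk.Int // denom supply on the channel, snapshotted when the quota window starts
}

// checkSendQuota illustrates the check the tests rely on: with a 10% quota and a
// 1,000,000,000 channel value the threshold is 100,000,000, which is why two 40M
// transfers pass and a third fails until the flow resets.
func checkSendQuota(flow Flow, quota Quota, amount sdk.Int) error {
	threshold := flow.ChannelValue.Mul(quota.MaxPercentSend).Quo(sdk.NewInt(100))
	netOutflow := flow.Outflow.Add(amount).Sub(flow.Inflow) // net flow, per the bidirectional test
	if netOutflow.GT(threshold) {
		return errors.New("transfer would exceed the send quota for this channel")
	}
	return nil
}
```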
diff --git a/dockernet/scripts/ratelimit/common.sh b/dockernet/scripts/ratelimit/common.sh new file mode 100644 index 0000000000..b7020056d6 --- /dev/null +++ b/dockernet/scripts/ratelimit/common.sh @@ -0,0 +1,150 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh + +PURPLE='\033[0;35m' +BOLD="\033[1m" +BLUE='\033[1;34m' +ITALIC="\033[3m" +NC="\033[0m" + +INITIAL_CHANNEL_VALUE=1000000000 +TRAVELER_JUNO=ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C + +checkmark() { + printf "$BLUE\xE2\x9C\x94$NC\n" +} + +xmark() { + printf "$PURPLE\xE2\x9C\x97$NC\n" +} + +print_header() { + header=$1 + printf "\n\n$BLUE[$header]$NC\n" + printf "$BLUE----------------------------------------------------------------------------$NC\n" +} + +get_test_indicator() { + expected=$1 + actual=$2 + if [[ "$expected" == "$actual" ]]; then + echo $(checkmark) + else + echo $(xmark) + fi +} + +print_expectation() { + expected=$1 + actual=$2 + description=$3 + + indicator=$(get_test_indicator $expected $actual) + printf "\n$indicator $indicator $indicator Expected $description: $expected | Actual $description: $actual $indicator $indicator $indicator\n" +} + +wait_until_epoch_end() { + seconds_til_epoch_start=$($STRIDE_MAIN_CMD q epochs seconds-remaining hour) + sleep_time=$((seconds_til_epoch_start+5)) + + echo ">>> Sleeping $sleep_time seconds until start of epoch..." + sleep $sleep_time +} + +get_flow_amount() { + denom=$1 + channel=$2 + flow_type=$3 + $STRIDE_MAIN_CMD q ratelimit rate-limit $channel --denom=$denom | grep $flow_type | awk '{printf $2}' | tr -d '"' +} + +get_channel_value() { + denom=$1 + channel=$2 + $STRIDE_MAIN_CMD q ratelimit rate-limit $channel --denom=$denom | grep "channel_value" | awk '{printf $2}' | tr -d '"' +} + +get_balance() { + chain=$1 + denom=$2 + + cmd=$(GET_VAR_VALUE ${chain}_MAIN_CMD) + address=$(${chain}_ADDRESS) + + $cmd q bank balances $address --denom $denom | grep amount | awk '{printf $2}' | tr -d '"' +} + +get_last_proposal_id() { + $STRIDE_MAIN_CMD q gov proposals | grep " id:" | tail -1 | awk '{printf $2}' | tr -d '"' +} + +check_transfer_status() { + src_chain=$1 + dst_chain=$2 + transfer_channel=$3 + rate_limit_channel=$4 + amount=$5 + transfer_denom=$6 + rate_limit_denom=$7 + success=$8 + + cmd=$(GET_VAR_VALUE ${src_chain}_MAIN_CMD) + val_prefix=$(GET_VAR_VALUE ${src_chain}_VAL_PREFIX) + destination_address=$(${dst_chain}_ADDRESS) + + # Determine packet direction + if [[ "$src_chain" == "STRIDE" ]]; then + transfer_description="from STRIDE to $dst_chain" + flow_type="outflow" + transfer_delay=4 + else + transfer_description="from $src_chain to STRIDE" + flow_type="inflow" + transfer_delay=10 + fi + + # Determine expectation + if [[ "$success" == "true" ]]; then + expected_flow_change=$amount + success_description="SHOULD SUCCEED" + else + expected_flow_change=0 + success_description="SHOULD FAIL" + fi + + printf "\n>>> Transferring ${amount}${transfer_denom} $transfer_description - $success_description\n" + + # Capture the inflow + start_flow=$(get_flow_amount $rate_limit_denom $rate_limit_channel $flow_type) + echo "Initial $flow_type for $transfer_denom: $start_flow" + + # Send the transfer + echo "Transferring..." 
+ $cmd tx ibc-transfer transfer transfer $transfer_channel $destination_address ${amount}${transfer_denom} --from ${val_prefix}1 -y | TRIM_TX + sleep $transfer_delay + + # Capture the outflow + end_flow=$(get_flow_amount $rate_limit_denom $rate_limit_channel $flow_type) + echo "End $flow_type for $transfer_denom: $end_flow" + + # Determine if the flow change was a success + actual_flow_change=$((end_flow-start_flow)) + print_expectation $expected_flow_change $actual_flow_change "Flow Change" +} + +submit_proposal_and_vote() { + proposal_type=$1 + proposal_file=$2 + + echo ">>> Submitting proposal for: $proposal_file" + $STRIDE_MAIN_CMD tx gov submit-legacy-proposal $proposal_type ${CURRENT_DIR}/proposals/${proposal_file} --from ${STRIDE_VAL_PREFIX}1 -y | TRIM_TX + sleep 3 + + proposal_id=$(get_last_proposal_id) + echo ">>> Voting on proposal $proposal_id" + $STRIDE_MAIN_CMD tx gov vote $proposal_id yes --from ${STRIDE_VAL_PREFIX}1 -y | TRIM_TX + $STRIDE_MAIN_CMD tx gov vote $proposal_id yes --from ${STRIDE_VAL_PREFIX}2 -y | TRIM_TX + $STRIDE_MAIN_CMD tx gov vote $proposal_id yes --from ${STRIDE_VAL_PREFIX}3 -y | TRIM_TX + + echo "" +} diff --git a/dockernet/scripts/ratelimit/proposals/add_stujuno.json b/dockernet/scripts/ratelimit/proposals/add_stujuno.json new file mode 100644 index 0000000000..92f5634f6f --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_stujuno.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to stujuno", + "description": "Proposal to enable rate limiting on stujuno for Stride <> Osmo", + "denom": "stujuno", + "channel_id": "channel-2", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/add_traveler_ujuno_on_juno.json b/dockernet/scripts/ratelimit/proposals/add_traveler_ujuno_on_juno.json new file mode 100644 index 0000000000..be8fbfaa29 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_traveler_ujuno_on_juno.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to traveler juno on Stride <> Juno Channel", + "description": "Proposal to enable rate limiting on traveler juno for Stride <> Juno", + "denom": "ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C", + "channel_id": "channel-1", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/add_traveler_ujuno_on_osmo.json b/dockernet/scripts/ratelimit/proposals/add_traveler_ujuno_on_osmo.json new file mode 100644 index 0000000000..5d06c353f4 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_traveler_ujuno_on_osmo.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to traveler juno on Stride <> Osmo Channel", + "description": "Proposal to enable rate limiting on traveler juno for Stride <> Osmo", + "denom": "ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C", + "channel_id": "channel-2", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/add_uatom.json b/dockernet/scripts/ratelimit/proposals/add_uatom.json new file mode 100644 index 0000000000..acb8f710f7 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_uatom.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to uatom", + "description": "Proposal to enable rate limiting on 
uatom for Stride <> Gaia", + "denom": "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + "channel_id": "channel-0", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/add_ujuno.json b/dockernet/scripts/ratelimit/proposals/add_ujuno.json new file mode 100644 index 0000000000..dc2db60124 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_ujuno.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to ujuno", + "description": "Proposal to enable rate limiting on ujuno for Stride <> Juno", + "denom": "ibc/EFF323CC632EC4F747C61BCE238A758EFDB7699C3226565F7C20DA06509D59A5", + "channel_id": "channel-1", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/add_uosmo.json b/dockernet/scripts/ratelimit/proposals/add_uosmo.json new file mode 100644 index 0000000000..b798076583 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_uosmo.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to uosmo", + "description": "Proposal to enable rate limiting on usmo for Stride <> Osmo", + "denom": "ibc/13B2C536BB057AC79D5616B8EA1B9540EC1F2170718CAFF6F0083C966FFFED0B", + "channel_id": "channel-2", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/add_ustrd.json b/dockernet/scripts/ratelimit/proposals/add_ustrd.json new file mode 100644 index 0000000000..3bbe3c485d --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/add_ustrd.json @@ -0,0 +1,10 @@ +{ + "title": "Add Rate Limit to ustrd", + "description": "Proposal to enable rate limiting on ustrd for Stride <> Osmo", + "denom": "ustrd", + "channel_id": "channel-2", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/remove_uatom.json b/dockernet/scripts/ratelimit/proposals/remove_uatom.json new file mode 100644 index 0000000000..aeb30d8d36 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/remove_uatom.json @@ -0,0 +1,7 @@ +{ + "title": "Remove uatom Rate Limit", + "description": "Proposal to disable rate limiting on uatom for Stride <> Gaia", + "denom": "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + "channel_id": "channel-0", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/reset_uatom.json b/dockernet/scripts/ratelimit/proposals/reset_uatom.json new file mode 100644 index 0000000000..e909df7177 --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/reset_uatom.json @@ -0,0 +1,7 @@ +{ + "title": "Reset uatom Rate Limit", + "description": "Proposal to reset rate limit on uatom for Stride <> Gaia", + "denom": "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + "channel_id": "channel-0", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/proposals/update_uosmo.json b/dockernet/scripts/ratelimit/proposals/update_uosmo.json new file mode 100644 index 0000000000..4a875e871b --- /dev/null +++ b/dockernet/scripts/ratelimit/proposals/update_uosmo.json @@ -0,0 +1,10 @@ +{ + "title": "Update uosmo Rate Limit", 
+ "description": "Proposal to increase the receive threshold on the usmo rate limit from Stride <> Osmo", + "denom": "ibc/13B2C536BB057AC79D5616B8EA1B9540EC1F2170718CAFF6F0083C966FFFED0B", + "channel_id": "channel-2", + "max_percent_send": "10", + "max_percent_recv": "11", + "duration_hours": "1", + "deposit": "10000000ustrd" +} \ No newline at end of file diff --git a/dockernet/scripts/ratelimit/run_all_tests.sh b/dockernet/scripts/ratelimit/run_all_tests.sh new file mode 100644 index 0000000000..68f3833252 --- /dev/null +++ b/dockernet/scripts/ratelimit/run_all_tests.sh @@ -0,0 +1,25 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/common.sh +source ${CURRENT_DIR}/setup.sh +source ${CURRENT_DIR}/test_epoch_reset.sh +source ${CURRENT_DIR}/test_tx_reset.sh +source ${CURRENT_DIR}/test_tx_remove.sh +source ${CURRENT_DIR}/test_quota_update.sh +source ${CURRENT_DIR}/test_bidirectional_flow.sh +source ${CURRENT_DIR}/test_denoms.sh + +setup_juno_osmo_channel +setup_channel_value +setup_rate_limits + +test_epoch_reset_atom_from_gaia_to_stride +test_epoch_reset_atom_from_stride_to_gaia +test_tx_reset_atom_from_gaia_to_stride +test_tx_reset_atom_from_stride_to_gaia +test_quota_update +test_bidirectional +test_denom_ustrd +test_denom_ujuno +test_denom_sttoken +test_denom_juno_traveler +test_remove_rate_limit diff --git a/dockernet/scripts/ratelimit/setup.sh b/dockernet/scripts/ratelimit/setup.sh new file mode 100644 index 0000000000..52ec52e8e6 --- /dev/null +++ b/dockernet/scripts/ratelimit/setup.sh @@ -0,0 +1,121 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +setup_juno_osmo_channel() { + print_header "CREATING JUNO <> OSMO CHANNEL" + + relayer_exec="$DOCKER_COMPOSE run --rm relayer-juno-osmo" + path="juno-osmo" + + relayer_logs=$LOGS/relayer-${path}.log + relayer_config=$STATE/relayer-${path}/config + + mkdir -p $relayer_config + cp ${DOCKERNET_HOME}/config/relayer_config_juno_osmo.yaml $relayer_config/config.yaml + + printf "JUNO <> OSMO - Adding relayer keys..." + RELAYER_JUNO_MNEMONIC="awkward remind blanket around naive senior sock pigeon portion umbrella edit scheme middle supreme agent indoor duty sock conduct market ethics exchange phrase mirror" + RELAYER_OSMO_MNEMONIC="solution simple collect warrior neither grain ethics dust guard high base hamster sail science valley organ mistake soon letter garden october morning correct hidden" + JUNO_RELAYER_ADDRESS=$($relayer_exec rly keys restore juno juno-osmo-rly1 "$RELAYER_JUNO_MNEMONIC") + OSMO_RELAYER_ADDRESS=$($relayer_exec rly keys restore osmo juno-osmo-rly2 "$RELAYER_OSMO_MNEMONIC") + echo "Done" + + printf "JUNO <> OSMO - Funding Relayers...\n" + $JUNO_MAIN_CMD tx bank send ${JUNO_VAL_PREFIX}1 $JUNO_RELAYER_ADDRESS 1000000ujuno --from ${JUNO_VAL_PREFIX}1 -y | TRIM_TX + $OSMO_MAIN_CMD tx bank send ${OSMO_VAL_PREFIX}1 $OSMO_RELAYER_ADDRESS 1000000uosmo --from ${OSMO_VAL_PREFIX}1 -y | TRIM_TX + sleep 3 + echo "Done" + + printf "JUNO <> OSMO - Creating client, connection, and transfer channel..." 
| tee -a $relayer_logs + $relayer_exec rly transact link $path >> $relayer_logs 2>&1 + echo "Done" + + $DOCKER_COMPOSE up -d relayer-${path} + $DOCKER_COMPOSE logs -f relayer-${path} | sed -r -u "s/\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mGK]//g" >> $relayer_logs 2>&1 & +} + +setup_channel_value() { + print_header "INITIALIZING CHANNEL VALUE" + + # IBC Transfer + echo "Transfering for channel value..." + echo ">>> uatom" + $GAIA_MAIN_CMD tx ibc-transfer transfer transfer channel-0 $(STRIDE_ADDRESS) ${INITIAL_CHANNEL_VALUE}uatom --from ${GAIA_VAL_PREFIX}1 -y | TRIM_TX + sleep 3 + + echo ">>> ujuno" # second transfer is for stujuno + $JUNO_MAIN_CMD tx ibc-transfer transfer transfer channel-0 $(STRIDE_ADDRESS) ${INITIAL_CHANNEL_VALUE}ujuno --from ${JUNO_VAL_PREFIX}1 -y | TRIM_TX + sleep 3 + $JUNO_MAIN_CMD tx ibc-transfer transfer transfer channel-0 $(STRIDE_ADDRESS) ${INITIAL_CHANNEL_VALUE}ujuno --from ${JUNO_VAL_PREFIX}1 -y | TRIM_TX + sleep 3 + + echo ">>> uosmo" + $OSMO_MAIN_CMD tx ibc-transfer transfer transfer channel-0 $(STRIDE_ADDRESS) ${INITIAL_CHANNEL_VALUE}uosmo --from ${OSMO_VAL_PREFIX}1 -y | TRIM_TX + sleep 10 + + echo ">>> traveler-ujuno" + juno_on_osmo='ibc/448C1061CE97D86CC5E86374CD914870FB8EBA16C58661B5F1D3F46729A2422D' + $JUNO_MAIN_CMD tx ibc-transfer transfer transfer channel-5 $(OSMO_ADDRESS) ${INITIAL_CHANNEL_VALUE}ujuno --from ${JUNO_VAL_PREFIX}1 -y | TRIM_TX + sleep 10 + $OSMO_MAIN_CMD tx ibc-transfer transfer transfer channel-0 $(STRIDE_ADDRESS) ${INITIAL_CHANNEL_VALUE}${juno_on_osmo} --from ${OSMO_VAL_PREFIX}1 -y | TRIM_TX + sleep 3 + + # Liquid Stake + printf "\nLiquid staking juno...\n" + $STRIDE_MAIN_CMD tx stakeibc liquid-stake ${INITIAL_CHANNEL_VALUE} ujuno --from ${STRIDE_VAL_PREFIX}1 -y | TRIM_TX + sleep 5 +} + +setup_rate_limits() { + print_header "ADDING RATE LIMITS" + + # ustrd channel-2 + echo "ustrd on Stride <> Osmo Channel:" + submit_proposal_and_vote add-rate-limit add_ustrd.json + sleep 10 + + # ibc/uatom channel-0 + echo "uatom on Stride <> Gaia Channel:" + submit_proposal_and_vote add-rate-limit add_uatom.json + sleep 10 + + # ibc/ujuno channel-1 + echo "ujuno on Stride <> Juno Channel:" + submit_proposal_and_vote add-rate-limit add_ujuno.json + sleep 10 + + # ibc/uosmo channel-2 + echo "uosmo on Stride <> Osmo Channel:" + submit_proposal_and_vote add-rate-limit add_uosmo.json + sleep 10 + + # stujuno channel-2 + echo "stujuno on Stride <> Osmo Channel:" + submit_proposal_and_vote add-rate-limit add_stujuno.json + sleep 10 + + # traveler juno channel-1 + echo "traveler-ujuno on Stride <> Juno Channel:" + submit_proposal_and_vote add-rate-limit add_traveler_ujuno_on_juno.json + sleep 10 + + echo "traveler-ujuno on Stride <> Osmo Channel:" + # traveler juno channel-2 + submit_proposal_and_vote add-rate-limit add_traveler_ujuno_on_osmo.json + sleep 40 + + # Confirm all rate limits were added + num_rate_limits=$($STRIDE_MAIN_CMD q ratelimit list-rate-limits | grep path | wc -l | xargs) + if [[ "$num_rate_limits" != "7" ]]; then + echo "ERROR: Not all rate limits were added. Exiting." 
+ exit 1 + fi + + # Confirm there are 4 rate limits on osmo (this is to test out the rate-limits-by-chain query) + num_rate_limits=$($STRIDE_MAIN_CMD q ratelimit rate-limits-by-chain OSMO | grep path | wc -l | xargs) + if [[ "$num_rate_limits" != "4" ]]; then + echo "ERROR: OSMO should have 4 rate limits (it had: $num_rate_limits)" + exit 1 + fi +} diff --git a/dockernet/scripts/ratelimit/test_bidirectional_flow.sh b/dockernet/scripts/ratelimit/test_bidirectional_flow.sh new file mode 100644 index 0000000000..50b7ea9a63 --- /dev/null +++ b/dockernet/scripts/ratelimit/test_bidirectional_flow.sh @@ -0,0 +1,44 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +test_bidirectional() { + print_header "TESTING BIDIRECTIONAL FLOW - ATOM" + + wait_until_epoch_end + + start_gaia_balance=$(get_balance GAIA uatom) + start_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + start_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + # Continuously transfer back and forth (the rate limit should never get hit) + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + + wait_until_epoch_end + + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + # Wait for the channel value to reset + wait_until_epoch_end + + # Balances and channel value should be unchanged + end_gaia_balance=$(get_balance GAIA uatom) + end_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + end_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + print_expectation $start_channel_value $end_channel_value "Channel Value" + print_expectation $start_stride_balance $end_stride_balance "Balance on Stride" + print_expectation $start_gaia_balance $end_gaia_balance "Balance on Gaia" +} diff --git a/dockernet/scripts/ratelimit/test_denoms.sh b/dockernet/scripts/ratelimit/test_denoms.sh new file mode 100644 index 0000000000..8e4661eaaa --- /dev/null +++ b/dockernet/scripts/ratelimit/test_denoms.sh @@ -0,0 +1,142 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +# We want to cover the following cases: +# +# 1. Send native +# 2. Send non-native (one hop away) +# 3. Send non-native (two hops away) +# 4. Recieve sink (one hop away) +# 5. Recieve sink (two hops away) +# 6. Receive source native +# 7. 
Recieve source non-native +# +# For each case, we'll simply need to try the transfer and check if the flow updated, +# if the flow didn't update along with expectations, that means either the denom or channel was wrong + +################################################## +# ustrd from Stride to Osmosis then back to Stride +################################################## +__test_denom_send_packet_native_ustrd() { # send native + # ustrd sent from Stride to Osmosis + # Expected Denom: ustrd + # Expected Channel: channel-2 + check_transfer_status STRIDE OSMO channel-2 channel-2 10000000 ustrd ustrd true +} + +__test_denom_receive_packet_native_ustrd() { # receive source native + # ustrd sent from Osmosis to Stride + # Expected Denom: ustrd + # Expected Channel: channel-2 + ustrd_on_osmo='ibc/FF6C2E86490C1C4FBBD24F55032831D2415B9D7882F85C3CC9C2401D79362BEA' + check_transfer_status OSMO STRIDE channel-0 channel-2 10000000 $ustrd_on_osmo ustrd true +} + +test_denom_ustrd() { + print_header "TESTING DENOMS - USTRD" + wait_until_epoch_end + + __test_denom_send_packet_native_ustrd + __test_denom_receive_packet_native_ustrd +} + +############################################## +# ujuno from Juno to Stride, then back to Juno +############################################## +__test_denom_receive_packet_non_native() { # recieve sink (one hop) + # ujuno sent from Juno to Stride + # Expected Denom: ibc/EFF323CC632EC4F747C61BCE238A758EFDB7699C3226565F7C20DA06509D59A5 + # Expected Channel: channel-1 + juno_on_stride='ibc/EFF323CC632EC4F747C61BCE238A758EFDB7699C3226565F7C20DA06509D59A5' + check_transfer_status JUNO STRIDE channel-0 channel-1 10000000 ujuno $juno_on_stride true +} + +__test_denom_send_packet_non_native() { # send non native (one hop) + # ujuno sent from Stride to Juno + # Expected Denom: ibc/EFF323CC632EC4F747C61BCE238A758EFDB7699C3226565F7C20DA06509D59A5 + # Expected Channel: channel-1 + juno_on_stride='ibc/EFF323CC632EC4F747C61BCE238A758EFDB7699C3226565F7C20DA06509D59A5' + check_transfer_status STRIDE JUNO channel-1 channel-1 10000000 $juno_on_stride $juno_on_stride true +} + +test_denom_ujuno() { + print_header "TESTING DENOMS - UJUNO" + wait_until_epoch_end + + __test_denom_receive_packet_non_native + __test_denom_send_packet_non_native +} + +##################################################### +# stujuno from Stride to Osmosis, then back to Stride +##################################################### +__test_denom_send_packet_native_sttoken() { # send native + # stujuno sent from Stride to Osmosis + # Expected Denom: stujuno + # Expected Channel: channel-2 + check_transfer_status STRIDE OSMO channel-2 channel-2 10000000 stujuno stujuno true +} + +__test_denom_recieve_packet_native_sttoken() { # receive source native + # stujuno sent from Osmosis to Stride + # Expected Denom: stujuno + # Expected Channel: channel-2 + stujuno_on_osmo='ibc/C4385BAF25938E02B0EA90D512CE43BFACA892F7FAD81D63CC82BD8EBFA21857' + check_transfer_status OSMO STRIDE channel-0 channel-2 10000000 $stujuno_on_osmo stujuno true +} + +test_denom_sttoken() { + print_header "TESTING DENOMS - STUJUNO" + wait_until_epoch_end + + __test_denom_send_packet_native_sttoken + __test_denom_recieve_packet_native_sttoken +} + +######################################################################## +# ujuno sent to Osmosis then to Stride, then to Juno then back to Stride +######################################################################## +__test_denom_receive_packet_sink_two_hops() { # receive sink two hops + # ujuno sent 
from Juno to Osmosis to Stride + # Expected Denom: ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C + # (transfer/channel-2(juno)/transfer/channel-5(osmo)/ujuno) + # Expected Channel: channel-2 + juno_on_osmo='ibc/448C1061CE97D86CC5E86374CD914870FB8EBA16C58661B5F1D3F46729A2422D' + traveler_juno_on_stride='ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C' + + printf "\n>>> Transferring ujuno from Juno to Osmosis\n" + $JUNO_MAIN_CMD tx ibc-transfer transfer transfer channel-5 $(OSMO_ADDRESS) 10000000ujuno --from ${JUNO_VAL_PREFIX}1 -y | TRIM_TX + sleep 10 + + # Then transfer from osmo to stride + check_transfer_status OSMO STRIDE channel-0 channel-2 10000000 $juno_on_osmo $traveler_juno_on_stride true +} + +__test_denom_send_packet_non_native_two_hops() { # send non-native (two hops) + # ujuno (through Osmosis) sent from Stride to Juno + # Expected Denom: ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C + # (transfer/channel-2(juno)/transfer/channel-5(osmo)/ujuno) + # Expected Channel: channel-1 + traveler_juno_on_stride='ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C' + check_transfer_status STRIDE JUNO channel-1 channel-1 10000000 $traveler_juno_on_stride $traveler_juno_on_stride true +} + +__test_denom_receive_packet_source_non_native() { # recieve source non-native + # ujuno (through Osmosis, then Stride) sent from Juno to Stride + # Expected Denom: ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C + # (transfer/channel-2(juno)/transfer/channel-5(osmo)/ujuno) + # Expected Channel: channel-1 + traveler_juno_on_stride='ibc/CD369927BBCE5198E0DC0D1A341C2F1DE51B1228BFD0633430055A39F58D229C' + traveler_juno_on_juno='ibc/2EB68CA4364B52B62B24AFB26B5B74892F7ABD52899F27E04E31A030DD59B991' + check_transfer_status JUNO STRIDE channel-0 channel-1 10000000 $traveler_juno_on_juno $traveler_juno_on_stride true +} + +test_denom_juno_traveler() { + print_header "TESTING DENOMS - TRAVELER JUNO" + wait_until_epoch_end + + __test_denom_receive_packet_sink_two_hops + __test_denom_send_packet_non_native_two_hops + __test_denom_receive_packet_source_non_native +} diff --git a/dockernet/scripts/ratelimit/test_epoch_reset.sh b/dockernet/scripts/ratelimit/test_epoch_reset.sh new file mode 100644 index 0000000000..a0cc00b415 --- /dev/null +++ b/dockernet/scripts/ratelimit/test_epoch_reset.sh @@ -0,0 +1,80 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +test_epoch_reset_atom_from_gaia_to_stride() { + print_header "TESTING EPOCHLY QUOTA RESET - UNIDIRECTIONAL FLOW - ATOM FROM GAIA -> STRIDE" + + wait_until_epoch_end + + start_gaia_balance=$(get_balance GAIA uatom) + start_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + start_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + # Transfer 2 times successfully + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + + # Attempt to transfer but should fail because it gets rate limited + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM false + + # Wait for rate limit to reset and then transfer successfully again + wait_until_epoch_end + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM 
true + + # Channel value should go up since the ibc denom is minted + expected_channel_value=$((start_channel_value+80000000)) + end_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + print_expectation $expected_channel_value $end_channel_value "Channel Value" + + # Confirm balance was updated appropriately + end_gaia_balance=$(get_balance GAIA uatom) + end_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + + expected_stride_balance=$((start_stride_balance+120000000)) + expected_gaia_balance=$((start_gaia_balance-120000000)) + + print_expectation $expected_stride_balance $end_stride_balance "Balance on Stride" + print_expectation $expected_gaia_balance $end_gaia_balance "Balance on Gaia" +} + +test_epoch_reset_atom_from_stride_to_gaia() { + print_header "TESTING EPOCHLY QUOTA RESET - UNIDIRECTIONAL FLOW - ATOM FROM STRIDE -> GAIA" + + wait_until_epoch_end + + start_gaia_balance=$(get_balance GAIA uatom) + start_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + start_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + # Transfer 2 times successfully + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + # Attempt to transfer but should fail because it gets rate limited + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM false + + # Wait for rate limit to reset and then transfer successfully again + wait_until_epoch_end + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + # Channel value should go down since the ibc denom will be burned + expected_channel_value=$((start_channel_value-80000000)) + end_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + print_expectation $expected_channel_value $end_channel_value "Channel Value" + + # Wait a few seconds for the ack error to refund the failed tokens on gaia + sleep 15 + + # Confirm balance was updated appropriately + end_gaia_balance=$(get_balance GAIA uatom) + end_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + + expected_stride_balance=$((start_stride_balance-120000000)) + expected_gaia_balance=$((start_gaia_balance+120000000)) + + print_expectation $expected_stride_balance $end_stride_balance "Balance on Stride" + print_expectation $expected_gaia_balance $end_gaia_balance "Balance on Gaia" +} diff --git a/dockernet/scripts/ratelimit/test_quota_update.sh b/dockernet/scripts/ratelimit/test_quota_update.sh new file mode 100644 index 0000000000..4b55df0f75 --- /dev/null +++ b/dockernet/scripts/ratelimit/test_quota_update.sh @@ -0,0 +1,40 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +test_quota_update() { + print_header "TESTING QUOTA UPDATE - OSMO FROM OSMOSIS -> STRIDE" + + wait_until_epoch_end + + start_osmo_balance=$(get_balance OSMO uosmo) + start_stride_balance=$(get_balance STRIDE $IBC_OSMO_CHANNEL_2_DENOM) + + # Transfer once successfully (just below the limit) + check_transfer_status OSMO STRIDE channel-0 channel-2 99999999 uosmo $IBC_OSMO_CHANNEL_2_DENOM true + + # Attempt to transfer but should fail because it gets rate limited + check_transfer_status OSMO STRIDE channel-0 channel-2 2 
uosmo $IBC_OSMO_CHANNEL_2_DENOM false + + # Transfer in the other direction so the channel value stays unchanged + check_transfer_status STRIDE OSMO channel-2 channel-2 99999999 $IBC_OSMO_CHANNEL_2_DENOM $IBC_OSMO_CHANNEL_2_DENOM true + + # Relax the send quota threshold (this will reset the flow) + printf "\n>>> Updating rate limit...\n" + submit_proposal_and_vote update-rate-limit update_uosmo.json + sleep 30 + + # Try the two transfers again, this time the second one should succeed + check_transfer_status OSMO STRIDE channel-0 channel-2 99999999 uosmo $IBC_OSMO_CHANNEL_2_DENOM true + check_transfer_status OSMO STRIDE channel-0 channel-2 2 uosmo $IBC_OSMO_CHANNEL_2_DENOM true + + # Confirm balance was updated appropriately + end_osmo_balance=$(get_balance OSMO uosmo) + end_stride_balance=$(get_balance STRIDE $IBC_OSMO_CHANNEL_2_DENOM) + + expected_stride_balance=$((start_stride_balance+100000001)) + expected_osmo_balance=$((start_osmo_balance-100000001)) + + print_expectation $expected_stride_balance $end_stride_balance "Balance on Stride" + print_expectation $expected_osmo_balance $end_osmo_balance "Balance on Osmo" +} diff --git a/dockernet/scripts/ratelimit/test_tx_remove.sh b/dockernet/scripts/ratelimit/test_tx_remove.sh new file mode 100644 index 0000000000..7c6043f39f --- /dev/null +++ b/dockernet/scripts/ratelimit/test_tx_remove.sh @@ -0,0 +1,28 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +test_remove_rate_limit() { + print_header "TESTING TX REMOVE RATE LIMIT - ATOM FROM GAIA -> STRIDE" + + wait_until_epoch_end + + start_gaia_balance=$(get_balance GAIA uatom) + start_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + + # Remove the rate limit + printf "\n>>> Removing rate limit...\n" + submit_proposal_and_vote remove-rate-limit remove_uatom.json + sleep 30 + + # Then successfully transfer a large amount after the removal + printf "\n>>> Transferring $INITIAL_CHANNEL_VALUE uatom...\n" + $GAIA_MAIN_CMD tx ibc-transfer transfer transfer channel-0 $(STRIDE_ADDRESS) ${INITIAL_CHANNEL_VALUE}uatom --from ${GAIA_VAL_PREFIX}1 -y | TRIM_TX + sleep 15 + + # Confirm balance was updated appropriately + end_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + expected_stride_balance=$((start_stride_balance+INITIAL_CHANNEL_VALUE)) + + print_expectation $expected_stride_balance $end_stride_balance "Balance on Stride" +} diff --git a/dockernet/scripts/ratelimit/test_tx_reset.sh b/dockernet/scripts/ratelimit/test_tx_reset.sh new file mode 100644 index 0000000000..4cc7057c91 --- /dev/null +++ b/dockernet/scripts/ratelimit/test_tx_reset.sh @@ -0,0 +1,88 @@ +CURRENT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source ${CURRENT_DIR}/../../config.sh +source ${CURRENT_DIR}/common.sh + +test_tx_reset_atom_from_gaia_to_stride() { + print_header "TESTING TX QUOTA RESET - UNIDIRECTIONAL FLOW - ATOM FROM GAIA -> STRIDE" + + wait_until_epoch_end + + start_gaia_balance=$(get_balance GAIA uatom) + start_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + start_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + # Transfer successfully + check_transfer_status GAIA STRIDE channel-0 channel-0 80000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + + # Attempt to transfer but should fail because it gets rate limited + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM false + + # 
Reset the rate limit manually + printf "\n>>> Resetting rate limit...\n" + submit_proposal_and_vote reset-rate-limit reset_uatom.json + sleep 30 + + # Then successfully transfer after the reset + check_transfer_status GAIA STRIDE channel-0 channel-0 40000000 uatom $IBC_GAIA_CHANNEL_0_DENOM true + + # Channel value should go up since the ibc denom is minted + wait_until_epoch_end + expected_channel_value=$((start_channel_value+120000000)) + end_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + print_expectation $expected_channel_value $end_channel_value "Channel Value" + + # Confirm balance was updated appropriately + end_gaia_balance=$(get_balance GAIA uatom) + end_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + + expected_stride_balance=$((start_stride_balance+120000000)) + expected_gaia_balance=$((start_gaia_balance-120000000)) + + print_expectation $expected_stride_balance $end_stride_balance "Balance on Stride" + print_expectation $expected_gaia_balance $end_gaia_balance "Balance on Gaia" +} + +test_tx_reset_atom_from_stride_to_gaia() { + print_header "TESTING TX QUOTA RESET - UNIDIRECTIONAL FLOW - ATOM FROM STRIDE -> GAIA" + + wait_until_epoch_end + + start_gaia_balance=$(get_balance GAIA uatom) + start_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + start_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + # Transfer successfully + check_transfer_status STRIDE GAIA channel-0 channel-0 80000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + # Attempt to transfer but should fail because it gets rate limited + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM false + + # Reset the rate limit manually + printf "\n>>> Resetting rate limit...\n" + submit_proposal_and_vote reset-rate-limit reset_uatom.json + sleep 30 + + # Then successfully transfer after the reset + check_transfer_status STRIDE GAIA channel-0 channel-0 40000000 $IBC_GAIA_CHANNEL_0_DENOM $IBC_GAIA_CHANNEL_0_DENOM true + + # Channel value should go down since the ibc denom will be burned + wait_until_epoch_end + expected_channel_value=$((start_channel_value-120000000)) + end_channel_value=$(get_channel_value $IBC_GAIA_CHANNEL_0_DENOM channel-0) + + print_expectation $expected_channel_value $end_channel_value "Channel Value" + + # Wait a few seconds for the ack error to refund the failed tokens on gaia + sleep 15 + + # Confirm balance was updated appropriately + end_gaia_balance=$(get_balance GAIA uatom) + end_stride_balance=$(get_balance STRIDE $IBC_GAIA_CHANNEL_0_DENOM) + + expected_stride_balance=$((start_stride_balance-120000000)) + expected_gaia_balance=$((start_gaia_balance+120000000)) + + print_expectation $expected_stride_balance $end_stride_balance "Balance on Stride" + print_expectation $expected_gaia_balance $end_gaia_balance "Balance on Gaia" +} diff --git a/dockernet/src/init_chain.sh b/dockernet/src/init_chain.sh index 9ab9903e82..2a5fb3a313 100644 --- a/dockernet/src/init_chain.sh +++ b/dockernet/src/init_chain.sh @@ -22,6 +22,7 @@ set_stride_genesis() { # update params jq '(.app_state.epochs.epochs[] | select(.identifier=="day") ).duration = $epochLen' --arg epochLen $STRIDE_DAY_EPOCH_DURATION $genesis_config > json.tmp && mv json.tmp $genesis_config + jq '(.app_state.epochs.epochs[] | select(.identifier=="hour") ).duration = $epochLen' --arg epochLen $STRIDE_HOUR_EPOCH_DURATION $genesis_config > json.tmp && mv json.tmp $genesis_config jq 
'(.app_state.epochs.epochs[] | select(.identifier=="stride_epoch") ).duration = $epochLen' --arg epochLen $STRIDE_EPOCH_EPOCH_DURATION $genesis_config > json.tmp && mv json.tmp $genesis_config jq '.app_state.staking.params.unbonding_time = $newVal' --arg newVal "$UNBONDING_TIME" $genesis_config > json.tmp && mv json.tmp $genesis_config jq '.app_state.gov.deposit_params.max_deposit_period = $newVal' --arg newVal "$MAX_DEPOSIT_PERIOD" $genesis_config > json.tmp && mv json.tmp $genesis_config diff --git a/dockernet/upgrades/submit_upgrade.sh b/dockernet/upgrades/submit_upgrade.sh index 047eab869c..0f07e0053f 100644 --- a/dockernet/upgrades/submit_upgrade.sh +++ b/dockernet/upgrades/submit_upgrade.sh @@ -9,7 +9,7 @@ UPGRADE_HEIGHT="${UPGRADE_HEIGHT:-250}" PROPOSAL_ID=1 printf "PROPOSAL\n" -$STRIDE_MAIN_CMD tx gov submit-proposal software-upgrade $UPGRADE_NAME \ +$STRIDE_MAIN_CMD tx gov submit-legacy-proposal software-upgrade $UPGRADE_NAME \ --title $UPGRADE_NAME --description "version 2 description" \ --upgrade-height $UPGRADE_HEIGHT --from val1 -y | TRIM_TX diff --git a/proto/stride/ratelimit/genesis.proto b/proto/stride/ratelimit/genesis.proto new file mode 100644 index 0000000000..4c7011d280 --- /dev/null +++ b/proto/stride/ratelimit/genesis.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package stride.ratelimit; + +import "gogoproto/gogo.proto"; +import "stride/ratelimit/params.proto"; +import "stride/ratelimit/ratelimit.proto"; + +option go_package = "github.com/Stride-Labs/stride/v5/x/ratelimit/types"; + +// GenesisState defines the ratelimit module's genesis state. +message GenesisState { + // params defines all the parameters of the module. + Params params = 1 [ + (gogoproto.moretags) = "yaml:\"params\"", + (gogoproto.nullable) = false + ]; + + // list of rate limits + repeated RateLimit rate_limits = 2 [ + (gogoproto.moretags) = "yaml:\"rate_limits\"", + (gogoproto.nullable) = false + ]; +} diff --git a/proto/stride/ratelimit/gov.proto b/proto/stride/ratelimit/gov.proto new file mode 100644 index 0000000000..f1dabdf38f --- /dev/null +++ b/proto/stride/ratelimit/gov.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; +package stride.ratelimit; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/Stride-Labs/stride/v5/x/ratelimit/types"; + +message AddRateLimitProposal { + option (gogoproto.equal) = true; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + string denom = 3; + string channel_id = 4; + string max_percent_send = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 6 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 7; + string deposit = 8 [ (gogoproto.moretags) = "yaml:\"deposit\"" ]; +} + +message UpdateRateLimitProposal { + option (gogoproto.equal) = true; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + string denom = 3; + string channel_id = 4; + string max_percent_send = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 6 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 7; + string deposit = 8 [ (gogoproto.moretags) = "yaml:\"deposit\"" ]; +} + 
+message RemoveRateLimitProposal { + option (gogoproto.equal) = true; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + string denom = 3; + string channel_id = 4; + string deposit = 5 [ (gogoproto.moretags) = "yaml:\"deposit\"" ]; +} + +message ResetRateLimitProposal { + option (gogoproto.equal) = true; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + string denom = 3; + string channel_id = 4; + string deposit = 5 [ (gogoproto.moretags) = "yaml:\"deposit\"" ]; +} diff --git a/proto/stride/ratelimit/params.proto b/proto/stride/ratelimit/params.proto new file mode 100644 index 0000000000..c4dc82a895 --- /dev/null +++ b/proto/stride/ratelimit/params.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; +package stride.ratelimit; + +option go_package = "github.com/Stride-Labs/stride/v5/x/ratelimit/types"; + +// Params defines the ratelimit module's parameters. +message Params {} diff --git a/proto/stride/ratelimit/query.proto b/proto/stride/ratelimit/query.proto new file mode 100644 index 0000000000..34305aa2a4 --- /dev/null +++ b/proto/stride/ratelimit/query.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package stride.ratelimit; + +import "stride/ratelimit/ratelimit.proto"; +import "google/api/annotations.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "github.com/Stride-Labs/stride/v5/x/ratelimit/types"; + +// Query defines the gRPC querier service. +service Query { + rpc AllRateLimits(QueryAllRateLimitsRequest) + returns (QueryAllRateLimitsResponse) { + option (google.api.http).get = "/Stride-Labs/stride/ratelimit/ratelimits"; + } + rpc RateLimit(QueryRateLimitRequest) returns (QueryRateLimitResponse) { + option (google.api.http).get = + "/Stride-Labs/stride/ratelimit/ratelimit/{channel_id}/by_denom"; + } + rpc RateLimitsByChainId(QueryRateLimitsByChainIdRequest) + returns (QueryRateLimitsByChainIdResponse) { + option (google.api.http).get = + "/Stride-Labs/stride/ratelimit/ratelimits/{chain_id}"; + } + rpc RateLimitsByChannelId(QueryRateLimitsByChannelIdRequest) + returns (QueryRateLimitsByChannelIdResponse) { + option (google.api.http).get = + "/Stride-Labs/stride/ratelimit/ratelimits/{channel_id}"; + } +} + +message QueryAllRateLimitsRequest {} +message QueryAllRateLimitsResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryRateLimitRequest { + string denom = 1; + string channel_id = 2; +} +message QueryRateLimitResponse { RateLimit rate_limit = 1; } + +message QueryRateLimitsByChainIdRequest { string chain_id = 1; } +message QueryRateLimitsByChainIdResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryRateLimitsByChannelIdRequest { string channel_id = 1; } +message QueryRateLimitsByChannelIdResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} diff --git a/proto/stride/ratelimit/ratelimit.proto b/proto/stride/ratelimit/ratelimit.proto new file mode 100644 index 0000000000..5437eb61f1 --- /dev/null +++ b/proto/stride/ratelimit/ratelimit.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package stride.ratelimit; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/Stride-Labs/stride/v5/x/ratelimit/types"; + +enum PacketDirection { + option (gogoproto.goproto_enum_prefix) = false; + + PACKET_SEND = 0; + PACKET_RECV = 1; +} + +message Path { + string denom = 1; + string 
channel_id = 2; +} + +message Quota { + string max_percent_send = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 3; +} + +message Flow { + string inflow = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string outflow = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string channel_value = 3 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; +} + +message RateLimit { + Path path = 1; + Quota quota = 2; + Flow flow = 3; +} diff --git a/testutil/localstride/state-export/scripts/submit_upgrade.sh b/testutil/localstride/state-export/scripts/submit_upgrade.sh index 27bde38b2b..22475d9f8d 100644 --- a/testutil/localstride/state-export/scripts/submit_upgrade.sh +++ b/testutil/localstride/state-export/scripts/submit_upgrade.sh @@ -24,7 +24,7 @@ TRIM_TX() { STRIDE_MAIN_CMD="docker-compose -f ${SCRIPT_DIR}/../docker-compose.yml exec -it stride strided" printf "PROPOSAL\n" -$STRIDE_MAIN_CMD tx gov submit-proposal software-upgrade $upgrade_name \ +$STRIDE_MAIN_CMD tx gov submit-legacy-proposal software-upgrade $upgrade_name \ --title $upgrade_name --description "upgrade" \ --upgrade-height $upgrade_height --from val -y | TRIM_TX diff --git a/testutil/network/network.go b/testutil/network/network.go index cc12946f9e..93e64b5312 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -21,8 +21,9 @@ import ( ) type ( - Network = network.Network - Config = network.Config + Network = network.Network + Config = network.Config + Validator = network.Validator ) // New creates instance with fully configured cosmos network. diff --git a/x/README.md b/x/README.md index 6a4c5257f8..c1cd156b36 100644 --- a/x/README.md +++ b/x/README.md @@ -13,6 +13,7 @@ category: 62c5c5ff03a5bf069004def2 `interchainquery` - Issues queries between IBC chains, verifies state proof and executes callbacks. `epochs` - Makes on-chain timers which other modules can execute code during. `mint` - Controls token supply emissions, and what modules they are directed to. +`ratelimit` - IBC middleware wrapping the transfer module, throttles large IBC transfers.
### Attribution diff --git a/x/epochs/keeper/epoch_test.go b/x/epochs/keeper/epoch_test.go index 8afa58de77..5568c6b1f5 100644 --- a/x/epochs/keeper/epoch_test.go +++ b/x/epochs/keeper/epoch_test.go @@ -25,10 +25,11 @@ func (suite *KeeperTestSuite) TestEpochLifeCycle() { suite.Require().Equal(epochInfo, epochInfoSaved) allEpochs := suite.App.EpochsKeeper.AllEpochInfos(ctx) - suite.Require().Len(allEpochs, 5) + suite.Require().Len(allEpochs, 6) suite.Require().Equal(allEpochs[0].Identifier, "day") // alphabetical order - suite.Require().Equal(allEpochs[1].Identifier, "mint") - suite.Require().Equal(allEpochs[2].Identifier, "monthly") - suite.Require().Equal(allEpochs[3].Identifier, "stride_epoch") - suite.Require().Equal(allEpochs[4].Identifier, "week") + suite.Require().Equal(allEpochs[1].Identifier, "hour") + suite.Require().Equal(allEpochs[2].Identifier, "mint") + suite.Require().Equal(allEpochs[3].Identifier, "monthly") + suite.Require().Equal(allEpochs[4].Identifier, "stride_epoch") + suite.Require().Equal(allEpochs[5].Identifier, "week") } diff --git a/x/epochs/keeper/grpc_query_test.go b/x/epochs/keeper/grpc_query_test.go index e2ae4191ac..30c702971e 100644 --- a/x/epochs/keeper/grpc_query_test.go +++ b/x/epochs/keeper/grpc_query_test.go @@ -18,31 +18,51 @@ func (suite *KeeperTestSuite) TestQueryEpochInfos() { // Invalid param epochInfosResponse, err := queryClient.EpochInfos(gocontext.Background(), &types.QueryEpochsInfoRequest{}) suite.Require().NoError(err) - suite.Require().Len(epochInfosResponse.Epochs, 4) + suite.Require().Len(epochInfosResponse.Epochs, 5) // check if EpochInfos are correct - suite.Require().Equal(epochInfosResponse.Epochs[0].Identifier, "day") - suite.Require().Equal(epochInfosResponse.Epochs[0].StartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[0].Duration, time.Hour*24) - suite.Require().Equal(epochInfosResponse.Epochs[0].CurrentEpoch, int64(0)) - suite.Require().Equal(epochInfosResponse.Epochs[0].CurrentEpochStartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[0].EpochCountingStarted, false) - suite.Require().Equal(epochInfosResponse.Epochs[1].Identifier, "mint") - suite.Require().Equal(epochInfosResponse.Epochs[1].StartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[1].Duration, time.Minute*60) - suite.Require().Equal(epochInfosResponse.Epochs[1].CurrentEpoch, int64(0)) - suite.Require().Equal(epochInfosResponse.Epochs[1].CurrentEpochStartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[1].EpochCountingStarted, false) - suite.Require().Equal(epochInfosResponse.Epochs[2].Identifier, "stride_epoch") - suite.Require().Equal(epochInfosResponse.Epochs[2].StartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[2].Duration, time.Hour*6) - suite.Require().Equal(epochInfosResponse.Epochs[2].CurrentEpoch, int64(0)) - suite.Require().Equal(epochInfosResponse.Epochs[2].CurrentEpochStartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[2].EpochCountingStarted, false) - suite.Require().Equal(epochInfosResponse.Epochs[3].Identifier, "week") - suite.Require().Equal(epochInfosResponse.Epochs[3].StartTime, chainStartTime) - suite.Require().Equal(epochInfosResponse.Epochs[3].Duration, time.Hour*24*7) - suite.Require().Equal(epochInfosResponse.Epochs[3].CurrentEpoch, int64(0)) - suite.Require().Equal(epochInfosResponse.Epochs[3].CurrentEpochStartTime, chainStartTime) - 
suite.Require().Equal(epochInfosResponse.Epochs[3].EpochCountingStarted, false) + suite.Require().Equal(epochInfosResponse.Epochs[0], types.EpochInfo{ + Identifier: "day", + StartTime: chainStartTime, + Duration: time.Hour * 24, + CurrentEpoch: int64(0), + CurrentEpochStartTime: chainStartTime, + EpochCountingStarted: false, + }) + + suite.Require().Equal(epochInfosResponse.Epochs[1], types.EpochInfo{ + Identifier: "hour", + StartTime: chainStartTime, + Duration: time.Hour, + CurrentEpoch: int64(0), + CurrentEpochStartTime: chainStartTime, + EpochCountingStarted: false, + }) + + suite.Require().Equal(epochInfosResponse.Epochs[2], types.EpochInfo{ + Identifier: "mint", + StartTime: chainStartTime, + Duration: time.Minute * 60, + CurrentEpoch: int64(0), + CurrentEpochStartTime: chainStartTime, + EpochCountingStarted: false, + }) + + suite.Require().Equal(epochInfosResponse.Epochs[3], types.EpochInfo{ + Identifier: "stride_epoch", + StartTime: chainStartTime, + Duration: time.Hour * 6, + CurrentEpoch: int64(0), + CurrentEpochStartTime: chainStartTime, + EpochCountingStarted: false, + }) + + suite.Require().Equal(epochInfosResponse.Epochs[4], types.EpochInfo{ + Identifier: "week", + StartTime: chainStartTime, + Duration: time.Hour * 24 * 7, + CurrentEpoch: int64(0), + CurrentEpochStartTime: chainStartTime, + EpochCountingStarted: false, + }) } diff --git a/x/epochs/types/genesis.go b/x/epochs/types/genesis.go index 3708c6ba5e..3f469f2dc2 100644 --- a/x/epochs/types/genesis.go +++ b/x/epochs/types/genesis.go @@ -10,16 +10,20 @@ func NewGenesisState(epochs []EpochInfo) *GenesisState { } var ( - STRIDE_EPOCH = "stride_epoch" + HOUR_EPOCH = "hour" DAY_EPOCH = "day" + WEEK_EPOCH = "week" + STRIDE_EPOCH = "stride_epoch" MINT_EPOCH = "mint" ) // DefaultGenesis returns the default Capability genesis state +// The hour epoch was not included in the mainnet genesis config, +// but has been included here for local testing func DefaultGenesis() *GenesisState { epochs := []EpochInfo{ { - Identifier: "week", + Identifier: WEEK_EPOCH, StartTime: time.Time{}, Duration: time.Hour * 24 * 7, CurrentEpoch: 0, @@ -54,6 +58,15 @@ func DefaultGenesis() *GenesisState { CurrentEpochStartTime: time.Time{}, EpochCountingStarted: false, }, + { + Identifier: HOUR_EPOCH, + StartTime: time.Time{}, + Duration: time.Hour, + CurrentEpoch: 0, + CurrentEpochStartHeight: 0, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + }, } return NewGenesisState(epochs) } diff --git a/x/ratelimit/LICENSE b/x/ratelimit/LICENSE new file mode 100644 index 0000000000..f74f6d35d2 --- /dev/null +++ b/x/ratelimit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright for interchainquery public infrastructure attributed to 2022 Quicksilver + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/x/ratelimit/README.md b/x/ratelimit/README.md new file mode 100644 index 0000000000..a4ee50eb6d --- /dev/null +++ b/x/ratelimit/README.md @@ -0,0 +1,206 @@ +--- +title: "RateLimit" +excerpt: "" +category: 6392913957c533007128548e +--- +# RateLimit Module +## Overview +This `ratelimit` module is a native golang implementation, inspired by Osmosis's CosmWasm [`ibc-rate-limit`](https://github.com/osmosis-labs/osmosis/tree/main/x/ibc-rate-limit) module. The module is meant as a safety control in the event of a bug, attack, or economic failure of an external zone. It prevents massive inflows or outflows of IBC tokens to/from Stride in a short time frame. See [here](https://github.com/osmosis-labs/osmosis/tree/main/x/ibc-rate-limit#motivation) for an excellent summary by the Osmosis team on the motivation for rate limiting. + +Each rate limit is applied at a ChannelID + Denom granularity and is evaluated in evenly spaced fixed windows. For instance, a rate limit might be specified on `uosmo` (denominated as `ibc/D24B4564BCD51D3D02D9987D92571EAC5915676A9BD6D9B0C1D0254CB8A5EA34` on Stride), on the Stride <-> Osmosis transfer channel (`channel-5`), with a 24 hour window. + +Each rate limit will also have a configurable threshold that dictates the max inflow/outflow along the channel. The threshold is represented as a percentage of the total value along the channel. The channel value is calculated by querying the total supply of the denom at the start of the time window, and it remains constant until the window expires. For instance, the rate limit described above might have a threshold of 10% for both inflow and outflow. If the total supply of `ibc/D24B4564BCD51D3D02D9987D92571EAC5915676A9BD6D9B0C1D0254CB8A5EA34` was 100, then any transfer that would cause a net inflow or outflow greater than 10 (i.e. greater than 10% of the channel value) would be rejected. Once the time window expires, the net inflow and outflow are reset to 0 and the channel value is re-calculated. + +The *net* inflow and outflow are used (rather than the total inflow/outflow) to prevent DOS attacks where someone repeatedly sends the same token back and forth across the same channel, causing the rate limit to be reached. + +The module is implemented as IBC Middleware around the transfer module. The `epochs` module is leveraged to determine when each rate limit window has expired (each window is denominated in hours). This means all rate limit windows with the same window duration will start and end at the same time. + +## Implementation +Each rate limit is defined by the following three components: +1. **Path**: Defines the `ChannelId` and `Denom` +2. **Quota**: Defines the rate limit time window (`DurationHours`) and the max threshold for inflows/outflows (`MaxPercentRecv` and `MaxPercentSend` respectively) +3. **Flow**: Stores the current `Inflow`, `Outflow` and `ChannelValue`. Each time a quota expires, the inflow and outflow get reset to 0 and the channel value gets recalculated. Throughout the window, the inflow and outflow each increase monotonically. The net flow is used when determining if a transfer would exceed the quota.
+ * For `Send` packets: + $$\text{Exceeds Quota if:} \left(\frac{\text{Outflow} - \text{Inflow} + \text{Packet Amount}}{\text{ChannelValue}}\right) > \text{MaxPercentSend}$$ + * For `Receive` packets: + $$\text{Exceeds Quota if:} \left(\frac{\text{Inflow} - \text{Outflow} + \text{Packet Amount}}{\text{ChannelValue}}\right) > \text{MaxPercentRecv}$$ + +## Example Walk-Through +Using the example above, let's say we created a 24 hour rate limit on `ibc/D24B4564BCD51D3D02D9987D92571EAC5915676A9BD6D9B0C1D0254CB8A5EA34` ("`ibc/uosmo`"), `channel-5`, with a 10% send and receive threshold. +1. At the start of the window, the supply will be queried to determine the channel value. Let's say the total supply was 100 +2. If someone transferred `8uosmo` from `Osmosis -> Stride`, the `Inflow` would increment by 8 +3. If someone tried to transfer another `8uosmo` from `Osmosis -> Stride`, it would exceed the quota since `(8+8)/100 = 16%` (which is greater than 10%) and thus, the transfer would be rejected. +4. If someone tried to transfer `12ibc/uosmo` from `Stride -> Osmosis`, the `Outflow` would increment by 12. Notice, even though 12 is greater than 10% of the total channel value, the *net* outflow is only `4uosmo` (since it's offset by the `8uosmo` `Inflow`). As a result, this transaction would succeed. +5. Now if the person in (3) attempted to retry their transfer of `8uosmo` from `Osmosis -> Stride`, the `Inflow` would increment by 8 and the transaction would succeed (leaving a net inflow of 4). +6. Finally, at the end of the 24 hours, the `Inflow` and `Outflow` would get reset to 0 and the `ChannelValue` would be re-calculated. In this example, the new channel value would be 104 (since more `uosmo` was sent to Stride, and thus more `ibc/uosmo` was minted) + +| Step | Description | Transfer Status | Inflow | Outflow | Net Inflow | Net Outflow | Channel Value | +|:----:|:--------------------------------:|:---------------:|:------:|:-------:|:----------:|:-----------:|:-------------:| +| 1 | Rate limit created | | 0 | 0 | | | 100 | +| 2 | 8uosmo Osmosis → Stride | Successful | 8 | 0 | 8% | | 100 | +| 3 | 8uosmo Osmosis → Stride | Rejected | 16 | 0 | 16% (>10%) | | 100 | +| 3 | State reverted after rejected Tx | | 8 | 0 | 8% | | 100 | +| 4 | 12ibc/uosmo Stride → Osmosis | Successful | 8 | 12 | | 4% | 100 | +| 5 | 8uosmo Osmosis → Stride | Successful | 16 | 12 | 4% | | 100 | +| 6 | Quota Reset | | 0 | 0 | | | 104 | + +## Denom Blacklist +The module also contains a blacklist to completely halt all IBC transfers for a given denom. There are keeper functions to add or remove denoms from the blacklist; however, these functions are not exposed externally through transactions or governance, and they should only be leveraged internally from the protocol in extreme scenarios. + +## Denoms +We always want to refer to the channel ID and denom as they appear on Stride. For instance, in the example above, we would store the rate limit with denom `ibc/D24B4564BCD51D3D02D9987D92571EAC5915676A9BD6D9B0C1D0254CB8A5EA34` and `channel-5`, instead of `uosmo` and `channel-326` (the ChannelID on Osmosis). + +However, since the ratelimit module acts as middleware to the transfer module, the respective denoms need to be interpreted using the denom trace associated with each packet. There are a few scenarios at play here... + +### Send Packets +The denom that the rate limiter will use for a send packet depends on whether it was a native token (e.g. ustrd, stuatom, etc.) or non-native token (e.g. ibc/...)...
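+For illustration, the send-side denom determination described in the subsections below can be sketched using ibc-go's `transfertypes` helpers (a simplified sketch, not necessarily the exact implementation):
+```go
+package example
+
+import (
+	transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types"
+)
+
+// rateLimitDenomForSend is a hypothetical helper that derives the denom a rate
+// limit would be keyed on for an outgoing packet. ParseDenomTrace yields an
+// empty trace path for native tokens (e.g. "ustrd"), so IBCDenom() returns them
+// unchanged; non-native tokens (e.g. "transfer/channel-X/uosmo") are hashed
+// into their "ibc/..." representation.
+func rateLimitDenomForSend(packetDenom string) string {
+	return transfertypes.ParseDenomTrace(packetDenom).IBCDenom()
+}
+```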
+#### Native vs Non-Native +* We can identify if the token is native or not by parsing the denom trace from the packet + * If the token is **native**, it **will not** have a prefix (e.g. `ustrd`) + * If the token is **non-native**, it **will** have a prefix (e.g. `transfer/channel-X/uosmo`) +#### Determining the denom in the rate limit +* For **native** tokens, return as is (e.g. `ustrd`) +* For **non-native** tokens, take the ibc hash (e.g. hash `transfer/channel-X/uosmo` into `ibc/...`) + +### Receive Packets +The denom that the rate limiter will use for a receive packet depends on whether it was a source or sink. + +#### Source vs Sink +As a token travels across IBC chains, its path is recorded in the denom trace. + +* **Sink**: If the token moves **forward**, to a chain different than its previous hop, the destination chain acts as a **sink zone**, and the new port and channel are **appended** to the denom trace. + * Ex1: `uatom` is sent from Cosmoshub to Stride + * Stride is the first destination for `uatom`, and acts as a sink zone + * The IBC denom becomes the hash of: `/{stride-port}/{stride-channel}/uatom` + * Ex2: `uatom` is sent from Cosmoshub to Osmosis then to Stride + * Here the receiving chain (Stride) is not the same as the previous hop (Cosmoshub), so Stride, once again, is acting as a sink zone + * The IBC denom becomes the hash of: `/{stride-port}/{stride-channel}/{osmosis-port}/{osmosis-channel}/uatom` + +* **Source**: If the token moves **backwards** (i.e. revisits the last chain it was sent from), the destination chain is acting as a **source zone**, and the port and channel are **removed** from the denom trace - undoing the last hop. Should a token reverse its course completely and head back along the same path to its native chain, the denom trace will unwind and reduce back down to the original base denom. + * Ex1: `ustrd` is sent from Stride to Osmosis, and then back to Stride + * Here the trace reduces from `/{osmosis-port}/{osmosis-channel}/ustrd` to simply `ustrd` + * Ex2: `ujuno` is sent to Stride, then to Osmosis, then back to Stride + * Here the trace reduces from `/{osmosis-port}/{osmosis-channel}/{stride-port}/{stride-channel}/ujuno` to just `/{stride-port}/{stride-channel}/ujuno` (the Osmosis hop is removed) + * Stride is the source in the examples above because the token went back and forth from Stride -> Osmosis -> Stride + +For a more detailed explanation, see the [ICS-20 ADR](https://github.com/cosmos/ibc-go/blob/main/docs/architecture/adr-001-coin-source-tracing.md#example) and [spec](https://github.com/cosmos/ibc/tree/main/spec/app/ics-020-fungible-token-transfer). + +#### Determining the denom in the rate limit +* If the chain is acting as a **Sink**: Add on the Stride port and channel and hash it + * Ex1: `uosmo` sent from Osmosis to Stride + * Packet Denom Trace: `uosmo` + * (1) Add Stride Channel as Prefix: `transfer/channel-X/uosmo` + * (2) Hash: `ibc/...` + + * Ex2: `ujuno` sent from Osmosis to Stride + * Packet Denom Trace: `transfer/channel-Y/ujuno` (where channel-Y is the Juno <> Osmosis channel) + * (1) Add Stride Channel as Prefix: `transfer/channel-X/transfer/channel-Y/ujuno` + * (2) Hash: `ibc/...` + +* If the chain is acting as a **Source**: First, remove the prefix.
Then if there is still a trace prefix, hash it + * Ex1: `ustrd` sent back to Stride from Osmosis + * Packet Denom: `transfer/channel-X/ustrd` + * (1) Remove Prefix: `ustrd` + * (2) No trace remaining, leave as is: `ustrd` + * Ex2: juno was sent to Stride, then to Osmosis, then back to Stride + * Packet Denom: `transfer/channel-X/transfer/channel-Z/ujuno` + * (1) Remove Prefix: `transfer/channel-Z/ujuno` + * (2) Hash: `ibc/...` + +## State +```go +RateLimit + Path + Denom string + ChannelId string + Quota + MaxPercentSend sdkmath.Int + MaxPercentRecv sdkmath.Int + DurationHours uint64 + Flow + Inflow sdkmath.Int + Outflow sdkmath.Int + ChannelValue sdkmath.Int +``` + +## Keeper functions +```go +// Stores a RateLimit object in the store +SetRateLimit(rateLimit types.RateLimit) + +// Removes a RateLimit object from the store +RemoveRateLimit(denom string, channelId string) + +// Reads a RateLimit object from the store +GetRateLimit(denom string, channelId string) + +// Gets a list of all RateLimit objects +GetAllRateLimits() + +// Resets the Inflow and Outflow of a RateLimit and re-calculates the ChannelValue +ResetRateLimit(denom string, channelId string) + +// Checks whether a packet will exceed a rate limit quota +// If it does not exceed the quota, it updates the `Inflow` or `Outflow` +// If it exceeds the quota, it returns an error +CheckRateLimitAndUpdateFlow(direction types.PacketDirection, denom string, channelId string, amount sdkmath.Int) +``` + +## Middleware Functions +```go +SendRateLimitedPacket (ICS4Wrapper SendPacket) +ReceiveRateLimitedPacket (IBCModule OnRecvPacket) +``` + +## Transactions (via Governance) +```go +// Adds a new rate limit +// Errors if: +// - `ChannelValue` is 0 (meaning supply of the denom is 0) +// - Rate limit already exists (as identified by the `channel_id` and `denom`) +// - Channel does not exist +AddRateLimit() +{"denom": string, "channel_id": string, "duration_hours": string, "max_percent_send": string, "max_percent_recv": string} + +// Updates a rate limit quota, and resets the rate limit +// Errors if: +// - Rate limit does not exist (as identified by the `channel_id` and `denom`) +UpdateRateLimit() +{"denom": string, "channel_id": string, "duration_hours": string, "max_percent_send": string, "max_percent_recv": string} + +// Resets the `Inflow` and `Outflow` of a rate limit to 0, and re-calculates the `ChannelValue` +// Errors if: +// - Rate limit does not exist (as identified by the `channel_id` and `denom`) +ResetRateLimit() +{"denom": string, "channel_id": string} + +// Removes the rate limit from the store +// Errors if: +// - Rate limit does not exist (as identified by the `channel_id` and `denom`) +RemoveRateLimit() +{"denom": string, "channel_id": string} +``` + +## Queries +```go +// Queries all rate limits +// CLI: +// strided q ratelimit list-rate-limits +// API: +// /Stride-Labs/stride/ratelimit/ratelimits +QueryRateLimits() + +// Queries a specific rate limit given a ChannelID and Denom +// CLI: +// strided q ratelimit rate-limit [denom] [channel-id] +// API: +// /Stride-Labs/stride/ratelimit/ratelimit/{denom}/{channel_id} +QueryRateLimit(denom string, channelId string) + +// Queries all rate limits associated with a given host chain +// CLI: +// strided q ratelimit rate-limits-by-chain [chain-id] +// API: +// /Stride-Labs/stride/ratelimit/ratelimits/{chain_id} +QueryRateLimitsByChainId(chainId string) +``` diff --git a/x/ratelimit/client/cli/query.go b/x/ratelimit/client/cli/query.go new file mode 100644 index 0000000000..478d589d4f 
--- /dev/null +++ b/x/ratelimit/client/cli/query.go @@ -0,0 +1,162 @@ +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +const ( + FlagDenom = "denom" +) + +// GetQueryCmd returns the cli query commands for this module. +func GetQueryCmd() *cobra.Command { + // Group ratelimit queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetCmdQueryRateLimit(), + GetCmdQueryAllRateLimits(), + GetCmdQueryRateLimitsByChainId(), + ) + return cmd +} + +// GetCmdQueryRateLimit implements a command to query rate limits by channel-id and denom +func GetCmdQueryRateLimit() *cobra.Command { + cmd := &cobra.Command{ + Use: "rate-limit [channel-id]", + Short: "Query rate limits by channel-id and denom", + Long: strings.TrimSpace( + fmt.Sprintf(`Query rate limits by channel-id and denom. + +Example: + $ %s query %s rate-limit [channel-id] + $ %s query %s rate-limit [channel-id] --denom=[denom] +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + channelId := args[0] + denom, err := cmd.Flags().GetString(FlagDenom) + if err != nil { + return err + } + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + if denom == "" { + req := &types.QueryRateLimitsByChannelIdRequest{ + ChannelId: channelId, + } + res, err := queryClient.RateLimitsByChannelId(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintObjectLegacy(res.RateLimits) + } + + req := &types.QueryRateLimitRequest{ + Denom: denom, + ChannelId: channelId, + } + res, err := queryClient.RateLimit(context.Background(), req) + + if err != nil { + return err + } + + return clientCtx.PrintProto(res.RateLimit) + }, + } + + cmd.Flags().String(FlagDenom, "", "The denom identifying a specific rate limit") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryAllRateLimits return all available rate limits. 
+func GetCmdQueryAllRateLimits() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-rate-limits", + Short: "Query all rate limits", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllRateLimitsRequest{} + res, err := queryClient.AllRateLimits(context.Background(), req) + + if err != nil { + return err + } + + return clientCtx.PrintObjectLegacy(res.RateLimits) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryRateLimitsByChainId return all rate limits that exist between Stride +// and the specified ChainId +func GetCmdQueryRateLimitsByChainId() *cobra.Command { + cmd := &cobra.Command{ + Use: "rate-limits-by-chain [chain-id]", + Short: "Query all rate limits with the given ChainID", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + chainId := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryRateLimitsByChainIdRequest{ + ChainId: chainId, + } + res, err := queryClient.RateLimitsByChainId(context.Background(), req) + + if err != nil { + return err + } + + return clientCtx.PrintObjectLegacy(res.RateLimits) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/ratelimit/client/cli/tx.go b/x/ratelimit/client/cli/tx.go new file mode 100644 index 0000000000..e7ed36b6d2 --- /dev/null +++ b/x/ratelimit/client/cli/tx.go @@ -0,0 +1,300 @@ +package cli + +import ( + "fmt" + "os" + "strings" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/gogoproto/proto" + "github.com/spf13/cobra" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// Parse the gov proposal file into a proto message +func parseProposalFile(cdc codec.JSONCodec, proposalFile string, proposal proto.Message) error { + contents, err := os.ReadFile(proposalFile) + if err != nil { + return err + } + + if err = cdc.UnmarshalJSON(contents, proposal); err != nil { + return err + } + + return nil +} + +// Submits the governance proposal +func submitProposal(clientCtx client.Context, cmd *cobra.Command, proposal govtypes.Content, deposit sdk.Coins) error { + // Confirm a valid deposit was submitted + strideDenom, err := sdk.GetBaseDenom() + if err != nil { + return err + } + if len(deposit) != 1 || deposit.GetDenomByIndex(0) != strideDenom { + return errorsmod.Wrapf(sdkerrors.ErrInvalidCoins, "Deposit token denom must be %s", strideDenom) + } + + // Build and validate the proposal + from := clientCtx.GetFromAddress() + msg, err := govtypes.NewMsgSubmitProposal(proposal, deposit, from) + if err != nil { + return err + } + if err := msg.ValidateBasic(); err != nil { + return err + } + + // Finally, broadcast the proposal tx + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) +} + +// Adds a new rate limit proposal +func CmdAddRateLimitProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "add-rate-limit [proposal-file]", + Short: "Submit 
a add-rate-limit proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit an add-rate-limit proposal along with an initial deposit. +The proposal details must be supplied via a JSON file. + +Example: +$ %s tx gov submit-legacy-proposal add-rate-limit --from= + +Where proposal.json contains: +{ + "title": "Add Rate Limit to ...", + "description": "Proposal to enable rate limiting on...", + "denom": "ustrd", + "channel_id": "channel-0", + "max_percent_send": "10", + "max_percent_recv": "10", + "duration_hours": "24", + "deposit": "10000000ustrd" +} +`, version.AppName)), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + proposalFile := args[0] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + var proposal types.AddRateLimitProposal + if err := parseProposalFile(clientCtx.Codec, proposalFile, &proposal); err != nil { + return err + } + + depositFromFlags, err := cmd.Flags().GetString(govcli.FlagDeposit) + if err != nil { + return err + } + + // if deposit from flags is not empty, it overrides the deposit from proposal + if depositFromFlags != "" { + proposal.Deposit = depositFromFlags + } + deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return err + } + + return submitProposal(clientCtx, cmd, &proposal, deposit) + }, + } + + cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal") + + return cmd +} + +// Update a rate limit +func CmdUpdateRateLimitProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "update-rate-limit [proposal-file]", + Short: "Submit a update-rate-limit proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit an update-rate-limit proposal along with an initial deposit. +The proposal details must be supplied via a JSON file. + +Example: +$ %s tx gov submit-legacy-proposal update-rate-limit --from= + +Where proposal.json contains: +{ + "title": "Update Rate Limit ...", + "description": "Proposal to update rate limit...", + "denom": "ustrd", + "channel_id": "channel-0", + "max_percent_send": "10", + "max_percent_recv": "20", + "duration_hours": "24", + "deposit": "10000000ustrd" +} +`, version.AppName)), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + proposalFile := args[0] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + var proposal types.UpdateRateLimitProposal + if err := parseProposalFile(clientCtx.Codec, proposalFile, &proposal); err != nil { + return err + } + + depositFromFlags, err := cmd.Flags().GetString(govcli.FlagDeposit) + if err != nil { + return err + } + + // if deposit from flags is not empty, it overrides the deposit from proposal + if depositFromFlags != "" { + proposal.Deposit = depositFromFlags + } + deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return err + } + + return submitProposal(clientCtx, cmd, &proposal, deposit) + }, + } + + cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal") + + return cmd +} + +// Remove a rate limit +func CmdRemoveRateLimitProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "remove-rate-limit [proposal-file]", + Short: "Submit a remove-rate-limit proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit an remove-rate-limit proposal along with an initial deposit. +The proposal details must be supplied via a JSON file. 
+ +Example: +$ %s tx gov submit-legacy-proposal remove-rate-limit --from= + +Where proposal.json contains: +{ + "title": "Remove Rate Limit ...", + "description": "Proposal to remove rate limiting on...", + "denom": "ustrd", + "channel_id": "channel-0", + "deposit": "10000000ustrd" +} +`, version.AppName)), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + proposalFile := args[0] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + var proposal types.RemoveRateLimitProposal + if err := parseProposalFile(clientCtx.Codec, proposalFile, &proposal); err != nil { + return err + } + + depositFromFlags, err := cmd.Flags().GetString(govcli.FlagDeposit) + if err != nil { + return err + } + + // if deposit from flags is not empty, it overrides the deposit from proposal + if depositFromFlags != "" { + proposal.Deposit = depositFromFlags + } + deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return err + } + + return submitProposal(clientCtx, cmd, &proposal, deposit) + }, + } + + cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal") + + return cmd +} + +// Reset a rate limit +func CmdResetRateLimitProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "reset-rate-limit [proposal-file]", + Short: "Submit a reset-rate-limit proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit an reset-rate-limit proposal along with an initial deposit. +The proposal details must be supplied via a JSON file. + +Example: +$ %s tx gov submit-legacy-proposal reset-rate-limit --from= + +Where proposal.json contains: +{ + "title": "Reset Rate Limit ...", + "description": "Proposal to reset the rate limit...", + "denom": "ustrd", + "channel_id": "channel-0", + "deposit": "10000000ustrd" +} +`, version.AppName)), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + proposalFile := args[0] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + var proposal types.ResetRateLimitProposal + if err := parseProposalFile(clientCtx.Codec, proposalFile, &proposal); err != nil { + return err + } + + depositFromFlags, err := cmd.Flags().GetString(govcli.FlagDeposit) + if err != nil { + return err + } + + // if deposit from flags is not empty, it overrides the deposit from proposal + if depositFromFlags != "" { + proposal.Deposit = depositFromFlags + } + deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return err + } + + return submitProposal(clientCtx, cmd, &proposal, deposit) + }, + } + + cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal") + + return cmd +} diff --git a/x/ratelimit/client/proposal_handler.go b/x/ratelimit/client/proposal_handler.go new file mode 100644 index 0000000000..ec6e793191 --- /dev/null +++ b/x/ratelimit/client/proposal_handler.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/Stride-Labs/stride/v5/x/ratelimit/client/cli" + + govclient "github.com/cosmos/cosmos-sdk/x/gov/client" +) + +var ( + AddRateLimitProposalHandler = govclient.NewProposalHandler(cli.CmdAddRateLimitProposal) + UpdateRateLimitProposalHandler = govclient.NewProposalHandler(cli.CmdUpdateRateLimitProposal) + RemoveRateLimitProposalHandler = govclient.NewProposalHandler(cli.CmdRemoveRateLimitProposal) + ResetRateLimitProposalHandler = govclient.NewProposalHandler(cli.CmdResetRateLimitProposal) +) diff --git a/x/ratelimit/genesis.go b/x/ratelimit/genesis.go new file mode 100644 index 
0000000000..32fa1e04c3 --- /dev/null +++ b/x/ratelimit/genesis.go @@ -0,0 +1,28 @@ +package ratelimit + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// InitGenesis initializes the capability module's state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) + for _, rateLimit := range genState.RateLimits { + k.SetRateLimit(ctx, rateLimit) + } +} + +// ExportGenesis returns the capability module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + rateLimits := k.GetAllRateLimits(ctx) + + genesis.Params = k.GetParams(ctx) + genesis.RateLimits = rateLimits + + return genesis +} diff --git a/x/ratelimit/genesis_test.go b/x/ratelimit/genesis_test.go new file mode 100644 index 0000000000..f77cd64cbf --- /dev/null +++ b/x/ratelimit/genesis_test.go @@ -0,0 +1,47 @@ +package ratelimit_test + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + sdkmath "cosmossdk.io/math" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + "github.com/Stride-Labs/stride/v5/testutil/nullify" + "github.com/Stride-Labs/stride/v5/x/ratelimit" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func createRateLimits() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := int64(1); i <= 3; i++ { + suffix := strconv.Itoa(int(i)) + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom-" + suffix, ChannelId: "channel-" + suffix}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(i), MaxPercentRecv: sdkmath.NewInt(i), DurationHours: uint64(i)}, + Flow: &types.Flow{Inflow: sdkmath.NewInt(i), Outflow: sdkmath.NewInt(i), ChannelValue: sdkmath.NewInt(i)}, + } + + rateLimits = append(rateLimits, rateLimit) + } + return rateLimits +} + +func TestGenesis(t *testing.T) { + genesisState := types.GenesisState{ + Params: types.Params{}, + RateLimits: createRateLimits(), + } + + s := apptesting.SetupSuitelessTestHelper() + ratelimit.InitGenesis(s.Ctx, s.App.RatelimitKeeper, genesisState) + got := ratelimit.ExportGenesis(s.Ctx, s.App.RatelimitKeeper) + require.NotNil(t, got) + + nullify.Fill(&genesisState) + nullify.Fill(got) + + require.Equal(t, genesisState.RateLimits, got.RateLimits) +} diff --git a/x/ratelimit/handler.go b/x/ratelimit/handler.go new file mode 100644 index 0000000000..e1e547d576 --- /dev/null +++ b/x/ratelimit/handler.go @@ -0,0 +1,67 @@ +package ratelimit + +import ( + "fmt" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + channelkeeper "github.com/cosmos/ibc-go/v5/modules/core/04-channel/keeper" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper/gov" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// NewMessageHandler returns ratelimit module messages +func NewMessageHandler(k keeper.Keeper) sdk.Handler { + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + _ = ctx.WithEventManager(sdk.NewEventManager()) + + switch msg := msg.(type) { + default: + errMsg := fmt.Sprintf("unrecognized %s message type: %T", types.ModuleName, msg) + return nil, errorsmod.Wrap(sdkerrors.ErrUnknownRequest, errMsg) + } + } +} + +// 
NewRateLimitProposalHandler returns ratelimit module's proposals +func NewRateLimitProposalHandler(k keeper.Keeper, channelKeeper channelkeeper.Keeper) govtypes.Handler { + return func(ctx sdk.Context, content govtypes.Content) error { + switch c := content.(type) { + case *types.AddRateLimitProposal: + return handleAddRateLimitProposal(ctx, k, channelKeeper, c) + case *types.UpdateRateLimitProposal: + return handleUpdateRateLimitProposal(ctx, k, c) + case *types.RemoveRateLimitProposal: + return handleRemoveRateLimitProposal(ctx, k, c) + case *types.ResetRateLimitProposal: + return handleResetRateLimitProposal(ctx, k, c) + default: + return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ratelimit proposal content type: %T", c) + } + } +} + +// Handler for adding a rate limit through governance +func handleAddRateLimitProposal(ctx sdk.Context, k keeper.Keeper, channelKeeper channelkeeper.Keeper, proposal *types.AddRateLimitProposal) error { + return gov.AddRateLimit(ctx, k, channelKeeper, proposal) +} + +// Handler for updating a rate limit through governance +func handleUpdateRateLimitProposal(ctx sdk.Context, k keeper.Keeper, proposal *types.UpdateRateLimitProposal) error { + return gov.UpdateRateLimit(ctx, k, proposal) +} + +// Handler for removing a rate limit through governance +func handleRemoveRateLimitProposal(ctx sdk.Context, k keeper.Keeper, proposal *types.RemoveRateLimitProposal) error { + return gov.RemoveRateLimit(ctx, k, proposal) +} + +// Handler for resetting a rate limit through governance +func handleResetRateLimitProposal(ctx sdk.Context, k keeper.Keeper, proposal *types.ResetRateLimitProposal) error { + return gov.ResetRateLimit(ctx, k, proposal) +} diff --git a/x/ratelimit/ibc_middleware.go b/x/ratelimit/ibc_middleware.go new file mode 100644 index 0000000000..a28db88c62 --- /dev/null +++ b/x/ratelimit/ibc_middleware.go @@ -0,0 +1,163 @@ +package ratelimit + +import ( + "fmt" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" + + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v5/modules/core/05-port/types" + + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +var _ porttypes.Middleware = &IBCMiddleware{} + +type IBCMiddleware struct { + app porttypes.IBCModule + keeper keeper.Keeper +} + +func NewIBCMiddleware(k keeper.Keeper, app porttypes.IBCModule) IBCMiddleware { + return IBCMiddleware{ + app: app, + keeper: k, + } +} + +// OnChanOpenInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenInit(ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) (string, error) { + return im.app.OnChanOpenInit( + ctx, + order, + connectionHops, + portID, + channelID, + channelCap, + counterparty, + version, + ) +} + +// OnChanOpenTry implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + counterpartyVersion string, +) (string, error) { + return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion) +} + +// OnChanOpenAck implements 
the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + counterpartyChannelID string, + counterpartyVersion string, +) error { + return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) +} + +// OnChanOpenConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanOpenConfirm(ctx, portID, channelID) +} + +// OnChanCloseInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanCloseInit(ctx, portID, channelID) +} + +// OnChanCloseConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanCloseConfirm(ctx, portID, channelID) +} + +// OnRecvPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) exported.Acknowledgement { + // Check if the packet would cause the rate limit to be exceeded, + // and if so, return an ack error + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 packet receive was denied: %s", err.Error())) + return channeltypes.NewErrorAcknowledgement(err) + } + + // If the packet was not rate-limited, pass it down to the Transfer OnRecvPacket callback + return im.app.OnRecvPacket(ctx, packet, relayer) +} + +// OnAcknowledgementPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + relayer sdk.AccAddress, +) error { + return im.app.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer) +} + +// OnTimeoutPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) error { + return im.app.OnTimeoutPacket(ctx, packet, relayer) +} + +// SendPacket implements the ICS4 Wrapper interface +// Rate-limited SendPacket found in RateLimit Keeper +func (im IBCMiddleware) SendPacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, +) error { + return im.keeper.SendPacket(ctx, chanCap, packet) +} + +// WriteAcknowledgement implements the ICS4 Wrapper interface +func (im IBCMiddleware) WriteAcknowledgement( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + ack exported.Acknowledgement, +) error { + return im.keeper.WriteAcknowledgement(ctx, chanCap, packet, ack) +} + +// GetAppVersion returns the application version of the underlying application +func (i IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return i.keeper.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ratelimit/keeper/gov/gov.go b/x/ratelimit/keeper/gov/gov.go new file mode 100644 index 0000000000..9445454e39 --- /dev/null +++ b/x/ratelimit/keeper/gov/gov.go @@ -0,0 +1,106 @@ +package gov + +import ( + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + channelkeeper "github.com/cosmos/ibc-go/v5/modules/core/04-channel/keeper" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" 
+ "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// Adds a new rate limit. Fails if the rate limit already exists or the channel value is 0 +func AddRateLimit(ctx sdk.Context, k keeper.Keeper, channelKeeper channelkeeper.Keeper, p *types.AddRateLimitProposal) error { + // Confirm the channel value is not zero + channelValue := k.GetChannelValue(ctx, p.Denom) + if channelValue.IsZero() { + return types.ErrZeroChannelValue + } + + // Confirm the rate limit does not already exist + _, found := k.GetRateLimit(ctx, p.Denom, p.ChannelId) + if found { + return types.ErrRateLimitAlreadyExists + } + + // Confirm the channel exists + _, found = channelKeeper.GetChannel(ctx, transfertypes.PortID, p.ChannelId) + if !found { + return types.ErrChannelNotFound + } + + // Create and store the rate limit object + path := types.Path{ + Denom: p.Denom, + ChannelId: p.ChannelId, + } + quota := types.Quota{ + MaxPercentSend: p.MaxPercentSend, + MaxPercentRecv: p.MaxPercentRecv, + DurationHours: p.DurationHours, + } + flow := types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + } + + k.SetRateLimit(ctx, types.RateLimit{ + Path: &path, + Quota: "a, + Flow: &flow, + }) + + return nil +} + +// Updates an existing rate limit. Fails if the rate limit doesn't exist +func UpdateRateLimit(ctx sdk.Context, k keeper.Keeper, p *types.UpdateRateLimitProposal) error { + // Confirm the rate limit exists + _, found := k.GetRateLimit(ctx, p.Denom, p.ChannelId) + if !found { + return types.ErrRateLimitNotFound + } + + // Update the rate limit object with the new quota information + // The flow should also get reset to 0 + path := types.Path{ + Denom: p.Denom, + ChannelId: p.ChannelId, + } + quota := types.Quota{ + MaxPercentSend: p.MaxPercentSend, + MaxPercentRecv: p.MaxPercentRecv, + DurationHours: p.DurationHours, + } + flow := types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: k.GetChannelValue(ctx, p.Denom), + } + + k.SetRateLimit(ctx, types.RateLimit{ + Path: &path, + Quota: "a, + Flow: &flow, + }) + + return nil +} + +// Removes a rate limit. Fails if the rate limit doesn't exist +func RemoveRateLimit(ctx sdk.Context, k keeper.Keeper, msg *types.RemoveRateLimitProposal) error { + _, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelId) + if !found { + return types.ErrRateLimitNotFound + } + + k.RemoveRateLimit(ctx, msg.Denom, msg.ChannelId) + return nil +} + +// Resets the flow on a rate limit. 
Fails if the rate limit doesn't exist +func ResetRateLimit(ctx sdk.Context, k keeper.Keeper, msg *types.ResetRateLimitProposal) error { + return k.ResetRateLimit(ctx, msg.Denom, msg.ChannelId) +} diff --git a/x/ratelimit/keeper/gov/gov_test.go b/x/ratelimit/keeper/gov/gov_test.go new file mode 100644 index 0000000000..1ec1d71234 --- /dev/null +++ b/x/ratelimit/keeper/gov/gov_test.go @@ -0,0 +1,208 @@ +package gov_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + + transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + minttypes "github.com/Stride-Labs/stride/v5/x/mint/types" + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper/gov" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +type KeeperTestSuite struct { + apptesting.AppTestHelper +} + +func (s *KeeperTestSuite) SetupTest() { + s.Setup() +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +var ( + addRateLimitMsg = types.AddRateLimitProposal{ + Title: "AddRateLimit", + Denom: "denom", + ChannelId: "channel-0", + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(20), + DurationHours: 30, + } + + updateRateLimitMsg = types.UpdateRateLimitProposal{ + Title: "UpdateRateLimit", + Denom: "denom", + ChannelId: "channel-0", + MaxPercentRecv: sdkmath.NewInt(20), + MaxPercentSend: sdkmath.NewInt(30), + DurationHours: 40, + } + + removeRateLimitMsg = types.RemoveRateLimitProposal{ + Title: "RemoveRateLimit", + Denom: "denom", + ChannelId: "channel-0", + } + + resetRateLimitMsg = types.ResetRateLimitProposal{ + Title: "ResetRateLimit", + Denom: "denom", + ChannelId: "channel-0", + } +) + +// Helper function to create a channel and prevent a channel not exists error +func (s *KeeperTestSuite) createChannel(channelId string) { + s.App.IBCKeeper.ChannelKeeper.SetChannel(s.Ctx, transfertypes.PortID, channelId, channeltypes.Channel{}) +} + +// Helper function to mint tokens and create channel value to prevent a zero channel value error +func (s *KeeperTestSuite) createChannelValue(denom string, channelValue sdkmath.Int) { + err := s.App.BankKeeper.MintCoins(s.Ctx, minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(addRateLimitMsg.Denom, channelValue))) + s.Require().NoError(err) +} + +// Helper function to add a rate limit with an optional error expectation +func (s *KeeperTestSuite) addRateLimit(expectedErr *errorsmod.Error) { + actualErr := gov.AddRateLimit(s.Ctx, s.App.RatelimitKeeper, s.App.IBCKeeper.ChannelKeeper, &addRateLimitMsg) + + // If it should have been added successfully, confirm no error + // and confirm the rate limit was created + if expectedErr == nil { + s.Require().NoError(actualErr) + + _, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, addRateLimitMsg.Denom, addRateLimitMsg.ChannelId) + s.Require().True(found) + } else { + // If it should have failed, check the error + s.Require().Equal(actualErr, expectedErr) + } +} + +// Helper function to add a rate limit successfully +func (s *KeeperTestSuite) addRateLimitSuccessful() { + s.addRateLimit(nil) +} + +// Helper function to add a rate limit with an expected error +func (s *KeeperTestSuite) addRateLimitWithError(expectedErr *errorsmod.Error) { + s.addRateLimit(expectedErr) +} + +func (s *KeeperTestSuite) TestMsgServer_AddRateLimit() { + 
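+	// Walk through each failure case before the happy path: the proposal is rejected
+	// while the channel value is zero, then while the channel is missing, then accepted,
+	// and finally rejected again as a duplicate.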
denom := addRateLimitMsg.Denom + channelId := addRateLimitMsg.ChannelId + channelValue := sdkmath.NewInt(100) + + // First try to add a rate limit when there's no channel value, it will fail + s.addRateLimitWithError(types.ErrZeroChannelValue) + + // Create channel value + s.createChannelValue(denom, channelValue) + + // Then try to add a rate limit before the channel has been created, it will also fail + s.addRateLimitWithError(types.ErrChannelNotFound) + + // Create the channel + s.createChannel(channelId) + + // Now add a rate limit successfully + s.addRateLimitSuccessful() + + // Finally, try to add the same rate limit again - it should fail + s.addRateLimitWithError(types.ErrRateLimitAlreadyExists) +} + +func (s *KeeperTestSuite) TestMsgServer_UpdateRateLimit() { + denom := updateRateLimitMsg.Denom + channelId := updateRateLimitMsg.ChannelId + channelValue := sdkmath.NewInt(100) + + // Create channel and channel value + s.createChannel(channelId) + s.createChannelValue(denom, channelValue) + + // Attempt to update a rate limit that does not exist + err := gov.UpdateRateLimit(s.Ctx, s.App.RatelimitKeeper, &updateRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful() + + // Update the rate limit successfully + err = gov.UpdateRateLimit(s.Ctx, s.App.RatelimitKeeper, &updateRateLimitMsg) + s.Require().NoError(err) + + // Check ratelimit quota is updated correctly + updatedRateLimit, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denom, channelId) + s.Require().True(found) + s.Require().Equal(updatedRateLimit.Quota, &types.Quota{ + MaxPercentSend: updateRateLimitMsg.MaxPercentSend, + MaxPercentRecv: updateRateLimitMsg.MaxPercentRecv, + DurationHours: updateRateLimitMsg.DurationHours, + }) +} + +func (s *KeeperTestSuite) TestMsgServer_RemoveRateLimit() { + denom := removeRateLimitMsg.Denom + channelId := removeRateLimitMsg.ChannelId + channelValue := sdkmath.NewInt(100) + + s.createChannel(channelId) + s.createChannelValue(denom, channelValue) + + // Attempt to remove a rate limit that does not exist + err := gov.RemoveRateLimit(s.Ctx, s.App.RatelimitKeeper, &removeRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful() + + // Remove the rate limit successfully + err = gov.RemoveRateLimit(s.Ctx, s.App.RatelimitKeeper, &removeRateLimitMsg) + s.Require().NoError(err) + + // Confirm it was removed + _, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denom, channelId) + s.Require().False(found) +} + +func (s *KeeperTestSuite) TestMsgServer_ResetRateLimit() { + denom := resetRateLimitMsg.Denom + channelId := resetRateLimitMsg.ChannelId + channelValue := sdkmath.NewInt(100) + + s.createChannel(channelId) + s.createChannelValue(denom, channelValue) + + // Attempt to reset a rate limit that does not exist + err := gov.ResetRateLimit(s.Ctx, s.App.RatelimitKeeper, &resetRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful() + + // Reset the rate limit successfully + err = gov.ResetRateLimit(s.Ctx, s.App.RatelimitKeeper, &resetRateLimitMsg) + s.Require().NoError(err) + + // Check ratelimit quota is flow correctly + resetRateLimit, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denom, channelId) + s.Require().True(found) + s.Require().Equal(resetRateLimit.Flow, &types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + 
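+		// (after a reset, ChannelValue is expected to be refreshed from the denom's current total supply)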
}) +} diff --git a/x/ratelimit/keeper/grpc_query.go b/x/ratelimit/keeper/grpc_query.go new file mode 100644 index 0000000000..f2f2c48eea --- /dev/null +++ b/x/ratelimit/keeper/grpc_query.go @@ -0,0 +1,73 @@ +package keeper + +import ( + "context" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + + transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +var _ types.QueryServer = Keeper{} + +// Query all rate limits +func (k Keeper) AllRateLimits(c context.Context, req *types.QueryAllRateLimitsRequest) (*types.QueryAllRateLimitsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + rateLimits := k.GetAllRateLimits(ctx) + return &types.QueryAllRateLimitsResponse{RateLimits: rateLimits}, nil +} + +// Query a rate limit by denom and channelId +func (k Keeper) RateLimit(c context.Context, req *types.QueryRateLimitRequest) (*types.QueryRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + rateLimit, found := k.GetRateLimit(ctx, req.Denom, req.ChannelId) + if !found { + return &types.QueryRateLimitResponse{}, nil + } + return &types.QueryRateLimitResponse{RateLimit: &rateLimit}, nil +} + +// Query all rate limits for a given chain +func (k Keeper) RateLimitsByChainId(c context.Context, req *types.QueryRateLimitsByChainIdRequest) (*types.QueryRateLimitsByChainIdResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := []types.RateLimit{} + for _, rateLimit := range k.GetAllRateLimits(ctx) { + + // Determine the client state from the channel Id + _, clientState, err := k.channelKeeper.GetChannelClientState(ctx, transfertypes.PortID, rateLimit.Path.ChannelId) + if err != nil { + return &types.QueryRateLimitsByChainIdResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Unable to fetch client state from channelId") + } + client, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + return &types.QueryRateLimitsByChainIdResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Client state is not tendermint") + } + + // If the chain ID matches, add the rate limit to the returned list + if client.ChainId == req.ChainId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChainIdResponse{RateLimits: rateLimits}, nil +} + +// Query all rate limits for a given channel +func (k Keeper) RateLimitsByChannelId(c context.Context, req *types.QueryRateLimitsByChannelIdRequest) (*types.QueryRateLimitsByChannelIdResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := []types.RateLimit{} + for _, rateLimit := range k.GetAllRateLimits(ctx) { + // If the channel ID matches, add the rate limit to the returned list + if rateLimit.Path.ChannelId == req.ChannelId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChannelIdResponse{RateLimits: rateLimits}, nil +} diff --git a/x/ratelimit/keeper/grpc_query_test.go b/x/ratelimit/keeper/grpc_query_test.go new file mode 100644 index 0000000000..c28e93837c --- /dev/null +++ b/x/ratelimit/keeper/grpc_query_test.go @@ -0,0 +1,92 @@ +package keeper_test + +import ( + "context" + "fmt" + "time" + + transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/v5/modules/core/03-connection/types" + channeltypes 
"github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// Add three rate limits on different channels +// Each should have a different chainId +func (s *KeeperTestSuite) setupQueryRateLimitTests() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := int64(0); i <= 2; i++ { + clientId := fmt.Sprintf("07-tendermint-%d", i) + chainId := fmt.Sprintf("chain-%d", i) + connectionId := fmt.Sprintf("connection-%d", i) + channelId := fmt.Sprintf("channel-%d", i) + + // First register the client, connection, and channel (so we can map back to chainId) + // Nothing in the client state matters besides the chainId + clientState := ibctmtypes.NewClientState( + chainId, ibctmtypes.Fraction{}, time.Duration(0), time.Duration(0), time.Duration(0), clienttypes.Height{}, nil, nil, true, true, + ) + connection := connectiontypes.ConnectionEnd{ClientId: clientId} + channel := channeltypes.Channel{ConnectionHops: []string{connectionId}} + + s.App.IBCKeeper.ClientKeeper.SetClientState(s.Ctx, clientId, clientState) + s.App.IBCKeeper.ConnectionKeeper.SetConnection(s.Ctx, connectionId, connection) + s.App.IBCKeeper.ChannelKeeper.SetChannel(s.Ctx, transfertypes.PortID, channelId, channel) + + // Then add the rate limit + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom", ChannelId: channelId}, + } + s.App.RatelimitKeeper.SetRateLimit(s.Ctx, rateLimit) + rateLimits = append(rateLimits, rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestQueryAllRateLimits() { + expectedRateLimits := s.setupQueryRateLimitTests() + queryResponse, err := s.QueryClient.AllRateLimits(context.Background(), &types.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().ElementsMatch(expectedRateLimits, queryResponse.RateLimits) +} + +func (s *KeeperTestSuite) TestQueryRateLimit() { + allRateLimits := s.setupQueryRateLimitTests() + for _, expectedRateLimit := range allRateLimits { + queryResponse, err := s.QueryClient.RateLimit(context.Background(), &types.QueryRateLimitRequest{ + Denom: expectedRateLimit.Path.Denom, + ChannelId: expectedRateLimit.Path.ChannelId, + }) + s.Require().NoError(err, "no error expected when querying rate limit on channel: %s", expectedRateLimit.Path.ChannelId) + s.Require().Equal(expectedRateLimit, *queryResponse.RateLimit) + } +} + +func (s *KeeperTestSuite) TestQueryRateLimitsByChainId() { + allRateLimits := s.setupQueryRateLimitTests() + for i, expectedRateLimit := range allRateLimits { + chainId := fmt.Sprintf("chain-%d", i) + queryResponse, err := s.QueryClient.RateLimitsByChainId(context.Background(), &types.QueryRateLimitsByChainIdRequest{ + ChainId: chainId, + }) + s.Require().NoError(err, "no error expected when querying rate limit on chain: %s", chainId) + s.Require().Len(queryResponse.RateLimits, 1) + s.Require().Equal(expectedRateLimit, queryResponse.RateLimits[0]) + } +} + +func (s *KeeperTestSuite) TestQueryRateLimitsByChannelId() { + allRateLimits := s.setupQueryRateLimitTests() + for i, expectedRateLimit := range allRateLimits { + channelId := fmt.Sprintf("channel-%d", i) + queryResponse, err := s.QueryClient.RateLimitsByChannelId(context.Background(), &types.QueryRateLimitsByChannelIdRequest{ + ChannelId: channelId, + }) + s.Require().NoError(err, "no error expected when querying rate limit on channel: %s", channelId) + s.Require().Len(queryResponse.RateLimits, 1) + 
s.Require().Equal(expectedRateLimit, queryResponse.RateLimits[0]) + } +} diff --git a/x/ratelimit/keeper/hooks.go b/x/ratelimit/keeper/hooks.go new file mode 100644 index 0000000000..9db158de1d --- /dev/null +++ b/x/ratelimit/keeper/hooks.go @@ -0,0 +1,46 @@ +package keeper + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + epochstypes "github.com/Stride-Labs/stride/v5/x/epochs/types" +) + +// Before each hour epoch, check if any of the rate limits have expired, +// and reset them if they have +func (k Keeper) BeforeEpochStart(ctx sdk.Context, epochInfo epochstypes.EpochInfo) { + if epochInfo.Identifier == epochstypes.HOUR_EPOCH { + epochHour := uint64(epochInfo.CurrentEpoch) + + for _, rateLimit := range k.GetAllRateLimits(ctx) { + if epochHour%rateLimit.Quota.DurationHours == 0 { + err := k.ResetRateLimit(ctx, rateLimit.Path.Denom, rateLimit.Path.ChannelId) + if err != nil { + k.Logger(ctx).Error(fmt.Sprintf("Unable to reset quota for Denom: %s, ChannelId: %s", rateLimit.Path.Denom, rateLimit.Path.ChannelId)) + } + } + } + } +} + +func (k Keeper) AfterEpochEnd(ctx sdk.Context, epochInfo epochstypes.EpochInfo) {} + +type Hooks struct { + k Keeper +} + +var _ epochstypes.EpochHooks = Hooks{} + +func (k Keeper) Hooks() Hooks { + return Hooks{k} +} + +func (h Hooks) BeforeEpochStart(ctx sdk.Context, epochInfo epochstypes.EpochInfo) { + h.k.BeforeEpochStart(ctx, epochInfo) +} + +func (h Hooks) AfterEpochEnd(ctx sdk.Context, epochInfo epochstypes.EpochInfo) { + h.k.AfterEpochEnd(ctx, epochInfo) +} diff --git a/x/ratelimit/keeper/hooks_test.go b/x/ratelimit/keeper/hooks_test.go new file mode 100644 index 0000000000..bbf1a21468 --- /dev/null +++ b/x/ratelimit/keeper/hooks_test.go @@ -0,0 +1,73 @@ +package keeper_test + +import ( + "fmt" + + sdkmath "cosmossdk.io/math" + + epochstypes "github.com/Stride-Labs/stride/v5/x/epochs/types" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// Store a rate limit with a non-zero flow for each duration +func (s *KeeperTestSuite) resetRateLimits(denom string, durations []uint64, nonZeroFlow int64) { + // Add/reset rate limit with a quota duration hours for each duration in the list + for i, duration := range durations { + channelId := fmt.Sprintf("channel-%d", i) + + s.App.RatelimitKeeper.SetRateLimit(s.Ctx, types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelId: channelId, + }, + Quota: &types.Quota{ + DurationHours: duration, + }, + Flow: &types.Flow{ + Inflow: sdkmath.NewInt(nonZeroFlow), + Outflow: sdkmath.NewInt(nonZeroFlow), + ChannelValue: sdkmath.NewInt(100), + }, + }) + } +} + +func (s *KeeperTestSuite) TestBeforeEpochStart() { + // We'll create three rate limits with different durations + // And then pass in epoch ids that will cause each to trigger a reset in order + // i.e. 
epochId 2 will only cause duration 2 to trigger (2 % 2 == 0; and 9 % 2 != 0; 25 % 2 != 0), + // epochId 9, will only cause duration 3 to trigger (9 % 2 != 0; and 9 % 3 == 0; 25 % 3 != 0) + // epochId 25, will only cause duration 5 to trigger (9 % 5 != 0; and 9 % 5 != 0; 25 % 5 == 0) + durations := []uint64{2, 3, 5} + epochIds := []int64{2, 9, 25} + nonZeroFlow := int64(10) + + for i, epochId := range epochIds { + // First reset the rate limits to they have a non-zero flow + s.resetRateLimits(denom, durations, nonZeroFlow) + + duration := durations[i] + channelIdFromResetRateLimit := fmt.Sprintf("channel-%d", i) + + // Then trigger the epoch hook + epoch := epochstypes.EpochInfo{ + Identifier: epochstypes.HOUR_EPOCH, + CurrentEpoch: epochId, + } + s.App.RatelimitKeeper.BeforeEpochStart(s.Ctx, epoch) + + // Check rate limits (only one rate limit should reset for each hook trigger) + rateLimits := s.App.RatelimitKeeper.GetAllRateLimits(s.Ctx) + for _, rateLimit := range rateLimits { + context := fmt.Sprintf("duration: %d, epoch: %d", duration, epochId) + + if rateLimit.Path.ChannelId == channelIdFromResetRateLimit { + s.Require().Equal(int64(0), rateLimit.Flow.Inflow.Int64(), "inflow was not reset to 0 - %s", context) + s.Require().Equal(int64(0), rateLimit.Flow.Outflow.Int64(), "outflow was not reset to 0 - %s", context) + } else { + s.Require().Equal(nonZeroFlow, rateLimit.Flow.Inflow.Int64(), "inflow should have been left unchanged - %s", context) + s.Require().Equal(nonZeroFlow, rateLimit.Flow.Outflow.Int64(), "outflow should have been left unchanged - %s", context) + } + } + } +} diff --git a/x/ratelimit/keeper/keeper.go b/x/ratelimit/keeper/keeper.go new file mode 100644 index 0000000000..42b3afffda --- /dev/null +++ b/x/ratelimit/keeper/keeper.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +type ( + Keeper struct { + storeKey storetypes.StoreKey + cdc codec.BinaryCodec + paramstore paramtypes.Subspace + + bankKeeper types.BankKeeper + channelKeeper types.ChannelKeeper + ics4Wrapper types.ICS4Wrapper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + key storetypes.StoreKey, + ps paramtypes.Subspace, + bankKeeper types.BankKeeper, + channelKeeper types.ChannelKeeper, + ics4Wrapper types.ICS4Wrapper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: key, + paramstore: ps, + bankKeeper: bankKeeper, + channelKeeper: channelKeeper, + ics4Wrapper: ics4Wrapper, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} diff --git a/x/ratelimit/keeper/keeper_test.go b/x/ratelimit/keeper/keeper_test.go new file mode 100644 index 0000000000..10012187b6 --- /dev/null +++ b/x/ratelimit/keeper/keeper_test.go @@ -0,0 +1,24 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +type KeeperTestSuite struct { + apptesting.AppTestHelper + QueryClient types.QueryClient +} + +func (s *KeeperTestSuite) SetupTest() { + s.Setup() + s.QueryClient = types.NewQueryClient(s.QueryHelper) +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, 
new(KeeperTestSuite)) +} diff --git a/x/ratelimit/keeper/packet.go b/x/ratelimit/keeper/packet.go new file mode 100644 index 0000000000..9a19b71959 --- /dev/null +++ b/x/ratelimit/keeper/packet.go @@ -0,0 +1,182 @@ +package keeper + +import ( + "encoding/json" + "fmt" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v5/modules/core/exported" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// Parse the denom from the Send Packet that will be used by the rate limit module +// The denom that the rate limiter will use for a SEND packet depends on whether +// it was a NATIVE token (e.g. ustrd, stuatom, etc.) or NON-NATIVE token (e.g. ibc/...)... +// +// We can identify if the token is native or not by parsing the trace denom from the packet +// If the token is NATIVE, it will not have a prefix (e.g. ustrd), +// and if it is NON-NATIVE, it will have a prefix (e.g. transfer/channel-2/uosmo) +// +// For NATIVE denoms, return as is (e.g. ustrd) +// For NON-NATIVE denoms, take the ibc hash (e.g. hash "transfer/channel-2/usoms" into "ibc/...") +func ParseDenomFromSendPacket(packet transfertypes.FungibleTokenPacketData) (denom string) { + // Determine the denom by looking at the denom trace path + denomTrace := transfertypes.ParseDenomTrace(packet.Denom) + + // Native assets will have an empty trace path and can be returned as is + if denomTrace.Path == "" { + denom = packet.Denom + } else { + // Non-native assets should be hashed + denom = denomTrace.IBCDenom() + } + + return denom +} + +// Parse the denom from the Recv Packet that will be used by the rate limit module +// The denom that the rate limiter will use for a RECEIVE packet depends on whether it was a source or sink +// Sink: The token moves forward, to a chain different than its previous hop +// The new port and channel are APPENDED to the denom trace. +// (e.g. A -> B, B is a sink) (e.g. A -> B -> C, C is a sink) +// Source: The token moves backwards (i.e. revisits the last chain it was sent from) +// The port and channel are REMOVED from the denom trace - undoing the last hop. +// (e.g. A -> B -> A, A is a source) (e.g. A -> B -> C -> B, B is a source) +// +// If the chain is acting as a SINK: +// We add on the Stride port and channel and hash it +// Ex1: uosmo sent from Osmosis to Stride +// Packet Denom: uosmo +// -> Add Prefix: transfer/channel-X/uosmo +// -> Hash: ibc/... +// +// Ex2: ujuno sent from Osmosis to Stride +// PacketDenom: transfer/channel-Y/ujuno (channel-Y is the Juno <> Osmosis channel) +// -> Add Prefix: transfer/channel-X/transfer/channel-Y/ujuno +// -> Hash: ibc/... +// +// If the chain is acting as a SOURCE: +// First, remove the prefix. Then if there is still a denom trace, hash it +// Ex1: ustrd sent back to Stride from Osmosis +// Packet Denom: transfer/channel-X/ustrd +// -> Remove Prefix: ustrd +// -> Leave as is: ustrd +// +// Ex2: juno was sent to Stride, then to Osmosis, then back to Stride +// Packet Denom: transfer/channel-X/transfer/channel-Z/ujuno +// -> Remove Prefix: transfer/channel-Z/ujuno +// -> Hash: ibc/... 
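+// In both cases the resulting denom matches how the token is represented on Stride,
+// which is also the key the corresponding rate limit is stored under.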
+func ParseDenomFromRecvPacket(packet channeltypes.Packet, packetData transfertypes.FungibleTokenPacketData) (denom string) { + // To determine the denom, first check whether Stride is acting as source + if transfertypes.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), packetData.Denom) { + // Remove the source prefix (e.g. transfer/channel-X/transfer/channel-Z/ujuno -> transfer/channel-Z/ujuno) + sourcePrefix := transfertypes.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel()) + unprefixedDenom := packetData.Denom[len(sourcePrefix):] + + // Native assets will have an empty trace path and can be returned as is + denomTrace := transfertypes.ParseDenomTrace(unprefixedDenom) + if denomTrace.Path == "" { + denom = unprefixedDenom + } else { + // Non-native assets should be hashed + denom = denomTrace.IBCDenom() + } + } else { + // Prefix the destination channel - this will contain the trailing slash (e.g. transfer/channel-X/) + destinationPrefix := transfertypes.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel()) + prefixedDenom := destinationPrefix + packetData.Denom + + // Hash the denom trace + denomTrace := transfertypes.ParseDenomTrace(prefixedDenom) + denom = denomTrace.IBCDenom() + } + + return denom +} + +// Middleware implementation for SendPacket with rate limiting +func (k Keeper) SendRateLimitedPacket(ctx sdk.Context, packet ibcexported.PacketI) error { + // The Stride channelID should always be used as the key for the RateLimit object (not the counterparty channelID) + // For a SEND packet, the Stride channelID is the SOURCE channel + // This is because the Source and Desination are defined from the perspective of a packet recipient + // Meaning, when this packet lands on a the host chain, the "Source" will be the Stride Channel, + // and the "Destination" will be the Host Channel + channelId := packet.GetSourceChannel() + + // Parse the packet data + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &packetData); err != nil { + return err + } + + amount, ok := sdk.NewIntFromString(packetData.Amount) + if !ok { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "Unable to cast packet amount to sdkmath.Int") + } + + denom := ParseDenomFromSendPacket(packetData) + + err := k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_SEND, denom, channelId, amount) + if err != nil { + return err + } + + return nil +} + +// Middleware implementation for RecvPacket with rate limiting +func (k Keeper) ReceiveRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + // The Stride channelID should always be used as the key for the RateLimit object (not the counterparty channelID) + // For a RECEIVE packet, the Stride channelID is the DESTINATION channel + // This is because the Source and Desination are defined from the perspective of a packet recipient + // Meaning, when this packet lands on a Stride, the "Source" will be the host zone's channel, + // and the "Destination" will be the Stride Channel + channelId := packet.GetDestChannel() + + // Parse the amount and denom from the packet + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &packetData); err != nil { + return err + } + + amount, ok := sdk.NewIntFromString(packetData.Amount) + if !ok { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "Unable to cast packet amount to sdkmath.Int") + } + + denom := ParseDenomFromRecvPacket(packet, packetData) + + // Check whether the rate 
limit has been exceeded - and if it hasn't, send the packet + err := k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_RECV, denom, channelId, amount) + if err != nil { + return err + } + + return nil +} + +// SendPacket wraps IBC ChannelKeeper's SendPacket function +// If the packet does not get rate limited, it passes the packet to the IBC Channel keeper +func (k Keeper) SendPacket(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI) error { + if err := k.SendRateLimitedPacket(ctx, packet); err != nil { + k.Logger(ctx).Error(fmt.Sprintf("ICS20 packet send was denied: %s", err.Error())) + return err + } + return k.ics4Wrapper.SendPacket(ctx, chanCap, packet) +} + +// WriteAcknowledgement wraps IBC ChannelKeeper's WriteAcknowledgement function +func (k Keeper) WriteAcknowledgement(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, acknowledgement ibcexported.Acknowledgement) error { + return k.ics4Wrapper.WriteAcknowledgement(ctx, chanCap, packet, acknowledgement) +} + +// GetAppVersion wraps IBC ChannelKeeper's GetAppVersion function +func (k Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ratelimit/keeper/packet_test.go b/x/ratelimit/keeper/packet_test.go new file mode 100644 index 0000000000..bb3587119f --- /dev/null +++ b/x/ratelimit/keeper/packet_test.go @@ -0,0 +1,240 @@ +package keeper_test + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "testing" + + sdkmath "cosmossdk.io/math" + transfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + "github.com/stretchr/testify/require" + + tmbytes "github.com/tendermint/tendermint/libs/bytes" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +const ( + transferPort = "transfer" + uosmo = "uosmo" + ujuno = "ujuno" + ustrd = "ustrd" + stuatom = "stuatom" + channelOnStride = "channel-0" + channelOnHost = "channel-1" +) + +func hashDenomTrace(denomTrace string) string { + trace32byte := sha256.Sum256([]byte(denomTrace)) + var traceTmByte tmbytes.HexBytes = trace32byte[:] + return fmt.Sprintf("ibc/%s", traceTmByte) +} + +func TestParseDenomFromSendPacket(t *testing.T) { + testCases := []struct { + name string + packetDenomTrace string + expectedDenom string + }{ + // Native assets stay as is + { + name: "ustrd", + packetDenomTrace: ustrd, + expectedDenom: ustrd, + }, + { + name: "stuatom", + packetDenomTrace: stuatom, + expectedDenom: stuatom, + }, + // Non-native assets are hashed + { + name: "uosmo_one_hop", + packetDenomTrace: "transfer/channel-0/usomo", + expectedDenom: hashDenomTrace("transfer/channel-0/usomo"), + }, + { + name: "uosmo_two_hops", + packetDenomTrace: "transfer/channel-2/transfer/channel-1/usomo", + expectedDenom: hashDenomTrace("transfer/channel-2/transfer/channel-1/usomo"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + packet := transfertypes.FungibleTokenPacketData{ + Denom: tc.packetDenomTrace, + } + + parsedDenom := keeper.ParseDenomFromSendPacket(packet) + require.Equal(t, tc.expectedDenom, parsedDenom, tc.name) + }) + } +} + +func TestParseDenomFromRecvPacket(t *testing.T) { + osmoChannelOnStride := "channel-0" + strideChannelOnOsmo := "channel-100" + junoChannelOnOsmo := "channel-200" + junoChannelOnStride := "channel-300" + + testCases := []struct { 
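+		// each case gives the denom trace carried in the inbound packet, the channels on the
+		// receive path, and the denom the rate limiter is expected to key on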
+ name string + packetDenomTrace string + sourceChannel string + destinationChannel string + expectedDenom string + }{ + // Sink asset one hop away: + // uosmo sent from Osmosis to Stride (uosmo) + // -> tack on prefix (transfer/channel-0/uosmo) and hash + { + name: "sink_one_hop", + packetDenomTrace: uosmo, + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, osmoChannelOnStride, uosmo)), + }, + // Sink asset two hops away: + // ujuno sent from Juno to Osmosis to Stride (transfer/channel-200/ujuno) + // -> tack on prefix (transfer/channel-0/transfer/channel-200/ujuno) and hash + { + name: "sink_two_hops", + packetDenomTrace: fmt.Sprintf("%s/%s/%s", transferPort, junoChannelOnOsmo, ujuno), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s/%s/%s", transferPort, osmoChannelOnStride, transferPort, junoChannelOnOsmo, ujuno)), + }, + // Native source assets + // ustrd sent from Stride to Osmosis and then back to Stride (transfer/channel-0/ustrd) + // -> remove prefix and leave as is (ustrd) + { + name: "native_source", + packetDenomTrace: fmt.Sprintf("%s/%s/%s", transferPort, strideChannelOnOsmo, ustrd), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: ustrd, + }, + // Non-native source assets + // ujuno was sent from Juno to Stride, then to Osmosis, then back to Stride (transfer/channel-0/transfer/channel-300/ujuno) + // -> remove prefix (transfer/channel-300/ujuno) and hash + { + name: "non_native_source", + packetDenomTrace: fmt.Sprintf("%s/%s/%s/%s/%s", transferPort, strideChannelOnOsmo, transferPort, junoChannelOnStride, ujuno), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, junoChannelOnStride, ujuno)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + packet := channeltypes.Packet{ + SourcePort: transferPort, + DestinationPort: transferPort, + SourceChannel: tc.sourceChannel, + DestinationChannel: tc.destinationChannel, + } + packetData := transfertypes.FungibleTokenPacketData{ + Denom: tc.packetDenomTrace, + } + + parsedDenom := keeper.ParseDenomFromRecvPacket(packet, packetData) + require.Equal(t, tc.expectedDenom, parsedDenom, tc.name) + }) + } +} + +func (s *KeeperTestSuite) createRateLimitCloseToQuota(denom string, channelId string, direction types.PacketDirection) { + channelValue := sdkmath.NewInt(100) + threshold := sdkmath.NewInt(10) + + // Set inflow/outflow close to threshold, depending on which direction we're going in + inflow := sdkmath.ZeroInt() + outflow := sdkmath.ZeroInt() + if direction == types.PACKET_RECV { + inflow = sdkmath.NewInt(9) + } else { + outflow = sdkmath.NewInt(9) + } + + // Store rate limit + s.App.RatelimitKeeper.SetRateLimit(s.Ctx, types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelId: channelId, + }, + Quota: &types.Quota{ + MaxPercentSend: threshold, + MaxPercentRecv: threshold, + }, + Flow: &types.Flow{ + Inflow: inflow, + Outflow: outflow, + ChannelValue: channelValue, + }, + }) +} + +func (s *KeeperTestSuite) TestSendRateLimitedPacket() { + // For send packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + amountToExceed := "5" + + // Create rate limit (for SEND, 
use SOURCE channel) + s.createRateLimitCloseToQuota(denom, sourceChannel, types.PACKET_SEND) + + // This packet should cause an Outflow quota exceed error + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: amountToExceed}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + } + + // We check for a quota error because it doesn't appear until the end of the function + // We're avoiding checking for a success here because we can get a false positive if the rate limit doesn't exist + err = s.App.RatelimitKeeper.SendRateLimitedPacket(s.Ctx, packet) + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error type") + s.Require().ErrorContains(err, "Outflow exceeds quota", "error text") +} + +func (s *KeeperTestSuite) TestReceiveRateLimitedPacket() { + // For receive packets, the source will be the host and the destination will be stride + packetDenom := uosmo + sourceChannel := channelOnHost + destinationChannel := channelOnStride + amountToExceed := "5" + + // When the packet is recieved, the port and channel prefix will be added and the denom will be hashed + // before the rate limit is found from the store + rateLimitDenom := hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, channelOnStride, packetDenom)) + + // Create rate limit (for RECV, use DESTINATION channel) + s.createRateLimitCloseToQuota(rateLimitDenom, destinationChannel, types.PACKET_RECV) + + // This packet should cause an Outflow quota exceed error + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: packetDenom, Amount: amountToExceed}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + } + + // We check for a quota error because it doesn't appear until the end of the function + // We're avoiding checking for a success here because we can get a false positive if the rate limit doesn't exist + err = s.App.RatelimitKeeper.ReceiveRateLimitedPacket(s.Ctx, packet) + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error type") + s.Require().ErrorContains(err, "Inflow exceeds quota", "error text") +} diff --git a/x/ratelimit/keeper/params.go b/x/ratelimit/keeper/params.go new file mode 100644 index 0000000000..88552a2dcc --- /dev/null +++ b/x/ratelimit/keeper/params.go @@ -0,0 +1,17 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// GetParams get all parameters as types.Params +func (k Keeper) GetParams(ctx sdk.Context) types.Params { + return types.NewParams() +} + +// SetParams set the params +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramstore.SetParamSet(ctx, ¶ms) +} diff --git a/x/ratelimit/keeper/rate_limit.go b/x/ratelimit/keeper/rate_limit.go new file mode 100644 index 0000000000..24a95cb742 --- /dev/null +++ b/x/ratelimit/keeper/rate_limit.go @@ -0,0 +1,199 @@ +package keeper + +import ( + "strings" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +// Get the rate limit byte key built from the 
denom and channelId
+func GetRateLimitItemKey(denom string, channelId string) []byte {
+	return append(types.KeyPrefix(denom), types.KeyPrefix(channelId)...)
+}
+
+// The total value on a given path (aka, the denominator in the percentage calculation)
+// is the total supply of the given denom
+func (k Keeper) GetChannelValue(ctx sdk.Context, denom string) sdkmath.Int {
+	return k.bankKeeper.GetSupply(ctx, denom).Amount
+}
+
+// If the rate limit is exceeded or the denom is blacklisted, we emit an event
+func EmitTransferDeniedEvent(ctx sdk.Context, reason, denom, channelId string, direction types.PacketDirection, amount sdkmath.Int, err error) {
+	ctx.EventManager().EmitEvent(
+		sdk.NewEvent(
+			types.EventTransferDenied,
+			sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
+			sdk.NewAttribute(types.AttributeKeyReason, reason),
+			sdk.NewAttribute(types.AttributeKeyAction, strings.ToLower(direction.String())), // packet_send or packet_recv
+			sdk.NewAttribute(types.AttributeKeyDenom, denom),
+			sdk.NewAttribute(types.AttributeKeyChannel, channelId),
+			sdk.NewAttribute(types.AttributeKeyAmount, amount.String()),
+			sdk.NewAttribute(types.AttributeKeyError, err.Error()),
+		),
+	)
+}
+
+// Adds an amount to the flow in either the SEND or RECV direction
+func (k Keeper) UpdateFlow(rateLimit types.RateLimit, direction types.PacketDirection, amount sdkmath.Int) error {
+	switch direction {
+	case types.PACKET_SEND:
+		return rateLimit.Flow.AddOutflow(amount, *rateLimit.Quota)
+	case types.PACKET_RECV:
+		return rateLimit.Flow.AddInflow(amount, *rateLimit.Quota)
+	default:
+		return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid packet direction (%s)", direction.String())
+	}
+}
+
+// Checks whether the given packet will exceed the rate limit
+// Called by OnRecvPacket and OnSendPacket
+func (k Keeper) CheckRateLimitAndUpdateFlow(ctx sdk.Context, direction types.PacketDirection, denom string, channelId string, amount sdkmath.Int) error {
+	// First check if the denom is blacklisted
+	if k.IsDenomBlacklisted(ctx, denom) {
+		err := errorsmod.Wrapf(types.ErrDenomIsBlacklisted, "denom %s is blacklisted", denom)
+		EmitTransferDeniedEvent(ctx, types.EventBlacklistedDenom, denom, channelId, direction, amount, err)
+		return err
+	}
+
+	// If there's no rate limit yet for this denom, no action is necessary
+	rateLimit, found := k.GetRateLimit(ctx, denom, channelId)
+	if !found {
+		return nil
+	}
+
+	// Update the flow object with the change in amount
+	err := k.UpdateFlow(rateLimit, direction, amount)
+	if err != nil {
+		// If the rate limit was exceeded, emit an event
+		EmitTransferDeniedEvent(ctx, types.EventRateLimitExceeded, denom, channelId, direction, amount, err)
+		return err
+	}
+
+	// If there's no quota error, update the rate limit object in the store with the new flow
+	k.SetRateLimit(ctx, rateLimit)
+
+	return nil
+}
+
+// Resets the rate limit after expiration
+// The inflow and outflow are reset to 0 and the channelValue is refreshed from the current supply
+func (k Keeper) ResetRateLimit(ctx sdk.Context, denom string, channelId string) error {
+	rateLimit, found := k.GetRateLimit(ctx, denom, channelId)
+	if !found {
+		return types.ErrRateLimitNotFound
+	}
+
+	flow := types.Flow{
+		Inflow:       sdkmath.ZeroInt(),
+		Outflow:      sdkmath.ZeroInt(),
+		ChannelValue: k.GetChannelValue(ctx, denom),
+	}
+	rateLimit.Flow = &flow
+
+	k.SetRateLimit(ctx, rateLimit)
+	return nil
+}
+
+// Stores/Updates a rate limit object in the store
+func (k Keeper) SetRateLimit(ctx sdk.Context, rateLimit types.RateLimit) {
+
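+	// Entries live under the RateLimit key prefix and are keyed by denom + channelId
+	// (see GetRateLimitItemKey above)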
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + rateLimitKey := GetRateLimitItemKey(rateLimit.Path.Denom, rateLimit.Path.ChannelId) + rateLimitValue := k.cdc.MustMarshal(&rateLimit) + + store.Set(rateLimitKey, rateLimitValue) +} + +// Removes a rate limit object from the store using denom and channel-id +func (k Keeper) RemoveRateLimit(ctx sdk.Context, denom string, channelId string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + rateLimitKey := GetRateLimitItemKey(denom, channelId) + store.Delete(rateLimitKey) +} + +// Grabs and returns a rate limit object from the store using denom and channel-id +func (k Keeper) GetRateLimit(ctx sdk.Context, denom string, channelId string) (rateLimit types.RateLimit, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + rateLimitKey := GetRateLimitItemKey(denom, channelId) + rateLimitValue := store.Get(rateLimitKey) + + if len(rateLimitValue) == 0 { + return rateLimit, false + } + + k.cdc.MustUnmarshal(rateLimitValue, &rateLimit) + return rateLimit, true +} + +// Returns all rate limits stored +func (k Keeper) GetAllRateLimits(ctx sdk.Context) []types.RateLimit { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allRateLimits := []types.RateLimit{} + for ; iterator.Valid(); iterator.Next() { + + rateLimit := types.RateLimit{} + k.cdc.MustUnmarshal(iterator.Value(), &rateLimit) + allRateLimits = append(allRateLimits, rateLimit) + } + + return allRateLimits +} + +// Adds a denom to a blacklist to prevent all IBC transfers with this denom +func (k Keeper) AddDenomToBlacklist(ctx sdk.Context, denom string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BlacklistKeyPrefix) + + key := types.KeyPrefix(denom) + value := key // The denom will act as both the key and value + + store.Set(key, value) +} + +// Removes a denom from a blacklist to re-enable IBC transfers for that denom +func (k Keeper) RemoveDenomFromBlacklist(ctx sdk.Context, denom string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BlacklistKeyPrefix) + key := types.KeyPrefix(denom) + store.Delete(key) +} + +// Check if a denom is currently blacklistec +func (k Keeper) IsDenomBlacklisted(ctx sdk.Context, denom string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BlacklistKeyPrefix) + + key := types.KeyPrefix(denom) + value := store.Get(key) + + if len(value) == 0 { + return false + } + denomFromStore := string(value) + + return denom == denomFromStore +} + +// Get all the blacklisted denoms +func (k Keeper) GetAllBlacklistedDenoms(ctx sdk.Context) []string { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BlacklistKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allBlacklistedDenoms := []string{} + for ; iterator.Valid(); iterator.Next() { + allBlacklistedDenoms = append(allBlacklistedDenoms, string(iterator.Key())) + } + + return allBlacklistedDenoms +} diff --git a/x/ratelimit/keeper/rate_limit_test.go b/x/ratelimit/keeper/rate_limit_test.go new file mode 100644 index 0000000000..6a0bb9bbf9 --- /dev/null +++ b/x/ratelimit/keeper/rate_limit_test.go @@ -0,0 +1,403 @@ +package keeper_test + +import ( + "strconv" + + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + + minttypes "github.com/Stride-Labs/stride/v5/x/mint/types" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + 
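+// Rough numbers behind the quota checks below, assuming the flow check compares the net
+// flow against maxPercent * channelValue / 100: with ChannelValue = 100 and
+// MaxPercentSend/Recv = 10, the threshold is 10, so two sends of 5 stay within quota
+// while a follow-up send of 6 (net outflow 11) fails with "Outflow exceeds quota".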
+const ( + denom = "denom" + channelId = "channel-0" +) + +type action struct { + direction types.PacketDirection + amount int64 + addToBlacklist bool + removeFromBlacklist bool +} + +type checkRateLimitTestCase struct { + name string + actions []action + expectedError string +} + +// Helper function to check if an element is in an array +func isInArray(element string, arr []string) bool { + for _, e := range arr { + if e == element { + return true + } + } + return false +} + +func (s *KeeperTestSuite) TestGetChannelValue() { + supply := sdkmath.NewInt(100) + + // Mint coins to increase the supply, which will increase the channel value + err := s.App.BankKeeper.MintCoins(s.Ctx, minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(denom, supply))) + s.Require().NoError(err) + + expected := supply + actual := s.App.RatelimitKeeper.GetChannelValue(s.Ctx, denom) + s.Require().Equal(expected, actual) +} + +// Helper function to create 5 rate limit objects with various attributes +func (s *KeeperTestSuite) createRateLimits() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := 1; i <= 5; i++ { + suffix := strconv.Itoa(i) + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom-" + suffix, ChannelId: "channel-" + suffix}, + Flow: &types.Flow{Inflow: sdkmath.NewInt(10), Outflow: sdkmath.NewInt(10)}, + } + + rateLimits = append(rateLimits, rateLimit) + s.App.RatelimitKeeper.SetRateLimit(s.Ctx, rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestGetRateLimit() { + rateLimits := s.createRateLimits() + + expectedRateLimit := rateLimits[0] + denom := expectedRateLimit.Path.Denom + channelId := expectedRateLimit.Path.ChannelId + + actualRateLimit, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denom, channelId) + s.Require().True(found, "element should have been found, but was not") + s.Require().Equal(expectedRateLimit, actualRateLimit) +} + +func (s *KeeperTestSuite) TestRemoveRateLimit() { + rateLimits := s.createRateLimits() + + rateLimitToRemove := rateLimits[0] + denomToRemove := rateLimitToRemove.Path.Denom + channelIdToRemove := rateLimitToRemove.Path.ChannelId + + s.App.RatelimitKeeper.RemoveRateLimit(s.Ctx, denomToRemove, channelIdToRemove) + _, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denomToRemove, channelIdToRemove) + s.Require().False(found, "the removed element should not have been found, but it was") +} + +func (s *KeeperTestSuite) TestResetRateLimit() { + rateLimits := s.createRateLimits() + + rateLimitToReset := rateLimits[0] + denomToRemove := rateLimitToReset.Path.Denom + channelIdToRemove := rateLimitToReset.Path.ChannelId + + err := s.App.RatelimitKeeper.ResetRateLimit(s.Ctx, denomToRemove, channelIdToRemove) + s.Require().NoError(err) + + rateLimit, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denomToRemove, channelIdToRemove) + s.Require().True(found, "element should have been found, but was not") + s.Require().Zero(rateLimit.Flow.Inflow.Int64(), "Inflow should have been reset to 0") + s.Require().Zero(rateLimit.Flow.Outflow.Int64(), "Outflow should have been reset to 0") +} + +func (s *KeeperTestSuite) TestGetAllRateLimits() { + expectedRateLimits := s.createRateLimits() + actualRateLimits := s.App.RatelimitKeeper.GetAllRateLimits(s.Ctx) + s.Require().Len(actualRateLimits, len(expectedRateLimits)) + s.Require().ElementsMatch(expectedRateLimits, actualRateLimits, "all rate limits") +} + +func (s *KeeperTestSuite) TestDenomBlacklist() { + allDenoms := []string{"denom1", "denom2", "denom3", "denom4"} + denomsToBlacklist := 
[]string{"denom1", "denom3"} + + // No denoms are currently blacklisted + for _, denom := range allDenoms { + isBlacklisted := s.App.RatelimitKeeper.IsDenomBlacklisted(s.Ctx, denom) + s.Require().False(isBlacklisted, "%s should not be blacklisted yet", denom) + } + + // Blacklist two denoms + for _, denom := range denomsToBlacklist { + s.App.RatelimitKeeper.AddDenomToBlacklist(s.Ctx, denom) + } + + // Confirm half the list was blacklisted and the others were not + for _, denom := range allDenoms { + isBlacklisted := s.App.RatelimitKeeper.IsDenomBlacklisted(s.Ctx, denom) + + if isInArray(denom, denomsToBlacklist) { + s.Require().True(isBlacklisted, "%s should have been blacklisted", denom) + } else { + s.Require().False(isBlacklisted, "%s should not have been blacklisted", denom) + } + } + actualBlacklistedDenoms := s.App.RatelimitKeeper.GetAllBlacklistedDenoms(s.Ctx) + s.Require().Len(actualBlacklistedDenoms, len(denomsToBlacklist), "number of blacklisted denoms") + s.Require().ElementsMatch(denomsToBlacklist, actualBlacklistedDenoms, "list of blacklisted denoms") + + // Finally, remove denoms from blacklist and confirm they were removed + for _, denom := range denomsToBlacklist { + s.App.RatelimitKeeper.RemoveDenomFromBlacklist(s.Ctx, denom) + } + for _, denom := range allDenoms { + isBlacklisted := s.App.RatelimitKeeper.IsDenomBlacklisted(s.Ctx, denom) + + if isInArray(denom, denomsToBlacklist) { + s.Require().False(isBlacklisted, "%s should have been removed from the blacklist", denom) + } else { + s.Require().False(isBlacklisted, "%s should never have been blacklisted", denom) + } + } +} + +// Adds a rate limit object to the store in preparation for the check rate limit tests +func (s *KeeperTestSuite) SetupCheckRateLimitAndUpdateFlowTest() { + channelValue := sdkmath.NewInt(100) + maxPercentSend := sdkmath.NewInt(10) + maxPercentRecv := sdkmath.NewInt(10) + + s.App.RatelimitKeeper.SetRateLimit(s.Ctx, types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelId: channelId, + }, + Quota: &types.Quota{ + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: 1, + }, + Flow: &types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + }, + }) + + s.App.RatelimitKeeper.RemoveDenomFromBlacklist(s.Ctx, denom) +} + +// Helper function to check the rate limit across a series of transfers +func (s *KeeperTestSuite) processCheckRateLimitAndUpdateFlowTestCase(tc checkRateLimitTestCase) { + s.SetupCheckRateLimitAndUpdateFlowTest() + + expectedInflow := sdkmath.NewInt(0) + expectedOutflow := sdkmath.NewInt(0) + for i, action := range tc.actions { + if action.addToBlacklist { + s.App.RatelimitKeeper.AddDenomToBlacklist(s.Ctx, denom) + continue + } else if action.removeFromBlacklist { + s.App.RatelimitKeeper.RemoveDenomFromBlacklist(s.Ctx, denom) + continue + } + + amount := sdkmath.NewInt(action.amount) + err := s.App.RatelimitKeeper.CheckRateLimitAndUpdateFlow(s.Ctx, action.direction, denom, channelId, amount) + + // Only check the error on the last action + if i == len(tc.actions)-1 && tc.expectedError != "" { + s.Require().ErrorContains(err, tc.expectedError, tc.name+"- action: #%d - error", i) + } else { + // All but the last action should succeed + s.Require().NoError(err, tc.name+"- action: #%d - no error", i) + + // Update expected flow + if action.direction == types.PACKET_RECV { + expectedInflow = expectedInflow.Add(amount) + } else { + expectedOutflow = expectedOutflow.Add(amount) + } + } + + // Confirm flow is 
updated properly (or left as is if the theshold was exceeded) + rateLimit, found := s.App.RatelimitKeeper.GetRateLimit(s.Ctx, denom, channelId) + s.Require().True(found) + s.Require().Equal(expectedInflow, rateLimit.Flow.Inflow, tc.name+"- action: #%d - inflow", i) + s.Require().Equal(expectedOutflow, rateLimit.Flow.Outflow, tc.name+"- action: #%d - outflow", i) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdateFlow_UnidirectionalFlow() { + testCases := []checkRateLimitTestCase{ + { + name: "send_under_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 5}, + {direction: types.PACKET_SEND, amount: 5}, + }, + }, + { + name: "send_over_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 5}, + {direction: types.PACKET_SEND, amount: 6}, + }, + expectedError: "Outflow exceeds quota", + }, + { + name: "recv_under_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 5}, + {direction: types.PACKET_RECV, amount: 5}, + }, + }, + { + name: "recv_over_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 5}, + {direction: types.PACKET_RECV, amount: 6}, + }, + expectedError: "Inflow exceeds quota", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_BidirectionalFlow() { + testCases := []checkRateLimitTestCase{ + { + name: "send_then_recv_under_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + }, + }, + { + name: "recv_then_send_under_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + }, + }, + { + name: "send_then_recv_over_inflow", + actions: []action{ + {direction: types.PACKET_SEND, amount: 2}, // -2, Net: -2 + {direction: types.PACKET_RECV, amount: 6}, // +6, Net: +4 + {direction: types.PACKET_SEND, amount: 2}, // -2, Net: +2 + {direction: types.PACKET_RECV, amount: 6}, // +6, Net: +8 + {direction: types.PACKET_SEND, amount: 2}, // -2, Net: +6 + {direction: types.PACKET_RECV, amount: 6}, // +6, Net: +12 (exceeds threshold) + }, + expectedError: "Inflow exceeds quota", + }, + { + name: "send_then_recv_over_outflow", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, // -6, Net: -6 + {direction: types.PACKET_RECV, amount: 2}, // +2, Net: -4 + {direction: types.PACKET_SEND, amount: 6}, // -6, Net: -10 + {direction: types.PACKET_RECV, amount: 2}, // +2, Net: -8 + {direction: types.PACKET_SEND, amount: 6}, // -6, Net: -14 (exceeds threshold) + }, + expectedError: "Outflow exceeds quota", + }, + { + name: "recv_then_send_over_inflow", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, // +6, Net: +6 + {direction: types.PACKET_SEND, amount: 2}, // -2, Net: +4 + {direction: types.PACKET_RECV, amount: 6}, // +6, Net: +10 + {direction: types.PACKET_SEND, amount: 2}, // -2, Net: +8 + {direction: types.PACKET_RECV, amount: 6}, // +6, Net: +14 (exceeds threshold) + }, + expectedError: "Inflow exceeds quota", + }, + { + name: "recv_then_send_over_outflow", + actions: []action{ + {direction: types.PACKET_RECV, amount: 2}, // +2, Net: +2 + {direction: types.PACKET_SEND, amount: 6}, // -6, Net: -4 + {direction: 
types.PACKET_RECV, amount: 2}, // +2, Net: -2 + {direction: types.PACKET_SEND, amount: 6}, // -6, Net: -8 + {direction: types.PACKET_RECV, amount: 2}, // +2, Net: -6 + {direction: types.PACKET_SEND, amount: 10}, // +6, Net: -12 (exceeds threshold) + }, + expectedError: "Outflow exceeds quota", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_Blacklist() { + testCases := []checkRateLimitTestCase{ + { + name: "add_then_remove_from_blacklist", // should succeed + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + {removeFromBlacklist: true}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + }, + }, + { + name: "send_recv_blacklist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToBlacklist: true}, + {direction: types.PACKET_SEND, amount: 6}, + }, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + { + name: "send_recv_blacklist_recv", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToBlacklist: true}, + {direction: types.PACKET_RECV, amount: 6}, + }, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + { + name: "recv_send_blacklist_send", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + {direction: types.PACKET_SEND, amount: 6}, + }, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + { + name: "recv_send_blacklist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + {direction: types.PACKET_RECV, amount: 6}, + }, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} diff --git a/x/ratelimit/module.go b/x/ratelimit/module.go new file mode 100644 index 0000000000..6f40811627 --- /dev/null +++ b/x/ratelimit/module.go @@ -0,0 +1,173 @@ +package ratelimit + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/gorilla/mux" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/client/cli" + "github.com/Stride-Labs/stride/v5/x/ratelimit/keeper" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface for the capability module. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the capability module's name. 
+func (AppModuleBasic) Name() string { + return types.ModuleName +} + +func (AppModuleBasic) RegisterCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers the module's interface types +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns the capability module's default genesis state. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis performs genesis state validation for the capability module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterRESTRoutes registers the capability module's REST service handlers. +func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) { +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil { + panic(err) + } +} + +// GetTxCmd returns the capability module's root tx command. +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd returns the capability module's root query command. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface for the capability module. +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + } +} + +// Name returns the capability module's name. +func (am AppModule) Name() string { + return am.AppModuleBasic.Name() +} + +// Route returns the capability module's message routing key. +func (am AppModule) Route() sdk.Route { + return sdk.NewRoute(types.RouterKey, NewMessageHandler(am.keeper)) +} + +// QuerierRoute returns the capability module's query routing key. +func (AppModule) QuerierRoute() string { return types.QuerierRoute } + +// LegacyQuerierHandler returns the capability module's Querier. +func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a GRPC query service to respond to the +// module-specific GRPC queries. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + // types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) +} + +// RegisterInvariants registers the capability module's invariants. +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the capability module's genesis initialization It returns +// no validator updates. 
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the capability module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion implements ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock executes all ABCI BeginBlock logic respective to the capability module. +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock executes all ABCI EndBlock logic respective to the capability module. It +// returns no validator updates. +func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} diff --git a/x/ratelimit/types/codec.go b/x/ratelimit/types/codec.go new file mode 100644 index 0000000000..dc5103217e --- /dev/null +++ b/x/ratelimit/types/codec.go @@ -0,0 +1,23 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +func RegisterCodec(cdc *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*govtypes.Content)(nil), + &AddRateLimitProposal{}, + &UpdateRateLimitProposal{}, + &RemoveRateLimitProposal{}, + &ResetRateLimitProposal{}, + ) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/ratelimit/types/errors.go b/x/ratelimit/types/errors.go new file mode 100644 index 0000000000..3ec52ce3a0 --- /dev/null +++ b/x/ratelimit/types/errors.go @@ -0,0 +1,24 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +// x/ratelimit module sentinel errors +var ( + ErrRateLimitAlreadyExists = errorsmod.Register(ModuleName, 1, + "ratelimit key duplicated") + ErrRateLimitNotFound = errorsmod.Register(ModuleName, 2, + "rate limit not found") + ErrZeroChannelValue = errorsmod.Register(ModuleName, 3, + "channel value is zero") + ErrQuotaExceeded = errorsmod.Register(ModuleName, 4, + "quota exceeded") + ErrInvalidClientState = errorsmod.Register(ModuleName, 5, + "unable to determine client state from channelId") + ErrChannelNotFound = errorsmod.Register(ModuleName, 6, + "channel does not exist") + ErrDenomIsBlacklisted = errorsmod.Register(ModuleName, 7, + "denom is blacklisted", + ) +) diff --git a/x/ratelimit/types/events.go b/x/ratelimit/types/events.go new file mode 100644 index 0000000000..d99c0cec98 --- /dev/null +++ b/x/ratelimit/types/events.go @@ -0,0 +1,16 @@ +package types + +var ( + EventTransferDenied = "transfer_denied" + + EventRateLimitExceeded = "rate_limit_exceeded" + EventBlacklistedDenom = "blacklisted_denom" + + AttributeKeyReason = "reason" + AttributeKeyModule = "module" + AttributeKeyAction = "action" + AttributeKeyDenom = "denom" + AttributeKeyChannel = "channel" + AttributeKeyAmount = "amount" + AttributeKeyError = "error" +) diff --git a/x/ratelimit/types/expected_keepers.go b/x/ratelimit/types/expected_keepers.go new file mode 100644 index 0000000000..4e71ff7567 --- 
/dev/null +++ b/x/ratelimit/types/expected_keepers.go @@ -0,0 +1,29 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + ibcexported "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +// BankKeeper defines the banking contract that must be fulfilled when +// creating a x/ratelimit keeper. +type BankKeeper interface { + GetSupply(ctx sdk.Context, denom string) sdk.Coin +} + +// ChannelKeeper defines the channel contract that must be fulfilled when +// creating a x/ratelimit keeper. +type ChannelKeeper interface { + GetChannel(ctx sdk.Context, portID string, channelID string) (channeltypes.Channel, bool) + GetChannelClientState(ctx sdk.Context, portID string, channelID string) (string, exported.ClientState, error) +} + +// ICS4Wrapper defines the expected ICS4Wrapper for middleware +type ICS4Wrapper interface { + WriteAcknowledgement(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, acknowledgement ibcexported.Acknowledgement) error + SendPacket(ctx sdk.Context, channelCap *capabilitytypes.Capability, packet ibcexported.PacketI) error + GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) +} diff --git a/x/ratelimit/types/flow.go b/x/ratelimit/types/flow.go new file mode 100644 index 0000000000..db68fe8cb8 --- /dev/null +++ b/x/ratelimit/types/flow.go @@ -0,0 +1,47 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" +) + +// Initializes a new flow from the channel value +func NewFlow(channelValue sdkmath.Int) Flow { + flow := Flow{ + ChannelValue: channelValue, + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + } + + return flow +} + +// Adds an amount to the rate limit's flow after an incoming packet was received +// Returns an error if the new inflow will cause the rate limit to exceed its quota +func (f *Flow) AddInflow(amount sdkmath.Int, quota Quota) error { + netInflow := f.Inflow.Sub(f.Outflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_RECV, netInflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, + "Inflow exceeds quota - Net Inflow: %v, Channel Value: %v, Threshold: %v%%", + netInflow, f.ChannelValue, quota.MaxPercentRecv) + } + + f.Inflow = f.Inflow.Add(amount) + return nil +} + +// Adds an amount to the rate limit's flow after a packet was sent +// Returns an error if the new outflow will cause the rate limit to exceed its quota +func (f *Flow) AddOutflow(amount sdkmath.Int, quota Quota) error { + netOutflow := f.Outflow.Sub(f.Inflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_SEND, netOutflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, + "Outflow exceeds quota - Net Outflow: %v, Channel Value: %v, Threshold: %v%%", + netOutflow, f.ChannelValue, quota.MaxPercentSend) + } + + f.Outflow = f.Outflow.Add(amount) + return nil +} diff --git a/x/ratelimit/types/flow_test.go b/x/ratelimit/types/flow_test.go new file mode 100644 index 0000000000..f8639942e4 --- /dev/null +++ b/x/ratelimit/types/flow_test.go @@ -0,0 +1,228 @@ +package types_test + +import ( + "testing" + + sdkmath "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func TestAddInflow(t *testing.T) { + totalValue := sdkmath.NewInt(100) + quota := types.Quota{ + MaxPercentRecv: 
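
The Flow type above nets inflows against outflows before applying the quota, which is what lets the bidirectional keeper test cases earlier in this change move large gross volumes without tripping the limit. A minimal usage sketch, with invented numbers (a channel value of 1000 and 10% quotas) and assuming the `CheckExceedsQuota` helper, which is not part of this hunk, rejects net flows above `maxPercent * channelValue / 100`:

```go
package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"

	"github.com/Stride-Labs/stride/v5/x/ratelimit/types"
)

func main() {
	// Hypothetical numbers: channel value 1000 with 10% send/recv quotas,
	// i.e. at most a net 100 in either direction per window.
	flow := types.NewFlow(sdkmath.NewInt(1000))
	quota := types.Quota{
		MaxPercentSend: sdkmath.NewInt(10),
		MaxPercentRecv: sdkmath.NewInt(10),
		DurationHours:  24,
	}

	fmt.Println(flow.AddInflow(sdkmath.NewInt(80), quota))  // <nil>: net inflow 80 <= 100
	fmt.Println(flow.AddOutflow(sdkmath.NewInt(50), quota)) // <nil>: nets against the inflow

	// Another 80 in would push the net inflow to 80 - 50 + 80 = 110 > 100,
	// so this returns ErrQuotaExceeded ("Inflow exceeds quota ...").
	fmt.Println(flow.AddInflow(sdkmath.NewInt(80), quota) != nil) // true
}
```
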
sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(10), + DurationHours: uint64(1), + } + + tests := []struct { + name string + flow types.Flow + expectedFlow types.Flow + amount sdkmath.Int + succeeds bool + }{ + { + name: "AddInflow__Successful__Zero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(5), + expectedFlow: types.Flow{ + Inflow: sdkmath.NewInt(5), + Outflow: sdkmath.ZeroInt(), + ChannelValue: totalValue, + }, + succeeds: true, + }, + { + name: "AddInflow__Successful__Nonzero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(100), + Outflow: sdkmath.NewInt(100), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(5), + expectedFlow: types.Flow{ + Inflow: sdkmath.NewInt(105), + Outflow: sdkmath.NewInt(100), + ChannelValue: totalValue, + }, + succeeds: true, + }, + { + name: "AddInflow__Failure__Zero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(15), + succeeds: false, + }, + { + name: "AddInflow__Failure__Nonzero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(100), + Outflow: sdkmath.NewInt(100), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(15), + succeeds: false, + }, + { + name: "AddInflow__Successful__Large amount but net outflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(1), + Outflow: sdkmath.NewInt(10), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(15), + expectedFlow: types.Flow{ + Inflow: sdkmath.NewInt(16), + Outflow: sdkmath.NewInt(10), + ChannelValue: totalValue, + }, + succeeds: true, + }, + { + name: "AddInflow__Failure__Small amount but net inflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(10), + Outflow: sdkmath.NewInt(1), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(5), + succeeds: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + initialFlow := test.flow + err := test.flow.AddInflow(test.amount, quota) + actualFlow := test.flow + + if test.succeeds { + require.NoError(t, err) + require.Equal(t, test.expectedFlow, actualFlow) + } else { + require.ErrorContains(t, err, "Inflow exceeds quota", "test: %v", test.name) + require.Equal(t, initialFlow, actualFlow) + } + }) + } +} + +func TestOutInflow(t *testing.T) { + totalValue := sdkmath.NewInt(100) + quota := types.Quota{ + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(10), + DurationHours: uint64(1), + } + + tests := []struct { + name string + flow types.Flow + expectedFlow types.Flow + amount sdkmath.Int + succeeds bool + }{ + { + name: "AddOutflow__Successful__Zero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(5), + expectedFlow: types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.NewInt(5), + ChannelValue: totalValue, + }, + succeeds: true, + }, + { + name: "AddOutflow__Successful__Nonzero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(100), + Outflow: sdkmath.NewInt(100), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(5), + expectedFlow: types.Flow{ + Inflow: sdkmath.NewInt(100), + Outflow: sdkmath.NewInt(105), + ChannelValue: totalValue, + }, + succeeds: true, + }, + { + name: "AddOutflow__Failure__Zero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: 
sdkmath.ZeroInt(), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(15), + succeeds: false, + }, + { + name: "AddOutflow__Failure__Nonzero inflow and outflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(100), + Outflow: sdkmath.NewInt(100), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(15), + succeeds: false, + }, + { + name: "AddOutflow__Succeesful__Large amount but net inflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(10), + Outflow: sdkmath.NewInt(1), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(15), + expectedFlow: types.Flow{ + Inflow: sdkmath.NewInt(10), + Outflow: sdkmath.NewInt(16), + ChannelValue: totalValue, + }, + succeeds: true, + }, + { + name: "AddOutflow__Failure__Small amount but net outflow", + flow: types.Flow{ + Inflow: sdkmath.NewInt(1), + Outflow: sdkmath.NewInt(10), + ChannelValue: totalValue, + }, + amount: sdkmath.NewInt(5), + succeeds: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + initialFlow := test.flow + err := test.flow.AddOutflow(test.amount, quota) + actualFlow := test.flow + + if test.succeeds { + require.NoError(t, err) + require.Equal(t, test.expectedFlow, actualFlow) + } else { + require.ErrorContains(t, err, "Outflow exceeds quota", "test: %v", test.name) + require.Equal(t, initialFlow, actualFlow) + } + }) + } +} diff --git a/x/ratelimit/types/genesis.go b/x/ratelimit/types/genesis.go new file mode 100644 index 0000000000..0ec003beca --- /dev/null +++ b/x/ratelimit/types/genesis.go @@ -0,0 +1,15 @@ +package types + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + RateLimits: []RateLimit{}, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + return gs.Params.Validate() +} diff --git a/x/ratelimit/types/genesis.pb.go b/x/ratelimit/types/genesis.pb.go new file mode 100644 index 0000000000..ffdd9d294f --- /dev/null +++ b/x/ratelimit/types/genesis.pb.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stride/ratelimit/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ratelimit module's genesis state. +type GenesisState struct { + // params defines all the parameters of the module. 
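
For orientation, the InitGenesis/ExportGenesis hooks in module.go above reduce to validating and marshaling this GenesisState with the helpers from genesis.go. A small sketch, assuming a plain ProtoCodec as a stand-in for the JSON codec the app supplies:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/codec"
	cdctypes "github.com/cosmos/cosmos-sdk/codec/types"

	"github.com/Stride-Labs/stride/v5/x/ratelimit/types"
)

func main() {
	// Stand-in codec for this sketch; the app supplies the real JSONCodec.
	cdc := codec.NewProtoCodec(cdctypes.NewInterfaceRegistry())

	genState := types.DefaultGenesis() // Params: DefaultParams(), RateLimits: []
	if err := genState.Validate(); err != nil {
		panic(err) // Validate currently only checks Params
	}

	// Same shape that ExportGenesis returns and InitGenesis consumes.
	fmt.Println(string(cdc.MustMarshalJSON(genState)))
}
```
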
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` + // list of rate limits + RateLimits []RateLimit `protobuf:"bytes,2,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits" yaml:"rate_limits"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_9e224b293959881c, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "stride.ratelimit.GenesisState") +} + +func init() { proto.RegisterFile("stride/ratelimit/genesis.proto", fileDescriptor_9e224b293959881c) } + +var fileDescriptor_9e224b293959881c = []byte{ + // 264 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x2e, 0x29, 0xca, + 0x4c, 0x49, 0xd5, 0x2f, 0x4a, 0x2c, 0x49, 0xcd, 0xc9, 0xcc, 0xcd, 0x2c, 0xd1, 0x4f, 0x4f, 0xcd, + 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x80, 0xc8, 0xeb, 0xc1, + 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x2c, + 0x86, 0x39, 0x05, 0x89, 0x45, 0x89, 0xb9, 0x50, 0x63, 0xa4, 0x14, 0x30, 0xa4, 0xe1, 0x2c, 0x88, + 0x0a, 0xa5, 0x8d, 0x8c, 0x5c, 0x3c, 0xee, 0x10, 0xab, 0x83, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0xdc, + 0xb9, 0xd8, 0x20, 0x46, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x49, 0xe8, 0xa1, 0x3b, 0x45, + 0x2f, 0x00, 0x2c, 0xef, 0x24, 0x7a, 0xe2, 0x9e, 0x3c, 0xc3, 0xa7, 0x7b, 0xf2, 0xbc, 0x95, 0x89, + 0xb9, 0x39, 0x56, 0x4a, 0x10, 0x5d, 0x4a, 0x41, 0x50, 0xed, 0x42, 0x11, 0x5c, 0xdc, 0x20, 0x2d, + 0xf1, 0x60, 0x3d, 0xc5, 0x12, 0x4c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0xd2, 0x98, 0xa6, 0x05, 0x25, + 0x96, 0xa4, 0xfa, 0x80, 0x58, 0x4e, 0x52, 0x50, 0x03, 0x85, 0x20, 0x06, 0x22, 0xe9, 0x56, 0x0a, + 0xe2, 0x2a, 0x82, 0x29, 0x2b, 0x76, 0xf2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, + 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, + 0x86, 0x28, 0xa3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0x60, 0xb0, + 0x45, 0xba, 0x3e, 0x89, 0x49, 0xc5, 0xfa, 0xd0, 0x60, 0x28, 0x33, 0xd1, 0xaf, 0x40, 0x0a, 0x8b, + 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, 0x70, 0x40, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x33, 0x61, 0x8e, 0x18, 0x93, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil 
{ + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := 
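
The `encodeVarintGenesis` / `sovGenesis` helpers above write and size standard protobuf base-128 varints (7 bits per byte, continuation bit set on every byte but the last). A quick illustration with the standard library, independent of this module:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x (%d bytes)\n", buf[:n], n) // ac 02 (2 bytes), matching sovGenesis(300) == 2
}
```
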
m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/gov.pb.go b/x/ratelimit/types/gov.pb.go new file mode 100644 index 0000000000..e5921a36dd --- /dev/null +++ b/x/ratelimit/types/gov.pb.go @@ -0,0 +1,1922 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stride/ratelimit/gov.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type AddRateLimitProposal struct { + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Denom string `protobuf:"bytes,3,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,6,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,7,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` + Deposit string `protobuf:"bytes,8,opt,name=deposit,proto3" json:"deposit,omitempty" yaml:"deposit"` +} + +func (m *AddRateLimitProposal) Reset() { *m = AddRateLimitProposal{} } +func (*AddRateLimitProposal) ProtoMessage() {} +func (*AddRateLimitProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_3ad7ef7cb59a1c37, []int{0} +} +func (m *AddRateLimitProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AddRateLimitProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AddRateLimitProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AddRateLimitProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddRateLimitProposal.Merge(m, src) +} +func (m *AddRateLimitProposal) XXX_Size() int { + return m.Size() +} +func (m *AddRateLimitProposal) XXX_DiscardUnknown() { + xxx_messageInfo_AddRateLimitProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_AddRateLimitProposal proto.InternalMessageInfo + +type UpdateRateLimitProposal struct { + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Denom string `protobuf:"bytes,3,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,6,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,7,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` + Deposit string `protobuf:"bytes,8,opt,name=deposit,proto3" json:"deposit,omitempty" yaml:"deposit"` +} + +func (m *UpdateRateLimitProposal) Reset() { *m = UpdateRateLimitProposal{} } +func (*UpdateRateLimitProposal) ProtoMessage() {} +func (*UpdateRateLimitProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_3ad7ef7cb59a1c37, []int{1} +} +func (m *UpdateRateLimitProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func 
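
The proposal types in this file are plain generated structs, so a governance proposal can be constructed directly in Go. A hypothetical example (denom, channel, and limits invented; the `ValidateBasic`/`ProposalRoute` methods that make these satisfy `govtypes.Content` are presumably in a hand-written gov.go that is not part of this hunk):

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/Stride-Labs/stride/v5/x/ratelimit/types"
)

func main() {
	// Hypothetical proposal: cap net ustrd flow on channel-0 at 10% of the
	// channel value in each direction over a 24-hour window.
	proposal := types.AddRateLimitProposal{
		Title:          "Rate limit ustrd on channel-0",
		Description:    "Cap net ustrd transfers at 10% of channel value per 24 hours",
		Denom:          "ustrd",
		ChannelId:      "channel-0",
		MaxPercentSend: sdk.NewInt(10),
		MaxPercentRecv: sdk.NewInt(10),
		DurationHours:  24,
	}
	fmt.Println(proposal.Equal(&proposal)) // Equal is generated further down in this file; prints true
}
```
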
(m *UpdateRateLimitProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateRateLimitProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateRateLimitProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRateLimitProposal.Merge(m, src) +} +func (m *UpdateRateLimitProposal) XXX_Size() int { + return m.Size() +} +func (m *UpdateRateLimitProposal) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRateLimitProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRateLimitProposal proto.InternalMessageInfo + +type RemoveRateLimitProposal struct { + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Denom string `protobuf:"bytes,3,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + Deposit string `protobuf:"bytes,5,opt,name=deposit,proto3" json:"deposit,omitempty" yaml:"deposit"` +} + +func (m *RemoveRateLimitProposal) Reset() { *m = RemoveRateLimitProposal{} } +func (*RemoveRateLimitProposal) ProtoMessage() {} +func (*RemoveRateLimitProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_3ad7ef7cb59a1c37, []int{2} +} +func (m *RemoveRateLimitProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveRateLimitProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveRateLimitProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveRateLimitProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveRateLimitProposal.Merge(m, src) +} +func (m *RemoveRateLimitProposal) XXX_Size() int { + return m.Size() +} +func (m *RemoveRateLimitProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveRateLimitProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveRateLimitProposal proto.InternalMessageInfo + +type ResetRateLimitProposal struct { + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Denom string `protobuf:"bytes,3,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + Deposit string `protobuf:"bytes,5,opt,name=deposit,proto3" json:"deposit,omitempty" yaml:"deposit"` +} + +func (m *ResetRateLimitProposal) Reset() { *m = ResetRateLimitProposal{} } +func (*ResetRateLimitProposal) ProtoMessage() {} +func (*ResetRateLimitProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_3ad7ef7cb59a1c37, []int{3} +} +func (m *ResetRateLimitProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResetRateLimitProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResetRateLimitProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResetRateLimitProposal) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResetRateLimitProposal.Merge(m, src) +} +func (m *ResetRateLimitProposal) XXX_Size() int { + return m.Size() +} +func (m *ResetRateLimitProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResetRateLimitProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetRateLimitProposal proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AddRateLimitProposal)(nil), "stride.ratelimit.AddRateLimitProposal") + proto.RegisterType((*UpdateRateLimitProposal)(nil), "stride.ratelimit.UpdateRateLimitProposal") + proto.RegisterType((*RemoveRateLimitProposal)(nil), "stride.ratelimit.RemoveRateLimitProposal") + proto.RegisterType((*ResetRateLimitProposal)(nil), "stride.ratelimit.ResetRateLimitProposal") +} + +func init() { proto.RegisterFile("stride/ratelimit/gov.proto", fileDescriptor_3ad7ef7cb59a1c37) } + +var fileDescriptor_3ad7ef7cb59a1c37 = []byte{ + // 436 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x54, 0x41, 0x6b, 0x13, 0x41, + 0x14, 0xde, 0x31, 0x4d, 0x6b, 0x47, 0x0d, 0x65, 0x08, 0x76, 0x28, 0xb8, 0x1b, 0x02, 0x4a, 0x0f, + 0x76, 0x17, 0xd4, 0x53, 0x6f, 0xf6, 0x64, 0x21, 0x87, 0x32, 0x45, 0x10, 0x2f, 0x61, 0xb2, 0xf3, + 0x48, 0x06, 0x77, 0x66, 0x96, 0x99, 0xc9, 0x92, 0xfe, 0x03, 0x8f, 0x1e, 0x45, 0x10, 0xf2, 0x57, + 0xf4, 0xd4, 0x63, 0x8f, 0xe2, 0x21, 0x48, 0x72, 0xf1, 0xec, 0x2f, 0x90, 0x9d, 0x4d, 0x25, 0xe0, + 0x49, 0x3c, 0xa8, 0xe0, 0x69, 0xf7, 0x7d, 0xdf, 0xf7, 0xde, 0xf0, 0xf1, 0x1e, 0x1f, 0x3e, 0x70, + 0xde, 0x4a, 0x01, 0x99, 0xe5, 0x1e, 0x0a, 0xa9, 0xa4, 0xcf, 0xc6, 0xa6, 0x4a, 0x4b, 0x6b, 0xbc, + 0x21, 0x7b, 0x0d, 0x97, 0xfe, 0xe0, 0x0e, 0xba, 0x63, 0x33, 0x36, 0x81, 0xcc, 0xea, 0xbf, 0x46, + 0xd7, 0x7f, 0xd7, 0xc2, 0xdd, 0xa7, 0x42, 0x30, 0xee, 0x61, 0x50, 0xcb, 0xce, 0xac, 0x29, 0x8d, + 0xe3, 0x05, 0xe9, 0xe2, 0xb6, 0x97, 0xbe, 0x00, 0x8a, 0x7a, 0xe8, 0x70, 0x97, 0x35, 0x05, 0xe9, + 0xe1, 0x5b, 0x02, 0x5c, 0x6e, 0x65, 0xe9, 0xa5, 0xd1, 0xf4, 0x46, 0xe0, 0x36, 0xa1, 0xba, 0x4f, + 0x80, 0x36, 0x8a, 0xb6, 0x9a, 0xbe, 0x50, 0x90, 0x7b, 0x18, 0xe7, 0x13, 0xae, 0x35, 0x14, 0x43, + 0x29, 0xe8, 0x56, 0xa0, 0x76, 0xd7, 0xc8, 0xa9, 0x20, 0x2f, 0xf0, 0x9e, 0xe2, 0xb3, 0x61, 0x09, + 0x36, 0x07, 0xed, 0x87, 0x0e, 0xb4, 0xa0, 0xed, 0x5a, 0x74, 0x92, 0x5e, 0x2e, 0x92, 0xe8, 0xf3, + 0x22, 0x79, 0x30, 0x96, 0x7e, 0x32, 0x1d, 0xa5, 0xb9, 0x51, 0x59, 0x6e, 0x9c, 0x32, 0x6e, 0xfd, + 0x39, 0x72, 0xe2, 0x55, 0xe6, 0x2f, 0x4a, 0x70, 0xe9, 0xa9, 0xf6, 0xac, 0xa3, 0xf8, 0xec, 0xac, + 0x19, 0x73, 0x0e, 0xfa, 0xa7, 0xc9, 0x16, 0xf2, 0x8a, 0x6e, 0xff, 0xee, 0x64, 0x06, 0x79, 0x45, + 0xee, 0xe3, 0x8e, 0x98, 0x5a, 0x5e, 0x9b, 0x1e, 0x4e, 0xcc, 0xd4, 0x3a, 0xba, 0xd3, 0x43, 0x87, + 0x5b, 0xec, 0xce, 0x35, 0xfa, 0xac, 0x06, 0xc9, 0x43, 0xbc, 0x23, 0xa0, 0x34, 0x4e, 0x7a, 0x7a, + 0x33, 0xbc, 0x4b, 0xbe, 0x2d, 0x92, 0xce, 0x05, 0x57, 0xc5, 0x71, 0x7f, 0x4d, 0xf4, 0xd9, 0xb5, + 0xe4, 0xf8, 0xf6, 0xeb, 0x79, 0x12, 0xbd, 0x9d, 0x27, 0xd1, 0xd7, 0x79, 0x82, 0xfa, 0xef, 0x5b, + 0x78, 0xff, 0x79, 0x29, 0xb8, 0x87, 0xff, 0xfb, 0xf9, 0x1b, 0xf7, 0xf3, 0x11, 0xe1, 0x7d, 0x06, + 0xca, 0x54, 0x7f, 0x7a, 0x3f, 0x1b, 0x26, 0xda, 0xbf, 0x6a, 0xe2, 0x03, 0xc2, 0x77, 0x19, 0x38, + 0xf0, 0xff, 0xae, 0x87, 0x93, 0xc1, 0xe5, 0x32, 0x46, 0x57, 0xcb, 0x18, 0x7d, 0x59, 0xc6, 0xe8, + 0xcd, 0x2a, 0x8e, 0xae, 0x56, 0x71, 0xf4, 0x69, 0x15, 0x47, 0x2f, 0x1f, 0x6d, 0x5c, 0xcf, 0x79, + 0x88, 0xc4, 0xa3, 0x01, 0x1f, 0xb9, 0x6c, 0x1d, 0x9d, 0xd5, 0x93, 0x6c, 0xb6, 0x91, 0x9f, 0xe1, + 0x9a, 0x46, 0xdb, 0x21, 0x1a, 0x1f, 0x7f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x53, 0x21, 
0xfa, 0xd1, + 0x60, 0x05, 0x00, 0x00, +} + +func (this *AddRateLimitProposal) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AddRateLimitProposal) + if !ok { + that2, ok := that.(AddRateLimitProposal) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Title != that1.Title { + return false + } + if this.Description != that1.Description { + return false + } + if this.Denom != that1.Denom { + return false + } + if this.ChannelId != that1.ChannelId { + return false + } + if !this.MaxPercentSend.Equal(that1.MaxPercentSend) { + return false + } + if !this.MaxPercentRecv.Equal(that1.MaxPercentRecv) { + return false + } + if this.DurationHours != that1.DurationHours { + return false + } + if this.Deposit != that1.Deposit { + return false + } + return true +} +func (this *UpdateRateLimitProposal) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UpdateRateLimitProposal) + if !ok { + that2, ok := that.(UpdateRateLimitProposal) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Title != that1.Title { + return false + } + if this.Description != that1.Description { + return false + } + if this.Denom != that1.Denom { + return false + } + if this.ChannelId != that1.ChannelId { + return false + } + if !this.MaxPercentSend.Equal(that1.MaxPercentSend) { + return false + } + if !this.MaxPercentRecv.Equal(that1.MaxPercentRecv) { + return false + } + if this.DurationHours != that1.DurationHours { + return false + } + if this.Deposit != that1.Deposit { + return false + } + return true +} +func (this *RemoveRateLimitProposal) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RemoveRateLimitProposal) + if !ok { + that2, ok := that.(RemoveRateLimitProposal) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Title != that1.Title { + return false + } + if this.Description != that1.Description { + return false + } + if this.Denom != that1.Denom { + return false + } + if this.ChannelId != that1.ChannelId { + return false + } + if this.Deposit != that1.Deposit { + return false + } + return true +} +func (this *ResetRateLimitProposal) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResetRateLimitProposal) + if !ok { + that2, ok := that.(ResetRateLimitProposal) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Title != that1.Title { + return false + } + if this.Description != that1.Description { + return false + } + if this.Denom != that1.Denom { + return false + } + if this.ChannelId != that1.ChannelId { + return false + } + if this.Deposit != that1.Deposit { + return false + } + return true +} +func (m *AddRateLimitProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddRateLimitProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddRateLimitProposal) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Deposit) > 0 { + i -= len(m.Deposit) + copy(dAtA[i:], m.Deposit) + i = encodeVarintGov(dAtA, i, uint64(len(m.Deposit))) + i-- + dAtA[i] = 0x42 + } + if m.DurationHours != 0 { + i = encodeVarintGov(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x38 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGov(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGov(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintGov(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x22 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintGov(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGov(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Title) > 0 { + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGov(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateRateLimitProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateRateLimitProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateRateLimitProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Deposit) > 0 { + i -= len(m.Deposit) + copy(dAtA[i:], m.Deposit) + i = encodeVarintGov(dAtA, i, uint64(len(m.Deposit))) + i-- + dAtA[i] = 0x42 + } + if m.DurationHours != 0 { + i = encodeVarintGov(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x38 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGov(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGov(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintGov(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x22 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintGov(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGov(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Title) > 0 { + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGov(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveRateLimitProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
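
A note that makes the generated marshalers above easier to scan: fields are written back to front, and each one is preceded by a tag byte equal to `(field_number << 3) | wire_type`, which is where the literal `0x0a` through `0x42` bytes come from. For example:

```go
package main

import "fmt"

func main() {
	// Field 5 of AddRateLimitProposal (max_percent_send), wire type 2 (length-delimited):
	fmt.Printf("%#x\n", 5<<3|2) // 0x2a, the byte written just before MaxPercentSend above
}
```
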
(m *RemoveRateLimitProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveRateLimitProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Deposit) > 0 { + i -= len(m.Deposit) + copy(dAtA[i:], m.Deposit) + i = encodeVarintGov(dAtA, i, uint64(len(m.Deposit))) + i-- + dAtA[i] = 0x2a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintGov(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x22 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintGov(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGov(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Title) > 0 { + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGov(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResetRateLimitProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetRateLimitProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetRateLimitProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Deposit) > 0 { + i -= len(m.Deposit) + copy(dAtA[i:], m.Deposit) + i = encodeVarintGov(dAtA, i, uint64(len(m.Deposit))) + i-- + dAtA[i] = 0x2a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintGov(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x22 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintGov(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGov(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Title) > 0 { + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintGov(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGov(dAtA []byte, offset int, v uint64) int { + offset -= sovGov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AddRateLimitProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Title) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovGov(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovGov(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovGov(uint64(m.DurationHours)) + } + l = len(m.Deposit) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + return n +} + +func (m *UpdateRateLimitProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Title) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + 
sovGov(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovGov(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovGov(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovGov(uint64(m.DurationHours)) + } + l = len(m.Deposit) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + return n +} + +func (m *RemoveRateLimitProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Title) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Deposit) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + return n +} + +func (m *ResetRateLimitProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Title) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + l = len(m.Deposit) + if l > 0 { + n += 1 + l + sovGov(uint64(l)) + } + return n +} + +func sovGov(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGov(x uint64) (n int) { + return sovGov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AddRateLimitProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddRateLimitProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddRateLimitProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deposit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGov(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGov + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateRateLimitProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRateLimitProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRateLimitProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deposit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGov(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGov + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveRateLimitProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveRateLimitProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveRateLimitProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deposit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGov(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGov + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetRateLimitProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetRateLimitProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetRateLimitProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGov + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGov + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGov + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deposit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGov(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGov + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGov(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGov + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGov + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGov + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGov + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGov + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGov + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGov = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGov = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGov = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/gov_add_rate_limit.go b/x/ratelimit/types/gov_add_rate_limit.go new file mode 100644 index 0000000000..f8cf8a64ff --- /dev/null +++ b/x/ratelimit/types/gov_add_rate_limit.go @@ -0,0 +1,95 @@ +package types + +import ( + "fmt" + + "regexp" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + sdkerrors 
"github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +const ( + ProposalTypeAddRateLimit = "AddRateLimit" +) + +func init() { + govtypes.RegisterProposalType(ProposalTypeAddRateLimit) +} + +var ( + _ govtypes.Content = &AddRateLimitProposal{} +) + +func NewAddRateLimitProposal(title, description, denom, channelId string, maxPercentSend sdkmath.Int, maxPercentRecv sdkmath.Int, durationHours uint64) govtypes.Content { + return &AddRateLimitProposal{ + Title: title, + Description: description, + Denom: denom, + ChannelId: channelId, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +func (p *AddRateLimitProposal) GetTitle() string { return p.Title } + +func (p *AddRateLimitProposal) GetDescription() string { return p.Description } + +func (p *AddRateLimitProposal) ProposalRoute() string { return RouterKey } + +func (p *AddRateLimitProposal) ProposalType() string { + return ProposalTypeAddRateLimit +} + +func (p *AddRateLimitProposal) ValidateBasic() error { + err := govtypes.ValidateAbstract(p) + if err != nil { + return err + } + + if p.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", p.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, p.ChannelId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", p.ChannelId) + } + if !matched { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid channel-id (%s), must be of the format 'channel-{N}'", p.ChannelId) + } + + if p.MaxPercentSend.GT(sdkmath.NewInt(100)) || p.MaxPercentSend.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", p.MaxPercentSend) + } + + if p.MaxPercentRecv.GT(sdkmath.NewInt(100)) || p.MaxPercentRecv.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", p.MaxPercentRecv) + } + + if p.MaxPercentRecv.IsZero() && p.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "either the max send or max receive threshold must be greater than 0") + } + + if p.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +func (p AddRateLimitProposal) String() string { + return fmt.Sprintf(`Add Rate Limit Proposal: + Title: %s + Description: %s + Denom: %s + ChannelId: %s + MaxPercentSend: %v + MaxPercentRecv: %v + DurationHours: %d + `, p.Title, p.Description, p.Denom, p.ChannelId, p.MaxPercentSend, p.MaxPercentRecv, p.DurationHours) +} diff --git a/x/ratelimit/types/gov_add_rate_limit_test.go b/x/ratelimit/types/gov_add_rate_limit_test.go new file mode 100644 index 0000000000..f5d175d691 --- /dev/null +++ b/x/ratelimit/types/gov_add_rate_limit_test.go @@ -0,0 +1,187 @@ +package types_test + +import ( + "testing" + + sdkmath "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func TestGovAddRateLimit(t *testing.T) { + apptesting.SetupConfig() + + validTitle := "AddRateLimit" + validDescription := "Adding a rate limit" + validDenom := "denom" + validChannelId := "channel-0" + validMaxPercentSend := sdkmath.NewInt(10) + validMaxPercentRecv := sdkmath.NewInt(10) + validDurationHours := 
uint64(60) + + tests := []struct { + name string + proposal types.AddRateLimitProposal + err string + }{ + { + name: "successful proposal", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + }, + { + name: "invalid title", + proposal: types.AddRateLimitProposal{ + Title: "", + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "title cannot be blank", + }, + { + name: "invalid description", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: "", + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "description cannot be blank", + }, + { + name: "invalid denom", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: "", + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "invalid denom", + }, + { + name: "invalid channel-id", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: "channel-", + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "invalid channel-id", + }, + { + name: "invalid send percent (lt 0)", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: sdkmath.NewInt(-1), + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid send percent (gt 100)", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: sdkmath.NewInt(101), + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid receive percent (lt 0)", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: sdkmath.NewInt(-1), + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid receive percent (gt 100)", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: sdkmath.NewInt(101), + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid send and receive percent", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: sdkmath.ZeroInt(), + MaxPercentRecv: sdkmath.ZeroInt(), + DurationHours: validDurationHours, + }, + err: "either the max send or max receive threshold must be greater than 0", + }, + { + name: "invalid 
duration", + proposal: types.AddRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: 0, + }, + err: "duration can not be zero", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.err == "" { + require.NoError(t, test.proposal.ValidateBasic(), "test: %v", test.name) + require.Equal(t, test.proposal.Denom, validDenom, "denom") + require.Equal(t, test.proposal.ChannelId, validChannelId, "channel-id") + require.Equal(t, test.proposal.MaxPercentSend, validMaxPercentSend, "maxPercentSend") + require.Equal(t, test.proposal.MaxPercentRecv, validMaxPercentRecv, "maxPercentRecv") + require.Equal(t, test.proposal.DurationHours, validDurationHours, "durationHours") + } else { + require.ErrorContains(t, test.proposal.ValidateBasic(), test.err, "test: %v", test.name) + } + }) + } +} diff --git a/x/ratelimit/types/gov_remove_rate_limit.go b/x/ratelimit/types/gov_remove_rate_limit.go new file mode 100644 index 0000000000..d16074c74a --- /dev/null +++ b/x/ratelimit/types/gov_remove_rate_limit.go @@ -0,0 +1,72 @@ +package types + +import ( + "fmt" + + "regexp" + + errorsmod "cosmossdk.io/errors" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +const ( + ProposalTypeRemoveRateLimit = "RemoveRateLimit" +) + +func init() { + govtypes.RegisterProposalType(ProposalTypeRemoveRateLimit) +} + +var ( + _ govtypes.Content = &RemoveRateLimitProposal{} +) + +func NewRemoveRateLimitProposal(title, description, denom, channelId string) govtypes.Content { + return &RemoveRateLimitProposal{ + Title: title, + Description: description, + Denom: denom, + ChannelId: channelId, + } +} + +func (p *RemoveRateLimitProposal) GetTitle() string { return p.Title } + +func (p *RemoveRateLimitProposal) GetDescription() string { return p.Description } + +func (p *RemoveRateLimitProposal) ProposalRoute() string { return RouterKey } + +func (p *RemoveRateLimitProposal) ProposalType() string { + return ProposalTypeRemoveRateLimit +} + +func (p *RemoveRateLimitProposal) ValidateBasic() error { + err := govtypes.ValidateAbstract(p) + if err != nil { + return err + } + + if p.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", p.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, p.ChannelId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", p.ChannelId) + } + if !matched { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid channel-id (%s), must be of the format 'channel-{N}'", p.ChannelId) + } + + return nil +} + +func (p RemoveRateLimitProposal) String() string { + return fmt.Sprintf(`Remove Rate Limit Proposal: + Title: %s + Description: %s + Denom: %s + ChannelId: %s + `, p.Title, p.Description, p.Denom, p.ChannelId) +} diff --git a/x/ratelimit/types/gov_remove_rate_limit_test.go b/x/ratelimit/types/gov_remove_rate_limit_test.go new file mode 100644 index 0000000000..4497d2dc34 --- /dev/null +++ b/x/ratelimit/types/gov_remove_rate_limit_test.go @@ -0,0 +1,87 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func TestGovRemoveRateLimit(t *testing.T) { + apptesting.SetupConfig() + + validTitle 
:= "RemoveRateLimit" + validDescription := "Removing a rate limit" + validDenom := "denom" + validChannelId := "channel-0" + + tests := []struct { + name string + proposal types.RemoveRateLimitProposal + err string + }{ + { + name: "successful message", + proposal: types.RemoveRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + }, + }, + { + name: "invalid title", + proposal: types.RemoveRateLimitProposal{ + Title: "", + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + }, + err: "title cannot be blank", + }, + { + name: "invalid description", + proposal: types.RemoveRateLimitProposal{ + Title: validTitle, + Description: "", + Denom: validDenom, + ChannelId: validChannelId, + }, + err: "description cannot be blank", + }, + { + name: "invalid denom", + proposal: types.RemoveRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: "", + ChannelId: validChannelId, + }, + err: "invalid denom", + }, + { + name: "invalid channel-id", + proposal: types.RemoveRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: "chan-1", + }, + err: "invalid channel-id", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.err == "" { + require.NoError(t, test.proposal.ValidateBasic(), "test: %v", test.name) + require.Equal(t, test.proposal.Denom, validDenom, "denom") + require.Equal(t, test.proposal.ChannelId, validChannelId, "channelId") + } else { + require.ErrorContains(t, test.proposal.ValidateBasic(), test.err, "test: %v", test.name) + } + }) + } +} diff --git a/x/ratelimit/types/gov_reset_rate_limit.go b/x/ratelimit/types/gov_reset_rate_limit.go new file mode 100644 index 0000000000..91717751d6 --- /dev/null +++ b/x/ratelimit/types/gov_reset_rate_limit.go @@ -0,0 +1,72 @@ +package types + +import ( + "fmt" + + "regexp" + + errorsmod "cosmossdk.io/errors" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +const ( + ProposalTypeResetRateLimit = "ResetRateLimit" +) + +func init() { + govtypes.RegisterProposalType(ProposalTypeResetRateLimit) +} + +var ( + _ govtypes.Content = &ResetRateLimitProposal{} +) + +func NewResetRateLimitProposal(title, description, denom, channelId string) govtypes.Content { + return &ResetRateLimitProposal{ + Title: title, + Description: description, + Denom: denom, + ChannelId: channelId, + } +} + +func (p *ResetRateLimitProposal) GetTitle() string { return p.Title } + +func (p *ResetRateLimitProposal) GetDescription() string { return p.Description } + +func (p *ResetRateLimitProposal) ProposalRoute() string { return RouterKey } + +func (p *ResetRateLimitProposal) ProposalType() string { + return ProposalTypeResetRateLimit +} + +func (p *ResetRateLimitProposal) ValidateBasic() error { + err := govtypes.ValidateAbstract(p) + if err != nil { + return err + } + + if p.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", p.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, p.ChannelId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", p.ChannelId) + } + if !matched { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid channel-id (%s), must be of the format 'channel-{N}'", p.ChannelId) + } + + return nil +} + +func (p ResetRateLimitProposal) String() string { + return 
fmt.Sprintf(`Reset Rate Limit Proposal: + Title: %s + Description: %s + Denom: %s + ChannelId: %s + `, p.Title, p.Description, p.Denom, p.ChannelId) +} diff --git a/x/ratelimit/types/gov_reset_rate_limit_test.go b/x/ratelimit/types/gov_reset_rate_limit_test.go new file mode 100644 index 0000000000..b0216115a2 --- /dev/null +++ b/x/ratelimit/types/gov_reset_rate_limit_test.go @@ -0,0 +1,87 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func TestGovResetRateLimit(t *testing.T) { + apptesting.SetupConfig() + + validTitle := "ResetRateLimit" + validDescription := "Resetting a rate limit" + validDenom := "denom" + validChannelId := "channel-0" + + tests := []struct { + name string + proposal types.ResetRateLimitProposal + err string + }{ + { + name: "successful proposal", + proposal: types.ResetRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + }, + }, + { + name: "invalid title", + proposal: types.ResetRateLimitProposal{ + Title: "", + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + }, + err: "title cannot be blank", + }, + { + name: "invalid description", + proposal: types.ResetRateLimitProposal{ + Title: validTitle, + Description: "", + Denom: validDenom, + ChannelId: validChannelId, + }, + err: "description cannot be blank", + }, + { + name: "invalid denom", + proposal: types.ResetRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: "", + ChannelId: validChannelId, + }, + err: "invalid denom", + }, + { + name: "invalid channel-id", + proposal: types.ResetRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: "chan-1", + }, + err: "invalid channel-id", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.err == "" { + require.NoError(t, test.proposal.ValidateBasic(), "test: %v", test.name) + require.Equal(t, test.proposal.Denom, validDenom, "denom") + require.Equal(t, test.proposal.ChannelId, validChannelId, "channelId") + } else { + require.ErrorContains(t, test.proposal.ValidateBasic(), test.err, "test: %v", test.name) + } + }) + } +} diff --git a/x/ratelimit/types/gov_update_rate_limit.go b/x/ratelimit/types/gov_update_rate_limit.go new file mode 100644 index 0000000000..0ab9836f81 --- /dev/null +++ b/x/ratelimit/types/gov_update_rate_limit.go @@ -0,0 +1,95 @@ +package types + +import ( + "fmt" + + "regexp" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +const ( + ProposalTypeUpdateRateLimit = "UpdateRateLimit" +) + +func init() { + govtypes.RegisterProposalType(ProposalTypeUpdateRateLimit) +} + +var ( + _ govtypes.Content = &UpdateRateLimitProposal{} +) + +func NewUpdateRateLimitProposal(title, description, denom, channelId string, maxPercentSend sdkmath.Int, maxPercentRecv sdkmath.Int, durationHours uint64) govtypes.Content { + return &UpdateRateLimitProposal{ + Title: title, + Description: description, + Denom: denom, + ChannelId: channelId, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +func (p *UpdateRateLimitProposal) GetTitle() string { return p.Title } + +func (p *UpdateRateLimitProposal) 
GetDescription() string { return p.Description } + +func (p *UpdateRateLimitProposal) ProposalRoute() string { return RouterKey } + +func (p *UpdateRateLimitProposal) ProposalType() string { + return ProposalTypeUpdateRateLimit +} + +func (p *UpdateRateLimitProposal) ValidateBasic() error { + err := govtypes.ValidateAbstract(p) + if err != nil { + return err + } + + if p.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", p.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, p.ChannelId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", p.ChannelId) + } + if !matched { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid channel-id (%s), must be of the format 'channel-{N}'", p.ChannelId) + } + + if p.MaxPercentSend.GT(sdkmath.NewInt(100)) || p.MaxPercentSend.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", p.MaxPercentSend) + } + + if p.MaxPercentRecv.GT(sdkmath.NewInt(100)) || p.MaxPercentRecv.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", p.MaxPercentRecv) + } + + if p.MaxPercentRecv.IsZero() && p.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "either the max send or max receive threshold must be greater than 0") + } + + if p.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +func (p UpdateRateLimitProposal) String() string { + return fmt.Sprintf(`Update Rate Limit Proposal: + Title: %s + Description: %s + Denom: %s + ChannelId: %s + MaxPercentSend: %v + MaxPercentRecv: %v + DurationHours: %d + `, p.Title, p.Description, p.Denom, p.ChannelId, p.MaxPercentSend, p.MaxPercentRecv, p.DurationHours) +} diff --git a/x/ratelimit/types/gov_update_rate_limit_test.go b/x/ratelimit/types/gov_update_rate_limit_test.go new file mode 100644 index 0000000000..8aafc4c89f --- /dev/null +++ b/x/ratelimit/types/gov_update_rate_limit_test.go @@ -0,0 +1,187 @@ +package types_test + +import ( + "testing" + + sdkmath "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + "github.com/Stride-Labs/stride/v5/app/apptesting" + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func TestGovUpdateRateLimit(t *testing.T) { + apptesting.SetupConfig() + + validTitle := "UpdateRateLimit" + validDescription := "Updating a rate limit" + validDenom := "denom" + validChannelId := "channel-0" + validMaxPercentSend := sdkmath.NewInt(10) + validMaxPercentRecv := sdkmath.NewInt(10) + validDurationHours := uint64(60) + + tests := []struct { + name string + proposal types.UpdateRateLimitProposal + err string + }{ + { + name: "successful proposal", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + }, + { + name: "invalid title", + proposal: types.UpdateRateLimitProposal{ + Title: "", + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "title cannot be blank", + }, + { + name: "invalid description", 
+ proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: "", + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "description cannot be blank", + }, + { + name: "invalid denom", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: "", + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "invalid denom", + }, + { + name: "invalid channel-id", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: "channel-", + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "invalid channel-id", + }, + { + name: "invalid send percent (lt 0)", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: sdkmath.NewInt(-1), + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid send percent (gt 100)", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: sdkmath.NewInt(101), + MaxPercentRecv: validMaxPercentRecv, + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid receive percent (lt 0)", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: sdkmath.NewInt(-1), + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid receive percent (gt 100)", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: sdkmath.NewInt(101), + DurationHours: validDurationHours, + }, + err: "percent must be between 0 and 100", + }, + { + name: "invalid send and receive percent", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: sdkmath.ZeroInt(), + MaxPercentRecv: sdkmath.ZeroInt(), + DurationHours: validDurationHours, + }, + err: "either the max send or max receive threshold must be greater than 0", + }, + { + name: "invalid duration", + proposal: types.UpdateRateLimitProposal{ + Title: validTitle, + Description: validDescription, + Denom: validDenom, + ChannelId: validChannelId, + MaxPercentSend: validMaxPercentSend, + MaxPercentRecv: validMaxPercentRecv, + DurationHours: 0, + }, + err: "duration can not be zero", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.err == "" { + require.NoError(t, test.proposal.ValidateBasic(), "test: %v", test.name) + require.Equal(t, test.proposal.Denom, validDenom, "denom") + require.Equal(t, test.proposal.ChannelId, validChannelId, "channelId") + require.Equal(t, test.proposal.MaxPercentSend, validMaxPercentSend, "maxPercentSend") + require.Equal(t, test.proposal.MaxPercentRecv,
validMaxPercentRecv, "maxPercentRecv") + require.Equal(t, test.proposal.DurationHours, validDurationHours, "durationHours") + } else { + require.ErrorContains(t, test.proposal.ValidateBasic(), test.err, "test: %v", test.name) + } + }) + } +} diff --git a/x/ratelimit/types/keys.go b/x/ratelimit/types/keys.go new file mode 100644 index 0000000000..1a703ca941 --- /dev/null +++ b/x/ratelimit/types/keys.go @@ -0,0 +1,24 @@ +package types + +const ( + ModuleName = "ratelimit" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for the ratelimit module + RouterKey = ModuleName + + // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName +) + +func KeyPrefix(p string) []byte { + return []byte(p) +} + +var ( + PathKeyPrefix = KeyPrefix("path") + RateLimitKeyPrefix = KeyPrefix("rate-limit") + BlacklistKeyPrefix = KeyPrefix("blacklist") +) diff --git a/x/ratelimit/types/params.go b/x/ratelimit/types/params.go new file mode 100644 index 0000000000..4f3215e350 --- /dev/null +++ b/x/ratelimit/types/params.go @@ -0,0 +1,32 @@ +package types + +import ( + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +// ParamKeyTable returns the param key table for the ratelimit module +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new Params instance +func NewParams() Params { + return Params{} +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams() +} + +// ParamSetPairs returns the param set pairs +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{} +} + +// Validate validates the set of params +func (p Params) Validate() error { + return nil +} diff --git a/x/ratelimit/types/params.pb.go b/x/ratelimit/types/params.pb.go new file mode 100644 index 0000000000..79a367ac9f --- /dev/null +++ b/x/ratelimit/types/params.pb.go @@ -0,0 +1,263 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stride/ratelimit/params.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the ratelimit module's parameters.
+type Params struct { +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_7af4964ecd08f136, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Params)(nil), "stride.ratelimit.Params") +} + +func init() { proto.RegisterFile("stride/ratelimit/params.proto", fileDescriptor_7af4964ecd08f136) } + +var fileDescriptor_7af4964ecd08f136 = []byte{ + // 137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2d, 0x2e, 0x29, 0xca, + 0x4c, 0x49, 0xd5, 0x2f, 0x4a, 0x2c, 0x49, 0xcd, 0xc9, 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, 0x48, 0x2c, + 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x80, 0x48, 0xeb, 0xc1, 0xa5, + 0x95, 0x38, 0xb8, 0xd8, 0x02, 0xc0, 0x2a, 0x9c, 0x7c, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, + 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, + 0x58, 0x8e, 0x21, 0xca, 0x28, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x3f, + 0x18, 0x6c, 0x80, 0xae, 0x4f, 0x62, 0x52, 0xb1, 0x3e, 0xd4, 0xae, 0x32, 0x13, 0xfd, 0x0a, 0x24, + 0x0b, 0x4b, 0x2a, 0x0b, 0x52, 0x8b, 0x93, 0xd8, 0xc0, 0x16, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x47, 0x7e, 0x00, 0xb5, 0x91, 0x00, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/query.pb.go b/x/ratelimit/types/query.pb.go new file mode 100644 index 0000000000..999cd3b920 --- /dev/null +++ b/x/ratelimit/types/query.pb.go @@ -0,0 +1,1765 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stride/ratelimit/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryAllRateLimitsRequest struct { +} + +func (m *QueryAllRateLimitsRequest) Reset() { *m = QueryAllRateLimitsRequest{} } +func (m *QueryAllRateLimitsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsRequest) ProtoMessage() {} +func (*QueryAllRateLimitsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{0} +} +func (m *QueryAllRateLimitsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsRequest.Merge(m, src) +} +func (m *QueryAllRateLimitsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsRequest proto.InternalMessageInfo + +type QueryAllRateLimitsResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryAllRateLimitsResponse) Reset() { *m = QueryAllRateLimitsResponse{} } +func (m *QueryAllRateLimitsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsResponse) ProtoMessage() {} +func (*QueryAllRateLimitsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{1} +} +func (m *QueryAllRateLimitsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsResponse.Merge(m, src) +} +func (m *QueryAllRateLimitsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsResponse proto.InternalMessageInfo + +func (m *QueryAllRateLimitsResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryRateLimitRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryRateLimitRequest) Reset() { *m = QueryRateLimitRequest{} } +func (m *QueryRateLimitRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitRequest) ProtoMessage() {} +func (*QueryRateLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{2} +} +func (m *QueryRateLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_QueryRateLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitRequest.Merge(m, src) +} +func (m *QueryRateLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitRequest proto.InternalMessageInfo + +func (m *QueryRateLimitRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryRateLimitRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type QueryRateLimitResponse struct { + RateLimit *RateLimit `protobuf:"bytes,1,opt,name=rate_limit,json=rateLimit,proto3" json:"rate_limit,omitempty"` +} + +func (m *QueryRateLimitResponse) Reset() { *m = QueryRateLimitResponse{} } +func (m *QueryRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitResponse) ProtoMessage() {} +func (*QueryRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{3} +} +func (m *QueryRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitResponse.Merge(m, src) +} +func (m *QueryRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitResponse proto.InternalMessageInfo + +func (m *QueryRateLimitResponse) GetRateLimit() *RateLimit { + if m != nil { + return m.RateLimit + } + return nil +} + +type QueryRateLimitsByChainIdRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryRateLimitsByChainIdRequest) Reset() { *m = QueryRateLimitsByChainIdRequest{} } +func (m *QueryRateLimitsByChainIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIdRequest) ProtoMessage() {} +func (*QueryRateLimitsByChainIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{4} +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIdRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIdRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryRateLimitsByChainIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIdRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIdRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +type QueryRateLimitsByChainIdResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChainIdResponse) Reset() { *m = QueryRateLimitsByChainIdResponse{} } +func (m *QueryRateLimitsByChainIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIdResponse) ProtoMessage() {} +func (*QueryRateLimitsByChainIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{5} +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIdResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIdResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIdResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryRateLimitsByChannelIdRequest struct { + ChannelId string `protobuf:"bytes,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryRateLimitsByChannelIdRequest) Reset() { *m = QueryRateLimitsByChannelIdRequest{} } +func (m *QueryRateLimitsByChannelIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChannelIdRequest) ProtoMessage() {} +func (*QueryRateLimitsByChannelIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{6} +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelIdRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelIdRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelIdRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type QueryRateLimitsByChannelIdResponse struct { + RateLimits []RateLimit 
`protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChannelIdResponse) Reset() { *m = QueryRateLimitsByChannelIdResponse{} } +func (m *QueryRateLimitsByChannelIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChannelIdResponse) ProtoMessage() {} +func (*QueryRateLimitsByChannelIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_97a373ef8fcef03b, []int{7} +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelIdResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelIdResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelIdResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +func init() { + proto.RegisterType((*QueryAllRateLimitsRequest)(nil), "stride.ratelimit.QueryAllRateLimitsRequest") + proto.RegisterType((*QueryAllRateLimitsResponse)(nil), "stride.ratelimit.QueryAllRateLimitsResponse") + proto.RegisterType((*QueryRateLimitRequest)(nil), "stride.ratelimit.QueryRateLimitRequest") + proto.RegisterType((*QueryRateLimitResponse)(nil), "stride.ratelimit.QueryRateLimitResponse") + proto.RegisterType((*QueryRateLimitsByChainIdRequest)(nil), "stride.ratelimit.QueryRateLimitsByChainIdRequest") + proto.RegisterType((*QueryRateLimitsByChainIdResponse)(nil), "stride.ratelimit.QueryRateLimitsByChainIdResponse") + proto.RegisterType((*QueryRateLimitsByChannelIdRequest)(nil), "stride.ratelimit.QueryRateLimitsByChannelIdRequest") + proto.RegisterType((*QueryRateLimitsByChannelIdResponse)(nil), "stride.ratelimit.QueryRateLimitsByChannelIdResponse") +} + +func init() { proto.RegisterFile("stride/ratelimit/query.proto", fileDescriptor_97a373ef8fcef03b) } + +var fileDescriptor_97a373ef8fcef03b = []byte{ + // 537 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xb3, 0x85, 0x00, 0x99, 0x0a, 0x09, 0x2d, 0x2d, 0x4a, 0xdd, 0xe2, 0x06, 0x5f, 0x88, + 0xf8, 0xe3, 0x85, 0xa4, 0x15, 0x12, 0x50, 0x21, 0x8c, 0x38, 0x54, 0xca, 0x05, 0xc3, 0x89, 0x4b, + 0x70, 0xe2, 0xc5, 0xb1, 0xe4, 0x78, 0x53, 0xef, 0x06, 0x11, 0xa1, 0x5e, 0x78, 0x02, 0x24, 0xae, + 0x5c, 0x79, 0x08, 0x8e, 0xdc, 0x7a, 0x42, 0x95, 0xb8, 0x70, 0x42, 0x28, 0xe1, 0x41, 0x90, 0x77, + 0x1d, 0x97, 0x24, 0x4e, 0xeb, 0x48, 0xb9, 0x6d, 0x76, 0x66, 0xbe, 0xf9, 0xcd, 0xf8, 0xdb, 0xc0, + 0x16, 0x17, 0x91, 0xef, 0x52, 0x12, 0x39, 0x82, 0x06, 0x7e, 0xd7, 0x17, 0xe4, 0xa0, 0x4f, 0xa3, + 0x81, 0xd9, 0x8b, 0x98, 0x60, 0xf8, 0x8a, 0x8a, 0x9a, 0x69, 0x54, 0xab, 0xcc, 0xe4, 0xa7, 0x27, + 0x55, 0xa3, 0x6d, 0x79, 0x8c, 0x79, 0x01, 0x25, 0x4e, 0xcf, 0x27, 0x4e, 0x18, 0x32, 0xe1, 0x08, + 0x9f, 0x85, 0x3c, 0x89, 0xae, 0x79, 0xcc, 0x63, 0xf2, 
0x48, 0xe2, 0x93, 0xba, 0x35, 0x36, 0x61, + 0xe3, 0x45, 0xdc, 0xf6, 0x69, 0x10, 0xd8, 0x8e, 0xa0, 0x8d, 0x58, 0x8e, 0xdb, 0xf4, 0xa0, 0x4f, + 0xb9, 0x30, 0xde, 0x80, 0x96, 0x15, 0xe4, 0x3d, 0x16, 0x72, 0x8a, 0x2d, 0x58, 0x8d, 0x09, 0x9a, + 0x12, 0x81, 0x97, 0x51, 0xe5, 0x5c, 0x75, 0xb5, 0xb6, 0x69, 0x4e, 0x83, 0x9b, 0x69, 0xa9, 0x75, + 0xfe, 0xe8, 0xf7, 0x76, 0xc1, 0x86, 0x28, 0xd5, 0x32, 0x1a, 0xb0, 0x2e, 0x3b, 0xa4, 0x39, 0x49, + 0x6b, 0xbc, 0x06, 0x45, 0x97, 0x86, 0xac, 0x5b, 0x46, 0x15, 0x54, 0x2d, 0xd9, 0xea, 0x07, 0xbe, + 0x0e, 0xd0, 0xee, 0x38, 0x61, 0x48, 0x83, 0xa6, 0xef, 0x96, 0x57, 0x64, 0xa8, 0x94, 0xdc, 0xec, + 0xbb, 0xc6, 0x2b, 0xb8, 0x36, 0xad, 0x96, 0xb0, 0x3e, 0x04, 0x38, 0x61, 0x95, 0x9a, 0xa7, 0xa3, + 0xda, 0xa5, 0x14, 0xd2, 0x78, 0x0c, 0xdb, 0x93, 0xaa, 0xdc, 0x1a, 0x3c, 0xeb, 0x38, 0x7e, 0xb8, + 0xef, 0x8e, 0x69, 0x37, 0xe0, 0x52, 0x3b, 0xbe, 0x89, 0xa9, 0x14, 0xf0, 0xc5, 0xb6, 0xca, 0x30, + 0xde, 0x42, 0x65, 0x7e, 0xf5, 0x12, 0x37, 0x69, 0xc1, 0x8d, 0xac, 0x3e, 0x6a, 0x33, 0x63, 0xce, + 0xc9, 0xfd, 0xa1, 0xe9, 0xfd, 0x75, 0xc0, 0x38, 0x4d, 0x63, 0x79, 0xb4, 0xb5, 0x1f, 0x45, 0x28, + 0xca, 0x56, 0xf8, 0x0b, 0x82, 0xcb, 0x13, 0xfe, 0xc2, 0xb7, 0x67, 0xa5, 0xe6, 0x5a, 0x54, 0xbb, + 0x93, 0x2f, 0x59, 0xa1, 0x1b, 0xf7, 0x3e, 0xfe, 0xfc, 0xfb, 0x79, 0xe5, 0x16, 0xae, 0x92, 0x97, + 0xb2, 0xea, 0x6e, 0xc3, 0x69, 0x71, 0x32, 0xff, 0x61, 0x71, 0xfc, 0x15, 0x41, 0x29, 0x15, 0xc2, + 0x37, 0xe7, 0x74, 0x9b, 0xb6, 0xaf, 0x56, 0x3d, 0x3b, 0x31, 0x41, 0x7a, 0x2e, 0x91, 0x9e, 0xe0, + 0xbd, 0x9c, 0x48, 0xe4, 0xc3, 0xc9, 0x17, 0x3c, 0x24, 0xad, 0x41, 0x53, 0xbd, 0x8c, 0x6f, 0x08, + 0xae, 0x66, 0x58, 0x0c, 0xdf, 0x3f, 0x0b, 0x64, 0xc6, 0xcc, 0x5a, 0x6d, 0x91, 0x92, 0x64, 0x8a, + 0x47, 0x72, 0x8a, 0x5d, 0x5c, 0xcf, 0xbb, 0x58, 0x39, 0x86, 0x7c, 0x30, 0x87, 0xf8, 0x3b, 0x82, + 0xf5, 0x4c, 0xcb, 0xe1, 0x7a, 0x3e, 0x94, 0x09, 0x93, 0x6b, 0x3b, 0x8b, 0x15, 0x25, 0x13, 0xec, + 0xc9, 0x09, 0x1e, 0xe0, 0xdd, 0x85, 0x26, 0x18, 0x7f, 0x08, 0xab, 0x71, 0x34, 0xd4, 0xd1, 0xf1, + 0x50, 0x47, 0x7f, 0x86, 0x3a, 0xfa, 0x34, 0xd2, 0x0b, 0xc7, 0x23, 0xbd, 0xf0, 0x6b, 0xa4, 0x17, + 0x5e, 0xd7, 0x3c, 0x5f, 0x74, 0xfa, 0x2d, 0xb3, 0xcd, 0xba, 0x59, 0xd2, 0xef, 0x76, 0xc8, 0xfb, + 0xff, 0xf4, 0xc5, 0xa0, 0x47, 0x79, 0xeb, 0x82, 0xfc, 0x73, 0xae, 0xff, 0x0b, 0x00, 0x00, 0xff, + 0xff, 0x60, 0x1b, 0xd8, 0x93, 0x24, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueryClient interface { + AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) + RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) + RateLimitsByChainId(ctx context.Context, in *QueryRateLimitsByChainIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIdResponse, error) + RateLimitsByChannelId(ctx context.Context, in *QueryRateLimitsByChannelIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelIdResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) { + out := new(QueryAllRateLimitsResponse) + err := c.cc.Invoke(ctx, "/stride.ratelimit.Query/AllRateLimits", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) { + out := new(QueryRateLimitResponse) + err := c.cc.Invoke(ctx, "/stride.ratelimit.Query/RateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChainId(ctx context.Context, in *QueryRateLimitsByChainIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIdResponse, error) { + out := new(QueryRateLimitsByChainIdResponse) + err := c.cc.Invoke(ctx, "/stride.ratelimit.Query/RateLimitsByChainId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChannelId(ctx context.Context, in *QueryRateLimitsByChannelIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelIdResponse, error) { + out := new(QueryRateLimitsByChannelIdResponse) + err := c.cc.Invoke(ctx, "/stride.ratelimit.Query/RateLimitsByChannelId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + AllRateLimits(context.Context, *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) + RateLimit(context.Context, *QueryRateLimitRequest) (*QueryRateLimitResponse, error) + RateLimitsByChainId(context.Context, *QueryRateLimitsByChainIdRequest) (*QueryRateLimitsByChainIdResponse, error) + RateLimitsByChannelId(context.Context, *QueryRateLimitsByChannelIdRequest) (*QueryRateLimitsByChannelIdResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllRateLimits(ctx context.Context, req *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllRateLimits not implemented") +} +func (*UnimplementedQueryServer) RateLimit(ctx context.Context, req *QueryRateLimitRequest) (*QueryRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimit not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChainId(ctx context.Context, req *QueryRateLimitsByChainIdRequest) (*QueryRateLimitsByChainIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChainId not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChannelId(ctx context.Context, req *QueryRateLimitsByChannelIdRequest) (*QueryRateLimitsByChannelIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChannelId not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllRateLimitsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllRateLimits(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/stride.ratelimit.Query/AllRateLimits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllRateLimits(ctx, req.(*QueryAllRateLimitsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/stride.ratelimit.Query/RateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimit(ctx, req.(*QueryRateLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChainId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChainIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChainId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/stride.ratelimit.Query/RateLimitsByChainId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChainId(ctx, req.(*QueryRateLimitsByChainIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChannelId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChannelIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChannelId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/stride.ratelimit.Query/RateLimitsByChannelId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChannelId(ctx, req.(*QueryRateLimitsByChannelIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "stride.ratelimit.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllRateLimits", + Handler: _Query_AllRateLimits_Handler, + }, + { + MethodName: "RateLimit", + Handler: _Query_RateLimit_Handler, + }, + { + MethodName: "RateLimitsByChainId", + Handler: _Query_RateLimitsByChainId_Handler, + }, + { + MethodName: "RateLimitsByChannelId", + Handler: _Query_RateLimitsByChannelId_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "stride/ratelimit/query.proto", +} + +func (m *QueryAllRateLimitsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllRateLimitsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + _ = i + var l int + _ = l + if m.RateLimit != nil { + { + size, err := m.RateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= 
sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryAllRateLimitsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllRateLimitsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RateLimit != nil { + l = m.RateLimit.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitsByChannelIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChannelIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryAllRateLimitsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllRateLimitsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RateLimit == nil { + m.RateLimit = &RateLimit{} + } + if err := m.RateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) 
< 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + 
ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/query.pb.gw.go b/x/ratelimit/types/query.pb.gw.go new file mode 100644 index 0000000000..18d915c008 --- /dev/null +++ b/x/ratelimit/types/query.pb.gw.go @@ -0,0 +1,474 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: stride/ratelimit/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllRateLimits(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_RateLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RateLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, 
ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.RateLimit(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChainId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := client.RateLimitsByChainId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChainId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := server.RateLimitsByChainId(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChannelId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + msg, err := client.RateLimitsByChannelId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChannelId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelIdRequest + var metadata 
runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + msg, err := server.RateLimitsByChannelId(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllRateLimits_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChainId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChannelId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllRateLimits_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChainId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChannelId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_AllRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"Stride-Labs", "stride", "ratelimit", "ratelimits"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"Stride-Labs", "stride", "ratelimit", "channel_id", "by_denom"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChainId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"Stride-Labs", "stride", "ratelimit", "ratelimits", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChannelId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"Stride-Labs", "stride", "ratelimit", "ratelimits", "channel_id"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_AllRateLimits_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimit_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChainId_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChannelId_0 = runtime.ForwardResponseMessage +) diff --git a/x/ratelimit/types/quota.go b/x/ratelimit/types/quota.go new file mode 100644 index 0000000000..caf3bafe62 --- /dev/null +++ b/x/ratelimit/types/quota.go @@ -0,0 +1,22 @@ +package types + +import ( + sdkmath "cosmossdk.io/math" +) + +// CheckExceedsQuota checks whether a new in/out flow would exceed the quota's max in/out threshold +func (q *Quota) CheckExceedsQuota(direction PacketDirection, amount sdkmath.Int, totalValue sdkmath.Int) bool { + // If there's no channel value (this should be almost impossible), it means there is no + // supply of the asset, so we shouldn't prevent inflows/outflows + if totalValue.IsZero() { + return false + } + var threshold sdkmath.Int + if direction == PACKET_RECV { + threshold = totalValue.Mul(q.MaxPercentRecv).Quo(sdkmath.NewInt(100)) + } else { + threshold = totalValue.Mul(q.MaxPercentSend).Quo(sdkmath.NewInt(100)) + } + + return amount.GT(threshold) +} diff --git a/x/ratelimit/types/quota_test.go b/x/ratelimit/types/quota_test.go new file mode 100644 index 0000000000..12eef7f0ec --- /dev/null +++ b/x/ratelimit/types/quota_test.go @@ -0,0 +1,79 @@ +package types_test + +import ( + "testing" + + sdkmath "cosmossdk.io/math" + "github.com/stretchr/testify/require" + + "github.com/Stride-Labs/stride/v5/x/ratelimit/types" +) + +func TestCheckExceedsQuota(t *testing.T) { + totalValue := sdkmath.NewInt(100) + amountUnderThreshold := sdkmath.NewInt(5) + amountOverThreshold := sdkmath.NewInt(15) + quota := types.Quota{ + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(10), + DurationHours: uint64(1), + } + + tests := []struct { + name string + direction types.PacketDirection + amount sdkmath.Int + totalValue sdkmath.Int + exceeded bool + }{ + { + name: "inflow exceeded threshold", + direction: types.PACKET_RECV, + amount: amountOverThreshold, + totalValue: totalValue, + exceeded: true, + }, + { + name: "inflow did not exceed threshold", + direction: types.PACKET_RECV, + amount: amountUnderThreshold, + totalValue: totalValue, + exceeded: false, + }, + { + name: "outflow exceeded threshold", + direction: types.PACKET_SEND, + amount: amountOverThreshold, + totalValue: totalValue, + exceeded: true, + }, + { + name: "outflow did not exceed threshold", + direction: types.PACKET_SEND, + amount: 
amountUnderThreshold, + totalValue: totalValue, + exceeded: false, + }, + { + name: "zero channel value send", + direction: types.PACKET_SEND, + amount: amountOverThreshold, + totalValue: sdkmath.ZeroInt(), + exceeded: false, + }, + { + name: "zero channel value recv", + direction: types.PACKET_RECV, + amount: amountOverThreshold, + totalValue: sdkmath.ZeroInt(), + exceeded: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := quota.CheckExceedsQuota(test.direction, test.amount, test.totalValue) + require.Equal(t, res, test.exceeded, "test: %s", test.name) + }) + } +} diff --git a/x/ratelimit/types/ratelimit.pb.go b/x/ratelimit/types/ratelimit.pb.go new file mode 100644 index 0000000000..1cdcd090fc --- /dev/null +++ b/x/ratelimit/types/ratelimit.pb.go @@ -0,0 +1,1220 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stride/ratelimit/ratelimit.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PacketDirection int32 + +const ( + PACKET_SEND PacketDirection = 0 + PACKET_RECV PacketDirection = 1 +) + +var PacketDirection_name = map[int32]string{ + 0: "PACKET_SEND", + 1: "PACKET_RECV", +} + +var PacketDirection_value = map[string]int32{ + "PACKET_SEND": 0, + "PACKET_RECV": 1, +} + +func (x PacketDirection) String() string { + return proto.EnumName(PacketDirection_name, int32(x)) +} + +func (PacketDirection) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a3e00ee2c967d747, []int{0} +} + +type Path struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e00ee2c967d747, []int{0} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(m, src) +} +func (m *Path) XXX_Size() int { + return m.Size() +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *Path) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type Quota struct { + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int 
`protobuf:"bytes,1,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,3,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *Quota) Reset() { *m = Quota{} } +func (m *Quota) String() string { return proto.CompactTextString(m) } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e00ee2c967d747, []int{1} +} +func (m *Quota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quota.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quota) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quota.Merge(m, src) +} +func (m *Quota) XXX_Size() int { + return m.Size() +} +func (m *Quota) XXX_DiscardUnknown() { + xxx_messageInfo_Quota.DiscardUnknown(m) +} + +var xxx_messageInfo_Quota proto.InternalMessageInfo + +func (m *Quota) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type Flow struct { + Inflow github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=inflow,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"inflow"` + Outflow github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=outflow,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"outflow"` + ChannelValue github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,3,opt,name=channel_value,json=channelValue,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"channel_value"` +} + +func (m *Flow) Reset() { *m = Flow{} } +func (m *Flow) String() string { return proto.CompactTextString(m) } +func (*Flow) ProtoMessage() {} +func (*Flow) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e00ee2c967d747, []int{2} +} +func (m *Flow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Flow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Flow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Flow.Merge(m, src) +} +func (m *Flow) XXX_Size() int { + return m.Size() +} +func (m *Flow) XXX_DiscardUnknown() { + xxx_messageInfo_Flow.DiscardUnknown(m) +} + +var xxx_messageInfo_Flow proto.InternalMessageInfo + +type RateLimit struct { + Path *Path `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Quota *Quota `protobuf:"bytes,2,opt,name=quota,proto3" json:"quota,omitempty"` + Flow *Flow `protobuf:"bytes,3,opt,name=flow,proto3" json:"flow,omitempty"` +} + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (m *RateLimit) String() string { return proto.CompactTextString(m) } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_a3e00ee2c967d747, []int{3} +} +func (m *RateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimit) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) +} +func (m *RateLimit) XXX_Size() int { + return m.Size() +} +func (m *RateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimit proto.InternalMessageInfo + +func (m *RateLimit) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +func (m *RateLimit) GetQuota() *Quota { + if m != nil { + return m.Quota + } + return nil +} + +func (m *RateLimit) GetFlow() *Flow { + if m != nil { + return m.Flow + } + return nil +} + +func init() { + proto.RegisterEnum("stride.ratelimit.PacketDirection", PacketDirection_name, PacketDirection_value) + proto.RegisterType((*Path)(nil), "stride.ratelimit.Path") + proto.RegisterType((*Quota)(nil), "stride.ratelimit.Quota") + proto.RegisterType((*Flow)(nil), "stride.ratelimit.Flow") + proto.RegisterType((*RateLimit)(nil), "stride.ratelimit.RateLimit") +} + +func init() { proto.RegisterFile("stride/ratelimit/ratelimit.proto", fileDescriptor_a3e00ee2c967d747) } + +var fileDescriptor_a3e00ee2c967d747 = []byte{ + // 478 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xc1, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0x63, 0x96, 0x0e, 0xf5, 0x2b, 0xdb, 0x2a, 0x6b, 0x82, 0x6a, 0x12, 0x59, 0x55, 0x09, + 0x34, 0x4d, 0x6a, 0x22, 0x15, 0x2e, 0x88, 0x13, 0xdb, 0x3a, 0x6d, 0xa2, 0x42, 0xc5, 0x45, 0x13, + 0xe2, 0x12, 0xb9, 0x89, 0x69, 0xa2, 0x25, 0x76, 0x89, 0x9d, 0xae, 0xbc, 0x01, 0x47, 0xc4, 0x2b, + 0xf0, 0x32, 0x3b, 0xee, 0x88, 0x38, 0x4c, 0xa8, 0x3d, 0xf3, 0x0e, 0xc8, 0x4e, 0x0a, 0xd5, 0x10, + 0x07, 0xba, 0x53, 0x9c, 0xcf, 0x7f, 0xff, 0x92, 0xff, 0xe7, 0xff, 0x07, 0x4d, 0xa9, 0xb2, 0x38, + 0x64, 0x5e, 0x46, 0x15, 0x4b, 0xe2, 0x34, 0x56, 0x7f, 0x56, 0xee, 0x38, 0x13, 0x4a, 0xe0, 0x7a, + 0xa1, 0x70, 0x7f, 0xd7, 0x77, 0xb6, 0x47, 0x62, 0x24, 0xcc, 0xa6, 0xa7, 0x57, 0x85, 0xae, 0xf5, + 0x1c, 0xec, 0x3e, 0x55, 0x11, 0xde, 0x86, 0x4a, 0xc8, 0xb8, 0x48, 0x1b, 0xa8, 0x89, 0xf6, 0xaa, + 0xa4, 0x78, 0xc1, 0x0f, 0x01, 0x82, 0x88, 0x72, 0xce, 0x12, 0x3f, 0x0e, 0x1b, 0x77, 0xcc, 0x56, + 0xb5, 0xac, 0x9c, 0x86, 0xad, 0x19, 0x82, 0xca, 0xeb, 0x5c, 0x28, 0x8a, 0xdf, 0x42, 0x3d, 0xa5, + 0x53, 0x7f, 0xcc, 0xb2, 0x80, 0x71, 0xe5, 0x4b, 0xc6, 0xc3, 0x82, 0x74, 0xe0, 0x5e, 0x5e, 0xef, + 0x5a, 0xdf, 0xaf, 0x77, 0x1f, 0x8f, 0x62, 0x15, 0xe5, 0x43, 0x37, 0x10, 0xa9, 0x17, 0x08, 0x99, + 0x0a, 0x59, 0x3e, 0xda, 0x32, 0x3c, 0xf7, 0xd4, 0xc7, 0x31, 0x93, 0xee, 0x29, 0x57, 0x64, 0x33, + 0xa5, 0xd3, 0x7e, 0x81, 0x19, 0x30, 0x1e, 0xde, 0x24, 0x67, 0x2c, 0x98, 0x14, 0x3f, 0x72, 0x1b, + 0x32, 0x61, 0xc1, 0x04, 0x3f, 0x82, 0xcd, 0x30, 0xcf, 0xa8, 0x8a, 0x05, 0xf7, 0x23, 0x91, 0x67, + 0xb2, 0xb1, 0xd6, 0x44, 0x7b, 0x36, 0xd9, 0x58, 0x54, 0x4f, 0x74, 0xb1, 0xf5, 0x13, 0x81, 0x7d, + 0x9c, 0x88, 0x0b, 0x7c, 0x0c, 0xeb, 0x31, 0x7f, 0x9f, 0x88, 0x8b, 0x15, 0x9d, 0x95, 0xa7, 0xf1, + 0x09, 0xdc, 0x15, 0xb9, 0x32, 0xa0, 0xd5, 0x8c, 0x2c, 0x8e, 0xe3, 0x01, 0x6c, 0x2c, 0xae, 0x67, + 0x42, 0x93, 0x9c, 0x19, 0x03, 0xff, 0xcf, 0xbb, 0x57, 0x42, 0xce, 0x34, 0xa3, 0xf5, 0x05, 0x41, + 0x95, 0x50, 0xc5, 0x7a, 0x3a, 0x35, 0x78, 0x1f, 0xec, 0x31, 0x55, 0x91, 0xb1, 0x5c, 0xeb, 0xdc, + 0x77, 0x6f, 0xc6, 0xca, 0xd5, 0xe9, 0x21, 0x46, 0x83, 0xdb, 
0x50, 0xf9, 0xa0, 0xd3, 0x60, 0x6c, + 0xd5, 0x3a, 0x0f, 0xfe, 0x16, 0x9b, 0xb0, 0x90, 0x42, 0xa5, 0xd1, 0xa6, 0x09, 0x6b, 0xff, 0x42, + 0xeb, 0xae, 0x13, 0xa3, 0xd9, 0x7f, 0x06, 0x5b, 0x7d, 0x1a, 0x9c, 0x33, 0x75, 0x14, 0x67, 0x2c, + 0xd0, 0x97, 0x83, 0xb7, 0xa0, 0xd6, 0x7f, 0x71, 0xf8, 0xb2, 0xfb, 0xc6, 0x1f, 0x74, 0x5f, 0x1d, + 0xd5, 0xad, 0xa5, 0x02, 0xe9, 0x1e, 0x9e, 0xd5, 0xd1, 0x8e, 0xfd, 0xe9, 0xab, 0x63, 0x1d, 0xf4, + 0x2e, 0x67, 0x0e, 0xba, 0x9a, 0x39, 0xe8, 0xc7, 0xcc, 0x41, 0x9f, 0xe7, 0x8e, 0x75, 0x35, 0x77, + 0xac, 0x6f, 0x73, 0xc7, 0x7a, 0xd7, 0x59, 0xea, 0xcf, 0xc0, 0x7c, 0xbc, 0xdd, 0xa3, 0x43, 0xe9, + 0x95, 0xc3, 0x35, 0x79, 0xea, 0x4d, 0x97, 0x26, 0xcc, 0xf4, 0x6b, 0xb8, 0x6e, 0xc6, 0xe6, 0xc9, + 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x16, 0x08, 0xa9, 0x82, 0x03, 0x00, 0x00, +} + +func (m *Path) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Path) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Path) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Quota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintRatelimit(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x18 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Flow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Flow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Flow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.ChannelValue.Size() + i -= size + if _, err := m.ChannelValue.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size := m.Outflow.Size() + i -= size + if _, err := m.Outflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.Inflow.Size() + i -= size + 
if _, err := m.Inflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flow != nil { + { + size, err := m.Flow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Quota != nil { + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Path != nil { + { + size, err := m.Path.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRatelimit(dAtA []byte, offset int, v uint64) int { + offset -= sovRatelimit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Path) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func (m *Quota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.MaxPercentSend.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovRatelimit(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovRatelimit(uint64(m.DurationHours)) + } + return n +} + +func (m *Flow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Inflow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.Outflow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.ChannelValue.Size() + n += 1 + l + sovRatelimit(uint64(l)) + return n +} + +func (m *RateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + if m.Quota != nil { + l = m.Quota.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + if m.Flow != nil { + l = m.Flow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func sovRatelimit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRatelimit(x uint64) (n int) { + return sovRatelimit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Path) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Path: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Path: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Flow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Flow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Flow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Inflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Outflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ChannelValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Path == nil { + m.Path = &Path{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quota == nil { + m.Quota = &Quota{} + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flow == nil { + m.Flow = &Flow{} + } + if err 
:= m.Flow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRatelimit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRatelimit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRatelimit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRatelimit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRatelimit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRatelimit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRatelimit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/records/module_ibc.go b/x/records/module_ibc.go index 1be2b1f9cb..6881cdc950 100644 --- a/x/records/module_ibc.go +++ b/x/records/module_ibc.go @@ -152,9 +152,6 @@ func (im IBCModule) OnRecvPacket( packet channeltypes.Packet, relayer sdk.AccAddress, ) ibcexported.Acknowledgement { - wrapperAck := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) - // handle(wrapperAck) - _ = wrapperAck // NOTE: acknowledgement will be written synchronously during IBC handler execution. 
// doCustomLogic(packet) return im.app.OnRecvPacket(ctx, packet, relayer) diff --git a/x/stakeibc/client/cli/query_epoch_tracker_test.go b/x/stakeibc/client/cli/query_epoch_tracker_test.go index f8ad08459b..8d80da3f7d 100644 --- a/x/stakeibc/client/cli/query_epoch_tracker_test.go +++ b/x/stakeibc/client/cli/query_epoch_tracker_test.go @@ -92,6 +92,7 @@ func TestListEpochTracker(t *testing.T) { expected := []types.EpochTracker{ {EpochIdentifier: "day", EpochNumber: 1}, + {EpochIdentifier: "hour", EpochNumber: 1}, {EpochIdentifier: "mint", EpochNumber: 1}, {EpochIdentifier: "stride_epoch", EpochNumber: 1}, {EpochIdentifier: "week", EpochNumber: 1}, @@ -102,7 +103,7 @@ func TestListEpochTracker(t *testing.T) { require.NoError(t, net.Config.Codec.UnmarshalJSON(out.Bytes(), &actual)) require.NotNil(t, actual.EpochTracker) - require.Len(t, actual.EpochTracker, 4) + require.Len(t, actual.EpochTracker, len(expected)) actualTrim := []types.EpochTracker{} for _, epochTracker := range actual.EpochTracker { diff --git a/x/stakeibc/client/cli/tx_add_validator_proposal.go b/x/stakeibc/client/cli/tx_add_validator_proposal.go index 308bf0d0da..356de4e499 100644 --- a/x/stakeibc/client/cli/tx_add_validator_proposal.go +++ b/x/stakeibc/client/cli/tx_add_validator_proposal.go @@ -50,7 +50,7 @@ func CmdAddValidatorProposal() *cobra.Command { The proposal details must be supplied via a JSON file. Example: -$ %s tx gov submit-proposal add-validator --from= +$ %s tx gov submit-legacy-proposal add-validator --from= Where proposal.json contains: {