diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml index f85a4eb1..ca8f69c5 100644 --- a/.github/actions/setup-go/action.yml +++ b/.github/actions/setup-go/action.yml @@ -33,7 +33,7 @@ runs: echo "version=$VERSION" >> "$GITHUB_OUTPUT" - name: Setup Go - uses: actions/setup-go@v6 + uses: actions/setup-go@v6.2.0 with: go-version: ${{ steps.determine.outputs.version }} cache: true diff --git a/CHANGELOG.md b/CHANGELOG.md index fb3a6e25..d8bd3fad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,18 @@ --- +## 1.10.1 + +Changes included since `v1.10.0` (range: `v1.10.0..v1.10.1`). + +- Upgrade safety: add a conditional store loader for `v1.10.1` that only renames the legacy `Consensus` store when needed and avoids panics when the new `consensus` store already exists. +- Consensus params: ensure `x/consensus` params are present and repair missing/incomplete values during the upgrade. +- Devnet tooling: adaptive store loader can be enabled to reconcile on-disk stores against expected modules when skipping intermediate upgrades. +- Devnet tests: validator IBC tests now auto-detect the validator container (RPC + key) instead of assuming `supernova_validator_1`. +- Upgrade guidance: do not apply `v1.10.0` on testnet or mainnet. + +--- + ## 1.10.0 Changes included since `v1.9.1` (range: `v1.9.1..v1.10.0`). diff --git a/Makefile.devnet b/Makefile.devnet index 7b8a2d7a..6f68564b 100644 --- a/Makefile.devnet +++ b/Makefile.devnet @@ -109,7 +109,7 @@ devnet-build-default: _check-devnet-default-cfg EXTERNAL_GENESIS_FILE="$$(realpath $(DEFAULT_GENESIS_FILE))" \ EXTERNAL_CLAIMS_FILE="$$(realpath $(DEFAULT_CLAIMS_FILE))" -.PHONY: devnet-build-172 _check-devnet-172-cfg +.PHONY: devnet-build-172 _check-devnet-172-cfg devnet-build-191 _check-devnet-191-cfg devnet-build-172: @$(MAKE) devnet-build \ DEVNET_BUILD_LUMERA=0 \ @@ -121,6 +121,17 @@ _check-devnet-172-cfg: @[ -f "$$(realpath $(ORIG_GENESIS_FILE))" ] || (echo "Missing ORIG_GENESIS_FILE: $$(realpath $(ORIG_GENESIS_FILE))"; exit 1) @[ -f "$$(realpath $(DEFAULT_CLAIMS_FILE))" ] || (echo "Missing DEFAULT_CLAIMS_FILE: $$(realpath $(DEFAULT_CLAIMS_FILE))"; exit 1) +devnet-build-191: + @$(MAKE) devnet-build \ + DEVNET_BUILD_LUMERA=0 \ + DEVNET_BIN_DIR=devnet/bin-v1.9.1 \ + EXTERNAL_GENESIS_FILE="$$(realpath $(DEFAULT_GENESIS_FILE))" \ + EXTERNAL_CLAIMS_FILE="$$(realpath $(DEFAULT_CLAIMS_FILE))" + +_check-devnet-191-cfg: + @[ -f "$$(realpath $(DEFAULT_GENESIS_FILE))" ] || (echo "Missing DEFAULT_GENESIS_FILE: $$(realpath $(DEFAULT_GENESIS_FILE))"; exit 1) + @[ -f "$$(realpath $(DEFAULT_CLAIMS_FILE))" ] || (echo "Missing DEFAULT_CLAIMS_FILE: $$(realpath $(DEFAULT_CLAIMS_FILE))"; exit 1) + _check-devnet-default-cfg: @[ -f "$$(realpath $(DEFAULT_GENESIS_FILE))" ] || (echo "Missing DEFAULT_GENESIS_FILE: $$(realpath $(DEFAULT_GENESIS_FILE))"; exit 1) @[ -f "$$(realpath $(DEFAULT_CLAIMS_FILE))" ] || (echo "Missing DEFAULT_CLAIMS_FILE: $$(realpath $(DEFAULT_CLAIMS_FILE))"; exit 1) @@ -288,12 +299,19 @@ devnet-update-scripts: echo "No containers were updated. 
Ensure the devnet is running."; \ fi -.PHONY: devnet-new-172 devnet-upgrade-180 devnet-upgrade-184 +.PHONY: devnet-new-172 devnet-new-191 devnet-upgrade-180 devnet-upgrade-191 devnet-upgrade-1100 devnet-upgrade-1101 + devnet-upgrade-180: @cd devnet/scripts && ./upgrade.sh v1.8.0 auto-height ../bin-v1.8.0 -devnet-upgrade-184: - @cd devnet/scripts && ./upgrade.sh v1.8.4 auto-height ../bin-v1.8.4 +devnet-upgrade-191: + @cd devnet/scripts && ./upgrade.sh v1.9.1 auto-height ../bin-v1.9.1 + +devnet-upgrade-1100: + @cd devnet/scripts && ./upgrade.sh v1.10.0 auto-height ../bin-v1.10.0 + +devnet-upgrade-1101: + @cd devnet/scripts && ./upgrade.sh v1.10.1 auto-height ../bin devnet-new-172: $(MAKE) devnet-down @@ -302,6 +320,14 @@ devnet-new-172: sleep 10 $(MAKE) devnet-up + +devnet-new-191: + $(MAKE) devnet-down + $(MAKE) devnet-clean + $(MAKE) devnet-build-191 + sleep 10 + $(MAKE) devnet-up + devnet-deploy-tar: # Ensure required files exist from previous build @if [ ! -f "devnet/docker-compose.yml" ] || [ ! -f "devnet/bin/lumerad" ] || [ ! -f "devnet/bin/libwasmvm.x86_64.so" ]; then \ diff --git a/app/app.go b/app/app.go index ce049728..8059d222 100644 --- a/app/app.go +++ b/app/app.go @@ -20,7 +20,6 @@ import ( _ "cosmossdk.io/x/feegrant/module" // import for side-effects _ "cosmossdk.io/x/upgrade" // import for side-effects upgradekeeper "cosmossdk.io/x/upgrade/keeper" - upgradetypes "cosmossdk.io/x/upgrade/types" abci "github.com/cometbft/cometbft/abci/types" tmproto "github.com/cometbft/cometbft/proto/tendermint/types" dbm "github.com/cosmos/cosmos-db" @@ -360,7 +359,8 @@ func (app *App) setupUpgrades() { panic(fmt.Sprintf("upgrade plan %q is scheduled at height %d but not registered in this binary", upgradeInfo.Name, upgradeInfo.Height)) } - if upgradeConfig.StoreUpgrade == nil { + useAdaptiveStoreUpgrades := upgrades.ShouldEnableStoreUpgradeManager(params.ChainID) + if upgradeConfig.StoreUpgrade == nil && !useAdaptiveStoreUpgrades { app.Logger().Info("No store upgrades registered for pending plan", "name", upgradeInfo.Name) return } @@ -370,8 +370,31 @@ func (app *App) setupUpgrades() { return } - app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, upgradeConfig.StoreUpgrade)) - app.Logger().Info("Configured store loader for upgrade", "name", upgradeInfo.Name, "height", upgradeInfo.Height) + if useAdaptiveStoreUpgrades { + expectedStoreNames := upgrades.KVStoreNames(app.GetStoreKeys()) + selection := upgrades.StoreLoaderForUpgrade( + upgradeInfo.Name, + upgradeInfo.Height, + upgradeConfig.StoreUpgrade, + expectedStoreNames, + app.Logger(), + true, + ) + app.SetStoreLoader(selection.Loader) + app.Logger().Info(selection.LogMessage(), "name", upgradeInfo.Name, "height", upgradeInfo.Height) + return + } + + selection := upgrades.StoreLoaderForUpgrade( + upgradeInfo.Name, + upgradeInfo.Height, + upgradeConfig.StoreUpgrade, + nil, + app.Logger(), + false, + ) + app.SetStoreLoader(selection.Loader) + app.Logger().Info(selection.LogMessage(), "name", upgradeInfo.Name, "height", upgradeInfo.Height) } // LegacyAmino returns App's amino codec. 
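Reviewer note: the adaptive and non-adaptive branches in setupUpgrades above differ only in the expectedStoreNames and adaptive arguments passed to StoreLoaderForUpgrade. A condensed sketch of the equivalent flow, for orientation only; wireStoreLoader is a hypothetical helper and not part of this change set, while every other identifier comes from the files added below:

    package app

    import (
        storetypes "cosmossdk.io/store/types"

        "github.com/LumeraProtocol/lumera/app/upgrades"
    )

    // wireStoreLoader condenses the branch logic in setupUpgrades: adaptive mode
    // passes the app's registered KV store names so the loader can reconcile them
    // against what is on disk; otherwise only the per-upgrade StoreUpgrades apply.
    func (app *App) wireStoreLoader(chainID, name string, height int64, storeUpgrade *storetypes.StoreUpgrades) {
        adaptive := upgrades.ShouldEnableStoreUpgradeManager(chainID)

        var expected map[string]struct{}
        if adaptive {
            expected = upgrades.KVStoreNames(app.GetStoreKeys())
        }

        selection := upgrades.StoreLoaderForUpgrade(name, height, storeUpgrade, expected, app.Logger(), adaptive)
        app.SetStoreLoader(selection.Loader)
        app.Logger().Info(selection.LogMessage(), "name", name, "height", height)
    }
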
diff --git a/app/upgrades/consensus_store_loader.go b/app/upgrades/consensus_store_loader.go new file mode 100644 index 00000000..1ab3f1e1 --- /dev/null +++ b/app/upgrades/consensus_store_loader.go @@ -0,0 +1,188 @@ +package upgrades + +import ( + "sort" + + "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" + upgradetypes "cosmossdk.io/x/upgrade/types" + + "github.com/cosmos/cosmos-sdk/baseapp" +) + +const ( + legacyConsensusStoreKey = "Consensus" + consensusStoreKey = "consensus" +) + +// ConsensusStoreLoader builds a store loader that safely renames the legacy +// consensus store (if present) and avoids panics when the new store already exists. +// +// If expectedStoreNames is provided, the loader will also compute adaptive store +// upgrades against the existing on-disk stores. +func ConsensusStoreLoader( + upgradeHeight int64, + baseUpgrades *storetypes.StoreUpgrades, + expectedStoreNames map[string]struct{}, + logger log.Logger, +) baseapp.StoreLoader { + fallbackLoader := upgradetypes.UpgradeStoreLoader(upgradeHeight, baseUpgrades) + + return func(ms storetypes.CommitMultiStore) error { + if upgradeHeight != ms.LastCommitID().Version+1 { + return baseapp.DefaultStoreLoader(ms) + } + + existingStoreNames, err := loadExistingStoreNames(ms) + if err != nil { + logger.Error("Failed to load existing stores; falling back to standard upgrade loader", "error", err) + return fallbackLoader(ms) + } + + effective := computeConsensusStoreUpgrades(baseUpgrades, expectedStoreNames, existingStoreNames, logger) + if len(effective.Added) == 0 && len(effective.Deleted) == 0 && len(effective.Renamed) == 0 { + logger.Info("No store upgrades required; loading latest version", "height", upgradeHeight) + return baseapp.DefaultStoreLoader(ms) + } + + logger.Info( + "Applying store upgrades", + "height", upgradeHeight, + "added", effective.Added, + "deleted", effective.Deleted, + "renamed", formatStoreRenames(effective.Renamed), + ) + + return ms.LoadLatestVersionAndUpgrade(&effective) + } +} + +func computeConsensusStoreUpgrades( + baseUpgrades *storetypes.StoreUpgrades, + expectedStoreNames map[string]struct{}, + existingStoreNames map[string]struct{}, + logger log.Logger, +) storetypes.StoreUpgrades { + var effective storetypes.StoreUpgrades + if expectedStoreNames != nil { + effective = computeAdaptiveStoreUpgrades(baseUpgrades, expectedStoreNames, existingStoreNames) + effective.Renamed = filterStoreRenames(effective.Renamed, existingStoreNames) + } else { + effective = cloneStoreUpgrades(baseUpgrades) + effective.Added = filterStoreAdds(effective.Added, existingStoreNames) + effective.Deleted = filterStoreDeletes(effective.Deleted, existingStoreNames) + effective.Renamed = filterStoreRenames(effective.Renamed, existingStoreNames) + } + + hasLegacy := storeExists(existingStoreNames, legacyConsensusStoreKey) + hasNew := storeExists(existingStoreNames, consensusStoreKey) + + switch { + case hasLegacy && !hasNew: + effective.Added = removeStoreName(effective.Added, consensusStoreKey) + effective.Deleted = removeStoreName(effective.Deleted, legacyConsensusStoreKey) + effective.Renamed = append(effective.Renamed, storetypes.StoreRename{ + OldKey: legacyConsensusStoreKey, + NewKey: consensusStoreKey, + }) + case !hasLegacy && !hasNew: + if expectedStoreNames == nil { + effective.Added = append(effective.Added, consensusStoreKey) + } + case hasLegacy && hasNew: + effective.Deleted = removeStoreName(effective.Deleted, legacyConsensusStoreKey) + logger.Info("Both legacy and new consensus stores exist; 
skipping rename", "old", legacyConsensusStoreKey, "new", consensusStoreKey) + } + + effective.Added = uniqueSortedStores(effective.Added) + effective.Deleted = uniqueSortedStores(effective.Deleted) + + return effective +} + +func cloneStoreUpgrades(base *storetypes.StoreUpgrades) storetypes.StoreUpgrades { + if base == nil { + return storetypes.StoreUpgrades{} + } + return storetypes.StoreUpgrades{ + Added: append([]string(nil), base.Added...), + Deleted: append([]string(nil), base.Deleted...), + Renamed: append([]storetypes.StoreRename(nil), base.Renamed...), + } +} + +func filterStoreAdds(added []string, existing map[string]struct{}) []string { + if len(added) == 0 { + return nil + } + out := make([]string, 0, len(added)) + for _, name := range added { + if !storeExists(existing, name) { + out = append(out, name) + } + } + return out +} + +func filterStoreDeletes(deleted []string, existing map[string]struct{}) []string { + if len(deleted) == 0 { + return nil + } + out := make([]string, 0, len(deleted)) + for _, name := range deleted { + if storeExists(existing, name) { + out = append(out, name) + } + } + return out +} + +func filterStoreRenames(renames []storetypes.StoreRename, existing map[string]struct{}) []storetypes.StoreRename { + if len(renames) == 0 { + return nil + } + out := make([]storetypes.StoreRename, 0, len(renames)) + for _, rename := range renames { + if storeExists(existing, rename.OldKey) && !storeExists(existing, rename.NewKey) { + out = append(out, rename) + } + } + return out +} + +func storeExists(existing map[string]struct{}, name string) bool { + if existing == nil { + return false + } + _, ok := existing[name] + return ok +} + +func removeStoreName(names []string, target string) []string { + if len(names) == 0 { + return nil + } + out := names[:0] + for _, name := range names { + if name != target { + out = append(out, name) + } + } + return out +} + +func uniqueSortedStores(names []string) []string { + if len(names) == 0 { + return nil + } + set := make(map[string]struct{}, len(names)) + for _, name := range names { + set[name] = struct{}{} + } + out := make([]string, 0, len(set)) + for name := range set { + out = append(out, name) + } + sort.Strings(out) + return out +} diff --git a/app/upgrades/consensus_store_loader_test.go b/app/upgrades/consensus_store_loader_test.go new file mode 100644 index 00000000..c27d400e --- /dev/null +++ b/app/upgrades/consensus_store_loader_test.go @@ -0,0 +1,55 @@ +package upgrades + +import ( + "testing" + + "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" + "github.com/stretchr/testify/require" +) + +func TestComputeConsensusStoreUpgrades_RenameWhenLegacyOnly(t *testing.T) { + expected := setOf("consensus") + existing := setOf("Consensus", "crisis") + base := &storetypes.StoreUpgrades{ + Deleted: []string{"crisis"}, + } + + effective := computeConsensusStoreUpgrades(base, expected, existing, log.NewNopLogger()) + + require.Len(t, effective.Renamed, 1) + require.Equal(t, storetypes.StoreRename{OldKey: "Consensus", NewKey: "consensus"}, effective.Renamed[0]) + require.NotContains(t, effective.Added, "consensus") + require.NotContains(t, effective.Deleted, "Consensus") + require.Contains(t, effective.Deleted, "crisis") +} + +func TestComputeConsensusStoreUpgrades_NoRenameWhenNewExists(t *testing.T) { + expected := setOf("consensus") + existing := setOf("consensus") + + effective := computeConsensusStoreUpgrades(nil, expected, existing, log.NewNopLogger()) + + require.Empty(t, effective.Renamed) + require.Empty(t, 
effective.Added) + require.Empty(t, effective.Deleted) +} + +func TestComputeConsensusStoreUpgrades_AddsConsensusWhenMissingNonAdaptive(t *testing.T) { + existing := map[string]struct{}{} + + effective := computeConsensusStoreUpgrades(nil, nil, existing, log.NewNopLogger()) + + require.Contains(t, effective.Added, "consensus") + require.Empty(t, effective.Renamed) +} + +func TestComputeConsensusStoreUpgrades_NoRenameWhenBothExist(t *testing.T) { + expected := setOf("consensus") + existing := setOf("Consensus", "consensus") + + effective := computeConsensusStoreUpgrades(nil, expected, existing, log.NewNopLogger()) + + require.Empty(t, effective.Renamed) + require.NotContains(t, effective.Deleted, "Consensus") +} diff --git a/app/upgrades/internal/consensusparams/consensusparams.go b/app/upgrades/internal/consensusparams/consensusparams.go new file mode 100644 index 00000000..bd58f3a9 --- /dev/null +++ b/app/upgrades/internal/consensusparams/consensusparams.go @@ -0,0 +1,128 @@ +package consensusparams + +import ( + "errors" + "fmt" + + "cosmossdk.io/collections" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttypes "github.com/cometbft/cometbft/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk "github.com/cosmos/cosmos-sdk/types" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + + appParams "github.com/LumeraProtocol/lumera/app/upgrades/params" +) + +func MigrateFromLegacy(ctx sdk.Context, p appParams.AppUpgradeParams, upgradeName string) error { + if err := requireKeepers(p, upgradeName); err != nil { + return err + } + + legacySubspace := p.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()) + legacyParams := baseapp.GetConsensusParams(ctx, legacySubspace) + if legacyParams == nil { + p.Logger.Info("Legacy consensus params missing; skipping migration", "upgrade", upgradeName) + return nil + } + if !isConsensusParamsComplete(*legacyParams) { + p.Logger.Info("Legacy consensus params incomplete; skipping migration", "upgrade", upgradeName) + return nil + } + + if err := baseapp.MigrateParams(ctx, legacySubspace, p.ConsensusParamsKeeper.ParamsStore); err != nil { + return fmt.Errorf("failed to migrate consensus params: %w", err) + } + p.Logger.Info("Legacy consensus params migrated", "upgrade", upgradeName) + return nil +} + +func EnsurePresent(ctx sdk.Context, p appParams.AppUpgradeParams, upgradeName string) error { + if err := requireKeepers(p, upgradeName); err != nil { + return err + } + + legacySubspace := p.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()) + defaultParams := cmttypes.DefaultConsensusParams().ToProto() + legacyParams := baseapp.GetConsensusParams(ctx, legacySubspace) + + targetParams := mergeConsensusParams(defaultParams, legacyParams) + + storeParams, err := p.ConsensusParamsKeeper.ParamsStore.Get(ctx) + switch { + case err == nil: + if !isConsensusParamsComplete(storeParams) { + fixed := mergeConsensusParams(targetParams, &storeParams) + if !isConsensusParamsComplete(fixed) { + return fmt.Errorf("consensus params remain incomplete after merge") + } + if err := p.ConsensusParamsKeeper.ParamsStore.Set(ctx, fixed); err != nil { + return fmt.Errorf("failed to repair consensus params: %w", err) + } + p.Logger.Info("Consensus params were incomplete; repaired using legacy/defaults") + } else { + p.Logger.Info("Consensus params already set; skipping repair") + } + case errors.Is(err, collections.ErrNotFound): + if err := 
p.ConsensusParamsKeeper.ParamsStore.Set(ctx, targetParams); err != nil { + return fmt.Errorf("failed to seed consensus params: %w", err) + } + logSource := "defaults" + if legacyParams != nil && isConsensusParamsComplete(*legacyParams) { + logSource = "legacy" + } + p.Logger.Info("Consensus params missing; seeded", "source", logSource) + default: + return fmt.Errorf("failed to read consensus params: %w", err) + } + + return nil +} + +func requireKeepers(p appParams.AppUpgradeParams, upgradeName string) error { + if p.ParamsKeeper == nil || p.ConsensusParamsKeeper == nil { + return fmt.Errorf("%s upgrade requires ParamsKeeper and ConsensusParamsKeeper", upgradeName) + } + return nil +} + +func isConsensusParamsComplete(p cmtproto.ConsensusParams) bool { + if p.Block == nil || p.Evidence == nil || p.Validator == nil { + return false + } + if p.Block.MaxBytes == 0 { + return false + } + if p.Evidence.MaxAgeNumBlocks <= 0 || p.Evidence.MaxAgeDuration <= 0 { + return false + } + if len(p.Validator.PubKeyTypes) == 0 { + return false + } + return true +} + +func mergeConsensusParams(base cmtproto.ConsensusParams, override *cmtproto.ConsensusParams) cmtproto.ConsensusParams { + if override == nil { + return base + } + + if override.Block != nil && override.Block.MaxBytes != 0 { + base.Block = override.Block + } + if override.Evidence != nil && override.Evidence.MaxAgeNumBlocks > 0 && override.Evidence.MaxAgeDuration > 0 { + base.Evidence = override.Evidence + } + if override.Validator != nil && len(override.Validator.PubKeyTypes) > 0 { + base.Validator = override.Validator + } + if override.Version != nil { + base.Version = override.Version + } + if override.Abci != nil { + base.Abci = override.Abci + } + return base +} diff --git a/app/upgrades/store_loader_selector.go b/app/upgrades/store_loader_selector.go new file mode 100644 index 00000000..0fe5be55 --- /dev/null +++ b/app/upgrades/store_loader_selector.go @@ -0,0 +1,60 @@ +package upgrades + +import ( + "fmt" + + "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" + upgradetypes "cosmossdk.io/x/upgrade/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + + upgrade_v1_10_1 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_1" +) + +type StoreLoaderSelection struct { + Loader baseapp.StoreLoader + LogLabel string +} + +// StoreLoaderForUpgrade returns the store loader to use for a given upgrade plan. +// When adaptive mode is enabled, expectedStoreNames should be provided. 
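+//
+// Illustrative call shape (assumption, not part of this change):
+//
+//	sel := StoreLoaderForUpgrade(plan.Name, plan.Height, baseUpgrades, names, logger, adaptive)
+//	app.SetStoreLoader(sel.Loader)
+//
+// The v1.10.1 plan always receives the consensus-rename loader; other plans
+// receive the adaptive loader when adaptive is true, or the stock
+// upgradetypes.UpgradeStoreLoader otherwise.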
+func StoreLoaderForUpgrade( + upgradeName string, + upgradeHeight int64, + baseUpgrades *storetypes.StoreUpgrades, + expectedStoreNames map[string]struct{}, + logger log.Logger, + adaptive bool, +) StoreLoaderSelection { + if adaptive { + if upgradeName == upgrade_v1_10_1.UpgradeName { + return StoreLoaderSelection{ + Loader: ConsensusStoreLoader(upgradeHeight, baseUpgrades, expectedStoreNames, logger), + LogLabel: "consensus rename", + } + } + return StoreLoaderSelection{ + Loader: AdaptiveStoreLoader(upgradeHeight, baseUpgrades, expectedStoreNames, logger), + LogLabel: "adaptive mode", + } + } + + if upgradeName == upgrade_v1_10_1.UpgradeName { + return StoreLoaderSelection{ + Loader: ConsensusStoreLoader(upgradeHeight, baseUpgrades, nil, logger), + LogLabel: "consensus rename", + } + } + + return StoreLoaderSelection{ + Loader: upgradetypes.UpgradeStoreLoader(upgradeHeight, baseUpgrades), + } +} + +func (s StoreLoaderSelection) LogMessage() string { + if s.LogLabel == "" { + return "Configured store loader for upgrade" + } + return fmt.Sprintf("Configured store loader for upgrade (%s)", s.LogLabel) +} diff --git a/app/upgrades/store_loader_selector_test.go b/app/upgrades/store_loader_selector_test.go new file mode 100644 index 00000000..8acd8280 --- /dev/null +++ b/app/upgrades/store_loader_selector_test.go @@ -0,0 +1,66 @@ +package upgrades + +import ( + "testing" + + "cosmossdk.io/log" + "github.com/stretchr/testify/require" + + upgrade_v1_10_1 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_1" +) + +func TestStoreLoaderForUpgrade_AdaptiveConsensusRename(t *testing.T) { + selection := StoreLoaderForUpgrade( + upgrade_v1_10_1.UpgradeName, + 100, + nil, + map[string]struct{}{}, + log.NewNopLogger(), + true, + ) + + require.NotNil(t, selection.Loader) + require.Equal(t, "Configured store loader for upgrade (consensus rename)", selection.LogMessage()) +} + +func TestStoreLoaderForUpgrade_AdaptiveDefault(t *testing.T) { + selection := StoreLoaderForUpgrade( + "v9.9.9", + 100, + nil, + map[string]struct{}{}, + log.NewNopLogger(), + true, + ) + + require.NotNil(t, selection.Loader) + require.Equal(t, "Configured store loader for upgrade (adaptive mode)", selection.LogMessage()) +} + +func TestStoreLoaderForUpgrade_NonAdaptiveConsensusRename(t *testing.T) { + selection := StoreLoaderForUpgrade( + upgrade_v1_10_1.UpgradeName, + 100, + nil, + nil, + log.NewNopLogger(), + false, + ) + + require.NotNil(t, selection.Loader) + require.Equal(t, "Configured store loader for upgrade (consensus rename)", selection.LogMessage()) +} + +func TestStoreLoaderForUpgrade_NonAdaptiveDefault(t *testing.T) { + selection := StoreLoaderForUpgrade( + "v9.9.9", + 100, + nil, + nil, + log.NewNopLogger(), + false, + ) + + require.NotNil(t, selection.Loader) + require.Equal(t, "Configured store loader for upgrade", selection.LogMessage()) +} diff --git a/app/upgrades/store_upgrade_manager.go b/app/upgrades/store_upgrade_manager.go new file mode 100644 index 00000000..3947b466 --- /dev/null +++ b/app/upgrades/store_upgrade_manager.go @@ -0,0 +1,208 @@ +package upgrades + +import ( + "fmt" + "os" + "sort" + "strconv" + "strings" + + "cosmossdk.io/log" + storetypes "cosmossdk.io/store/types" + upgradetypes "cosmossdk.io/x/upgrade/types" + + "github.com/cosmos/cosmos-sdk/baseapp" +) + +// EnvEnableStoreUpgradeManager toggles the adaptive store upgrade manager. +// Intended for devnet environments where skipping intermediate upgrades is useful. 
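+//
+// Illustrative usage on a devnet node (assumption, not part of this change):
+//
+//	LUMERA_ENABLE_STORE_UPGRADE_MANAGER=true lumerad start
+//
+// ShouldEnableStoreUpgradeManager honors the variable only for devnet chain
+// IDs, so setting it on testnet or mainnet nodes is a no-op.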
+const EnvEnableStoreUpgradeManager = "LUMERA_ENABLE_STORE_UPGRADE_MANAGER" + +// ShouldEnableStoreUpgradeManager returns true when the adaptive store upgrade +// manager should be used for this chain. +func ShouldEnableStoreUpgradeManager(chainID string) bool { + if !IsDevnet(chainID) { + return false + } + return envBool(EnvEnableStoreUpgradeManager) +} + +// KVStoreNames returns the set of persistent KV store names registered in the app. +func KVStoreNames(storeKeys []storetypes.StoreKey) map[string]struct{} { + names := make(map[string]struct{}, len(storeKeys)) + for _, key := range storeKeys { + if _, ok := key.(*storetypes.KVStoreKey); !ok { + continue + } + names[key.Name()] = struct{}{} + } + return names +} + +// AdaptiveStoreLoader builds a store loader that merges explicit store upgrades with +// a diff between on-disk stores and the app's registered KV stores. +// This enables skipping intermediate upgrades in dev/test networks. +func AdaptiveStoreLoader( + upgradeHeight int64, + baseUpgrades *storetypes.StoreUpgrades, + expectedStoreNames map[string]struct{}, + logger log.Logger, +) baseapp.StoreLoader { + fallbackLoader := upgradetypes.UpgradeStoreLoader(upgradeHeight, baseUpgrades) + + return func(ms storetypes.CommitMultiStore) error { + if upgradeHeight != ms.LastCommitID().Version+1 { + return baseapp.DefaultStoreLoader(ms) + } + + existingStoreNames, err := loadExistingStoreNames(ms) + if err != nil { + logger.Error("Failed to load existing stores; falling back to standard upgrade loader", "error", err) + return fallbackLoader(ms) + } + + effective := computeAdaptiveStoreUpgrades(baseUpgrades, expectedStoreNames, existingStoreNames) + if len(effective.Added) == 0 && len(effective.Deleted) == 0 && len(effective.Renamed) == 0 { + logger.Info("No store upgrades required after diff; loading latest version", "height", upgradeHeight) + return baseapp.DefaultStoreLoader(ms) + } + + logger.Info( + "Applying adaptive store upgrades", + "height", upgradeHeight, + "added", effective.Added, + "deleted", effective.Deleted, + "renamed", formatStoreRenames(effective.Renamed), + ) + + return ms.LoadLatestVersionAndUpgrade(&effective) + } +} + +type commitInfoReader interface { + GetCommitInfo(int64) (*storetypes.CommitInfo, error) +} + +func loadExistingStoreNames(ms storetypes.CommitMultiStore) (map[string]struct{}, error) { + version := ms.LastCommitID().Version + if version == 0 { + return map[string]struct{}{}, nil + } + + reader, ok := ms.(commitInfoReader) + if !ok { + return nil, fmt.Errorf("commit multistore does not expose commit info") + } + + cInfo, err := reader.GetCommitInfo(version) + if err != nil { + return nil, fmt.Errorf("failed to read commit info for version %d: %w", version, err) + } + + names := make(map[string]struct{}, len(cInfo.StoreInfos)) + for _, info := range cInfo.StoreInfos { + names[info.Name] = struct{}{} + } + + return names, nil +} + +func computeAdaptiveStoreUpgrades( + baseUpgrades *storetypes.StoreUpgrades, + expectedStoreNames map[string]struct{}, + existingStoreNames map[string]struct{}, +) storetypes.StoreUpgrades { + if expectedStoreNames == nil { + expectedStoreNames = map[string]struct{}{} + } + if existingStoreNames == nil { + existingStoreNames = map[string]struct{}{} + } + + effective := storetypes.StoreUpgrades{} + if baseUpgrades != nil { + effective.Renamed = append([]storetypes.StoreRename(nil), baseUpgrades.Renamed...) 
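+		// Renames pass through from the explicit plan untouched; Added and
+		// Deleted are rebuilt below by merging the plan's entries with the
+		// expected-vs-on-disk diff, then dropping entries that would be no-ops.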
+ } + + added := make(map[string]struct{}) + deleted := make(map[string]struct{}) + + if baseUpgrades != nil { + for _, name := range baseUpgrades.Added { + added[name] = struct{}{} + } + for _, name := range baseUpgrades.Deleted { + deleted[name] = struct{}{} + } + } + + for name := range expectedStoreNames { + if _, exists := existingStoreNames[name]; !exists { + added[name] = struct{}{} + } + } + + for name := range existingStoreNames { + if _, expected := expectedStoreNames[name]; !expected { + deleted[name] = struct{}{} + } + } + + for name := range added { + if _, exists := existingStoreNames[name]; exists { + delete(added, name) + } + } + + for name := range deleted { + if _, exists := existingStoreNames[name]; !exists { + delete(deleted, name) + } + } + + for name := range added { + if _, exists := deleted[name]; exists { + delete(added, name) + } + } + + effective.Added = sortedKeys(added) + effective.Deleted = sortedKeys(deleted) + + return effective +} + +func sortedKeys(set map[string]struct{}) []string { + if len(set) == 0 { + return nil + } + keys := make([]string, 0, len(set)) + for key := range set { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func formatStoreRenames(renames []storetypes.StoreRename) []string { + if len(renames) == 0 { + return nil + } + out := make([]string, 0, len(renames)) + for _, rename := range renames { + out = append(out, fmt.Sprintf("%s->%s", rename.OldKey, rename.NewKey)) + } + return out +} + +func envBool(key string) bool { + value := strings.TrimSpace(os.Getenv(key)) + if value == "" { + return false + } + parsed, err := strconv.ParseBool(value) + if err != nil { + return false + } + return parsed +} diff --git a/app/upgrades/store_upgrade_manager_test.go b/app/upgrades/store_upgrade_manager_test.go new file mode 100644 index 00000000..25436904 --- /dev/null +++ b/app/upgrades/store_upgrade_manager_test.go @@ -0,0 +1,48 @@ +package upgrades + +import ( + "testing" + + storetypes "cosmossdk.io/store/types" + "github.com/stretchr/testify/require" +) + +func TestComputeAdaptiveStoreUpgrades(t *testing.T) { + expected := setOf("auth", "bank", "pfm") + existing := setOf("auth", "bank", "nft") + + base := &storetypes.StoreUpgrades{ + Added: []string{"pfm"}, + Deleted: []string{"nft", "crisis"}, + Renamed: []storetypes.StoreRename{{OldKey: "old", NewKey: "new"}}, + } + + effective := computeAdaptiveStoreUpgrades(base, expected, existing) + + require.ElementsMatch(t, []string{"pfm"}, effective.Added) + require.ElementsMatch(t, []string{"nft"}, effective.Deleted) + require.Equal(t, base.Renamed, effective.Renamed) +} + +func TestComputeAdaptiveStoreUpgradesFiltersExistingAdds(t *testing.T) { + expected := setOf("auth", "bank") + existing := setOf("auth", "bank") + + base := &storetypes.StoreUpgrades{ + Added: []string{"auth"}, + Deleted: []string{"crisis"}, + } + + effective := computeAdaptiveStoreUpgrades(base, expected, existing) + + require.Empty(t, effective.Added) + require.Empty(t, effective.Deleted) +} + +func setOf(names ...string) map[string]struct{} { + out := make(map[string]struct{}, len(names)) + for _, name := range names { + out[name] = struct{}{} + } + return out +} diff --git a/app/upgrades/upgrades.go b/app/upgrades/upgrades.go index 135c6bc0..c95cb262 100644 --- a/app/upgrades/upgrades.go +++ b/app/upgrades/upgrades.go @@ -11,11 +11,12 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" appParams "github.com/LumeraProtocol/lumera/app/upgrades/params" + upgrade_v1_10_0 
"github.com/LumeraProtocol/lumera/app/upgrades/v1_10_0" + upgrade_v1_10_1 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_1" upgrade_v1_6_1 "github.com/LumeraProtocol/lumera/app/upgrades/v1_6_1" upgrade_v1_8_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_8_0" upgrade_v1_8_4 "github.com/LumeraProtocol/lumera/app/upgrades/v1_8_4" upgrade_v1_9_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_9_0" - upgrade_v1_10_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_0" ) // ================================================================================================================================= @@ -31,6 +32,7 @@ import ( // | v1.9.0 | custom | none | Backfills action/supernode secondary indices // | v1.9.1 | standard | none | Migrations only // | v1.10.0 | custom | drop crisis | Migrate consensus params from x/params to x/consensus; remove x/crisis +// | v1.10.1 | custom | drop crisis (if not already) | Ensure consensus params are present in x/consensus // ================================================================================================================================= type UpgradeConfig struct { @@ -57,6 +59,7 @@ var upgradeNames = []string{ upgrade_v1_9_0.UpgradeName, upgradeNameV191, upgrade_v1_10_0.UpgradeName, + upgrade_v1_10_1.UpgradeName, } var NoUpgradeConfig = UpgradeConfig{ @@ -114,7 +117,12 @@ func SetupUpgrades(upgradeName string, params appParams.AppUpgradeParams) (Upgra case upgrade_v1_10_0.UpgradeName: return UpgradeConfig{ StoreUpgrade: &upgrade_v1_10_0.StoreUpgrades, - Handler: upgrade_v1_10_0.CreateUpgradeHandler(params), + Handler: upgrade_v1_10_0.CreateUpgradeHandler(params), + }, true + case upgrade_v1_10_1.UpgradeName: + return UpgradeConfig{ + StoreUpgrade: &upgrade_v1_10_1.StoreUpgrades, + Handler: upgrade_v1_10_1.CreateUpgradeHandler(params), }, true // add future upgrades here diff --git a/app/upgrades/upgrades_test.go b/app/upgrades/upgrades_test.go index 5cba46e0..81235a27 100644 --- a/app/upgrades/upgrades_test.go +++ b/app/upgrades/upgrades_test.go @@ -11,11 +11,12 @@ import ( "github.com/stretchr/testify/require" appParams "github.com/LumeraProtocol/lumera/app/upgrades/params" + upgrade_v1_10_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_0" + upgrade_v1_10_1 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_1" upgrade_v1_6_1 "github.com/LumeraProtocol/lumera/app/upgrades/v1_6_1" upgrade_v1_8_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_8_0" upgrade_v1_8_4 "github.com/LumeraProtocol/lumera/app/upgrades/v1_8_4" upgrade_v1_9_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_9_0" - upgrade_v1_10_0 "github.com/LumeraProtocol/lumera/app/upgrades/v1_10_0" actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" ) @@ -31,6 +32,7 @@ func TestUpgradeNamesOrder(t *testing.T) { upgrade_v1_9_0.UpgradeName, upgradeNameV191, upgrade_v1_10_0.UpgradeName, + upgrade_v1_10_1.UpgradeName, } require.Equal(t, expected, upgradeNames, "upgradeNames should stay in ascending order") } @@ -76,7 +78,7 @@ func TestSetupUpgradesAndHandlers(t *testing.T) { // v1.9.0 requires full keeper wiring; exercising it here would require // a full app harness. This test only verifies registration and gating. 
- if upgradeName == upgrade_v1_9_0.UpgradeName || upgradeName == upgrade_v1_10_0.UpgradeName { + if upgradeName == upgrade_v1_9_0.UpgradeName || upgradeName == upgrade_v1_10_0.UpgradeName || upgradeName == upgrade_v1_10_1.UpgradeName { continue } @@ -121,6 +123,8 @@ func expectStoreUpgrade(upgradeName, chainID string) bool { return IsMainnet(chainID) case upgrade_v1_10_0.UpgradeName: return true + case upgrade_v1_10_1.UpgradeName: + return true default: return false } diff --git a/app/upgrades/v1_10_0/upgrade.go b/app/upgrades/v1_10_0/upgrade.go index f8b36533..7e3d518b 100644 --- a/app/upgrades/v1_10_0/upgrade.go +++ b/app/upgrades/v1_10_0/upgrade.go @@ -6,11 +6,10 @@ import ( upgradetypes "cosmossdk.io/x/upgrade/types" - "github.com/cosmos/cosmos-sdk/baseapp" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + consensusparams "github.com/LumeraProtocol/lumera/app/upgrades/internal/consensusparams" appParams "github.com/LumeraProtocol/lumera/app/upgrades/params" ) @@ -25,16 +24,8 @@ func CreateUpgradeHandler(p appParams.AppUpgradeParams) upgradetypes.UpgradeHand ctx := sdk.UnwrapSDKContext(goCtx) - if p.ParamsKeeper == nil || p.ConsensusParamsKeeper == nil { - return nil, fmt.Errorf("%s upgrade requires ParamsKeeper and ConsensusParamsKeeper", UpgradeName) - } - - // Use the legacy baseapp paramspace to read existing consensus params from x/params. - // This is required for in-place upgrades where consensus params were historically stored in x/params. - legacySubspace := p.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()) - // Migrate consensus params into x/consensus (ConsensusParamsKeeper), which is collections-backed in v0.53+. - if err := baseapp.MigrateParams(ctx, legacySubspace, p.ConsensusParamsKeeper.ParamsStore); err != nil { - return nil, fmt.Errorf("failed to migrate consensus params: %w", err) + if err := consensusparams.MigrateFromLegacy(ctx, p, UpgradeName); err != nil { + return nil, err } // Run all module migrations after consensus params have been moved. diff --git a/app/upgrades/v1_10_1/store.go b/app/upgrades/v1_10_1/store.go new file mode 100644 index 00000000..c46a3355 --- /dev/null +++ b/app/upgrades/v1_10_1/store.go @@ -0,0 +1,12 @@ +package v1_10_1 + +import ( + storetypes "cosmossdk.io/store/types" + crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" +) + +// StoreUpgrades defines store changes for v1.10.1. +// This mirrors v1.10.0 so chains upgrading directly from v1.9.1 still drop the crisis store. +var StoreUpgrades = storetypes.StoreUpgrades{ + Deleted: []string{crisistypes.StoreKey}, +} diff --git a/app/upgrades/v1_10_1/upgrade.go b/app/upgrades/v1_10_1/upgrade.go new file mode 100644 index 00000000..0456119d --- /dev/null +++ b/app/upgrades/v1_10_1/upgrade.go @@ -0,0 +1,43 @@ +package v1_10_1 + +import ( + "context" + "fmt" + + upgradetypes "cosmossdk.io/x/upgrade/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + + consensusparams "github.com/LumeraProtocol/lumera/app/upgrades/internal/consensusparams" + appParams "github.com/LumeraProtocol/lumera/app/upgrades/params" +) + +// UpgradeName is the on-chain name used for this upgrade. +const UpgradeName = "v1.10.1" + +// CreateUpgradeHandler migrates consensus params from x/params to x/consensus, +// then repairs consensus params if they are missing or incomplete. 
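+//
+// Note: unlike v1.10.0, this handler delegates to consensusparams.EnsurePresent,
+// which seeds x/consensus from complete legacy x/params values when available,
+// falls back to CometBFT defaults otherwise, and repairs partially populated
+// stores in place.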
+func CreateUpgradeHandler(p appParams.AppUpgradeParams) upgradetypes.UpgradeHandler { + return func(goCtx context.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + p.Logger.Info(fmt.Sprintf("Starting upgrade %s...", UpgradeName)) + + ctx := sdk.UnwrapSDKContext(goCtx) + + if err := consensusparams.EnsurePresent(ctx, p, UpgradeName); err != nil { + return nil, err + } + + // Run all module migrations after consensus params have been verified. + p.Logger.Info("Running module migrations...") + newVM, err := p.ModuleManager.RunMigrations(ctx, p.Configurator, fromVM) + if err != nil { + p.Logger.Error("Failed to run migrations", "error", err) + return nil, fmt.Errorf("failed to run migrations: %w", err) + } + p.Logger.Info("Module migrations completed.") + + p.Logger.Info(fmt.Sprintf("Successfully completed upgrade %s", UpgradeName)) + return newVM, nil + } +} diff --git a/devnet/go.mod b/devnet/go.mod index 447afd22..f5f27906 100644 --- a/devnet/go.mod +++ b/devnet/go.mod @@ -16,8 +16,8 @@ replace ( require ( cosmossdk.io/api v0.9.2 cosmossdk.io/math v1.5.3 - github.com/LumeraProtocol/lumera v1.9.1 - github.com/LumeraProtocol/sdk-go v1.0.6 + github.com/LumeraProtocol/lumera v1.10.0 + github.com/LumeraProtocol/sdk-go v1.0.8 github.com/cosmos/cosmos-sdk v0.53.5 github.com/cosmos/ibc-go/v10 v10.5.0 github.com/stretchr/testify v1.11.1 @@ -41,7 +41,7 @@ require ( github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect github.com/LumeraProtocol/rq-go v0.2.1 // indirect - github.com/LumeraProtocol/supernode/v2 v2.4.23 // indirect + github.com/LumeraProtocol/supernode/v2 v2.4.27 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -66,7 +66,7 @@ require ( github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect github.com/cosmos/gogoproto v1.7.2 // indirect - github.com/cosmos/iavl v1.2.4 // indirect + github.com/cosmos/iavl v1.2.6 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect github.com/cosmos/ledger-cosmos-go v0.16.0 // indirect github.com/danieljoos/wincred v1.2.2 // indirect @@ -138,9 +138,9 @@ require ( github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect @@ -169,18 +169,18 @@ require ( go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/arch v0.17.0 // indirect - golang.org/x/crypto v0.43.0 // indirect + golang.org/x/crypto v0.47.0 // indirect golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect - golang.org/x/net v0.46.1-0.20251013234738-63d1a5100f82 // indirect - golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/text v0.30.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // 
indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect google.golang.org/grpc v1.77.0 // indirect - google.golang.org/protobuf v1.36.10 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect lukechampine.com/blake3 v1.4.1 // indirect diff --git a/devnet/go.sum b/devnet/go.sum index ded2db5a..8df15cb8 100644 --- a/devnet/go.sum +++ b/devnet/go.sum @@ -54,8 +54,8 @@ cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6Q cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= -cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1 h1:nlMUeKu6CGrO7Gxt5S31qT3g27CHmBJHsZPjqHApVTI= -cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1/go.mod h1:xgv0ejeOk5yeDraPW5tv+PfBkCDt4yYa/+u45MyP+bM= +cosmossdk.io/client/v2 v2.0.0-beta.11 h1:iHbjDw/NuNz2OVaPmx0iE9eu2HrbX+WAv2u9guRcd6o= +cosmossdk.io/client/v2 v2.0.0-beta.11/go.mod h1:ZmmxMUpALO2r1aG6fNOonE7f8I1g/WsafJgVAeQ0ffs= cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= @@ -72,12 +72,12 @@ cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= -cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= -cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= -cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= -cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= -cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= -cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= +cosmossdk.io/x/circuit v0.2.0 h1:RJPMBQWCQU77EcM9HDTBnqRhq21fcUxgWZl7BZylJZo= +cosmossdk.io/x/circuit v0.2.0/go.mod h1:CjiGXDeZs64nMv0fG+QmvGVTcn7n3Sv4cDszMRR2JqU= +cosmossdk.io/x/evidence v0.2.0 h1:o72zbmgCM7U0v7z7b0XnMB+NqX0tFamqb1HHkQbhrZ0= +cosmossdk.io/x/evidence v0.2.0/go.mod h1:zx/Xqy+hnGVzkqVuVuvmP9KsO6YCl4SfbAetYi+k+sE= +cosmossdk.io/x/feegrant v0.2.0 h1:oq3WVpoJdxko/XgWmpib63V1mYy9ZQN/1qxDajwGzJ8= +cosmossdk.io/x/feegrant v0.2.0/go.mod h1:9CutZbmhulk/Yo6tQSVD5LG8Lk40ZAQ1OX4d1CODWAE= cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= cosmossdk.io/x/upgrade v0.2.0 h1:ZHy0xny3wBCSLomyhE06+UmQHWO8cYlVYjfFAJxjz5g= @@ -93,10 +93,10 @@ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEK github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CosmWasm/wasmd v0.55.0-ibc2.0 h1:9bH+QDnSGxmZhjSykLYGtW4sltzGFFVm10Awk683q2Y= -github.com/CosmWasm/wasmd v0.55.0-ibc2.0/go.mod h1:c9l+eycjUB2zNVLIGjAXd7QrFEbxVTEa1Fh1Mx74VwQ= -github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0 h1:QoagSm5iYuRSPYDxgRxsa6hVfDppUp4+bOwY7bDuMO0= -github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0/go.mod h1:oknpb1bFERvvKcY7vHRp1F/Y/z66xVrsl7n9uWkOAlM= +github.com/CosmWasm/wasmd v0.61.6 h1:wa1rY/mZi8OYnf0f6a02N7o3vBockOfL3P37hSH0XtY= +github.com/CosmWasm/wasmd v0.61.6/go.mod h1:Wg2gfY2qrjjFY8UvpkTCRdy8t67qebOQn7UvRiGRzDw= +github.com/CosmWasm/wasmvm/v3 v3.0.2 h1:+MLkOX+IdklITLqfG26PCFv5OXdZvNb8z5Wq5JFXTRM= +github.com/CosmWasm/wasmvm/v3 v3.0.2/go.mod h1:oknpb1bFERvvKcY7vHRp1F/Y/z66xVrsl7n9uWkOAlM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -109,14 +109,14 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.9.1 h1:4hI0sHHrZOiKP+S3GpLNHYeQTatXBftmcUE3ZDA91mU= -github.com/LumeraProtocol/lumera v1.9.1/go.mod h1:38BX04sncJe191asQ4rU/EeYyVflybkU0VN4LDvLKps= +github.com/LumeraProtocol/lumera v1.10.0 h1:IIuvqlFNUPoSkTJ3DoKDNHtr3E0+8GmE4CiNbgTzI2s= +github.com/LumeraProtocol/lumera v1.10.0/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= -github.com/LumeraProtocol/sdk-go v1.0.6 h1:lSGT4zLvU8lEgU6zQyqdIhHkNZrKmBHWx0OcqAathpg= -github.com/LumeraProtocol/sdk-go v1.0.6/go.mod h1:/F2KoTP8Uv15Sr6Yb9Vjel4n/Kw0j12nXwzA9jKcdaQ= -github.com/LumeraProtocol/supernode/v2 v2.4.23 h1:vkOJwVtXvuosh4q1YV2E9AVqzXzoREM5EFxh7XWEJ4Y= -github.com/LumeraProtocol/supernode/v2 v2.4.23/go.mod h1:2juzppFSk/vP0kRsROIRxqc4WHBfm3dq9twD6KndWrA= +github.com/LumeraProtocol/sdk-go v1.0.8 h1:8M4QgrrmblDM42ABaKxFfjeF9/xtTHDkRwTYHEbtrSk= +github.com/LumeraProtocol/sdk-go v1.0.8/go.mod h1:1vk9PHzQGVU0V7EnWANTyUrXJmBIRXW9ayOGhXbXVAM= +github.com/LumeraProtocol/supernode/v2 v2.4.27 h1:Bw2tpuA2uly8ajYT+Q5bKRWyUugPlKHV3S5oMQGGoF4= +github.com/LumeraProtocol/supernode/v2 v2.4.27/go.mod h1:tTsXf0CV8OHAzVDQH/IGjHQ1fJtp0ABZmavkVCoYE4U= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -252,8 +252,8 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= github.com/cosmos/gogoproto v1.7.2 
h1:5G25McIraOC0mRFv9TVO139Uh3OklV2hczr13KKVHCA= github.com/cosmos/gogoproto v1.7.2/go.mod h1:8S7w53P1Y1cHwND64o0BnArT6RmdgIvsBuco6uTllsk= -github.com/cosmos/iavl v1.2.4 h1:IHUrG8dkyueKEY72y92jajrizbkZKPZbMmG14QzsEkw= -github.com/cosmos/iavl v1.2.4/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/iavl v1.2.6 h1:Hs3LndJbkIB+rEvToKJFXZvKo6Vy0Ex1SJ54hhtioIs= +github.com/cosmos/iavl v1.2.6/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0 h1:epKcbFAeWRRw1i1jZnYzLIEm9sgUPaL1RftuRjjUKGw= github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0/go.mod h1:S4ZQwf5/LhpOi8JXSAese/6QQDk87nTdicJPlZ5q9UQ= github.com/cosmos/ibc-go/v10 v10.5.0 h1:NI+cX04fXdu9JfP0V0GYeRi1ENa7PPdq0BYtVYo8Zrs= @@ -381,9 +381,8 @@ github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= -github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -521,8 +520,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= -github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= +github.com/hashicorp/go-getter v1.7.9 h1:G9gcjrDixz7glqJ+ll5IWvggSBR+R0B54DSRt4qfdC4= +github.com/hashicorp/go-getter v1.7.9/go.mod h1:dyFCmT1AQkDfOIt9NH8pw9XBDqNrIKJT5ylbpi7zPNE= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -664,8 +663,6 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan 
v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -766,8 +763,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -783,8 +780,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -818,8 +815,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shamaton/msgpack/v2 v2.2.0 h1:IP1m01pHwCrMa6ZccP9B3bqxEMKMSmMVAVKk54g3L/Y= -github.com/shamaton/msgpack/v2 v2.2.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= +github.com/shamaton/msgpack/v2 v2.2.3 h1:uDOHmxQySlvlUYfQwdjxyybAOzjlQsD1Vjy+4jmO9NM= +github.com/shamaton/msgpack/v2 v2.2.3/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -890,8 +887,8 @@ github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= 
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= +github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -982,8 +979,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1079,8 +1076,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= -golang.org/x/net v0.46.1-0.20251013234738-63d1a5100f82 h1:6/3JGEh1C88g7m+qzzTbl3A0FtsLguXieqofVLU/JAo= -golang.org/x/net v0.46.1-0.20251013234738-63d1a5100f82/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1108,8 +1105,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod 
h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1188,8 +1185,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1199,8 +1196,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1216,8 +1213,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1412,8 +1409,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/devnet/hermes/scripts/hermes-channel.sh b/devnet/hermes/scripts/hermes-channel.sh index a476b01a..10da3c98 100644 --- a/devnet/hermes/scripts/hermes-channel.sh +++ b/devnet/hermes/scripts/hermes-channel.sh @@ -17,11 +17,11 @@ LOG_PREFIX="[channel-setup]" # Keep only the JSON part of each line (drop logger prefixes like "[ts] ... CMD output: { ... }") extract_json() { - # Strip anything before the first JSON brace/bracket, then: - # - keep objects: lines starting with '{' - # - keep arrays that start with an object: lines starting with '[{' - # - drop timestamp lines like "[2025-10-14T...]" which are NOT JSON - sed 's/^[^{[]*//' | awk ' + # Strip anything before the first JSON brace/bracket, then: + # - keep objects: lines starting with '{' + # - keep arrays that start with an object: lines starting with '[{' + # - drop timestamp lines like "[2025-10-14T...]" which are NOT JSON + sed 's/^[^{[]*//' | awk ' /^[[:space:]]*{/ { print; next } /^[[:space:]]*\[\{/ { print; next } ' @@ -30,13 +30,13 @@ extract_json() { # Read Hermes output line-by-line and return the FIRST line that contains `"result":` # as a single JSON object. If none found, return {}. first_result_object() { - local line - line="$(extract_json | grep -m1 -E '"result"[[:space:]]*:' || true)" - if [ -n "$line" ]; then - printf '%s\n' "$line" - else - echo "{}" - fi + local line + line="$(extract_json | grep -m1 -E '"result"[[:space:]]*:' || true)" + if [ -n "$line" ]; then + printf '%s\n' "$line" + else + echo "{}" + fi } # Return a normalized **array** from `.result`: @@ -44,9 +44,9 @@ first_result_object() { # - if .result is an object -> wrap it as [ .result ] # - otherwise -> [] result_items() { - # Slurp all JSON docs, pick the LAST object that has "result", - # then normalize result to an array: array -> itself; object -> [object]; else -> []. - extract_json | jq -cs ' + # Slurp all JSON docs, pick the LAST object that has "result", + # then normalize result to an array: array -> itself; object -> [object]; else -> []. + extract_json | jq -cs ' def norm(x): if (x|type)=="array" then x elif (x|type)=="object" then [x] @@ -60,134 +60,134 @@ result_items() { } log_info() { - local msg="$1" - local ts line - ts="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" - line="[$ts] ${LOG_PREFIX} ${msg}" - - # 1) show on console even inside $( ... 
): send to STDERR - printf '%s\n' "$line" >&2 - - # 2) optionally also write to a file if set - if [ -n "${ENTRY_LOG_FILE:-}" ]; then - # create dir if needed; ignore errors but don't crash - mkdir -p "$(dirname -- "$ENTRY_LOG_FILE")" 2>/dev/null || true - printf '%s\n' "$line" >> "$ENTRY_LOG_FILE" 2>/dev/null || true - fi + local msg="$1" + local ts line + ts="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" + line="[$ts] ${LOG_PREFIX} ${msg}" + + # 1) show on console even inside $( ... ): send to STDERR + printf '%s\n' "$line" >&2 + + # 2) optionally also write to a file if set + if [ -n "${ENTRY_LOG_FILE:-}" ]; then + # create dir if needed; ignore errors but don't crash + mkdir -p "$(dirname -- "$ENTRY_LOG_FILE")" 2>/dev/null || true + printf '%s\n' "$line" >>"$ENTRY_LOG_FILE" 2>/dev/null || true + fi } fmt_cmd() { - local out="" arg - for arg in "$@"; do - if [ -z "${out}" ]; then - out=$(printf '%q' "${arg}") - else - out="${out} $(printf '%q' "${arg}")" - fi - done - printf '%s' "${out}" + local out="" arg + for arg in "$@"; do + if [ -z "${out}" ]; then + out=$(printf '%q' "${arg}") + else + out="${out} $(printf '%q' "${arg}")" + fi + done + printf '%s' "${out}" } log_cmd_start() { - log_info "CMD start: $(fmt_cmd "$@")" + log_info "CMD start: $(fmt_cmd "$@")" } log_cmd_result() { - local rc="$1" - shift - if [ "${rc}" -eq 0 ]; then - log_info "CMD success (rc=${rc}): $(fmt_cmd "$@")" - else - log_info "CMD failure (rc=${rc}): $(fmt_cmd "$@")" - fi + local rc="$1" + shift + if [ "${rc}" -eq 0 ]; then + log_info "CMD success (rc=${rc}): $(fmt_cmd "$@")" + else + log_info "CMD failure (rc=${rc}): $(fmt_cmd "$@")" + fi } log_cmd_output() { - local label="$1" - local payload="$2" - local count=0 - if [ -z "${payload}" ]; then - return 0 - fi - while IFS= read -r line; do - log_info "${label}: ${line}" - count=$((count + 1)) - if [ "${count}" -ge 200 ]; then - log_info "${label}: ... (truncated after 200 lines)" - break - fi - done <<< "${payload}" + local label="$1" + local payload="$2" + local count=0 + if [ -z "${payload}" ]; then + return 0 + fi + while IFS= read -r line; do + log_info "${label}: ${line}" + count=$((count + 1)) + if [ "${count}" -ge 200 ]; then + log_info "${label}: ... (truncated after 200 lines)" + break + fi + done <<<"${payload}" } # Log when a Hermes JSON query returns zero items log_query_empty() { - local label="$1" - local json_payload="$2" - local count - count="$(printf '%s\n' "${json_payload}" \ - | result_items \ - | jq 'length' 2>/dev/null || echo 0)" - if [ "${count}" -eq 0 ]; then - log "${label}: query returned 0 items" - fi + local label="$1" + local json_payload="$2" + local count + count="$(printf '%s\n' "${json_payload}" | + result_items | + jq 'length' 2>/dev/null || echo 0)" + if [ "${count}" -eq 0 ]; then + log "${label}: query returned 0 items" + fi } ran() { - local cmd=("$@") - log_cmd_start "${cmd[@]}" - "${cmd[@]}" - local rc=$? - log_cmd_result "${rc}" "${cmd[@]}" - return "${rc}" + local cmd=("$@") + log_cmd_start "${cmd[@]}" + "${cmd[@]}" + local rc=$? + log_cmd_result "${rc}" "${cmd[@]}" + return "${rc}" } ran_capture() { - local cmd=("$@") - log_cmd_start "${cmd[@]}" - local output rc - if output=$("${cmd[@]}" 2>&1); then - rc=0 - else - rc=$? - fi - log_cmd_output "CMD output" "${output}" - log_cmd_result "${rc}" "${cmd[@]}" - printf '%s' "${output}" - return "${rc}" + local cmd=("$@") + log_cmd_start "${cmd[@]}" + local output rc + if output=$("${cmd[@]}" 2>&1); then + rc=0 + else + rc=$? 
+ fi + log_cmd_output "CMD output" "${output}" + log_cmd_result "${rc}" "${cmd[@]}" + printf '%s' "${output}" + return "${rc}" } mkdir -p "${HERMES_STATUS_DIR}" CHANNEL_INFO_FILE="${HERMES_STATUS_DIR}/channel_transfer.json" log() { - log_info "$1" + log_info "$1" } log_cmd() { - log_cmd_start "$@" + log_cmd_start "$@" } run() { - ran "$@" + ran "$@" } run_capture() { - ran_capture "$@" + ran_capture "$@" } require_jq() { - if ! command -v jq >/dev/null 2>&1; then - log "jq is required but not installed in this container" - exit 1 - fi + if ! command -v jq >/dev/null 2>&1; then + log "jq is required but not installed in this container" + exit 1 + fi } parse_key_address() { - local key_name="$1" - local json_payload="$2" - printf '%s\n' "${json_payload}" \ - | extract_json \ - | jq -r --arg name "${key_name}" ' + local key_name="$1" + local json_payload="$2" + printf '%s\n' "${json_payload}" | + extract_json | + jq -r --arg name "${key_name}" ' .result as $r | if ($r|type)=="array" then ($r | map(select(.name==$name)) | .[0].address // empty) @@ -200,169 +200,169 @@ parse_key_address() { } check_account_rest() { - local rest_addr="$1" - local addr="$2" - local url - - if [ -z "${rest_addr}" ]; then - return 2 - fi - - url="${rest_addr%/}/cosmos/auth/v1beta1/accounts/${addr}" - local out - out="$(run_capture curl -s "${url}" || true)" - if [ -z "${out}" ]; then - return 2 - fi - if printf '%s\n' "${out}" | jq -e '.account' >/dev/null 2>&1; then - return 0 - fi - if printf '%s\n' "${out}" | jq -e '.code' >/dev/null 2>&1; then - printf '%s\n' "${out}" >&2 - return 1 - fi - return 2 + local rest_addr="$1" + local addr="$2" + local url + + if [ -z "${rest_addr}" ]; then + return 2 + fi + + url="${rest_addr%/}/cosmos/auth/v1beta1/accounts/${addr}" + local out + out="$(run_capture curl -s "${url}" || true)" + if [ -z "${out}" ]; then + return 2 + fi + if printf '%s\n' "${out}" | jq -e '.account' >/dev/null 2>&1; then + return 0 + fi + if printf '%s\n' "${out}" | jq -e '.code' >/dev/null 2>&1; then + printf '%s\n' "${out}" >&2 + return 1 + fi + return 2 } ensure_chain_account() { - local chain_id="$1" - local addr="$2" - local label="$3" - - if [ -z "${addr}" ] || [ "${addr}" = "null" ]; then - log "Missing ${label} address; cannot verify account on ${chain_id}" - exit 1 - fi - - log "Checking ${label} account on ${chain_id} (${addr})" - local rest_addr="" - if [ "${chain_id}" = "${LUMERA_CHAIN_ID}" ] && [ -n "${LUMERA_REST_ADDR}" ]; then - rest_addr="${LUMERA_REST_ADDR}" - elif [ "${chain_id}" = "${SIMD_CHAIN_ID}" ] && [ -n "${SIMD_REST_ADDR}" ]; then - rest_addr="${SIMD_REST_ADDR}" - fi - - if [ -z "${rest_addr}" ]; then - log "No REST address configured for ${chain_id}; skipping ${label} account check" - return 0 - fi - - log "Checking ${label} account via REST (${rest_addr})" - if check_account_rest "${rest_addr}" "${addr}"; then - return 0 - fi - - case $? in - 1) - log "${label} account ${addr} not found on ${chain_id}" - log "Ensure the relayer account is funded in genesis or re-run devnet build." 
- exit 1
- ;;
- *)
- log "REST account check unavailable for ${chain_id}; skipping ${label} account check"
- ;;
- esac
+ local chain_id="$1"
+ local addr="$2"
+ local label="$3"
+
+ if [ -z "${addr}" ] || [ "${addr}" = "null" ]; then
+ log "Missing ${label} address; cannot verify account on ${chain_id}"
+ exit 1
+ fi
+
+ log "Checking ${label} account on ${chain_id} (${addr})"
+ local rest_addr=""
+ if [ "${chain_id}" = "${LUMERA_CHAIN_ID}" ] && [ -n "${LUMERA_REST_ADDR}" ]; then
+ rest_addr="${LUMERA_REST_ADDR}"
+ elif [ "${chain_id}" = "${SIMD_CHAIN_ID}" ] && [ -n "${SIMD_REST_ADDR}" ]; then
+ rest_addr="${SIMD_REST_ADDR}"
+ fi
+
+ if [ -z "${rest_addr}" ]; then
+ log "No REST address configured for ${chain_id}; skipping ${label} account check"
+ return 0
+ fi
+
+ log "Checking ${label} account via REST (${rest_addr})"
+ # Capture the helper's status explicitly: `$?` after an `if ...; fi` whose
+ # condition failed is 0, so `case $?` would never see the real return code.
+ local rest_rc=0
+ check_account_rest "${rest_addr}" "${addr}" || rest_rc=$?
+ if [ "${rest_rc}" -eq 0 ]; then
+ return 0
+ fi
+
+ case ${rest_rc} in
+ 1)
+ log "${label} account ${addr} not found on ${chain_id}"
+ log "Ensure the relayer account is funded in genesis or re-run devnet build."
+ exit 1
+ ;;
+ *)
+ log "REST account check unavailable for ${chain_id}; skipping ${label} account check"
+ ;;
+ esac
}

ensure_client() {
- local host_chain="$1"
- local reference_chain="$2"
- local client_id=""
- local query_json
-
- log "Querying existing clients on chain ${host_chain} referencing ${reference_chain}"
- query_json="$(run_capture hermes --json query clients --host-chain "${host_chain}" || true)"
- log_query_empty "clients (${host_chain})" "${query_json}"
- client_id="$(printf '%s\n' "${query_json}" \
- | result_items \
- | jq -r --arg ref "${reference_chain}" \
- 'map(select(.chain_id==$ref)) | .[0].client_id // empty' || true)"
- if [ -n "${client_id}" ] && [ "${client_id}" != "null" ]; then
- log "Reusing existing client ${client_id} on ${host_chain} (counterparty ${reference_chain})"
- printf '%s\n' "${client_id}"
- return 0
- fi
- log "No matching client on ${host_chain} for counterparty ${reference_chain}; will create a new client"
-
- local create_json
- create_json="$(run_capture hermes --json create client --host-chain "${host_chain}" --reference-chain "${reference_chain}")"
- local result_line
- result_line="$(printf '%s\n' "${create_json}")"
- client_id="$(printf '%s' "${result_line}" \
- | result_items \
- | jq -r '.[0].CreateClient.client_id // empty' 2>/dev/null || true)"
- if [ -z "${client_id}" ] || [ "${client_id}" = "null" ]; then
- log "Failed to create client on ${host_chain} referencing ${reference_chain}"
- printf '%s\n' "${create_json}" | first_result_object | jq .
>&2 || true - exit 1 - fi - log "Created client ${client_id} on ${host_chain} referencing ${reference_chain}" - printf '%s\n' "${client_id}" + local host_chain="$1" + local reference_chain="$2" + local client_id="" + local query_json + + log "Querying existing clients on chain ${host_chain} referencing ${reference_chain}" + query_json="$(run_capture hermes --json query clients --host-chain "${host_chain}" || true)" + log_query_empty "clients (${host_chain})" "${query_json}" + client_id="$(printf '%s\n' "${query_json}" | + result_items | + jq -r --arg ref "${reference_chain}" \ + 'map(select(.chain_id==$ref)) | .[0].client_id // empty' || true)" + if [ -n "${client_id}" ] && [ "${client_id}" != "null" ]; then + log "Reusing existing client ${client_id} on ${host_chain} (counterparty ${reference_chain})" + printf '%s\n' "${client_id}" + return 0 + fi + log "No matching client on ${host_chain} for counterparty ${reference_chain}; will create a new client" + + local create_json + create_json="$(run_capture hermes --json create client --host-chain "${host_chain}" --reference-chain "${reference_chain}")" + local result_line + result_line="$(printf '%s\n' "${create_json}")" + client_id="$(printf '%s' "${result_line}" | + result_items | + jq -r '.[0].CreateClient.client_id // empty' 2>/dev/null || true)" + if [ -z "${client_id}" ] || [ "${client_id}" = "null" ]; then + log "Failed to create client on ${host_chain} referencing ${reference_chain}" + printf '%s\n' "${create_json}" | first_result_object | jq . >&2 || true + exit 1 + fi + log "Created client ${client_id} on ${host_chain} referencing ${reference_chain}" + printf '%s\n' "${client_id}" } ensure_connection() { - local a_chain="$1" - local b_chain="$2" - local a_client="$3" - local b_client="$4" - local connection_id="" - - local connections_json - connections_json="$(run_capture hermes --json query connections --chain "${a_chain}" --counterparty-chain "${b_chain}" || true)" - log_query_empty "connections (${a_chain})" "${connections_json}" - connection_id="$(printf '%s\n' "${connections_json}" \ - | result_items \ - | jq -r '.[0] // empty' || true)" - - if [ -n "${connection_id}" ] && [ "${connection_id}" != "null" ]; then - log "Reusing existing connection ${connection_id} on ${a_chain}" - printf '%s\n' "${connection_id}" - return 0 - fi - - log "No matching connection on ${a_chain} for clients (${a_client} <-> ${b_client}); will create a new connection" - - local create_conn_json - create_conn_json="$(run_capture hermes --json create connection \ - --a-chain "${a_chain}" \ - --b-chain "${b_chain}" \ - --delay 0)" - connection_id="$(printf '%s\n' "${create_conn_json}" \ - | result_items \ - | jq -r '.[0].a_side.connection_id // .[0].b_side.connection_id // .[0].connection_id // empty' \ - 2>/dev/null || true)" - - if [ -z "${connection_id}" ] || [ "${connection_id}" = "null" ]; then - log "Connection id not found in create-connection output; re-querying existing connections" - connections_json="$(run_capture hermes --json query connections --chain "${a_chain}" --counterparty-chain "${b_chain}" || true)" - log_query_empty "connections (${a_chain})" "${connections_json}" - connection_id="$(printf '%s\n' "${connections_json}" \ - | result_items \ - | jq -r '.[0] // empty' || true)" - fi - - if [ -z "${connection_id}" ] || [ "${connection_id}" = "null" ]; then - local conn_err - conn_err="$(printf '%s\n' "${create_conn_json}" \ - | first_result_object \ - | jq -r '.result // empty' || true)" - log "Failed to create connection between 
${a_chain} and ${b_chain}" - if [ -n "${conn_err}" ] && [ "${conn_err}" != "null" ]; then - log "Hermes error: ${conn_err}" - fi - printf '%s\n' "${create_conn_json}" >&2 - exit 1 - fi - log "Created connection ${connection_id} (${a_chain} <-> ${b_chain})" - printf '%s\n' "${connection_id}" + local a_chain="$1" + local b_chain="$2" + local a_client="$3" + local b_client="$4" + local connection_id="" + + local connections_json + connections_json="$(run_capture hermes --json query connections --chain "${a_chain}" --counterparty-chain "${b_chain}" || true)" + log_query_empty "connections (${a_chain})" "${connections_json}" + connection_id="$(printf '%s\n' "${connections_json}" | + result_items | + jq -r '.[0] // empty' || true)" + + if [ -n "${connection_id}" ] && [ "${connection_id}" != "null" ]; then + log "Reusing existing connection ${connection_id} on ${a_chain}" + printf '%s\n' "${connection_id}" + return 0 + fi + + log "No matching connection on ${a_chain} for clients (${a_client} <-> ${b_client}); will create a new connection" + + local create_conn_json + create_conn_json="$(run_capture hermes --json create connection \ + --a-chain "${a_chain}" \ + --b-chain "${b_chain}" \ + --delay 0)" + connection_id="$(printf '%s\n' "${create_conn_json}" | + result_items | + jq -r '.[0].a_side.connection_id // .[0].b_side.connection_id // .[0].connection_id // empty' \ + 2>/dev/null || true)" + + if [ -z "${connection_id}" ] || [ "${connection_id}" = "null" ]; then + log "Connection id not found in create-connection output; re-querying existing connections" + connections_json="$(run_capture hermes --json query connections --chain "${a_chain}" --counterparty-chain "${b_chain}" || true)" + log_query_empty "connections (${a_chain})" "${connections_json}" + connection_id="$(printf '%s\n' "${connections_json}" | + result_items | + jq -r '.[0] // empty' || true)" + fi + + if [ -z "${connection_id}" ] || [ "${connection_id}" = "null" ]; then + local conn_err + conn_err="$(printf '%s\n' "${create_conn_json}" | + first_result_object | + jq -r '.result // empty' || true)" + log "Failed to create connection between ${a_chain} and ${b_chain}" + if [ -n "${conn_err}" ] && [ "${conn_err}" != "null" ]; then + log "Hermes error: ${conn_err}" + fi + printf '%s\n' "${create_conn_json}" >&2 + exit 1 + fi + log "Created connection ${connection_id} (${a_chain} <-> ${b_chain})" + printf '%s\n' "${connection_id}" } if command -v hermes >/dev/null 2>&1; then - HERMES_VERSION_OUTPUT="$(hermes version 2>&1 || true)" - log "Hermes CLI detected: ${HERMES_VERSION_OUTPUT}" + HERMES_VERSION_OUTPUT="$(hermes version 2>&1 || true)" + log "Hermes CLI detected: ${HERMES_VERSION_OUTPUT}" else - log "Hermes CLI not found in PATH" - exit 1 + log "Hermes CLI not found in PATH" + exit 1 fi log "Using Hermes config: ${HERMES_CONFIG_PATH}" @@ -371,42 +371,42 @@ log "Lumera chain: ${LUMERA_CHAIN_ID}, SIMD chain: ${SIMD_CHAIN_ID}" require_jq if [ ! -s "${LUMERA_MNEMONIC_FILE}" ]; then - log "Lumera mnemonic file ${LUMERA_MNEMONIC_FILE} missing" - exit 1 + log "Lumera mnemonic file ${LUMERA_MNEMONIC_FILE} missing" + exit 1 fi if [ ! -s "${SIMD_MNEMONIC_FILE}" ]; then - log "SIMD mnemonic file ${SIMD_MNEMONIC_FILE} missing" - exit 1 + log "SIMD mnemonic file ${SIMD_MNEMONIC_FILE} missing" + exit 1 fi if ! 
OUT="$(run_capture hermes keys add \ - --chain "${LUMERA_CHAIN_ID}" \ - --key-name "${HERMES_KEY_NAME}" \ - --mnemonic-file "${LUMERA_MNEMONIC_FILE}" \ - --overwrite 2>&1)"; then - log "Failed to import Lumera key: ${OUT}" - exit 1 + --chain "${LUMERA_CHAIN_ID}" \ + --key-name "${HERMES_KEY_NAME}" \ + --mnemonic-file "${LUMERA_MNEMONIC_FILE}" \ + --overwrite 2>&1)"; then + log "Failed to import Lumera key: ${OUT}" + exit 1 fi log "Imported Lumera key ${HERMES_KEY_NAME}" lumera_relayer_addr="$(parse_key_address "${HERMES_KEY_NAME}" "$(run_capture hermes --json keys list --chain "${LUMERA_CHAIN_ID}")")" if [ -n "${lumera_relayer_addr}" ] && [ "${lumera_relayer_addr}" != "null" ]; then - log "Lumera relayer address: ${lumera_relayer_addr}" + log "Lumera relayer address: ${lumera_relayer_addr}" fi ensure_chain_account "${LUMERA_CHAIN_ID}" "${lumera_relayer_addr}" "Lumera relayer" if ! OUT="$(run_capture hermes keys add \ - --chain "${SIMD_CHAIN_ID}" \ - --key-name "${HERMES_KEY_NAME}" \ - --mnemonic-file "${SIMD_MNEMONIC_FILE}" \ - --overwrite 2>&1)"; then - log "Failed to import SIMD key: ${OUT}" - exit 1 + --chain "${SIMD_CHAIN_ID}" \ + --key-name "${HERMES_KEY_NAME}" \ + --mnemonic-file "${SIMD_MNEMONIC_FILE}" \ + --overwrite 2>&1)"; then + log "Failed to import SIMD key: ${OUT}" + exit 1 fi log "Imported SIMD key ${HERMES_KEY_NAME}" simd_relayer_addr="$(parse_key_address "${HERMES_KEY_NAME}" "$(run_capture hermes --json keys list --chain "${SIMD_CHAIN_ID}")")" if [ -n "${simd_relayer_addr}" ] && [ "${simd_relayer_addr}" != "null" ]; then - log "SIMD relayer address: ${simd_relayer_addr}" + log "SIMD relayer address: ${simd_relayer_addr}" fi ensure_chain_account "${SIMD_CHAIN_ID}" "${simd_relayer_addr}" "SIMD relayer" @@ -414,16 +414,16 @@ existing="" log "Querying existing transfer channels on ${LUMERA_CHAIN_ID}" channels_json="$(run_capture hermes --json query channels --chain "${LUMERA_CHAIN_ID}" --counterparty-chain "${SIMD_CHAIN_ID}" || true)" log_query_empty "channels (${LUMERA_CHAIN_ID})" "${channels_json}" -existing="$(printf '%s\n' "${channels_json}" \ - | result_items \ - | jq -r 'map(select(.port_id=="transfer")) | .[0].channel_id // empty' || true)" +existing="$(printf '%s\n' "${channels_json}" | + result_items | + jq -r 'map(select(.port_id=="transfer")) | .[0].channel_id // empty' || true)" if [ -z "${existing}" ]; then - log "No existing 'transfer' channel from ${LUMERA_CHAIN_ID} to ${SIMD_CHAIN_ID} found; will create" + log "No existing 'transfer' channel from ${LUMERA_CHAIN_ID} to ${SIMD_CHAIN_ID} found; will create" fi if [ -n "${existing}" ]; then - log "Channel already exists: ${existing}" - cat <"${CHANNEL_INFO_FILE}" + log "Channel already exists: ${existing}" + cat <"${CHANNEL_INFO_FILE}" { "channel_id": "${existing}", "port_id": "transfer", @@ -431,8 +431,8 @@ if [ -n "${existing}" ]; then "last_updated": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" } EOF - log "Wrote channel metadata to ${CHANNEL_INFO_FILE}" - exit 0 + log "Wrote channel metadata to ${CHANNEL_INFO_FILE}" + exit 0 fi a_client_id="$(ensure_client "${LUMERA_CHAIN_ID}" "${SIMD_CHAIN_ID}")" @@ -441,61 +441,61 @@ connection_id="$(ensure_connection "${LUMERA_CHAIN_ID}" "${SIMD_CHAIN_ID}" "${a_ log "Creating transfer channel between ${LUMERA_CHAIN_ID} and ${SIMD_CHAIN_ID} using connection ${connection_id}" if chan_out="$(run_capture hermes --json create channel \ - --a-chain "${LUMERA_CHAIN_ID}" \ - --a-connection "${connection_id}" \ - --a-port transfer --b-port transfer \ - --order unordered)"; then - log "Hermes 
channel creation command completed" + --a-chain "${LUMERA_CHAIN_ID}" \ + --a-connection "${connection_id}" \ + --a-port transfer --b-port transfer \ + --order unordered)"; then + log "Hermes channel creation command completed" else - log "Channel creation command failed (non-zero exit code)" - printf '%s\n' "$chan_out" >&2 - exit 1 + log "Channel creation command failed (non-zero exit code)" + printf '%s\n' "$chan_out" >&2 + exit 1 fi log "Hermes create channel raw output:" && echo "${chan_out}" || true -status_ok="$(printf '%s\n' "$chan_out" \ - | extract_json | grep -m1 -E '"result"[[:space:]]*:' \ - | jq -r '.status // "success"' 2>/dev/null || echo "error")" +status_ok="$(printf '%s\n' "$chan_out" | + extract_json | grep -m1 -E '"result"[[:space:]]*:' | + jq -r '.status // "success"' 2>/dev/null || echo "error")" if [ "$status_ok" != "success" ]; then - log "Hermes returned non-success JSON status: $status_ok" - printf '%s\n' "$chan_out" >&2 - exit 1 + log "Hermes returned non-success JSON status: $status_ok" + printf '%s\n' "$chan_out" >&2 + exit 1 fi -new_channel_id="$(printf '%s\n' "$chan_out" \ - | first_result_object \ - | jq -r '.result.a_side.channel_id // .result.b_side.channel_id // empty' \ - 2>/dev/null || true)" +new_channel_id="$(printf '%s\n' "$chan_out" | + first_result_object | + jq -r '.result.a_side.channel_id // .result.b_side.channel_id // empty' \ + 2>/dev/null || true)" if [ -z "${new_channel_id:-}" ]; then - log "Unable to extract channel id from command output; polling existing channels..." - for attempt in $(seq 1 10); do - log "Polling attempt ${attempt}/10 on ${LUMERA_CHAIN_ID}" - new_channel_id="$(run_capture hermes --json query channels --chain "${LUMERA_CHAIN_ID}" --counterparty-chain "${SIMD_CHAIN_ID}" \ - | result_items \ - | jq -r 'map(select(.port_id=="transfer")) |.[0].channel_id // empty' \ - 2>/dev/null || true)" - if [ -n "${new_channel_id}" ]; then - break - fi - log "Polling attempt ${attempt}/10 on ${SIMD_CHAIN_ID}" - new_channel_id="$( - run_capture hermes --json query channels --chain "${SIMD_CHAIN_ID}" --counterparty-chain "${LUMERA_CHAIN_ID}" \ - | result_items \ - | jq -r 'map(select(.port_id=="transfer")) | .[0].channel_id // empty' \ - 2>/dev/null || true - )" - if [ -n "${new_channel_id}" ]; then - break - fi - sleep 3 - done + log "Unable to extract channel id from command output; polling existing channels..." 
+ for attempt in $(seq 1 10); do + log "Polling attempt ${attempt}/10 on ${LUMERA_CHAIN_ID}" + new_channel_id="$(run_capture hermes --json query channels --chain "${LUMERA_CHAIN_ID}" --counterparty-chain "${SIMD_CHAIN_ID}" | + result_items | + jq -r 'map(select(.port_id=="transfer")) |.[0].channel_id // empty' \ + 2>/dev/null || true)" + if [ -n "${new_channel_id}" ]; then + break + fi + log "Polling attempt ${attempt}/10 on ${SIMD_CHAIN_ID}" + new_channel_id="$( + run_capture hermes --json query channels --chain "${SIMD_CHAIN_ID}" --counterparty-chain "${LUMERA_CHAIN_ID}" | + result_items | + jq -r 'map(select(.port_id=="transfer")) | .[0].channel_id // empty' \ + 2>/dev/null || true + )" + if [ -n "${new_channel_id}" ]; then + break + fi + sleep 3 + done fi if [ -z "${new_channel_id:-}" ]; then - log "Channel creation command failed: could not determine new channel id" - exit 1 + log "Channel creation command failed: could not determine new channel id" + exit 1 fi log "New channel detected: ${new_channel_id}" diff --git a/devnet/hermes/scripts/hermes-configure.sh b/devnet/hermes/scripts/hermes-configure.sh index 50a62796..61d92aff 100644 --- a/devnet/hermes/scripts/hermes-configure.sh +++ b/devnet/hermes/scripts/hermes-configure.sh @@ -8,78 +8,78 @@ ENTRY_LOG_FILE="${ENTRY_LOG_FILE:-/root/logs/entrypoint.log}" LOG_PREFIX="[hermes-configure]" log_info() { - local msg="$1" - local line - line=$(printf '[%s] %s %s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "${LOG_PREFIX}" "${msg}") - printf '%s\n' "${line}" - printf '%s\n' "${line}" >> "${ENTRY_LOG_FILE}" + local msg="$1" + local line + line=$(printf '[%s] %s %s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "${LOG_PREFIX}" "${msg}") + printf '%s\n' "${line}" + printf '%s\n' "${line}" >>"${ENTRY_LOG_FILE}" } fmt_cmd() { - local out="" arg - for arg in "$@"; do - if [ -z "${out}" ]; then - out=$(printf '%q' "${arg}") - else - out="${out} $(printf '%q' "${arg}")" - fi - done - printf '%s' "${out}" + local out="" arg + for arg in "$@"; do + if [ -z "${out}" ]; then + out=$(printf '%q' "${arg}") + else + out="${out} $(printf '%q' "${arg}")" + fi + done + printf '%s' "${out}" } log_cmd_start() { - log_info "CMD start: $(fmt_cmd "$@")" + log_info "CMD start: $(fmt_cmd "$@")" } log_cmd_result() { - local rc="$1" - shift - if [ "${rc}" -eq 0 ]; then - log_info "CMD success (rc=${rc}): $(fmt_cmd "$@")" - else - log_info "CMD failure (rc=${rc}): $(fmt_cmd "$@")" - fi + local rc="$1" + shift + if [ "${rc}" -eq 0 ]; then + log_info "CMD success (rc=${rc}): $(fmt_cmd "$@")" + else + log_info "CMD failure (rc=${rc}): $(fmt_cmd "$@")" + fi } log_cmd_output() { - local label="$1" - local payload="$2" - local count=0 - if [ -z "${payload}" ]; then - return 0 - fi - while IFS= read -r line; do - log_info "${label}: ${line}" - count=$((count + 1)) - if [ "${count}" -ge 40 ]; then - log_info "${label}: ... (truncated after 40 lines)" - break - fi - done <<< "${payload}" + local label="$1" + local payload="$2" + local count=0 + if [ -z "${payload}" ]; then + return 0 + fi + while IFS= read -r line; do + log_info "${label}: ${line}" + count=$((count + 1)) + if [ "${count}" -ge 40 ]; then + log_info "${label}: ... (truncated after 40 lines)" + break + fi + done <<<"${payload}" } ran() { - local cmd=("$@") - log_cmd_start "${cmd[@]}" - "${cmd[@]}" - local rc=$? - log_cmd_result "${rc}" "${cmd[@]}" - return "${rc}" + local cmd=("$@") + log_cmd_start "${cmd[@]}" + "${cmd[@]}" + local rc=$? 
+ log_cmd_result "${rc}" "${cmd[@]}" + return "${rc}" } ran_capture() { - local cmd=("$@") - log_cmd_start "${cmd[@]}" - local output rc - if output=$("${cmd[@]}" 2>&1); then - rc=0 - else - rc=$? - fi - log_cmd_output "CMD output" "${output}" - log_cmd_result "${rc}" "${cmd[@]}" - printf '%s' "${output}" - return "${rc}" + local cmd=("$@") + log_cmd_start "${cmd[@]}" + local output rc + if output=$("${cmd[@]}" 2>&1); then + rc=0 + else + rc=$? + fi + log_cmd_output "CMD output" "${output}" + log_cmd_result "${rc}" "${cmd[@]}" + printf '%s' "${output}" + return "${rc}" } : "${LUMERA_CHAIN_ID:=lumera-devnet-1}" @@ -99,13 +99,13 @@ CONFIG_DIR="$(dirname "${HERMES_CONFIG_PATH}")" ran mkdir -p "${CONFIG_DIR}" if [ ! -f "${HERMES_CONFIG_PATH}" ]; then - ran cp "${HERMES_TEMPLATE_PATH}" "${HERMES_CONFIG_PATH}" + ran cp "${HERMES_TEMPLATE_PATH}" "${HERMES_CONFIG_PATH}" fi ensure_mode_enabled() { - local section="$1" - local value="$2" - if ! ran python3 - "$HERMES_CONFIG_PATH" "$section" "$value" <<'PY' + local section="$1" + local value="$2" + if ! ran python3 - "$HERMES_CONFIG_PATH" "$section" "$value" <<'PY'; then import pathlib import re import sys @@ -138,21 +138,20 @@ if in_section and not replaced: path.write_text('\n'.join(out) + '\n') PY - then - log_info "Failed to enforce ${section}.enabled=${value}" - return 1 - fi - log_info "Ensured ${section}.enabled=${value}" + log_info "Failed to enforce ${section}.enabled=${value}" + return 1 + fi + log_info "Ensured ${section}.enabled=${value}" } ensure_mode_enabled "mode.channels" "true" ensure_mode_enabled "mode.connections" "true" append_chain() { - local chain_id="$1" - local block="$2" - log_info "Updating Hermes chain entry for ${chain_id}" - if ! ran python3 - "$HERMES_CONFIG_PATH" "$chain_id" <<'PY' + local chain_id="$1" + local block="$2" + log_info "Updating Hermes chain entry for ${chain_id}" + if ! 
ran python3 - "$HERMES_CONFIG_PATH" "$chain_id" <<'PY'; then import pathlib, re, sys config_path = pathlib.Path(sys.argv[1]) chain_id = sys.argv[2] @@ -166,19 +165,19 @@ pattern = re.compile(r"^\s*id\s*=\s*['\"]{}['\"]\s*$".format(re.escape(chain_id) kept = [blk for blk in blocks if not pattern.search(blk)] config_path.write_text(head + ''.join(kept)) PY - then - log_info "Failed to normalise configuration for chain ${chain_id}" - return 1 - fi - if printf "\n%s\n" "${block}" >> "${HERMES_CONFIG_PATH}"; then - log_info "Appended configuration block for ${chain_id}" - else - log_info "Failed to append configuration block for ${chain_id}" - return 1 - fi + log_info "Failed to normalise configuration for chain ${chain_id}" + return 1 + fi + if printf "\n%s\n" "${block}" >>"${HERMES_CONFIG_PATH}"; then + log_info "Appended configuration block for ${chain_id}" + else + log_info "Failed to append configuration block for ${chain_id}" + return 1 + fi } -lumera_block=$(cat <> "${ENTRY_LOG_FILE}" + local msg="$1" + local line + line=$(printf '[%s] %s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "${msg}") + printf '%s\n' "${line}" + printf '%s\n' "${line}" >>"${ENTRY_LOG_FILE}" } fmt_cmd() { - local out="" arg - for arg in "$@"; do - if [ -z "${out}" ]; then - out=$(printf '%q' "${arg}") - else - out="${out} $(printf '%q' "${arg}")" - fi - done - printf '%s' "${out}" + local out="" arg + for arg in "$@"; do + if [ -z "${out}" ]; then + out=$(printf '%q' "${arg}") + else + out="${out} $(printf '%q' "${arg}")" + fi + done + printf '%s' "${out}" } log_cmd_start() { - log_info "CMD start: $(fmt_cmd "$@")" + log_info "CMD start: $(fmt_cmd "$@")" } log_cmd_result() { - local rc="$1" - shift - if [ "${rc}" -eq 0 ]; then - log_info "CMD success (rc=${rc}): $(fmt_cmd "$@")" - else - log_info "CMD failure (rc=${rc}): $(fmt_cmd "$@")" - fi + local rc="$1" + shift + if [ "${rc}" -eq 0 ]; then + log_info "CMD success (rc=${rc}): $(fmt_cmd "$@")" + else + log_info "CMD failure (rc=${rc}): $(fmt_cmd "$@")" + fi } log_cmd_output() { - local label="$1" - local payload="$2" - local lines_logged=0 - if [ -z "${payload}" ]; then - return 0 - fi - while IFS= read -r line; do - log_info "${label}: ${line}" - lines_logged=$((lines_logged + 1)) - if [ "${lines_logged}" -ge 40 ]; then - log_info "${label}: ... (truncated after 40 lines)" - break - fi - done <<< "${payload}" + local label="$1" + local payload="$2" + local lines_logged=0 + if [ -z "${payload}" ]; then + return 0 + fi + while IFS= read -r line; do + log_info "${label}: ${line}" + lines_logged=$((lines_logged + 1)) + if [ "${lines_logged}" -ge 40 ]; then + log_info "${label}: ... (truncated after 40 lines)" + break + fi + done <<<"${payload}" } ran() { - local cmd=("$@") - log_cmd_start "${cmd[@]}" - "${cmd[@]}" - local rc=$? - log_cmd_result "${rc}" "${cmd[@]}" - return "${rc}" + local cmd=("$@") + log_cmd_start "${cmd[@]}" + "${cmd[@]}" + local rc=$? + log_cmd_result "${rc}" "${cmd[@]}" + return "${rc}" } ran_capture() { - local cmd=("$@") - log_cmd_start "${cmd[@]}" - local output rc - if output=$("${cmd[@]}" 2>&1); then - rc=0 - else - rc=$? - fi - log_cmd_output "CMD output" "${output}" - log_cmd_result "${rc}" "${cmd[@]}" - printf '%s' "${output}" - return "${rc}" + local cmd=("$@") + log_cmd_start "${cmd[@]}" + local output rc + if output=$("${cmd[@]}" 2>&1); then + rc=0 + else + rc=$? 
+ fi + log_cmd_output "CMD output" "${output}" + log_cmd_result "${rc}" "${cmd[@]}" + printf '%s' "${output}" + return "${rc}" } SHARED_DIR="/shared" @@ -130,44 +130,44 @@ HERMES_RELAYER_MNEMONIC_FILE="${HERMES_SHARED_DIR}/lumera-hermes-relayer.mnemoni LEGACY_RELAYER_MNEMONIC_FILE="${HERMES_SHARED_DIR}/hermes-relayer.mnemonic" if [ ! -s "${HERMES_RELAYER_MNEMONIC_FILE}" ] && [ -s "${LEGACY_RELAYER_MNEMONIC_FILE}" ]; then - log_info "Legacy relayer mnemonic file detected; copying to ${HERMES_RELAYER_MNEMONIC_FILE}" - cp "${LEGACY_RELAYER_MNEMONIC_FILE}" "${HERMES_RELAYER_MNEMONIC_FILE}" + log_info "Legacy relayer mnemonic file detected; copying to ${HERMES_RELAYER_MNEMONIC_FILE}" + cp "${LEGACY_RELAYER_MNEMONIC_FILE}" "${HERMES_RELAYER_MNEMONIC_FILE}" fi if [ -s "${HERMES_RELAYER_MNEMONIC_FILE}" ]; then - if [ -z "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then - SIMAPP_KEY_RELAYER_MNEMONIC="$(cat "${HERMES_RELAYER_MNEMONIC_FILE}")" - else - existing_relayer_mnemonic="$(cat "${HERMES_RELAYER_MNEMONIC_FILE}")" - if [ "${SIMAPP_KEY_RELAYER_MNEMONIC}" != "${existing_relayer_mnemonic}" ]; then - log_info "Relayer mnemonic file ${HERMES_RELAYER_MNEMONIC_FILE} differs from SIMAPP_KEY_RELAYER_MNEMONIC; using file to keep Hermes/simd aligned" - SIMAPP_KEY_RELAYER_MNEMONIC="${existing_relayer_mnemonic}" - fi - fi + if [ -z "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then + SIMAPP_KEY_RELAYER_MNEMONIC="$(cat "${HERMES_RELAYER_MNEMONIC_FILE}")" + else + existing_relayer_mnemonic="$(cat "${HERMES_RELAYER_MNEMONIC_FILE}")" + if [ "${SIMAPP_KEY_RELAYER_MNEMONIC}" != "${existing_relayer_mnemonic}" ]; then + log_info "Relayer mnemonic file ${HERMES_RELAYER_MNEMONIC_FILE} differs from SIMAPP_KEY_RELAYER_MNEMONIC; using file to keep Hermes/simd aligned" + SIMAPP_KEY_RELAYER_MNEMONIC="${existing_relayer_mnemonic}" + fi + fi fi if command -v jq >/dev/null 2>&1 && [ -f "${CONFIG_JSON}" ]; then - LUMERA_CHAIN_ID="${LUMERA_CHAIN_ID:-$(jq -r '.chain.id' "${CONFIG_JSON}")}" - LUMERA_BOND_DENOM="${LUMERA_BOND_DENOM:-$(jq -r '.chain.denom.bond' "${CONFIG_JSON}")}" + LUMERA_CHAIN_ID="${LUMERA_CHAIN_ID:-$(jq -r '.chain.id' "${CONFIG_JSON}")}" + LUMERA_BOND_DENOM="${LUMERA_BOND_DENOM:-$(jq -r '.chain.denom.bond' "${CONFIG_JSON}")}" fi if [ -z "${LUMERA_CHAIN_ID:-}" ] || [ "${LUMERA_CHAIN_ID}" = "null" ]; then - LUMERA_CHAIN_ID="lumera-devnet-1" + LUMERA_CHAIN_ID="lumera-devnet-1" fi if [ -z "${LUMERA_BOND_DENOM:-}" ] || [ "${LUMERA_BOND_DENOM}" = "null" ]; then - LUMERA_BOND_DENOM="ulume" + LUMERA_BOND_DENOM="ulume" fi if command -v jq >/dev/null 2>&1 && [ -f "${VALIDATORS_JSON}" ]; then - FIRST_VALIDATOR_SERVICE="$(jq -r '([.[] | select(."network-maker"==true) | .name] | first) // empty' "${VALIDATORS_JSON}")" - if [ -z "${FIRST_VALIDATOR_SERVICE}" ] || [ "${FIRST_VALIDATOR_SERVICE}" = "null" ]; then - FIRST_VALIDATOR_SERVICE="$(jq -r '.[0].name // empty' "${VALIDATORS_JSON}")" - fi + FIRST_VALIDATOR_SERVICE="$(jq -r '([.[] | select(."network-maker"==true) | .name] | first) // empty' "${VALIDATORS_JSON}")" + if [ -z "${FIRST_VALIDATOR_SERVICE}" ] || [ "${FIRST_VALIDATOR_SERVICE}" = "null" ]; then + FIRST_VALIDATOR_SERVICE="$(jq -r '.[0].name // empty' "${VALIDATORS_JSON}")" + fi fi if [ -z "${FIRST_VALIDATOR_SERVICE:-}" ] || [ "${FIRST_VALIDATOR_SERVICE}" = "null" ]; then - FIRST_VALIDATOR_SERVICE="supernova_validator_1" + FIRST_VALIDATOR_SERVICE="supernova_validator_1" fi # Inside the compose network every validator exposes the default container ports. 
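The block above is how the entrypoint picks the validator container to talk to: prefer the entry flagged "network-maker": true in validators.json, fall back to the first entry, and only then to the supernova_validator_1 literal. A minimal standalone sketch of that selection, assuming a hypothetical validators.json of the shape the jq filters expect (the sample path and names below are illustrative, not taken from the repo):

# Sketch only: exercises the same jq filters against a made-up validators.json.
cat >/tmp/validators.json <<'EOF'
[
  {"name": "supernova_validator_1", "network-maker": false},
  {"name": "supernova_validator_2", "network-maker": true}
]
EOF
svc="$(jq -r '([.[] | select(."network-maker"==true) | .name] | first) // empty' /tmp/validators.json)"
if [ -z "${svc}" ] || [ "${svc}" = "null" ]; then
    svc="$(jq -r '.[0].name // empty' /tmp/validators.json)"
fi
echo "${svc}" # -> supernova_validator_2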
@@ -196,7 +196,7 @@ export SIMD_CHAIN_ID SIMD_DENOM SIMD_RPC_PORT SIMD_GRPC_PORT export HERMES_KEY_NAME LUMERA_MNEMONIC_FILE SIMD_MNEMONIC_FILE if [ -n "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ] && [ ! -s "${SIMD_MNEMONIC_FILE}" ]; then - printf '%s\n' "${SIMAPP_KEY_RELAYER_MNEMONIC}" > "${SIMD_MNEMONIC_FILE}" + printf '%s\n' "${SIMAPP_KEY_RELAYER_MNEMONIC}" >"${SIMD_MNEMONIC_FILE}" fi HERMES_CONFIG_DEFAULT="/root/.hermes/config.toml" @@ -206,307 +206,307 @@ log_info "SIMD home: ${SIMD_HOME}" log_info "Hermes config: ${HERMES_CONFIG_PATH}" init_simd_home() { - export SIMD_HOME SIMAPP_HOME="${SIMD_HOME}" - export SIMD_CHAIN_ID CHAIN_ID="${SIMD_CHAIN_ID}" - export SIMD_MONIKER MONIKER="${SIMD_MONIKER}" - export SIMD_KEY_NAME KEY_NAME="${SIMD_KEY_NAME}" - export RELAYER_KEY_NAME - export SIMD_KEYRING KEYRING="${SIMD_KEYRING}" - export SIMD_DENOM DENOM="${SIMD_DENOM}" STAKE_DENOM="${SIMD_DENOM}" - export SIMD_GENESIS_BALANCE ACCOUNT_BALANCE="${SIMD_GENESIS_BALANCE}" - export SIMD_STAKE_AMOUNT STAKING_AMOUNT="${SIMD_STAKE_AMOUNT}" - export SIMD_TEST_KEY_NAME SIMD_TEST_ACCOUNT_BALANCE - export MINIMUM_GAS_PRICES - export SIMAPP_KEY_RELAYER_MNEMONIC - export SIMAPP_SHARED_DIR="${SHARED_DIR}" - - log_info "Ensuring simd home initialised (logs -> ${SIMAPP_INIT_LOG})" - if ran /root/scripts/init-simapp.sh >>"${SIMAPP_INIT_LOG}" 2>&1; then - log_info "simd home initialised" - else - local rc=$? - log_info "init-simapp.sh failed with exit code ${rc} (see ${SIMAPP_INIT_LOG})" - return ${rc} - fi + export SIMD_HOME SIMAPP_HOME="${SIMD_HOME}" + export SIMD_CHAIN_ID CHAIN_ID="${SIMD_CHAIN_ID}" + export SIMD_MONIKER MONIKER="${SIMD_MONIKER}" + export SIMD_KEY_NAME KEY_NAME="${SIMD_KEY_NAME}" + export RELAYER_KEY_NAME + export SIMD_KEYRING KEYRING="${SIMD_KEYRING}" + export SIMD_DENOM DENOM="${SIMD_DENOM}" STAKE_DENOM="${SIMD_DENOM}" + export SIMD_GENESIS_BALANCE ACCOUNT_BALANCE="${SIMD_GENESIS_BALANCE}" + export SIMD_STAKE_AMOUNT STAKING_AMOUNT="${SIMD_STAKE_AMOUNT}" + export SIMD_TEST_KEY_NAME SIMD_TEST_ACCOUNT_BALANCE + export MINIMUM_GAS_PRICES + export SIMAPP_KEY_RELAYER_MNEMONIC + export SIMAPP_SHARED_DIR="${SHARED_DIR}" + + log_info "Ensuring simd home initialised (logs -> ${SIMAPP_INIT_LOG})" + if ran /root/scripts/init-simapp.sh >>"${SIMAPP_INIT_LOG}" 2>&1; then + log_info "simd home initialised" + else + local rc=$? + log_info "init-simapp.sh failed with exit code ${rc} (see ${SIMAPP_INIT_LOG})" + return ${rc} + fi } wait_for_lumera_rpc() { - local url="${LUMERA_RPC_ADDR}/status" - log_info "Waiting for Lumera RPC (${url})..." - for _ in $(seq 1 120); do - if curl -sf "${url}" >/dev/null 2>&1; then - log_info "Lumera RPC is reachable." - return 0 - fi - sleep 2 - done - log_info "Lumera RPC did not become ready in time." - return 1 + local url="${LUMERA_RPC_ADDR}/status" + log_info "Waiting for Lumera RPC (${url})..." + for _ in $(seq 1 120); do + if curl -sf "${url}" >/dev/null 2>&1; then + log_info "Lumera RPC is reachable." + return 0 + fi + sleep 2 + done + log_info "Lumera RPC did not become ready in time." 
+ return 1 } current_height() { - local url="$1" - curl -sf "${url}" 2>/dev/null \ - | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null \ - | awk '($1 ~ /^[0-9]+$/) { print $1; next } { print 0 }' + local url="$1" + curl -sf "${url}" 2>/dev/null | + jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null | + awk '($1 ~ /^[0-9]+$/) { print $1; next } { print 0 }' } wait_for_height() { - local label="$1" - local url="$2" - local target="$3" - local retries="${4:-180}" - local delay="${5:-2}" - - log_info "Waiting for ${label} height to reach at least ${target}..." - for _ in $(seq 1 "${retries}"); do - local height - height="$(current_height "${url}")" - if (( height >= target )); then - log_info "${label} height is ${height} (>= ${target})." - return 0 - fi - sleep "${delay}" - done - log_info "Timed out waiting for ${label} height ${target}; continuing anyway." - return 1 + local label="$1" + local url="$2" + local target="$3" + local retries="${4:-180}" + local delay="${5:-2}" + + log_info "Waiting for ${label} height to reach at least ${target}..." + for _ in $(seq 1 "${retries}"); do + local height + height="$(current_height "${url}")" + if ((height >= target)); then + log_info "${label} height is ${height} (>= ${target})." + return 0 + fi + sleep "${delay}" + done + log_info "Timed out waiting for ${label} height ${target}; continuing anyway." + return 1 } current_lumera_height() { - current_height "${LUMERA_RPC_ADDR}/status" + current_height "${LUMERA_RPC_ADDR}/status" } wait_for_lumera_height() { - local target="$1" - local retries="${2:-180}" - local delay="${3:-2}" + local target="$1" + local retries="${2:-180}" + local delay="${3:-2}" - wait_for_height "Lumera" "${LUMERA_RPC_ADDR}/status" "${target}" "${retries}" "${delay}" + wait_for_height "Lumera" "${LUMERA_RPC_ADDR}/status" "${target}" "${retries}" "${delay}" } wait_for_lumera_blocks() { - local blocks="${1:-5}" - local start - start="$(current_lumera_height)" - local target=$(( start + blocks )) - if (( target < blocks )); then - target="${blocks}" - fi - wait_for_lumera_height "${target}" + local blocks="${1:-5}" + local start + start="$(current_lumera_height)" + local target=$((start + blocks)) + if ((target < blocks)); then + target="${blocks}" + fi + wait_for_lumera_height "${target}" } start_simd() { - log_info "Starting simd node..." - simd start \ - --home "${SIMD_HOME}" \ - --pruning=nothing \ - --rpc.laddr "tcp://0.0.0.0:${SIMD_RPC_PORT}" \ - --grpc.address "0.0.0.0:${SIMD_GRPC_PORT}" \ - --address "tcp://0.0.0.0:${SIMD_P2P_PORT}" \ - --api.enable true \ - --api.address "tcp://0.0.0.0:${SIMD_API_PORT}" \ - --minimum-gas-prices "0${SIMD_DENOM}" \ - >"${SIMD_LOG_FILE}" 2>&1 & - SIMD_PID=$! - echo "${SIMD_PID}" > /var/run/simd.pid - log_info "simd started with PID ${SIMD_PID}, logs -> ${SIMD_LOG_FILE}" + log_info "Starting simd node..." + simd start \ + --home "${SIMD_HOME}" \ + --pruning=nothing \ + --rpc.laddr "tcp://0.0.0.0:${SIMD_RPC_PORT}" \ + --grpc.address "0.0.0.0:${SIMD_GRPC_PORT}" \ + --address "tcp://0.0.0.0:${SIMD_P2P_PORT}" \ + --api.enable true \ + --api.address "tcp://0.0.0.0:${SIMD_API_PORT}" \ + --minimum-gas-prices "0${SIMD_DENOM}" \ + >"${SIMD_LOG_FILE}" 2>&1 & + SIMD_PID=$! + echo "${SIMD_PID}" >/var/run/simd.pid + log_info "simd started with PID ${SIMD_PID}, logs -> ${SIMD_LOG_FILE}" } wait_for_simd() { - local url="http://127.0.0.1:${SIMD_RPC_PORT}/status" - log_info "Waiting for simd RPC (${url})..." 
- for _ in $(seq 1 120); do - if curl -sf "${url}" >/dev/null 2>&1; then - log_info "simd RPC is online." - return 0 - fi - sleep 1 - done - log_info "simd RPC did not become ready in time." - return 1 + local url="http://127.0.0.1:${SIMD_RPC_PORT}/status" + log_info "Waiting for simd RPC (${url})..." + for _ in $(seq 1 120); do + if curl -sf "${url}" >/dev/null 2>&1; then + log_info "simd RPC is online." + return 0 + fi + sleep 1 + done + log_info "simd RPC did not become ready in time." + return 1 } current_simd_height() { - current_height "http://127.0.0.1:${SIMD_RPC_PORT}/status" + current_height "http://127.0.0.1:${SIMD_RPC_PORT}/status" } wait_for_simd_height() { - local target="$1" - local retries="${2:-180}" - local delay="${3:-1}" + local target="$1" + local retries="${2:-180}" + local delay="${3:-1}" - wait_for_height "simd" "http://127.0.0.1:${SIMD_RPC_PORT}/status" "${target}" "${retries}" "${delay}" + wait_for_height "simd" "http://127.0.0.1:${SIMD_RPC_PORT}/status" "${target}" "${retries}" "${delay}" } fund_simd_test_account() { - local key_name="${SIMD_TEST_KEY_NAME}" - if [ -z "${key_name}" ]; then - log_info "SIMD test key name is empty; skipping funding" - return 0 - fi - - local addr - addr="$(simd --home "${SIMD_HOME}" keys show "${key_name}" -a --keyring-backend "${SIMD_KEYRING}" 2>/dev/null || true)" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -z "${addr}" ]; then - log_info "SIMD test key ${key_name} not found; skipping funding" - return 0 - fi - - local target amount current - target="${SIMD_TEST_ACCOUNT_BALANCE}" - amount="${target%${SIMD_DENOM}}" - if [ "${amount}" = "${target}" ]; then - log_info "SIMD test account balance ${target} missing denom ${SIMD_DENOM}; skipping funding" - return 0 - fi - - current="$(simd --home "${SIMD_HOME}" query bank balances "${addr}" --node "http://127.0.0.1:${SIMD_RPC_PORT}" --output json 2>/dev/null \ - | jq -r --arg denom "${SIMD_DENOM}" '.balances[]? | select(.denom==$denom) | .amount' 2>/dev/null || echo 0)" - if ! 
[[ "${current}" =~ ^[0-9]+$ ]]; then - current=0 - fi - - if (( current >= amount )); then - log_info "SIMD test account already funded (${current}${SIMD_DENOM} >= ${target})" - return 0 - fi - - local topup - topup=$(( amount - current )) - log_info "Funding SIMD test account ${addr} with ${topup}${SIMD_DENOM}" - local tx_output - if tx_output=$(simd --home "${SIMD_HOME}" tx bank send "${SIMD_KEY_NAME}" "${addr}" "${topup}${SIMD_DENOM}" \ - --keyring-backend "${SIMD_KEYRING}" \ - --chain-id "${SIMD_CHAIN_ID}" \ - --node "http://127.0.0.1:${SIMD_RPC_PORT}" \ - --gas auto \ - --gas-adjustment 1.3 \ - --gas-prices "${MINIMUM_GAS_PRICES}" \ - -y 2>&1); then - log_info "Funding transaction for SIMD test account succeeded" - else - log_info "Funding transaction for SIMD test account failed: ${tx_output}" - fi + local key_name="${SIMD_TEST_KEY_NAME}" + if [ -z "${key_name}" ]; then + log_info "SIMD test key name is empty; skipping funding" + return 0 + fi + + local addr + addr="$(simd --home "${SIMD_HOME}" keys show "${key_name}" -a --keyring-backend "${SIMD_KEYRING}" 2>/dev/null || true)" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -z "${addr}" ]; then + log_info "SIMD test key ${key_name} not found; skipping funding" + return 0 + fi + + local target amount current + target="${SIMD_TEST_ACCOUNT_BALANCE}" + amount="${target%${SIMD_DENOM}}" + if [ "${amount}" = "${target}" ]; then + log_info "SIMD test account balance ${target} missing denom ${SIMD_DENOM}; skipping funding" + return 0 + fi + + current="$(simd --home "${SIMD_HOME}" query bank balances "${addr}" --node "http://127.0.0.1:${SIMD_RPC_PORT}" --output json 2>/dev/null | + jq -r --arg denom "${SIMD_DENOM}" '.balances[]? | select(.denom==$denom) | .amount' 2>/dev/null || echo 0)" + if ! 
[[ "${current}" =~ ^[0-9]+$ ]]; then + current=0 + fi + + if ((current >= amount)); then + log_info "SIMD test account already funded (${current}${SIMD_DENOM} >= ${target})" + return 0 + fi + + local topup + topup=$((amount - current)) + log_info "Funding SIMD test account ${addr} with ${topup}${SIMD_DENOM}" + local tx_output + if tx_output=$(simd --home "${SIMD_HOME}" tx bank send "${SIMD_KEY_NAME}" "${addr}" "${topup}${SIMD_DENOM}" \ + --keyring-backend "${SIMD_KEYRING}" \ + --chain-id "${SIMD_CHAIN_ID}" \ + --node "http://127.0.0.1:${SIMD_RPC_PORT}" \ + --gas auto \ + --gas-adjustment 1.3 \ + --gas-prices "${MINIMUM_GAS_PRICES}" \ + -y 2>&1); then + log_info "Funding transaction for SIMD test account succeeded" + else + log_info "Funding transaction for SIMD test account failed: ${tx_output}" + fi } fund_simd_relayer_account() { - local key_name="${RELAYER_KEY_NAME}" - if [ -z "${key_name}" ]; then - log_info "SIMD relayer key name is empty; skipping funding" - return 0 - fi - if [ "${key_name}" = "${SIMD_KEY_NAME}" ]; then - log_info "SIMD relayer key matches validator key; skipping funding" - return 0 - fi - - log_info "Resolving SIMD relayer key ${key_name} for funding" - local addr - addr="$(simd --home "${SIMD_HOME}" keys show "${key_name}" -a --keyring-backend "${SIMD_KEYRING}" 2>/dev/null || true)" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -z "${addr}" ]; then - log_info "SIMD relayer key ${key_name} not found; skipping funding" - return 0 - fi - log_info "SIMD relayer address: ${addr}" - - local target amount current - target="${SIMD_RELAYER_ACCOUNT_BALANCE}" - amount="${target%${SIMD_DENOM}}" - if [ "${amount}" = "${target}" ]; then - log_info "SIMD relayer balance ${target} missing denom ${SIMD_DENOM}; skipping funding" - return 0 - fi - - current="$(simd --home "${SIMD_HOME}" query bank balances "${addr}" --node "http://127.0.0.1:${SIMD_RPC_PORT}" --output json 2>/dev/null \ - | jq -r --arg denom "${SIMD_DENOM}" '.balances[]? | select(.denom==$denom) | .amount' 2>/dev/null || echo 0)" - if ! 
[[ "${current}" =~ ^[0-9]+$ ]]; then - current=0 - fi - log_info "SIMD relayer current balance: ${current}${SIMD_DENOM}" - - if (( current >= amount )); then - log_info "SIMD relayer account already funded (${current}${SIMD_DENOM} >= ${target})" - return 0 - fi - - local topup - topup=$(( amount - current )) - log_info "Funding SIMD relayer account ${addr} with ${topup}${SIMD_DENOM}" - local tx_output - if tx_output=$(simd --home "${SIMD_HOME}" tx bank send "${SIMD_KEY_NAME}" "${addr}" "${topup}${SIMD_DENOM}" \ - --keyring-backend "${SIMD_KEYRING}" \ - --chain-id "${SIMD_CHAIN_ID}" \ - --node "http://127.0.0.1:${SIMD_RPC_PORT}" \ - --gas auto \ - --gas-adjustment 1.3 \ - --gas-prices "${MINIMUM_GAS_PRICES}" \ - -y 2>&1); then - log_info "Funding transaction for SIMD relayer account succeeded" - else - log_info "Funding transaction for SIMD relayer account failed: ${tx_output}" - fi + local key_name="${RELAYER_KEY_NAME}" + if [ -z "${key_name}" ]; then + log_info "SIMD relayer key name is empty; skipping funding" + return 0 + fi + if [ "${key_name}" = "${SIMD_KEY_NAME}" ]; then + log_info "SIMD relayer key matches validator key; skipping funding" + return 0 + fi + + log_info "Resolving SIMD relayer key ${key_name} for funding" + local addr + addr="$(simd --home "${SIMD_HOME}" keys show "${key_name}" -a --keyring-backend "${SIMD_KEYRING}" 2>/dev/null || true)" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -z "${addr}" ]; then + log_info "SIMD relayer key ${key_name} not found; skipping funding" + return 0 + fi + log_info "SIMD relayer address: ${addr}" + + local target amount current + target="${SIMD_RELAYER_ACCOUNT_BALANCE}" + amount="${target%${SIMD_DENOM}}" + if [ "${amount}" = "${target}" ]; then + log_info "SIMD relayer balance ${target} missing denom ${SIMD_DENOM}; skipping funding" + return 0 + fi + + current="$(simd --home "${SIMD_HOME}" query bank balances "${addr}" --node "http://127.0.0.1:${SIMD_RPC_PORT}" --output json 2>/dev/null | + jq -r --arg denom "${SIMD_DENOM}" '.balances[]? | select(.denom==$denom) | .amount' 2>/dev/null || echo 0)" + if ! [[ "${current}" =~ ^[0-9]+$ ]]; then + current=0 + fi + log_info "SIMD relayer current balance: ${current}${SIMD_DENOM}" + + if ((current >= amount)); then + log_info "SIMD relayer account already funded (${current}${SIMD_DENOM} >= ${target})" + return 0 + fi + + local topup + topup=$((amount - current)) + log_info "Funding SIMD relayer account ${addr} with ${topup}${SIMD_DENOM}" + local tx_output + if tx_output=$(simd --home "${SIMD_HOME}" tx bank send "${SIMD_KEY_NAME}" "${addr}" "${topup}${SIMD_DENOM}" \ + --keyring-backend "${SIMD_KEYRING}" \ + --chain-id "${SIMD_CHAIN_ID}" \ + --node "http://127.0.0.1:${SIMD_RPC_PORT}" \ + --gas auto \ + --gas-adjustment 1.3 \ + --gas-prices "${MINIMUM_GAS_PRICES}" \ + -y 2>&1); then + log_info "Funding transaction for SIMD relayer account succeeded" + else + log_info "Funding transaction for SIMD relayer account failed: ${tx_output}" + fi } start_hermes() { - log_info "Starting Hermes relayer..." - hermes --config "${HERMES_CONFIG_PATH}" start >"${HERMES_LOG_FILE}" 2>&1 & - HERMES_PID=$! - echo "${HERMES_PID}" > /var/run/hermes.pid - log_info "Hermes started with PID ${HERMES_PID}, logs -> ${HERMES_LOG_FILE}" + log_info "Starting Hermes relayer..." + hermes --config "${HERMES_CONFIG_PATH}" start >"${HERMES_LOG_FILE}" 2>&1 & + HERMES_PID=$! 
+ echo "${HERMES_PID}" >/var/run/hermes.pid + log_info "Hermes started with PID ${HERMES_PID}, logs -> ${HERMES_LOG_FILE}" } cleanup() { - log_info "Caught termination, stopping processes..." - [[ -n "${HERMES_PID:-}" ]] && kill "${HERMES_PID}" >/dev/null 2>&1 || true - [[ -n "${SIMD_PID:-}" ]] && kill "${SIMD_PID}" >/dev/null 2>&1 || true - [[ -n "${TAIL_PID:-}" ]] && kill "${TAIL_PID}" >/dev/null 2>&1 || true - wait || true + log_info "Caught termination, stopping processes..." + [[ -n "${HERMES_PID:-}" ]] && kill "${HERMES_PID}" >/dev/null 2>&1 || true + [[ -n "${SIMD_PID:-}" ]] && kill "${SIMD_PID}" >/dev/null 2>&1 || true + [[ -n "${TAIL_PID:-}" ]] && kill "${TAIL_PID}" >/dev/null 2>&1 || true + wait || true } trap cleanup EXIT INT TERM if init_simd_home; then - log_info "simd home ready" + log_info "simd home ready" else - log_info "init_simd_home failed; continuing to keep container alive" + log_info "init_simd_home failed; continuing to keep container alive" fi if start_simd; then - if wait_for_simd; then - wait_for_simd_height 1 || true - fund_simd_relayer_account || true - fund_simd_test_account || true - else - log_info "wait_for_simd timed out; continuing to keep container alive" - fi + if wait_for_simd; then + wait_for_simd_height 1 || true + fund_simd_relayer_account || true + fund_simd_test_account || true + else + log_info "wait_for_simd timed out; continuing to keep container alive" + fi else - log_info "start_simd failed; continuing to keep container alive" + log_info "start_simd failed; continuing to keep container alive" fi if wait_for_lumera_rpc; then - : + : else - log_info "Lumera RPC unreachable after timeout" + log_info "Lumera RPC unreachable after timeout" fi wait_for_lumera_blocks 5 || true if ran /root/scripts/hermes-configure.sh; then - log_info "Hermes config ensured" + log_info "Hermes config ensured" else - log_info "hermes-configure.sh failed; continuing" + log_info "hermes-configure.sh failed; continuing" fi if ran /root/scripts/hermes-channel.sh; then - log_info "Hermes channel/keys ensured" + log_info "Hermes channel/keys ensured" else - log_info "hermes-channel.sh reported failure" + log_info "hermes-channel.sh reported failure" fi if start_hermes; then - : + : else - log_info "start_hermes failed; continuing to keep container alive" + log_info "start_hermes failed; continuing to keep container alive" fi log_info "Tailing logs (simd + hermes)..." @@ -519,6 +519,6 @@ wait -n "${SIMD_PID:-}" "${HERMES_PID:-}" || true log_info "Entering idle loop to keep container alive. Use Ctrl+C to exit." while true; do - sleep 300 & - wait $! || true + sleep 300 & + wait $! 
|| true done diff --git a/devnet/hermes/scripts/init-simapp.sh b/devnet/hermes/scripts/init-simapp.sh index 19a769f7..7e16fe3d 100644 --- a/devnet/hermes/scripts/init-simapp.sh +++ b/devnet/hermes/scripts/init-simapp.sh @@ -4,23 +4,23 @@ set -euo pipefail log() { - echo "[SIMAPP] $*" >&2 + echo "[SIMAPP] $*" >&2 } run() { - log "$*" - "$@" + log "$*" + "$@" } run_capture() { - log "$*" - "$@" + log "$*" + "$@" } SIMD_BIN="${SIMD_BIN:-$(command -v simd 2>/dev/null || true)}" if [ -z "${SIMD_BIN}" ]; then - log "simd binary not found (SIMD_BIN unset and simd not in PATH)" - exit 1 + log "simd binary not found (SIMD_BIN unset and simd not in PATH)" + exit 1 fi SIMAPP_HOME="${SIMAPP_HOME:-${SIMD_HOME:-/root/.simd}}" @@ -41,110 +41,110 @@ SIMAPP_SHARED_DIR="${SIMAPP_SHARED_DIR:-/shared}" GENESIS_FILE="${SIMAPP_HOME}/config/genesis.json" prefixed_name() { - local prefix="$1" - local name="$2" - case "${name}" in - "${prefix}"*) printf '%s' "${name}" ;; - *) printf '%s%s' "${prefix}" "${name}" ;; - esac + local prefix="$1" + local name="$2" + case "${name}" in + "${prefix}"*) printf '%s' "${name}" ;; + *) printf '%s%s' "${prefix}" "${name}" ;; + esac } wait_for_file() { - local file="$1" - local timeout="${2:-60}" - local elapsed=0 - while [ ! -s "${file}" ]; do - if [ "${elapsed}" -ge "${timeout}" ]; then - return 1 - fi - sleep 1 - elapsed=$((elapsed + 1)) - done - return 0 + local file="$1" + local timeout="${2:-60}" + local elapsed=0 + while [ ! -s "${file}" ]; do + if [ "${elapsed}" -ge "${timeout}" ]; then + return 1 + fi + sleep 1 + elapsed=$((elapsed + 1)) + done + return 0 } key_mnemonic_file() { - local name="$1" - name="$(prefixed_name "simd-" "${name}")" - printf '%s/hermes/%s.mnemonic' "${SIMAPP_SHARED_DIR}" "${name}" + local name="$1" + name="$(prefixed_name "simd-" "${name}")" + printf '%s/hermes/%s.mnemonic' "${SIMAPP_SHARED_DIR}" "${name}" } key_address_file() { - local name="$1" - name="$(prefixed_name "simd-" "${name}")" - printf '%s/hermes/%s.address' "${SIMAPP_SHARED_DIR}" "${name}" + local name="$1" + name="$(prefixed_name "simd-" "${name}")" + printf '%s/hermes/%s.address' "${SIMAPP_SHARED_DIR}" "${name}" } record_key_mnemonic() { - local name="$1" - local mnemonic="$2" - [ -z "${mnemonic}" ] && return 0 - local file - file="$(key_mnemonic_file "${name}")" - mkdir -p "$(dirname "${file}")" - printf '%s\n' "${mnemonic}" > "${file}" + local name="$1" + local mnemonic="$2" + [ -z "${mnemonic}" ] && return 0 + local file + file="$(key_mnemonic_file "${name}")" + mkdir -p "$(dirname "${file}")" + printf '%s\n' "${mnemonic}" >"${file}" } record_key_address() { - local name="$1" - local addr file - addr="$("${SIMD_BIN}" --home "${SIMAPP_HOME}" keys show "${name}" -a --keyring-backend "${KEYRING}" 2>/dev/null || true)" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -n "${addr}" ]; then - log "Recorded ${name} address: ${addr}" - file="$(key_address_file "${name}")" - mkdir -p "$(dirname "${file}")" - printf '%s\n' "${addr}" > "${file}" - else - log "Failed to resolve address for key ${name}" - fi + local name="$1" + local addr file + addr="$("${SIMD_BIN}" --home "${SIMAPP_HOME}" keys show "${name}" -a --keyring-backend "${KEYRING}" 2>/dev/null || true)" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -n "${addr}" ]; then + log "Recorded ${name} address: ${addr}" + file="$(key_address_file "${name}")" + mkdir -p "$(dirname "${file}")" + printf '%s\n' "${addr}" >"${file}" + else + log "Failed to resolve address for key ${name}" + fi } 
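+# Shared relayer mnemonic: when the Lumera side has written this file, the
+# simd relayer key is recovered from it so Hermes signs with a single relayer
+# identity on both chains (see the wait logic in ensure_key below).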
LUMERA_RELAYER_MNEMONIC_FILE="${SIMAPP_SHARED_DIR}/hermes/lumera-hermes-relayer.mnemonic" record_relayer_mnemonic() { - local mnemonic="$1" - [ -z "${mnemonic}" ] && return 0 - log "Recording relayer mnemonic for key ${RELAYER_KEY_NAME}" - record_key_mnemonic "${RELAYER_KEY_NAME}" "${mnemonic}" + local mnemonic="$1" + [ -z "${mnemonic}" ] && return 0 + log "Recording relayer mnemonic for key ${RELAYER_KEY_NAME}" + record_key_mnemonic "${RELAYER_KEY_NAME}" "${mnemonic}" } if [ -s "${LUMERA_RELAYER_MNEMONIC_FILE}" ]; then - if [ -z "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then - log "Using relayer mnemonic from ${LUMERA_RELAYER_MNEMONIC_FILE}" - SIMAPP_KEY_RELAYER_MNEMONIC="$(cat "${LUMERA_RELAYER_MNEMONIC_FILE}")" - export SIMAPP_KEY_RELAYER_MNEMONIC - else - existing_relayer_mnemonic="$(cat "${LUMERA_RELAYER_MNEMONIC_FILE}")" - if [ "${SIMAPP_KEY_RELAYER_MNEMONIC}" != "${existing_relayer_mnemonic}" ]; then - SIMAPP_KEY_RELAYER_MNEMONIC="${existing_relayer_mnemonic}" - export SIMAPP_KEY_RELAYER_MNEMONIC - fi - fi + if [ -z "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then + log "Using relayer mnemonic from ${LUMERA_RELAYER_MNEMONIC_FILE}" + SIMAPP_KEY_RELAYER_MNEMONIC="$(cat "${LUMERA_RELAYER_MNEMONIC_FILE}")" + export SIMAPP_KEY_RELAYER_MNEMONIC + else + existing_relayer_mnemonic="$(cat "${LUMERA_RELAYER_MNEMONIC_FILE}")" + if [ "${SIMAPP_KEY_RELAYER_MNEMONIC}" != "${existing_relayer_mnemonic}" ]; then + SIMAPP_KEY_RELAYER_MNEMONIC="${existing_relayer_mnemonic}" + export SIMAPP_KEY_RELAYER_MNEMONIC + fi + fi fi already_init=0 if [ -f "${GENESIS_FILE}" ]; then - already_init=1 - if [ -n "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then - record_relayer_mnemonic "${SIMAPP_KEY_RELAYER_MNEMONIC}" - fi - log "simd already initialised at ${GENESIS_FILE}" + already_init=1 + if [ -n "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then + record_relayer_mnemonic "${SIMAPP_KEY_RELAYER_MNEMONIC}" + fi + log "simd already initialised at ${GENESIS_FILE}" else - log "Initialising simd home at ${SIMAPP_HOME}" + log "Initialising simd home at ${SIMAPP_HOME}" - if [ -d "${SIMAPP_HOME}" ]; then - if findmnt -rn -T "${SIMAPP_HOME}" >/dev/null 2>&1; then - log "${SIMAPP_HOME} is a mount point; clearing existing contents" - find "${SIMAPP_HOME}" -mindepth 1 -maxdepth 1 -exec rm -rf {} + - else - run rm -rf "${SIMAPP_HOME}" - fi - fi + if [ -d "${SIMAPP_HOME}" ]; then + if findmnt -rn -T "${SIMAPP_HOME}" >/dev/null 2>&1; then + log "${SIMAPP_HOME} is a mount point; clearing existing contents" + find "${SIMAPP_HOME}" -mindepth 1 -maxdepth 1 -exec rm -rf {} + + else + run rm -rf "${SIMAPP_HOME}" + fi + fi - mkdir -p "${SIMAPP_HOME}" + mkdir -p "${SIMAPP_HOME}" - run "${SIMD_BIN}" --home "${SIMAPP_HOME}" init "${MONIKER}" --chain-id "${CHAIN_ID}" + run "${SIMD_BIN}" --home "${SIMAPP_HOME}" init "${MONIKER}" --chain-id "${CHAIN_ID}" fi run "${SIMD_BIN}" --home "${SIMAPP_HOME}" config set client chain-id "${CHAIN_ID}" @@ -153,100 +153,100 @@ run "${SIMD_BIN}" --home "${SIMAPP_HOME}" config set app api.enable true || true run "${SIMD_BIN}" --home "${SIMAPP_HOME}" config set app minimum-gas-prices "${MINIMUM_GAS_PRICES}" if [ "${already_init}" -eq 1 ]; then - record_key_address "${KEY_NAME}" - if [ "${RELAYER_KEY_NAME}" != "${KEY_NAME}" ]; then - record_key_address "${RELAYER_KEY_NAME}" - fi - if [ -n "${TEST_KEY_NAME}" ]; then - record_key_address "${TEST_KEY_NAME}" - fi - exit 0 + record_key_address "${KEY_NAME}" + if [ "${RELAYER_KEY_NAME}" != "${KEY_NAME}" ]; then + record_key_address "${RELAYER_KEY_NAME}" + fi + if [ -n "${TEST_KEY_NAME}" 
]; then + record_key_address "${TEST_KEY_NAME}" + fi + exit 0 fi ensure_key() { - local name="$1" - local upper var mnemonic json mnemonic_file - upper=$(echo "${name}" | tr '[:lower:]-' '[:upper:]_') - var="SIMAPP_KEY_${upper}_MNEMONIC" - mnemonic="${!var:-}" - mnemonic_file="$(key_mnemonic_file "${name}")" - - if [ "${name}" = "${RELAYER_KEY_NAME}" ] && [ -n "${LUMERA_RELAYER_MNEMONIC_FILE:-}" ]; then - log "Waiting for Lumera relayer mnemonic ${LUMERA_RELAYER_MNEMONIC_FILE}" - if wait_for_file "${LUMERA_RELAYER_MNEMONIC_FILE}" "${SIMAPP_RELAYER_MNEMONIC_WAIT_SECONDS:-60}"; then - mnemonic="$(cat "${LUMERA_RELAYER_MNEMONIC_FILE}")" - printf -v "${var}" '%s' "${mnemonic}" - export "${var}" - else - log "Lumera relayer mnemonic ${LUMERA_RELAYER_MNEMONIC_FILE} not available; proceeding without it" - fi - fi - - if [ -z "${mnemonic}" ] && [ -s "${mnemonic_file}" ]; then - log "Loading mnemonic for key ${name} from ${mnemonic_file}" - mnemonic="$(cat "${mnemonic_file}")" - printf -v "${var}" '%s' "${mnemonic}" - export "${var}" - fi - - if "${SIMD_BIN}" --home "${SIMAPP_HOME}" keys show "${name}" --keyring-backend "${KEYRING}" >/dev/null 2>&1; then - log "Key ${name} already exists in keyring" - record_key_mnemonic "${name}" "${mnemonic}" - if [ "${name}" = "${RELAYER_KEY_NAME}" ]; then - record_relayer_mnemonic "${mnemonic}" - fi - return 0 - fi - - if [ -n "${mnemonic}" ]; then - log "Restoring key ${name} from mnemonic" - printf '%s\n' "${mnemonic}" | "${SIMD_BIN}" --home "${SIMAPP_HOME}" keys add "${name}" --keyring-backend "${KEYRING}" --recover >/dev/null - else - log "Creating key ${name}" - json=$(run_capture "${SIMD_BIN}" --home "${SIMAPP_HOME}" keys add "${name}" --keyring-backend "${KEYRING}" --output json) - if command -v jq >/dev/null 2>&1; then - mnemonic=$(printf '%s' "${json}" | jq -r '.mnemonic // empty' 2>/dev/null || true) - if [ -n "${mnemonic}" ]; then - printf -v "${var}" '%s' "${mnemonic}" - export "${var}" - fi - fi - fi - - record_key_mnemonic "${name}" "${mnemonic}" - record_key_address "${name}" - - if [ "${name}" = "${RELAYER_KEY_NAME}" ]; then - record_relayer_mnemonic "${mnemonic}" - fi + local name="$1" + local upper var mnemonic json mnemonic_file + upper=$(echo "${name}" | tr '[:lower:]-' '[:upper:]_') + var="SIMAPP_KEY_${upper}_MNEMONIC" + mnemonic="${!var:-}" + mnemonic_file="$(key_mnemonic_file "${name}")" + + if [ "${name}" = "${RELAYER_KEY_NAME}" ] && [ -n "${LUMERA_RELAYER_MNEMONIC_FILE:-}" ]; then + log "Waiting for Lumera relayer mnemonic ${LUMERA_RELAYER_MNEMONIC_FILE}" + if wait_for_file "${LUMERA_RELAYER_MNEMONIC_FILE}" "${SIMAPP_RELAYER_MNEMONIC_WAIT_SECONDS:-60}"; then + mnemonic="$(cat "${LUMERA_RELAYER_MNEMONIC_FILE}")" + printf -v "${var}" '%s' "${mnemonic}" + export "${var}" + else + log "Lumera relayer mnemonic ${LUMERA_RELAYER_MNEMONIC_FILE} not available; proceeding without it" + fi + fi + + if [ -z "${mnemonic}" ] && [ -s "${mnemonic_file}" ]; then + log "Loading mnemonic for key ${name} from ${mnemonic_file}" + mnemonic="$(cat "${mnemonic_file}")" + printf -v "${var}" '%s' "${mnemonic}" + export "${var}" + fi + + if "${SIMD_BIN}" --home "${SIMAPP_HOME}" keys show "${name}" --keyring-backend "${KEYRING}" >/dev/null 2>&1; then + log "Key ${name} already exists in keyring" + record_key_mnemonic "${name}" "${mnemonic}" + if [ "${name}" = "${RELAYER_KEY_NAME}" ]; then + record_relayer_mnemonic "${mnemonic}" + fi + return 0 + fi + + if [ -n "${mnemonic}" ]; then + log "Restoring key ${name} from mnemonic" + printf '%s\n' "${mnemonic}" | 
"${SIMD_BIN}" --home "${SIMAPP_HOME}" keys add "${name}" --keyring-backend "${KEYRING}" --recover >/dev/null + else + log "Creating key ${name}" + json=$(run_capture "${SIMD_BIN}" --home "${SIMAPP_HOME}" keys add "${name}" --keyring-backend "${KEYRING}" --output json) + if command -v jq >/dev/null 2>&1; then + mnemonic=$(printf '%s' "${json}" | jq -r '.mnemonic // empty' 2>/dev/null || true) + if [ -n "${mnemonic}" ]; then + printf -v "${var}" '%s' "${mnemonic}" + export "${var}" + fi + fi + fi + + record_key_mnemonic "${name}" "${mnemonic}" + record_key_address "${name}" + + if [ "${name}" = "${RELAYER_KEY_NAME}" ]; then + record_relayer_mnemonic "${mnemonic}" + fi } ensure_key "${KEY_NAME}" if [ "${RELAYER_KEY_NAME}" != "${KEY_NAME}" ]; then - ensure_key "${RELAYER_KEY_NAME}" + ensure_key "${RELAYER_KEY_NAME}" fi if [ -n "${TEST_KEY_NAME}" ]; then - ensure_key "${TEST_KEY_NAME}" + ensure_key "${TEST_KEY_NAME}" fi run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis add-genesis-account "${KEY_NAME}" "${ACCOUNT_BALANCE}" --keyring-backend "${KEYRING}" if [ "${RELAYER_KEY_NAME}" != "${KEY_NAME}" ]; then - run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis add-genesis-account "${RELAYER_KEY_NAME}" "${ACCOUNT_BALANCE}" --keyring-backend "${KEYRING}" + run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis add-genesis-account "${RELAYER_KEY_NAME}" "${ACCOUNT_BALANCE}" --keyring-backend "${KEYRING}" fi if [ -n "${TEST_KEY_NAME}" ]; then - run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis add-genesis-account "${TEST_KEY_NAME}" "${TEST_ACCOUNT_BALANCE}" --keyring-backend "${KEYRING}" + run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis add-genesis-account "${TEST_KEY_NAME}" "${TEST_ACCOUNT_BALANCE}" --keyring-backend "${KEYRING}" fi run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis gentx "${KEY_NAME}" "${STAKING_AMOUNT}" --chain-id "${CHAIN_ID}" --keyring-backend "${KEYRING}" run "${SIMD_BIN}" --home "${SIMAPP_HOME}" genesis collect-gentxs if [ ! -f "${GENESIS_FILE}" ]; then - log "Failed to create genesis at ${GENESIS_FILE}" - exit 1 + log "Failed to create genesis at ${GENESIS_FILE}" + exit 1 fi if [ -n "${SIMAPP_KEY_RELAYER_MNEMONIC:-}" ]; then - record_relayer_mnemonic "${SIMAPP_KEY_RELAYER_MNEMONIC}" + record_relayer_mnemonic "${SIMAPP_KEY_RELAYER_MNEMONIC}" fi log "simd home initialised at ${SIMAPP_HOME}" diff --git a/devnet/scripts/configure.sh b/devnet/scripts/configure.sh index 94c27544..c2b86dec 100755 --- a/devnet/scripts/configure.sh +++ b/devnet/scripts/configure.sh @@ -6,7 +6,7 @@ echo "Configuring Lumera for docker compose ..." 
# --- parse args ----------------------------------- BIN_DIR_ARG="" show_help() { - cat <<'EOF' + cat <<'EOF' Usage: configure.sh [--bin-dir DIR] Options: @@ -17,14 +17,29 @@ EOF } while [[ $# -gt 0 ]]; do - case "$1" in - -b|--bin-dir) - [[ -n "${2:-}" && "${2:0:1}" != "-" ]] || { echo "ERROR: --bin-dir requires DIR" >&2; exit 2; } - BIN_DIR_ARG="$2"; shift 2;; - --bin-dir=*) BIN_DIR_ARG="${1#*=}"; shift;; - -h|--help) show_help; exit 0;; - *) echo "ERROR: unknown arg: $1" >&2; show_help; exit 2;; - esac + case "$1" in + -b | --bin-dir) + [[ -n "${2:-}" && "${2:0:1}" != "-" ]] || { + echo "ERROR: --bin-dir requires DIR" >&2 + exit 2 + } + BIN_DIR_ARG="$2" + shift 2 + ;; + --bin-dir=*) + BIN_DIR_ARG="${1#*=}" + shift + ;; + -h | --help) + show_help + exit 0 + ;; + *) + echo "ERROR: unknown arg: $1" >&2 + show_help + exit 2 + ;; + esac done # --- resolve script dir & BIN_DIR (CLI > autodetect ../bin > empty) ----------- @@ -37,30 +52,30 @@ REPO_ROOT="$(git -C "${SCRIPT_DIR}" rev-parse --show-toplevel 2>/dev/null || tru # --- resolve BIN_DIR (CLI > repo-root/bin > empty) ---------------------------- if [[ -n "${BIN_DIR_ARG}" ]]; then - # Absolute path stays absolute; relative is interpreted from REPO_ROOT - if [[ "${BIN_DIR_ARG}" = /* ]]; then - CAND="${BIN_DIR_ARG}" - else - CAND="${REPO_ROOT}/${BIN_DIR_ARG}" - fi - # Normalize & verify - if BIN_DIR="$(cd "${CAND}" 2>/dev/null && pwd)"; then - : - else - echo "[CONFIGURE] ERROR: --bin-dir '${BIN_DIR_ARG}' not found under ${REPO_ROOT}" >&2 - exit 1 - fi + # Absolute path stays absolute; relative is interpreted from REPO_ROOT + if [[ "${BIN_DIR_ARG}" = /* ]]; then + CAND="${BIN_DIR_ARG}" + else + CAND="${REPO_ROOT}/${BIN_DIR_ARG}" + fi + # Normalize & verify + if BIN_DIR="$(cd "${CAND}" 2>/dev/null && pwd)"; then + : + else + echo "[CONFIGURE] ERROR: --bin-dir '${BIN_DIR_ARG}' not found under ${REPO_ROOT}" >&2 + exit 1 + fi elif [[ -d "${SCRIPT_DIR}/../bin" ]]; then - BIN_DIR="$(cd "${SCRIPT_DIR}/../bin" && pwd)" + BIN_DIR="$(cd "${SCRIPT_DIR}/../bin" && pwd)" else - BIN_DIR="" + BIN_DIR="" fi if [[ -n "${BIN_DIR}" ]]; then - echo "[CONFIGURE] Using BIN_DIR=${BIN_DIR}" + echo "[CONFIGURE] Using BIN_DIR=${BIN_DIR}" else - echo "[CONFIGURE] ERROR ! BIN_DIR not provided andcould not be resolved" - exit 1 + echo "[CONFIGURE] ERROR ! BIN_DIR not provided and could not be resolved" + exit 1 fi # Require CONFIG_JSON environment variable @@ -72,15 +87,17 @@ echo "[CONFIGURE] Lumera chain config is $CONFIG_JSON" echo "[CONFIGURE] Lumera validators config is $VALIDATORS_JSON" if [ ! -f "${CONFIG_JSON}" ]; then - echo "[CONFIGURE] Missing ${CONFIG_JSON}"; exit 1 + echo "[CONFIGURE] Missing ${CONFIG_JSON}" + exit 1 fi if [ ! -f "${VALIDATORS_JSON}" ]; then - echo "[CONFIGURE] Missing ${VALIDATORS_JSON}"; exit 1 + echo "[CONFIGURE] Missing ${VALIDATORS_JSON}" + exit 1 fi -if [ ! command -v jq >/dev/null 2>&1 ]; then - echo "[CONFIGURE] jq is missing" +if ! 
command -v jq >/dev/null 2>&1; then
+    echo "[CONFIGURE] jq is missing"
 fi

 CHAIN_ID="$(jq -r '.chain.id' "${CONFIG_JSON}")"
@@ -98,58 +115,58 @@ NM_UI_SRC="${BIN_DIR}/nm-ui"
 NM_UI_DST="${RELEASE_DIR}/nm-ui"

 install_supernode() {
-    if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${SN}" ]; then
-        echo "[CONFIGURE] Copying supernode binary from ${BIN_DIR} to ${RELEASE_DIR}"
-        cp -f "${BIN_DIR}/${SN}" "${RELEASE_DIR}/"
-        chmod 755 "${RELEASE_DIR}/${SN}"
-    fi
+    if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${SN}" ]; then
+        echo "[CONFIGURE] Copying supernode binary from ${BIN_DIR} to ${RELEASE_DIR}"
+        cp -f "${BIN_DIR}/${SN}" "${RELEASE_DIR}/"
+        chmod 755 "${RELEASE_DIR}/${SN}"
+    fi
 }

 install_nm() {
-    if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${NM}" ]; then
-        # if nm-config.toml is missing - return an error
-        if [ ! -f "${NM_CFG}" ]; then
-            echo "[CONFIGURE] Missing ${NM_CFG}"
-            exit 1
-        fi
-        echo "[CONFIGURE] Copying network-maker file from ${BIN_DIR} to ${RELEASE_DIR}"
-        cp -f "${BIN_DIR}/${NM}" "${NM_CFG}" "${RELEASE_DIR}/"
-        chmod 755 "${RELEASE_DIR}/${NM}"
-
-        if [ -d "${NM_UI_SRC}" ]; then
-            echo "[CONFIGURE] Copying network-maker UI from ${NM_UI_SRC} to ${NM_UI_DST}"
-            rm -rf "${NM_UI_DST}"
-            cp -r "${NM_UI_SRC}" "${NM_UI_DST}"
-        else
-            echo "[CONFIGURE] network-maker UI not found at ${NM_UI_SRC}; skipping UI copy"
-        fi
-    fi
+    if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${NM}" ]; then
+        # nm-config.toml is required: fail if it is missing
+        if [ ! -f "${NM_CFG}" ]; then
+            echo "[CONFIGURE] Missing ${NM_CFG}"
+            exit 1
+        fi
+        echo "[CONFIGURE] Copying network-maker file from ${BIN_DIR} to ${RELEASE_DIR}"
+        cp -f "${BIN_DIR}/${NM}" "${NM_CFG}" "${RELEASE_DIR}/"
+        chmod 755 "${RELEASE_DIR}/${NM}"
+
+        if [ -d "${NM_UI_SRC}" ]; then
+            echo "[CONFIGURE] Copying network-maker UI from ${NM_UI_SRC} to ${NM_UI_DST}"
+            rm -rf "${NM_UI_DST}"
+            cp -r "${NM_UI_SRC}" "${NM_UI_DST}"
+        else
+            echo "[CONFIGURE] network-maker UI not found at ${NM_UI_SRC}; skipping UI copy"
+        fi
+    fi
 }

 install_sncli() {
-    if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${SNCLI}" ]; then
-        # if sncli-config.toml is missing - return an error
-        if [ -f "${SNCLI_CFG}" ]; then
-            echo "[CONFIGURE] Copying sncli config from ${BIN_DIR} to ${RELEASE_DIR}"
-            cp -f "${SNCLI_CFG}" "${RELEASE_DIR}/"
-        fi
-
-        echo "[CONFIGURE] Copying sncli binary from ${BIN_DIR} to ${RELEASE_DIR}"
-        cp -f "${BIN_DIR}/${SNCLI}" "${RELEASE_DIR}/"
-        chmod 755 "${RELEASE_DIR}/${SNCLI}"
-    fi
+    if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${SNCLI}" ]; then
+        # copy the sncli config alongside the binary when it is present (optional)
+        if [ -f "${SNCLI_CFG}" ]; then
+            echo "[CONFIGURE] Copying sncli config from ${BIN_DIR} to ${RELEASE_DIR}"
+            cp -f "${SNCLI_CFG}" "${RELEASE_DIR}/"
+        fi
+
+        echo "[CONFIGURE] Copying sncli binary from ${BIN_DIR} to ${RELEASE_DIR}"
+        cp -f "${BIN_DIR}/${SNCLI}" "${RELEASE_DIR}/"
+        chmod 755 "${RELEASE_DIR}/${SNCLI}"
+    fi
 }

 install_ibc_tests() {
-    local test_bins=("tests_validator" "tests_hermes")
-    local bin
-    for bin in "${test_bins[@]}"; do
-        if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${bin}" ]; then
-            echo "[CONFIGURE] Copying ${bin} binary from ${BIN_DIR} to ${RELEASE_DIR}"
-            cp -f "${BIN_DIR}/${bin}" "${RELEASE_DIR}/"
-            chmod 755 "${RELEASE_DIR}/${bin}"
-        fi
-    done
+    local test_bins=("tests_validator" "tests_hermes")
+    local bin
+    for bin in "${test_bins[@]}"; do
+        if [ -n "${BIN_DIR}" ] && [ -f "${BIN_DIR}/${bin}" ]; then
+            echo "[CONFIGURE] Copying ${bin} binary from ${BIN_DIR} to ${RELEASE_DIR}"
+            cp -f "${BIN_DIR}/${bin}" "${RELEASE_DIR}/"
+            chmod 755 
"${RELEASE_DIR}/${bin}" + fi + done } mkdir -p "${CFG_DIR}" "${RELEASE_DIR}" diff --git a/devnet/scripts/devnet-deploy.sh b/devnet/scripts/devnet-deploy.sh index f8747278..d77d1480 100755 --- a/devnet/scripts/devnet-deploy.sh +++ b/devnet/scripts/devnet-deploy.sh @@ -4,21 +4,21 @@ set -e DEVNET_ROOT="/tmp/lumera-devnet" mkdir -p "$DEVNET_ROOT/shared" if [ -f external_genesis.json ]; then - cp external_genesis.json "$DEVNET_ROOT/shared/external_genesis.json" + cp external_genesis.json "$DEVNET_ROOT/shared/external_genesis.json" else - echo "No external genesis file found." - return 1 + echo "No external genesis file found." + exit 1 fi if [ -f claims.csv ]; then - cp claims.csv "$DEVNET_ROOT/shared/claims.csv" + cp claims.csv "$DEVNET_ROOT/shared/claims.csv" else - echo "No claims file found." - return 2 + echo "No claims file found." + exit 2 fi if [ -f "supernode" ]; then - mkdir -p "$DEVNET_ROOT/shared/release" - cp "supernode" "$DEVNET_ROOT/shared/release/supernode" + mkdir -p "$DEVNET_ROOT/shared/release" + cp "supernode" "$DEVNET_ROOT/shared/release/supernode" else - echo "No supernode binary found." + echo "No supernode binary found." fi docker compose build diff --git a/devnet/scripts/network-maker-setup.sh b/devnet/scripts/network-maker-setup.sh index 3aff53d5..dc39670f 100755 --- a/devnet/scripts/network-maker-setup.sh +++ b/devnet/scripts/network-maker-setup.sh @@ -42,7 +42,7 @@ NM_HOME="/root/.${NM}" NM_FILES_DIR="/root/nm-files" NM_FILES_DIR_SHARED="/shared/nm-files" NM_LOG="${NM_LOG:-/root/logs/network-maker.log}" -NM_TEMPLATE="${RELEASE_DIR}/nm-config.toml" # Your template in /shared/release (you said it's attached as config.toml) +NM_TEMPLATE="${RELEASE_DIR}/nm-config.toml" # Your template in /shared/release (you said it's attached as config.toml) NM_CONFIG="${NM_HOME}/config.toml" NM_GRPC_PORT="${NM_GRPC_PORT:-50051}" NM_HTTP_PORT="${NM_HTTP_PORT:-8080}" @@ -61,78 +61,90 @@ mkdir -p "${NODE_STATUS_DIR}" "$(dirname "${NM_LOG}")" "${NM_HOME}" # ----- tiny helpers ----- run() { - echo "+ $*" >&2 - "$@" + echo "+ $*" >&2 + "$@" } run_capture() { - echo "+ $*" >&2 # goes to stderr, not captured - "$@" + echo "+ $*" >&2 # goes to stderr, not captured + "$@" } have() { command -v "$1" >/dev/null 2>&1; } wait_for_file() { while [ ! -s "$1" ]; do sleep 1; done; } -fail_soft() { echo "[NM] $*"; exit 0; } # exit 0 so container keeps running +fail_soft() { + echo "[NM] $*" + exit 0 +} # exit 0 so container keeps running version_ge() { - printf '%s\n' "$2" "$1" | sort -V | head -n1 | grep -q "^$2$" + printf '%s\n' "$2" "$1" | sort -V | head -n1 | grep -q "^$2$" } # Fetch the latest block height from lumerad. latest_block_height() { - local status - status="$(curl -sf "${LUMERA_RPC_ADDR}/status" 2>/dev/null || true)" - local height - height="$(jq -r 'try .result.sync_info.latest_block_height // "0"' <<<"${status}")" - printf "%s" "${height:-0}" + local status + status="$(curl -sf "${LUMERA_RPC_ADDR}/status" 2>/dev/null || true)" + local height + height="$(jq -r 'try .result.sync_info.latest_block_height // "0"' <<<"${status}")" + printf "%s" "${height:-0}" } wait_for_block_height_increase() { - local prev_height="$1" - local timeout="${SUPERNODE_INSTALL_WAIT_TIMEOUT:-300}" - local elapsed=0 - - while (( elapsed < timeout )); do - local height - height="$(latest_block_height)" - if (( height > prev_height )); then - return 0 - fi - sleep 1 - ((elapsed++)) - done - echo "[NM] Timeout waiting for new block after height ${prev_height}." 
>&2 - exit 1 + local prev_height="$1" + local timeout="${SUPERNODE_INSTALL_WAIT_TIMEOUT:-300}" + local elapsed=0 + + while ((elapsed < timeout)); do + local height + height="$(latest_block_height)" + if ((height > prev_height)); then + return 0 + fi + sleep 1 + ((elapsed++)) + done + echo "[NM] Timeout waiting for new block after height ${prev_height}." >&2 + exit 1 } wait_for_tx_confirmation() { - local txhash="$1" - if ! ${DAEMON} q wait-tx "${txhash}" --timeout 90s >/dev/null 2>&1; then - local deadline ok out code height - deadline=$((SECONDS+120)) - ok=0 - while (( SECONDS < deadline )); do - out="$(${DAEMON} q tx "${txhash}" --output json 2>/dev/null || true)" - if jq -e . >/dev/null 2>&1 <<<"${out}"; then - code="$(jq -r 'try .code // "0"' <<<"${out}")" - height="$(jq -r 'try .height // "0"' <<<"${out}")" - if [ "${height}" != "0" ] && [ "${code}" = "0" ]; then - ok=1 - break - fi - fi - sleep 5 - done - [ "${ok}" = "1" ] || { echo "[NM] Funding tx ${txhash} failed or not found."; exit 1; } - fi + local txhash="$1" + if ! ${DAEMON} q wait-tx "${txhash}" --timeout 90s >/dev/null 2>&1; then + local deadline ok out code height + deadline=$((SECONDS + 120)) + ok=0 + while ((SECONDS < deadline)); do + out="$(${DAEMON} q tx "${txhash}" --output json 2>/dev/null || true)" + if jq -e . >/dev/null 2>&1 <<<"${out}"; then + code="$(jq -r 'try .code // "0"' <<<"${out}")" + height="$(jq -r 'try .height // "0"' <<<"${out}")" + if [ "${height}" != "0" ] && [ "${code}" = "0" ]; then + ok=1 + break + fi + fi + sleep 5 + done + [ "${ok}" = "1" ] || { + echo "[NM] Funding tx ${txhash} failed or not found." + exit 1 + } + fi } # ----- prerequisites / config reads ----- have jq || echo "[NM] WARNING: jq is missing; attempting to proceed." -[ -f "${CFG_CHAIN}" ] || { echo "[NM] Missing ${CFG_CHAIN}"; exit 1; } -[ -f "${CFG_VALS}" ] || { echo "[NM] Missing ${CFG_VALS}"; exit 1; } +[ -f "${CFG_CHAIN}" ] || { + echo "[NM] Missing ${CFG_CHAIN}" + exit 1 +} +[ -f "${CFG_VALS}" ] || { + echo "[NM] Missing ${CFG_VALS}" + exit 1 +} # Pull global chain settings CHAIN_ID="$(jq -r '.chain.id' "${CFG_CHAIN}")" @@ -143,24 +155,27 @@ DEFAULT_NM_MAX_ACCOUNTS=1 NM_MAX_ACCOUNTS="${DEFAULT_NM_MAX_ACCOUNTS}" NM_CFG_MAX_ACCOUNTS="$(jq -r 'try .["network-maker"].max_accounts // ""' "${CFG_CHAIN}")" if [[ "${NM_CFG_MAX_ACCOUNTS}" =~ ^[0-9]+$ ]]; then - if [ "${NM_CFG_MAX_ACCOUNTS}" -ge 1 ]; then - NM_MAX_ACCOUNTS="${NM_CFG_MAX_ACCOUNTS}" - else - echo "[NM] max_accounts must be >=1; using default ${DEFAULT_NM_MAX_ACCOUNTS}" - fi + if [ "${NM_CFG_MAX_ACCOUNTS}" -ge 1 ]; then + NM_MAX_ACCOUNTS="${NM_CFG_MAX_ACCOUNTS}" + else + echo "[NM] max_accounts must be >=1; using default ${DEFAULT_NM_MAX_ACCOUNTS}" + fi fi DEFAULT_NM_ACCOUNT_BALANCE="10000000${DENOM}" NM_ACCOUNT_BALANCE="$(jq -r 'try .["network-maker"].account_balance // ""' "${CFG_CHAIN}")" if [ -z "${NM_ACCOUNT_BALANCE}" ] || [ "${NM_ACCOUNT_BALANCE}" = "null" ]; then - NM_ACCOUNT_BALANCE="${DEFAULT_NM_ACCOUNT_BALANCE}" + NM_ACCOUNT_BALANCE="${DEFAULT_NM_ACCOUNT_BALANCE}" fi if [[ "${NM_ACCOUNT_BALANCE}" =~ ^[0-9]+$ ]]; then - NM_ACCOUNT_BALANCE="${NM_ACCOUNT_BALANCE}${DENOM}" + NM_ACCOUNT_BALANCE="${NM_ACCOUNT_BALANCE}${DENOM}" fi # Pull this validator record + node ports + optional NM flag VAL_REC_JSON="$(jq -c --arg m "$MONIKER" '[.[] | select(.moniker==$m)][0]' "${CFG_VALS}")" -[ -n "${VAL_REC_JSON}" ] && [ "${VAL_REC_JSON}" != "null" ] || { echo "[NM] Validator moniker ${MONIKER} not found in validators.json"; exit 1; } +[ -n "${VAL_REC_JSON}" ] && [ 
"${VAL_REC_JSON}" != "null" ] || { + echo "[NM] Validator moniker ${MONIKER} not found in validators.json" + exit 1 +} NM_ENABLED="$(echo "${VAL_REC_JSON}" | jq -r 'try .["network-maker"].enabled // .["network-maker"] // "false"')" NM_GRPC_PORT="$(echo "${VAL_REC_JSON}" | jq -r 'try .["network-maker"].grpc_port // empty')" @@ -170,38 +185,38 @@ if [ -z "${NM_HTTP_PORT}" ] || [ "${NM_HTTP_PORT}" = "null" ]; then NM_HTTP_PORT # ----- short-circuits ----- if [ "${START_MODE}" = "wait" ]; then - # Just wait until both lumerad RPC and supernode are reachable, then exit 0. - : + # Just wait until both lumerad RPC and supernode are reachable, then exit 0. + : else - # In run mode, skip entirely if prereqs say "not applicable". - if [ ! -f "${NM_SRC_BIN}" ]; then - fail_soft "network-maker binary not found at ${NM_SRC_BIN}; skipping." - fi - if [ "${NM_ENABLED}" != "true" ]; then - fail_soft "validators.json has \"network-maker\": false (or missing) for ${MONIKER}; skipping." - fi + # In run mode, skip entirely if prereqs say "not applicable". + if [ ! -f "${NM_SRC_BIN}" ]; then + fail_soft "network-maker binary not found at ${NM_SRC_BIN}; skipping." + fi + if [ "${NM_ENABLED}" != "true" ]; then + fail_soft "validators.json has \"network-maker\": false (or missing) for ${MONIKER}; skipping." + fi fi # ----- start network-maker (idempotent) ----- start_network_maker() { - if pgrep -x ${NM} >/dev/null 2>&1; then - echo "[NM] network-maker already running; skipping start." - else - echo "[NM] Starting network-maker…" - # If your binary uses a subcommand like "start", adjust below accordingly. - run ${NM} >"${NM_LOG}" 2>&1 & - echo "[NM] network-maker started; logging to ${NM_LOG}" - fi + if pgrep -x ${NM} >/dev/null 2>&1; then + echo "[NM] network-maker already running; skipping start." + else + echo "[NM] Starting network-maker…" + # If your binary uses a subcommand like "start", adjust below accordingly. + run ${NM} >"${NM_LOG}" 2>&1 & + echo "[NM] network-maker started; logging to ${NM_LOG}" + fi } stop_network_maker_if_running() { - if pgrep -x ${NM} >/dev/null 2>&1; then - echo "[NM] Stopping network-maker…" - pkill -x ${NM} - echo "[NM] network-maker stopped." - else - echo "[NM] network-maker is not running." - fi + if pgrep -x ${NM} >/dev/null 2>&1; then + echo "[NM] Stopping network-maker…" + pkill -x ${NM} + echo "[NM] network-maker stopped." + else + echo "[NM] network-maker is not running." + fi } # ----- waiters ----- @@ -210,102 +225,108 @@ stop_network_maker_if_running() { # - Creates directories if missing -> [""] # - If exists: inserts "" once (no duplicates), preserving existing entries add_dir_to_scanner() { - local dir="$1" - local cfg="$2" - - # Ensure file exists - [ -f "$cfg" ] || { echo "[NM] add_dir_to_scanner: config '$cfg' not found"; return 1; } - - # Read current value (empty if not set) - local current - if ! current="$(crudini --get "$cfg" scanner directories 2>/dev/null)"; then - current="" - fi - - # If not present, set to ["dir"] - if [ -z "$current" ]; then - crudini --set "$cfg" scanner directories "[\"$dir\"]" - return - fi - - # If present but not a bracketed list, overwrite safely - case "$current" in - \[*\]) ;; # looks like a [ ... 
] - *) crudini --set "$cfg" scanner directories "[\"$dir\"]"; return ;; - esac - - # Extract inner list between the brackets - local inner="${current#[}" - inner="${inner%]}" - - # Normalize spaces around commas (optional; keeps things tidy) - inner="$(printf '%s' "$inner" | sed 's/[[:space:]]*,[[:space:]]*/, /g;s/^[[:space:]]*//;s/[[:space:]]*$//')" - - # If already contains the dir (quoted), do nothing - if printf '%s' "$inner" | grep -F -q "\"$dir\""; then - return - fi - - # Build new list: prepend by default - local new_inner - if [ -z "$inner" ]; then - new_inner="\"$dir\"" - else - new_inner="\"$dir\", $inner" - fi - - crudini --set "$cfg" scanner directories "[${new_inner}]" + local dir="$1" + local cfg="$2" + + # Ensure file exists + [ -f "$cfg" ] || { + echo "[NM] add_dir_to_scanner: config '$cfg' not found" + return 1 + } + + # Read current value (empty if not set) + local current + if ! current="$(crudini --get "$cfg" scanner directories 2>/dev/null)"; then + current="" + fi + + # If not present, set to ["dir"] + if [ -z "$current" ]; then + crudini --set "$cfg" scanner directories "[\"$dir\"]" + return + fi + + # If present but not a bracketed list, overwrite safely + case "$current" in + \[*\]) ;; # looks like a [ ... ] + *) + crudini --set "$cfg" scanner directories "[\"$dir\"]" + return + ;; + esac + + # Extract inner list between the brackets + local inner="${current#[}" + inner="${inner%]}" + + # Normalize spaces around commas (optional; keeps things tidy) + inner="$(printf '%s' "$inner" | sed 's/[[:space:]]*,[[:space:]]*/, /g;s/^[[:space:]]*//;s/[[:space:]]*$//')" + + # If already contains the dir (quoted), do nothing + if printf '%s' "$inner" | grep -F -q "\"$dir\""; then + return + fi + + # Build new list: prepend by default + local new_inner + if [ -z "$inner" ]; then + new_inner="\"$dir\"" + else + new_inner="\"$dir\", $inner" + fi + + crudini --set "$cfg" scanner directories "[${new_inner}]" } # Configure network-maker options configure_nm() { - local cfg="$NM_CONFIG" + local cfg="$NM_CONFIG" - # ----- write config from template and patch values ----- - if [ ! -f "${NM_TEMPLATE}" ]; then - echo "[NM] ERROR: Missing NM template: ${NM_TEMPLATE}" - exit 1 - fi + # ----- write config from template and patch values ----- + if [ ! 
-f "${NM_TEMPLATE}" ]; then + echo "[NM] ERROR: Missing NM template: ${NM_TEMPLATE}" + exit 1 + fi - cp -f "${NM_TEMPLATE}" "$cfg" + cp -f "${NM_TEMPLATE}" "$cfg" - mkdir -p "${NM_FILES_DIR}" "${NM_FILES_DIR_SHARED}" - add_dir_to_scanner "${NM_FILES_DIR}" "$cfg" - add_dir_to_scanner "${NM_FILES_DIR_SHARED}" "$cfg" - chmod a+w "${NM_FILES_DIR_SHARED}" + mkdir -p "${NM_FILES_DIR}" "${NM_FILES_DIR_SHARED}" + add_dir_to_scanner "${NM_FILES_DIR}" "$cfg" + add_dir_to_scanner "${NM_FILES_DIR_SHARED}" "$cfg" + chmod a+w "${NM_FILES_DIR_SHARED}" - echo "[NM] Scanner directories are configured to include: ${NM_FILES_DIR}, ${NM_FILES_DIR_SHARED}" + echo "[NM] Scanner directories are configured to include: ${NM_FILES_DIR}, ${NM_FILES_DIR_SHARED}" - echo "[NM] Configuring network-maker: $cfg" + echo "[NM] Configuring network-maker: $cfg" - # lumera section - crudini --set "$cfg" lumera grpc_endpoint "\"localhost:${LUMERA_GRPC_PORT}\"" - crudini --set "$cfg" lumera rpc_endpoint "\"$LUMERA_RPC_ADDR\"" - crudini --set "$cfg" lumera chain_id "\"$CHAIN_ID\"" - crudini --set "$cfg" lumera denom "\"$DENOM\"" + # lumera section + crudini --set "$cfg" lumera grpc_endpoint "\"localhost:${LUMERA_GRPC_PORT}\"" + crudini --set "$cfg" lumera rpc_endpoint "\"$LUMERA_RPC_ADDR\"" + crudini --set "$cfg" lumera chain_id "\"$CHAIN_ID\"" + crudini --set "$cfg" lumera denom "\"$DENOM\"" - # monitor (grpc/http) listeners - crudini --set "$cfg" network-maker grpc_listen "\"0.0.0.0:${NM_GRPC_PORT}\"" - crudini --set "$cfg" network-maker http_gateway_listen "\"0.0.0.0:${NM_HTTP_PORT}\"" + # monitor (grpc/http) listeners + crudini --set "$cfg" network-maker grpc_listen "\"0.0.0.0:${NM_GRPC_PORT}\"" + crudini --set "$cfg" network-maker http_gateway_listen "\"0.0.0.0:${NM_HTTP_PORT}\"" - # keyring section - crudini --set "$cfg" keyring backend "\"$KEYRING_BACKEND\"" - crudini --set "$cfg" keyring dir "\"${DAEMON_HOME}\"" + # keyring section + crudini --set "$cfg" keyring backend "\"$KEYRING_BACKEND\"" + crudini --set "$cfg" keyring dir "\"${DAEMON_HOME}\"" - update_nm_keyring_accounts "$cfg" + update_nm_keyring_accounts "$cfg" } update_nm_keyring_accounts() { - local cfg="$1" - local total_accounts="${#NM_ACCOUNT_KEY_NAMES[@]}" - if [ "${total_accounts}" -eq 0 ]; then - echo "[NM] WARNING: No network-maker accounts available to write into ${cfg}" - return - fi - - local tmp_cfg - tmp_cfg="$(mktemp)" - awk ' + local cfg="$1" + local total_accounts="${#NM_ACCOUNT_KEY_NAMES[@]}" + if [ "${total_accounts}" -eq 0 ]; then + echo "[NM] WARNING: No network-maker accounts available to write into ${cfg}" + return + fi + + local tmp_cfg + tmp_cfg="$(mktemp)" + awk ' /^[[:space:]]*\[\[keyring\.accounts\]\]/ { skip=1; next } { if (skip) { @@ -320,224 +341,223 @@ update_nm_keyring_accounts() { } print } - ' "${cfg}" > "${tmp_cfg}" - mv "${tmp_cfg}" "${cfg}" - - local idx - { - echo "" - for idx in "${!NM_ACCOUNT_KEY_NAMES[@]}"; do - printf '[[keyring.accounts]]\nkey_name = "%s"\naddress = "%s"\n\n' \ - "${NM_ACCOUNT_KEY_NAMES[$idx]}" "${NM_ACCOUNT_ADDRESSES[$idx]}" - done - } >> "${cfg}" - - echo "[NM] Configured ${total_accounts} network-maker account(s) in ${cfg}" + ' "${cfg}" >"${tmp_cfg}" + mv "${tmp_cfg}" "${cfg}" + + local idx + { + echo "" + for idx in "${!NM_ACCOUNT_KEY_NAMES[@]}"; do + printf '[[keyring.accounts]]\nkey_name = "%s"\naddress = "%s"\n\n' \ + "${NM_ACCOUNT_KEY_NAMES[$idx]}" "${NM_ACCOUNT_ADDRESSES[$idx]}" + done + } >>"${cfg}" + + echo "[NM] Configured ${total_accounts} network-maker account(s) in ${cfg}" } # Wait for 
lumerad RPC to become available wait_for_lumera() { - echo "[NM] Waiting for lumerad RPC at ${LUMERA_RPC_ADDR}..." - for i in $(seq 1 180); do - if curl -sf "${LUMERA_RPC_ADDR}/status" >/dev/null 2>&1; then - echo "[NM] lumerad RPC is up." - return 0 - fi - sleep 1 - done - echo "[NM] lumerad RPC did not become ready in time." - return 1 + echo "[NM] Waiting for lumerad RPC at ${LUMERA_RPC_ADDR}..." + for i in $(seq 1 180); do + if curl -sf "${LUMERA_RPC_ADDR}/status" >/dev/null 2>&1; then + echo "[NM] lumerad RPC is up." + return 0 + fi + sleep 1 + done + echo "[NM] lumerad RPC did not become ready in time." + return 1 } # Wait for supernode to become available wait_for_supernode() { - local ep="${SN_ENDPOINT}" - local host="${ep%:*}" - local port="${ep##*:}" - local timeout="${SUPERNODE_INSTALL_WAIT_TIMEOUT:-300}" - - echo "[NM] Waiting ${timeout} secs for supernode on ${host}:${port}…" - - # Consider local-only process check if endpoint is on this machine - local is_local=0 - case "$host" in - 127.0.0.1|localhost|"$IP_ADDR") is_local=1 ;; - esac - - for i in $(seq 1 "$timeout"); do - # If local endpoint, also accept presence of the process - if [ "$is_local" -eq 1 ] && pgrep -x supernode >/dev/null 2>&1; then - echo "[NM] supernode process detected." - return 0 - fi - - # TCP check - if (exec 3<>"/dev/tcp/${host}/${port}") 2>/dev/null; then - exec 3>&- - echo "[NM] supernode port ${port} at ${host} is reachable." - return 0 - fi - - sleep 1 - done - - echo "[NM] supernode did not become ready in time (${timeout}s) at ${host}:${port}." - return 1 + local ep="${SN_ENDPOINT}" + local host="${ep%:*}" + local port="${ep##*:}" + local timeout="${SUPERNODE_INSTALL_WAIT_TIMEOUT:-300}" + + echo "[NM] Waiting ${timeout} secs for supernode on ${host}:${port}…" + + # Consider local-only process check if endpoint is on this machine + local is_local=0 + case "$host" in + 127.0.0.1 | localhost | "$IP_ADDR") is_local=1 ;; + esac + + for i in $(seq 1 "$timeout"); do + # If local endpoint, also accept presence of the process + if [ "$is_local" -eq 1 ] && pgrep -x supernode >/dev/null 2>&1; then + echo "[NM] supernode process detected." + return 0 + fi + + # TCP check + if (exec 3<>"/dev/tcp/${host}/${port}") 2>/dev/null; then + exec 3>&- + echo "[NM] supernode port ${port} at ${host} is reachable." + return 0 + fi + + sleep 1 + done + + echo "[NM] supernode did not become ready in time (${timeout}s) at ${host}:${port}." + return 1 } # ----- optional network-maker install ----- install_network_maker_binary() { - if [ ! -f "${NM_DST_BIN}" ]; then - echo "[NM] Installing ${NM} binary..." - run cp -f "${NM_SRC_BIN}" "${NM_DST_BIN}" - run chmod +x "${NM_DST_BIN}" - else - if cmp -s "${NM_SRC_BIN}" "${NM_DST_BIN}"; then - echo "[NM] ${NM} binary already up-to-date at ${NM_DST_BIN}; skipping install." - else - echo "[NM] Updating ${NM} binary at ${NM_DST_BIN}..." - run cp -f "${NM_SRC_BIN}" "${NM_DST_BIN}" - run chmod +x "${NM_DST_BIN}" - fi - fi + if [ ! -f "${NM_DST_BIN}" ]; then + echo "[NM] Installing ${NM} binary..." + run cp -f "${NM_SRC_BIN}" "${NM_DST_BIN}" + run chmod +x "${NM_DST_BIN}" + else + if cmp -s "${NM_SRC_BIN}" "${NM_DST_BIN}"; then + echo "[NM] ${NM} binary already up-to-date at ${NM_DST_BIN}; skipping install." + else + echo "[NM] Updating ${NM} binary at ${NM_DST_BIN}..." 
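+            # cmp -s above found the staged binary differs from the installed
+            # copy; overwrite it and re-apply the executable bit.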
+ run cp -f "${NM_SRC_BIN}" "${NM_DST_BIN}" + run chmod +x "${NM_DST_BIN}" + fi + fi } ensure_nm_key() { - local key_name="$1" - local mnemonic_file="$2" - - if run ${DAEMON} keys show "${key_name}" --keyring-backend "${KEYRING_BACKEND}" >/dev/null 2>&1; then - echo "[NM] Key ${key_name} already exists." >&2 - else - if [ -s "${mnemonic_file}" ]; then - echo "[NM] Recovering ${key_name} from saved mnemonic." >&2 - (cat "${mnemonic_file}") | run ${DAEMON} keys add "${key_name}" --recover --keyring-backend "${KEYRING_BACKEND}" >/dev/null - else - echo "[NM] Creating new key ${key_name}…" >&2 - local mnemonic_json - mnemonic_json="$(run_capture ${DAEMON} keys add "${key_name}" --keyring-backend "${KEYRING_BACKEND}" --output json)" - echo "${mnemonic_json}" | jq -r .mnemonic > "${mnemonic_file}" - fi - sleep 5 - fi - - local addr - addr="$(run_capture ${DAEMON} keys show "${key_name}" -a --keyring-backend "${KEYRING_BACKEND}")" - printf "%s" "${addr}" + local key_name="$1" + local mnemonic_file="$2" + + if run ${DAEMON} keys show "${key_name}" --keyring-backend "${KEYRING_BACKEND}" >/dev/null 2>&1; then + echo "[NM] Key ${key_name} already exists." >&2 + else + if [ -s "${mnemonic_file}" ]; then + echo "[NM] Recovering ${key_name} from saved mnemonic." >&2 + (cat "${mnemonic_file}") | run ${DAEMON} keys add "${key_name}" --recover --keyring-backend "${KEYRING_BACKEND}" >/dev/null + else + echo "[NM] Creating new key ${key_name}…" >&2 + local mnemonic_json + mnemonic_json="$(run_capture ${DAEMON} keys add "${key_name}" --keyring-backend "${KEYRING_BACKEND}" --output json)" + echo "${mnemonic_json}" | jq -r .mnemonic >"${mnemonic_file}" + fi + sleep 5 + fi + + local addr + addr="$(run_capture ${DAEMON} keys show "${key_name}" -a --keyring-backend "${KEYRING_BACKEND}")" + printf "%s" "${addr}" } fund_nm_account_if_needed() { - local key_name="$1" - local account_addr="$2" - local genesis_addr="$3" - - local bal_json bal - bal_json="$(run_capture ${DAEMON} q bank balances "${account_addr}" --output json)" - bal="$(echo "${bal_json}" | jq -r --arg d "${DENOM}" '([.balances[]? | select(.denom==$d) | .amount] | first) // "0"')" - [[ -z "${bal}" ]] && bal="0" - echo "[NM] Current ${key_name} balance: ${bal}${DENOM}" >&2 - - if (( bal == 0 )); then - sleep 5 - echo "[NM] Funding ${key_name} with ${NM_ACCOUNT_BALANCE} from genesis address ${genesis_addr}…" >&2 - local send_json txhash - send_json="$(run_capture ${DAEMON} tx bank send "${genesis_addr}" "${account_addr}" "${NM_ACCOUNT_BALANCE}" \ - --chain-id "${CHAIN_ID}" --keyring-backend "${KEYRING_BACKEND}" \ - --gas auto --gas-adjustment 1.3 --fees "3000${DENOM}" \ - --yes --output json)" - txhash="$(echo "${send_json}" | jq -r .txhash)" - - if [ -n "${txhash}" ] && [ "${txhash}" != "null" ]; then - printf "%s" "${txhash}" - else - echo "[NM] Could not obtain txhash for funding transaction" >&2 - exit 1 - fi - else - echo "[NM] ${key_name} already funded; skipping." >&2 - printf "" - fi + local key_name="$1" + local account_addr="$2" + local genesis_addr="$3" + + local bal_json bal + bal_json="$(run_capture ${DAEMON} q bank balances "${account_addr}" --output json)" + bal="$(echo "${bal_json}" | jq -r --arg d "${DENOM}" '([.balances[]? 
| select(.denom==$d) | .amount] | first) // "0"')" + [[ -z "${bal}" ]] && bal="0" + echo "[NM] Current ${key_name} balance: ${bal}${DENOM}" >&2 + + if ((bal == 0)); then + sleep 5 + echo "[NM] Funding ${key_name} with ${NM_ACCOUNT_BALANCE} from genesis address ${genesis_addr}…" >&2 + local send_json txhash + send_json="$(run_capture ${DAEMON} tx bank send "${genesis_addr}" "${account_addr}" "${NM_ACCOUNT_BALANCE}" \ + --chain-id "${CHAIN_ID}" --keyring-backend "${KEYRING_BACKEND}" \ + --gas auto --gas-adjustment 1.3 --fees "3000${DENOM}" \ + --yes --output json)" + txhash="$(echo "${send_json}" | jq -r .txhash)" + + if [ -n "${txhash}" ] && [ "${txhash}" != "null" ]; then + printf "%s" "${txhash}" + else + echo "[NM] Could not obtain txhash for funding transaction" >&2 + exit 1 + fi + else + echo "[NM] ${key_name} already funded; skipping." >&2 + printf "" + fi } fund_nm_accounts() { - local genesis_addr="$1" - local prev_height="$2" - local total="${#NM_ACCOUNT_KEY_NAMES[@]}" - local idx key_name account_addr fund_tx - - if [ "${total}" -eq 0 ]; then - return - fi - - for idx in $(seq 0 $((total-1))); do - key_name="${NM_ACCOUNT_KEY_NAMES[$idx]}" - account_addr="${NM_ACCOUNT_ADDRESSES[$idx]}" - fund_tx="$(fund_nm_account_if_needed "${key_name}" "${account_addr}" "${genesis_addr}")" - if [ -n "${fund_tx}" ]; then - NM_FUND_TX_HASHES+=("${fund_tx}") - wait_for_block_height_increase "${prev_height}" - prev_height="$(latest_block_height)" - fi - done - - if [ "${#NM_FUND_TX_HASHES[@]}" -gt 0 ]; then - wait_for_all_funding_txs - fi + local genesis_addr="$1" + local prev_height="$2" + local total="${#NM_ACCOUNT_KEY_NAMES[@]}" + local idx key_name account_addr fund_tx + + if [ "${total}" -eq 0 ]; then + return + fi + + for idx in $(seq 0 $((total - 1))); do + key_name="${NM_ACCOUNT_KEY_NAMES[$idx]}" + account_addr="${NM_ACCOUNT_ADDRESSES[$idx]}" + fund_tx="$(fund_nm_account_if_needed "${key_name}" "${account_addr}" "${genesis_addr}")" + if [ -n "${fund_tx}" ]; then + NM_FUND_TX_HASHES+=("${fund_tx}") + wait_for_block_height_increase "${prev_height}" + prev_height="$(latest_block_height)" + fi + done + + if [ "${#NM_FUND_TX_HASHES[@]}" -gt 0 ]; then + wait_for_all_funding_txs + fi } wait_for_all_funding_txs() { - local txhash - for txhash in "${NM_FUND_TX_HASHES[@]}"; do - echo "[NM] Waiting for funding tx ${txhash} to confirm…" >&2 - wait_for_tx_confirmation "${txhash}" - done + local txhash + for txhash in "${NM_FUND_TX_HASHES[@]}"; do + echo "[NM] Waiting for funding tx ${txhash} to confirm…" >&2 + wait_for_tx_confirmation "${txhash}" + done } configure_nm_accounts() { - if [ ! -f "${GENESIS_ADDR_FILE}" ]; then - echo "[NM] ERROR: Missing ${GENESIS_ADDR_FILE} (created by validator-setup)." 
- exit 1 - fi - - local genesis_addr - genesis_addr="$(cat "${GENESIS_ADDR_FILE}")" - - NM_ACCOUNT_KEY_NAMES=() - NM_ACCOUNT_ADDRESSES=() - NM_FUND_TX_HASHES=() - : > "${NM_ADDR_FILE}" - - local idx key_name mnemonic_file account_addr - for idx in $(seq 1 "${NM_MAX_ACCOUNTS}"); do - if [ "${idx}" -eq 1 ]; then - key_name="${NM_KEY_PREFIX}" - mnemonic_file="${NM_MNEMONIC_FILE_BASE}" - else - key_name="${NM_KEY_PREFIX}-${idx}" - mnemonic_file="${NM_MNEMONIC_FILE_BASE}-${idx}" - fi - - account_addr="$(ensure_nm_key "${key_name}" "${mnemonic_file}")" - echo "[NM] ${key_name} address: ${account_addr}" - - NM_ACCOUNT_KEY_NAMES+=("${key_name}") - NM_ACCOUNT_ADDRESSES+=("${account_addr}") - printf "%s,%s\n" "${key_name}" "${account_addr}" >> "${NM_ADDR_FILE}" - done - - local starting_height - starting_height="$(latest_block_height)" - fund_nm_accounts "${genesis_addr}" "${starting_height}" - - echo "[NM] Prepared ${#NM_ACCOUNT_KEY_NAMES[@]} network-maker account(s)." + if [ ! -f "${GENESIS_ADDR_FILE}" ]; then + echo "[NM] ERROR: Missing ${GENESIS_ADDR_FILE} (created by validator-setup)." + exit 1 + fi + + local genesis_addr + genesis_addr="$(cat "${GENESIS_ADDR_FILE}")" + + NM_ACCOUNT_KEY_NAMES=() + NM_ACCOUNT_ADDRESSES=() + NM_FUND_TX_HASHES=() + : >"${NM_ADDR_FILE}" + + local idx key_name mnemonic_file account_addr + for idx in $(seq 1 "${NM_MAX_ACCOUNTS}"); do + if [ "${idx}" -eq 1 ]; then + key_name="${NM_KEY_PREFIX}" + mnemonic_file="${NM_MNEMONIC_FILE_BASE}" + else + key_name="${NM_KEY_PREFIX}-${idx}" + mnemonic_file="${NM_MNEMONIC_FILE_BASE}-${idx}" + fi + + account_addr="$(ensure_nm_key "${key_name}" "${mnemonic_file}")" + echo "[NM] ${key_name} address: ${account_addr}" + + NM_ACCOUNT_KEY_NAMES+=("${key_name}") + NM_ACCOUNT_ADDRESSES+=("${account_addr}") + printf "%s,%s\n" "${key_name}" "${account_addr}" >>"${NM_ADDR_FILE}" + done + + local starting_height + starting_height="$(latest_block_height)" + fund_nm_accounts "${genesis_addr}" "${starting_height}" + + echo "[NM] Prepared ${#NM_ACCOUNT_KEY_NAMES[@]} network-maker account(s)." } - # If in wait mode, just wait and exit if [ "${START_MODE}" = "wait" ]; then - wait_for_lumera || exit 1 - wait_for_supernode || exit 1 - exit 0 + wait_for_lumera || exit 1 + wait_for_supernode || exit 1 + exit 0 fi stop_network_maker_if_running diff --git a/devnet/scripts/query-proposals.sh b/devnet/scripts/query-proposals.sh index ff83d53a..92b29daf 100755 --- a/devnet/scripts/query-proposals.sh +++ b/devnet/scripts/query-proposals.sh @@ -10,7 +10,7 @@ COMPOSE_FILE="$SCRIPT_DIR/../docker-compose.yml" JSON_OUTPUT="${JSON_OUTPUT:-false}" print_usage() { - cat <&2 - print_usage >&2 - exit 1 - ;; - esac + case "$1" in + --json) + JSON_OUTPUT=true + shift + ;; + -h | --help) + print_usage + exit 0 + ;; + *) + echo "Unknown option: $1" >&2 + print_usage >&2 + exit 1 + ;; + esac done PROPOSALS_JSON="$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov proposals --output json 2>/dev/null)" + lumerad query gov proposals --output json 2>/dev/null)" COUNT="$(echo "$PROPOSALS_JSON" | jq '.proposals | length // 0')" if [[ "$COUNT" -eq 0 ]]; then - echo "No governance proposals found." - exit 0 + echo "No governance proposals found." 
+ exit 0 fi LATEST="$(echo "$PROPOSALS_JSON" | jq '.proposals | sort_by(.id | tonumber) | last')" if [[ "$JSON_OUTPUT" == "true" ]]; then - echo "$LATEST" | jq - exit 0 + echo "$LATEST" | jq + exit 0 fi ID="$(echo "$LATEST" | jq -r '.id')" @@ -63,12 +63,12 @@ UPGRADE_HEIGHT="$(echo "$LATEST" | jq -r '.messages[] | select(.["@type"] == "/c printf "Latest proposal: ID=%s, Status=%s" "$ID" "$STATUS" if [[ -n "$TITLE" && "$TITLE" != "" ]]; then - printf ", Title=%s" "$TITLE" + printf ", Title=%s" "$TITLE" fi if [[ -n "$PROPOSER" && "$PROPOSER" != "" ]]; then - printf ", Proposer=%s" "$PROPOSER" + printf ", Proposer=%s" "$PROPOSER" fi if [[ -n "$UPGRADE_HEIGHT" && "$UPGRADE_HEIGHT" != "" ]]; then - printf ", Upgrade Height=%s" "$UPGRADE_HEIGHT" + printf ", Upgrade Height=%s" "$UPGRADE_HEIGHT" fi printf '\n' diff --git a/devnet/scripts/restart.sh b/devnet/scripts/restart.sh index 8a5f2c67..09f39e55 100755 --- a/devnet/scripts/restart.sh +++ b/devnet/scripts/restart.sh @@ -26,113 +26,113 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" STOP_SCRIPT="${STOP_SCRIPT:-${SCRIPT_DIR}/stop.sh}" log() { - echo "[RESTART] $*" + echo "[RESTART] $*" } run_stop() { - if [ ! -f "${STOP_SCRIPT}" ]; then - log "stop.sh not found at ${STOP_SCRIPT}" - exit 1 - fi - bash "${STOP_SCRIPT}" "$@" + if [ ! -f "${STOP_SCRIPT}" ]; then + log "stop.sh not found at ${STOP_SCRIPT}" + exit 1 + fi + bash "${STOP_SCRIPT}" "$@" } ensure_logs_dir() { - mkdir -p "${LOGS_DIR}" + mkdir -p "${LOGS_DIR}" } start_lumera() { - local pattern="${DAEMON} start --home ${DAEMON_HOME}" + local pattern="${DAEMON} start --home ${DAEMON_HOME}" - if pgrep -f "${pattern}" >/dev/null 2>&1 || pgrep -x "${DAEMON}" >/dev/null 2>&1; then - log "${DAEMON} already running." - return 0 - fi + if pgrep -f "${pattern}" >/dev/null 2>&1 || pgrep -x "${DAEMON}" >/dev/null 2>&1; then + log "${DAEMON} already running." + return 0 + fi - if ! command -v "${DAEMON}" >/dev/null 2>&1; then - log "Binary ${DAEMON} not found in PATH." - return 1 - fi + if ! command -v "${DAEMON}" >/dev/null 2>&1; then + log "Binary ${DAEMON} not found in PATH." + return 1 + fi - ensure_logs_dir - mkdir -p "$(dirname "${VALIDATOR_LOG}")" "${DAEMON_HOME}/config" + ensure_logs_dir + mkdir -p "$(dirname "${VALIDATOR_LOG}")" "${DAEMON_HOME}/config" - log "Starting ${DAEMON}..." - "${DAEMON}" start --home "${DAEMON_HOME}" >"${VALIDATOR_LOG}" 2>&1 & - log "${DAEMON} start requested; logging to ${VALIDATOR_LOG}" + log "Starting ${DAEMON}..." + "${DAEMON}" start --home "${DAEMON_HOME}" >"${VALIDATOR_LOG}" 2>&1 & + log "${DAEMON} start requested; logging to ${VALIDATOR_LOG}" } start_supernode() { - local names=("supernode-linux-amd64" "supernode") - local running=0 - - for name in "${names[@]}"; do - if pgrep -x "${name}" >/dev/null 2>&1; then - running=1 - break - fi - done - - if (( running == 1 )); then - log "Supernode already running." - return 0 - fi - - local bin="" - for name in "${names[@]}"; do - if command -v "${name}" >/dev/null 2>&1; then - bin="${name}" - break - fi - done - - if [ -z "${bin}" ]; then - log "Supernode binary not found; skipping start." - return 0 - fi - - ensure_logs_dir - mkdir -p "$(dirname "${SN_LOG}")" "${SN_BASEDIR}" - - log "Starting supernode (${bin})..." 
- P2P_USE_EXTERNAL_IP=${P2P_USE_EXTERNAL_IP:-false} "${bin}" start -d "${SN_BASEDIR}" >"${SN_LOG}" 2>&1 & - log "Supernode start requested; logging to ${SN_LOG}" + local names=("supernode-linux-amd64" "supernode") + local running=0 + + for name in "${names[@]}"; do + if pgrep -x "${name}" >/dev/null 2>&1; then + running=1 + break + fi + done + + if ((running == 1)); then + log "Supernode already running." + return 0 + fi + + local bin="" + for name in "${names[@]}"; do + if command -v "${name}" >/dev/null 2>&1; then + bin="${name}" + break + fi + done + + if [ -z "${bin}" ]; then + log "Supernode binary not found; skipping start." + return 0 + fi + + ensure_logs_dir + mkdir -p "$(dirname "${SN_LOG}")" "${SN_BASEDIR}" + + log "Starting supernode (${bin})..." + P2P_USE_EXTERNAL_IP=${P2P_USE_EXTERNAL_IP:-false} "${bin}" start -d "${SN_BASEDIR}" >"${SN_LOG}" 2>&1 & + log "Supernode start requested; logging to ${SN_LOG}" } start_network_maker() { - local name="network-maker" + local name="network-maker" - if pgrep -x "${name}" >/dev/null 2>&1; then - log "network-maker already running." - return 0 - fi + if pgrep -x "${name}" >/dev/null 2>&1; then + log "network-maker already running." + return 0 + fi - if ! command -v "${name}" >/dev/null 2>&1; then - log "network-maker binary not found; skipping start." - return 0 - fi + if ! command -v "${name}" >/dev/null 2>&1; then + log "network-maker binary not found; skipping start." + return 0 + fi - ensure_logs_dir - mkdir -p "$(dirname "${NM_LOG}")" + ensure_logs_dir + mkdir -p "$(dirname "${NM_LOG}")" - log "Starting network-maker..." - "${name}" >"${NM_LOG}" 2>&1 & - log "network-maker start requested; logging to ${NM_LOG}" + log "Starting network-maker..." + "${name}" >"${NM_LOG}" 2>&1 & + log "network-maker start requested; logging to ${NM_LOG}" } start_nginx() { - if pgrep -x nginx >/dev/null 2>&1; then - log "nginx already running." - return 0 - fi - - if [ ! -d "${NM_UI_DIR}" ] || [ ! -f "${NM_UI_DIR}/index.html" ]; then - log "network-maker UI not found at ${NM_UI_DIR}; skipping nginx start." - return 0 - fi - - mkdir -p /etc/nginx/conf.d - cat >/etc/nginx/conf.d/network-maker-ui.conf </dev/null 2>&1; then + log "nginx already running." + return 0 + fi + + if [ ! -d "${NM_UI_DIR}" ] || [ ! -f "${NM_UI_DIR}/index.html" ]; then + log "network-maker UI not found at ${NM_UI_DIR}; skipping nginx start." + return 0 + fi + + mkdir -p /etc/nginx/conf.d + cat >/etc/nginx/conf.d/network-maker-ui.conf <&2 - exit 1 + echo "Usage: $0 [nm|sn|lumera|nginx|all]" >&2 + exit 1 } target="${1:-all}" case "${target}" in - nm|network-maker) - restart_nm - ;; - sn|supernode) - restart_sn - ;; - nginx|ui) - restart_nginx - ;; - lumera|lumerad|chain) - restart_lumera - ;; - all|"") - restart_all - ;; - *) - usage - ;; +nm | network-maker) + restart_nm + ;; +sn | supernode) + restart_sn + ;; +nginx | ui) + restart_nginx + ;; +lumera | lumerad | chain) + restart_lumera + ;; +all | "") + restart_all + ;; +*) + usage + ;; esac diff --git a/devnet/scripts/start.sh b/devnet/scripts/start.sh index 7a1188d3..a4c762a1 100755 --- a/devnet/scripts/start.sh +++ b/devnet/scripts/start.sh @@ -12,7 +12,7 @@ # # run Waits for setup_complete, starts lumerad, and tails logs. # -# wait (optional) Wait for setup_complete and exit. +# wait (optional) Wait for setup_complete and exit. # # DOCKER COMPOSE: # - Image ENTRYPOINT should be: ["/bin/bash", "/root/scripts/start.sh"] (as in Dockerfile). 
@@ -68,16 +68,18 @@ NODE_STATUS_DIR="${STATUS_DIR}/${MONIKER}" NODE_SETUP_COMPLETE="${NODE_STATUS_DIR}/setup_complete" mkdir -p "${NODE_STATUS_DIR}" -if [ ! command -v jq >/dev/null 2>&1 ]; then - echo "[BOOT] jq is missing" +if ! command -v jq >/dev/null 2>&1; then + echo "[BOOT] jq is missing" fi if [ ! -f "${CFG_CHAIN}" ]; then - echo "[BOOT] Missing ${CFG_CHAIN}"; exit 1 + echo "[BOOT] Missing ${CFG_CHAIN}" + exit 1 fi if [ ! -f "${CFG_VALS}" ]; then - echo "[BOOT] Missing ${CFG_VALS}"; exit 1 + echo "[BOOT] Missing ${CFG_VALS}" + exit 1 fi PRIMARY_MONIKER="$(jq -r ' @@ -86,47 +88,47 @@ PRIMARY_MONIKER="$(jq -r ' ' "${CFG_VALS}")" if [ -z "${PRIMARY_MONIKER}" ] || [ "${PRIMARY_MONIKER}" = "null" ]; then - echo "[BOOT] Unable to determine primary validator from ${CFG_VALS}" - exit 1 + echo "[BOOT] Unable to determine primary validator from ${CFG_VALS}" + exit 1 fi PRIMARY_STARTED_FLAG="${STATUS_DIR}/${PRIMARY_MONIKER}/lumerad_started" wait_for_flag() { - local f="$1" - until [ -s "${f}" ]; do sleep 1; done + local f="$1" + until [ -s "${f}" ]; do sleep 1; done } inject_nm_ui_env() { - local api_base="${VITE_API_BASE:-}" - [ -z "${api_base}" ] && return 0 - [ -d "${NM_UI_DIR}" ] || return 0 - - local files - files="$(grep -rl "http://127.0.0.1:8080" "${NM_UI_DIR}" || true)" - if [ -z "${files}" ]; then - echo "[BOOT] network-maker UI: no API base placeholder found to inject." - return 0 - fi - - local escaped_base="${api_base//\//\\/}" - escaped_base="${escaped_base//&/\\&}" - echo "[BOOT] network-maker UI: injecting API base ${api_base}" - # Replace default API base baked into the static bundle with runtime value - while IFS= read -r f; do - sed -i "s|http://127.0.0.1:8080|${escaped_base}|g" "$f" - done <<<"${files}" + local api_base="${VITE_API_BASE:-}" + [ -z "${api_base}" ] && return 0 + [ -d "${NM_UI_DIR}" ] || return 0 + + local files + files="$(grep -rl "http://127.0.0.1:8080" "${NM_UI_DIR}" || true)" + if [ -z "${files}" ]; then + echo "[BOOT] network-maker UI: no API base placeholder found to inject." + return 0 + fi + + local escaped_base="${api_base//\//\\/}" + escaped_base="${escaped_base//&/\\&}" + echo "[BOOT] network-maker UI: injecting API base ${api_base}" + # Replace default API base baked into the static bundle with runtime value + while IFS= read -r f; do + sed -i "s|http://127.0.0.1:8080|${escaped_base}|g" "$f" + done <<<"${files}" } start_nm_ui_if_present() { - if [ ! -d "${NM_UI_DIR}" ] || [ ! -f "${NM_UI_DIR}/index.html" ]; then - echo "[BOOT] network-maker UI not found at ${NM_UI_DIR}; skipping nginx" - return - fi + if [ ! -d "${NM_UI_DIR}" ] || [ ! -f "${NM_UI_DIR}/index.html" ]; then + echo "[BOOT] network-maker UI not found at ${NM_UI_DIR}; skipping nginx" + return + fi - inject_nm_ui_env + inject_nm_ui_env - cat >/etc/nginx/conf.d/network-maker-ui.conf </etc/nginx/conf.d/network-maker-ui.conf </dev/null 2>&1; then - echo "[BOOT] nginx already running; skipping start for network-maker UI." - return - fi + if pgrep -x nginx >/dev/null 2>&1; then + echo "[BOOT] nginx already running; skipping start for network-maker UI." 
+ return
+ fi

- echo "[BOOT] Starting nginx to serve network-maker UI on port ${NM_UI_PORT}"
- nginx
+ echo "[BOOT] Starting nginx to serve network-maker UI on port ${NM_UI_PORT}"
+ nginx
}

run() {
- echo "+ $*"
- "$@"
+ echo "+ $*"
+ "$@"
}

# Get current block height (integer), 0 if unknown
current_height() {
- curl -sf "${LUMERA_RPC_ADDR}/status" \
- | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null \
- | awk '{print ($1 ~ /^[0-9]+$/) ? $1 : 0}'
+ curl -sf "${LUMERA_RPC_ADDR}/status" |
+ jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null |
+ awk '{print ($1 ~ /^[0-9]+$/) ? $1 : 0}'
}

# Wait until height >= target (with timeout)
wait_for_height_at_least() {
- local target="$1"
- local retries="${2:-180}" # ~180s
- local delay="${3:-1}"
-
- echo "[BOOT] Waiting for block height >= ${target} ..."
- for ((i=0; i<retries; i++)); do
- local h
- h="$(current_height)"
- if (( h >= target )); then
- echo "[BOOT] Height is ${h} (>= ${target}) — OK."
- return 0
- fi
- sleep "$delay"
- done
- echo "[BOOT] Timeout waiting for height >= ${target}."
- return 1
+ local target="$1"
+ local retries="${2:-180}" # ~180s
+ local delay="${3:-1}"
+
+ echo "[BOOT] Waiting for block height >= ${target} ..."
+ for ((i = 0; i < retries; i++)); do
+ local h
+ h="$(current_height)"
+ if ((h >= target)); then
+ echo "[BOOT] Height is ${h} (>= ${target}) — OK."
+ return 0
+ fi
+ sleep "$delay"
+ done
+ echo "[BOOT] Timeout waiting for height >= ${target}."
+ return 1
}

# Wait for N new blocks from the current height (default 5)
wait_for_n_blocks() {
- local n="${1:-5}"
- local start
- start="$(current_height)"
- local target=$(( start + n ))
- # If the chain hasn't started yet (start==0), still use +n (so target=n)
- (( target < n )) && target="$n"
- wait_for_height_at_least "$target"
+ local n="${1:-5}"
+ local start
+ start="$(current_height)"
+ local target=$((start + n))
+ # If the chain hasn't started yet (start==0), still use +n (so target=n)
+ ((target < n)) && target="$n"
+ wait_for_height_at_least "$target"
}

launch_supernode_setup() {
- # Start optional supernode setup only in auto/run modes after init is done.
- if [ -x "${SCRIPTS_DIR}/supernode-setup.sh" ] && [ -f "${RELEASE_DIR}/${SN}" ]; then
- echo "[BOOT] ${MONIKER}: Launching Supernode setup in background..."
- export LUMERA_RPC_PORT="${LUMERA_RPC_PORT:-26657}"
- export LUMERA_GRPC_PORT="${LUMERA_GRPC_PORT:-9090}"
- nohup bash "${SCRIPTS_DIR}/supernode-setup.sh" >"${SUPERNODE_SETUP_OUT}" 2>&1 &
- fi
+ # Start optional supernode setup only in auto/run modes after init is done.
+ if [ -x "${SCRIPTS_DIR}/supernode-setup.sh" ] && [ -f "${RELEASE_DIR}/${SN}" ]; then
+ echo "[BOOT] ${MONIKER}: Launching Supernode setup in background..."
+ export LUMERA_RPC_PORT="${LUMERA_RPC_PORT:-26657}"
+ export LUMERA_GRPC_PORT="${LUMERA_GRPC_PORT:-9090}"
+ nohup bash "${SCRIPTS_DIR}/supernode-setup.sh" >"${SUPERNODE_SETUP_OUT}" 2>&1 &
+ fi
}

wait_for_validator_setup() {
- echo "[BOOT] ${MONIKER}: Waiting for validator setup to complete..."
- wait_for_flag "${SETUP_COMPLETE}"
- wait_for_flag "${NODE_SETUP_COMPLETE}"
- echo "[BOOT] ${MONIKER}: validator setup complete."
+ echo "[BOOT] ${MONIKER}: Waiting for validator setup to complete..."
+ wait_for_flag "${SETUP_COMPLETE}"
+ wait_for_flag "${NODE_SETUP_COMPLETE}"
+ echo "[BOOT] ${MONIKER}: validator setup complete."
} install_wasm_lib() { - if [ -f "${WASMVM_SRC_LIB}" ]; then - if [ -f "${WASMVM_DST_LIB}" ] && cmp -s "${WASMVM_SRC_LIB}" "${WASMVM_DST_LIB}"; then - echo "[BOOT] libwasmvm.x86_64.so already up to date at ${WASMVM_DST_LIB}" - return - fi - echo "[BOOT] Installing libwasmvm.x86_64.so to ${WASMVM_DST_LIB}" - run cp -f "${WASMVM_SRC_LIB}" "${WASMVM_DST_LIB}" - run chmod 755 "${WASMVM_DST_LIB}" - else - echo "[BOOT] ${WASMVM_SRC_LIB} not found, assuming libwasmvm.x86_64.so is already installed" - fi + if [ -f "${WASMVM_SRC_LIB}" ]; then + if [ -f "${WASMVM_DST_LIB}" ] && cmp -s "${WASMVM_SRC_LIB}" "${WASMVM_DST_LIB}"; then + echo "[BOOT] libwasmvm.x86_64.so already up to date at ${WASMVM_DST_LIB}" + return + fi + echo "[BOOT] Installing libwasmvm.x86_64.so to ${WASMVM_DST_LIB}" + run cp -f "${WASMVM_SRC_LIB}" "${WASMVM_DST_LIB}" + run chmod 755 "${WASMVM_DST_LIB}" + else + echo "[BOOT] ${WASMVM_SRC_LIB} not found, assuming libwasmvm.x86_64.so is already installed" + fi } install_lumerad_binary() { - run cp -f "${LUMERA_SRC_BIN}" "${LUMERA_DST_BIN}" - run chmod +x "${LUMERA_DST_BIN}" + run cp -f "${LUMERA_SRC_BIN}" "${LUMERA_DST_BIN}" + run chmod +x "${LUMERA_DST_BIN}" } install_or_update_lumerad() { - if [ ! -f "${LUMERA_DST_BIN}" ]; then - if [ -f "${LUMERA_SRC_BIN}" ]; then - echo "[BOOT] ${LUMERAD} binary not found at ${LUMERA_DST_BIN}, installing..." - install_lumerad_binary - else - echo "[BOOT] ${LUMERA_SRC_BIN} not found, assuming ${LUMERAD} is already installed" - fi - else - run lumerad version || true - if [ -f "${LUMERA_SRC_BIN}" ]; then - if cmp -s "${LUMERA_SRC_BIN}" "${LUMERA_DST_BIN}"; then - echo "[BOOT] ${LUMERAD} binary already up to date at ${LUMERA_DST_BIN}" - else - echo "[BOOT] Updating ${LUMERAD} binary at ${LUMERA_DST_BIN}" - install_lumerad_binary - fi - else - echo "[BOOT] ${LUMERA_SRC_BIN} not found, assuming ${LUMERAD} is already installed" - fi - fi - install_wasm_lib - run lumerad version || true + if [ ! -f "${LUMERA_DST_BIN}" ]; then + if [ -f "${LUMERA_SRC_BIN}" ]; then + echo "[BOOT] ${LUMERAD} binary not found at ${LUMERA_DST_BIN}, installing..." + install_lumerad_binary + else + echo "[BOOT] ${LUMERA_SRC_BIN} not found, assuming ${LUMERAD} is already installed" + fi + else + run lumerad version || true + if [ -f "${LUMERA_SRC_BIN}" ]; then + if cmp -s "${LUMERA_SRC_BIN}" "${LUMERA_DST_BIN}"; then + echo "[BOOT] ${LUMERAD} binary already up to date at ${LUMERA_DST_BIN}" + else + echo "[BOOT] Updating ${LUMERAD} binary at ${LUMERA_DST_BIN}" + install_lumerad_binary + fi + else + echo "[BOOT] ${LUMERA_SRC_BIN} not found, assuming ${LUMERAD} is already installed" + fi + fi + install_wasm_lib + run lumerad version || true } launch_validator_setup() { - install_or_update_lumerad - if [ ! -s "${NODE_SETUP_COMPLETE}" ] && [ -x "${SCRIPTS_DIR}/validator-setup.sh" ]; then - echo "[BOOT] ${MONIKER}: launching validator-setup in background..." - nohup bash "${SCRIPTS_DIR}/validator-setup.sh" >"${VALIDATOR_SETUP_OUT}" 2>&1 & - fi + install_or_update_lumerad + if [ ! -s "${NODE_SETUP_COMPLETE}" ] && [ -x "${SCRIPTS_DIR}/validator-setup.sh" ]; then + echo "[BOOT] ${MONIKER}: launching validator-setup in background..." + nohup bash "${SCRIPTS_DIR}/validator-setup.sh" >"${VALIDATOR_SETUP_OUT}" 2>&1 & + fi } launch_network_maker_setup() { - if [ -x "${SCRIPTS_DIR}/network-maker-setup.sh" ] && [ -f "${RELEASE_DIR}/${NM}" ]; then - echo "[BOOT] ${MONIKER}: Launching Network Maker setup in background..." 
- nohup bash "${SCRIPTS_DIR}/network-maker-setup.sh" >"${NETWORK_MAKER_SETUP_OUT}" 2>&1 & - fi + if [ -x "${SCRIPTS_DIR}/network-maker-setup.sh" ] && [ -f "${RELEASE_DIR}/${NM}" ]; then + echo "[BOOT] ${MONIKER}: Launching Network Maker setup in background..." + nohup bash "${SCRIPTS_DIR}/network-maker-setup.sh" >"${NETWORK_MAKER_SETUP_OUT}" 2>&1 & + fi } start_lumera() { - if [ "${MONIKER}" != "${PRIMARY_MONIKER}" ]; then - echo "[BOOT] ${MONIKER}: Waiting for primary (${PRIMARY_MONIKER}) to start lumerad..." - wait_for_flag "${PRIMARY_STARTED_FLAG}" - fi - - echo "[BOOT] ${MONIKER}: Starting lumerad..." - run "${DAEMON}" start --home "${DAEMON_HOME}" >"${VALIDATOR_LOG}" 2>&1 & - - if [ "${MONIKER}" = "${PRIMARY_MONIKER}" ]; then - mkdir -p "$(dirname "${PRIMARY_STARTED_FLAG}")" - printf 'started\n' > "${PRIMARY_STARTED_FLAG}" - echo "[BOOT] ${MONIKER}: Marked primary lumerad as started." - fi + if [ "${MONIKER}" != "${PRIMARY_MONIKER}" ]; then + echo "[BOOT] ${MONIKER}: Waiting for primary (${PRIMARY_MONIKER}) to start lumerad..." + wait_for_flag "${PRIMARY_STARTED_FLAG}" + fi + + echo "[BOOT] ${MONIKER}: Starting lumerad..." + run "${DAEMON}" start --home "${DAEMON_HOME}" >"${VALIDATOR_LOG}" 2>&1 & + + if [ "${MONIKER}" = "${PRIMARY_MONIKER}" ]; then + mkdir -p "$(dirname "${PRIMARY_STARTED_FLAG}")" + printf 'started\n' >"${PRIMARY_STARTED_FLAG}" + echo "[BOOT] ${MONIKER}: Marked primary lumerad as started." + fi } tail_logs() { - touch "${VALIDATOR_LOG}" "${SUPERNODE_LOG}" "${SUPERNODE_SETUP_OUT}" "${VALIDATOR_SETUP_OUT}" "${NETWORK_MAKER_SETUP_OUT}" - exec tail -F "${VALIDATOR_LOG}" "${SUPERNODE_LOG}" "${SUPERNODE_SETUP_OUT}" "${VALIDATOR_SETUP_OUT}" "${NETWORK_MAKER_SETUP_OUT}" + touch "${VALIDATOR_LOG}" "${SUPERNODE_LOG}" "${SUPERNODE_SETUP_OUT}" "${VALIDATOR_SETUP_OUT}" "${NETWORK_MAKER_SETUP_OUT}" + exec tail -F "${VALIDATOR_LOG}" "${SUPERNODE_LOG}" "${SUPERNODE_SETUP_OUT}" "${VALIDATOR_SETUP_OUT}" "${NETWORK_MAKER_SETUP_OUT}" } run_auto_flow() { - launch_network_maker_setup - launch_supernode_setup - launch_validator_setup - wait_for_validator_setup - start_lumera - start_nm_ui_if_present - tail_logs + launch_network_maker_setup + launch_supernode_setup + launch_validator_setup + wait_for_validator_setup + start_lumera + start_nm_ui_if_present + tail_logs } case "${START_MODE}" in - auto|"") - run_auto_flow - ;; - - bootstrap) - launch_network_maker_setup - launch_supernode_setup - launch_validator_setup - wait_for_validator_setup - exit 0 - ;; - - run) - wait_for_validator_setup - wait_for_n_blocks 3 || { echo "[SN] Lumera chain not producing blocks in time; exiting."; exit 1; } - start_lumera - start_nm_ui_if_present - tail_logs - ;; - - wait) - wait_for_validator_setup - exit 0 - ;; - - *) - echo "[BOOT] Unknown START_MODE='${START_MODE}', defaulting to auto." - run_auto_flow - ;; +auto | "") + run_auto_flow + ;; + +bootstrap) + launch_network_maker_setup + launch_supernode_setup + launch_validator_setup + wait_for_validator_setup + exit 0 + ;; + +run) + wait_for_validator_setup + wait_for_n_blocks 3 || { + echo "[SN] Lumera chain not producing blocks in time; exiting." + exit 1 + } + start_lumera + start_nm_ui_if_present + tail_logs + ;; + +wait) + wait_for_validator_setup + exit 0 + ;; + +*) + echo "[BOOT] Unknown START_MODE='${START_MODE}', defaulting to auto." 
+ run_auto_flow + ;; esac diff --git a/devnet/scripts/stop.sh b/devnet/scripts/stop.sh index 71fa0917..d093a6c2 100755 --- a/devnet/scripts/stop.sh +++ b/devnet/scripts/stop.sh @@ -10,107 +10,107 @@ DAEMON_HOME="${DAEMON_HOME:-/root/.lumera}" SN_BASEDIR="${SN_BASEDIR:-/root/.supernode}" log() { - echo "[STOP] $*" + echo "[STOP] $*" } stop_nm() { - local name="network-maker" + local name="network-maker" - if pgrep -x "${name}" >/dev/null 2>&1; then - log "Stopping network-maker..." - pkill -x "${name}" || true - log "network-maker stop requested." - else - log "network-maker is not running." - fi + if pgrep -x "${name}" >/dev/null 2>&1; then + log "Stopping network-maker..." + pkill -x "${name}" || true + log "network-maker stop requested." + else + log "network-maker is not running." + fi } stop_sn() { - local stopped=0 - local names=("supernode-linux-amd64" "supernode") + local stopped=0 + local names=("supernode-linux-amd64" "supernode") - for name in "${names[@]}"; do - if pgrep -x "${name}" >/dev/null 2>&1; then - stopped=1 - log "Stopping supernode (${name})..." - if command -v "${name}" >/dev/null 2>&1; then - "${name}" stop -d "${SN_BASEDIR}" >/dev/null 2>&1 || pkill -x "${name}" || true - else - pkill -x "${name}" || true - fi - fi - done + for name in "${names[@]}"; do + if pgrep -x "${name}" >/dev/null 2>&1; then + stopped=1 + log "Stopping supernode (${name})..." + if command -v "${name}" >/dev/null 2>&1; then + "${name}" stop -d "${SN_BASEDIR}" >/dev/null 2>&1 || pkill -x "${name}" || true + else + pkill -x "${name}" || true + fi + fi + done - if (( stopped == 0 )); then - log "Supernode is not running." - else - log "Supernode stop requested." - fi + if ((stopped == 0)); then + log "Supernode is not running." + else + log "Supernode stop requested." + fi } stop_nginx() { - if pgrep -x nginx >/dev/null 2>&1; then - log "Stopping nginx..." - if command -v nginx >/dev/null 2>&1; then - nginx -s quit >/dev/null 2>&1 || nginx -s stop >/dev/null 2>&1 || pkill -x nginx || true - else - pkill -x nginx || true - fi - log "nginx stop requested." - else - log "nginx is not running." - fi + if pgrep -x nginx >/dev/null 2>&1; then + log "Stopping nginx..." + if command -v nginx >/dev/null 2>&1; then + nginx -s quit >/dev/null 2>&1 || nginx -s stop >/dev/null 2>&1 || pkill -x nginx || true + else + pkill -x nginx || true + fi + log "nginx stop requested." + else + log "nginx is not running." + fi } stop_lumera() { - local pattern="${DAEMON} start --home ${DAEMON_HOME}" + local pattern="${DAEMON} start --home ${DAEMON_HOME}" - if pgrep -f "${pattern}" >/dev/null 2>&1; then - log "Stopping ${DAEMON}..." - pkill -f "${pattern}" || true - log "${DAEMON} stop requested." - return - fi + if pgrep -f "${pattern}" >/dev/null 2>&1; then + log "Stopping ${DAEMON}..." + pkill -f "${pattern}" || true + log "${DAEMON} stop requested." + return + fi - if pgrep -x "${DAEMON}" >/dev/null 2>&1; then - log "Stopping ${DAEMON}..." - pkill -x "${DAEMON}" || true - log "${DAEMON} stop requested." - else - log "${DAEMON} is not running." - fi + if pgrep -x "${DAEMON}" >/dev/null 2>&1; then + log "Stopping ${DAEMON}..." + pkill -x "${DAEMON}" || true + log "${DAEMON} stop requested." + else + log "${DAEMON} is not running." 
+ fi
}

stop_all() {
- stop_nm
- stop_sn
- stop_nginx
- stop_lumera
+ stop_nm
+ stop_sn
+ stop_nginx
+ stop_lumera
}

usage() {
- echo "Usage: $0 [nm|sn|lumera|nginx|all]" >&2
- exit 1
+ echo "Usage: $0 [nm|sn|lumera|nginx|all]" >&2
+ exit 1
}

target="${1:-all}"

case "${target}" in
- nm|network-maker)
- stop_nm
- ;;
- sn|supernode)
- stop_sn
- ;;
- nginx|ui)
- stop_nginx
- ;;
- lumera|lumerad|chain)
- stop_lumera
- ;;
- all|"")
- stop_all
- ;;
- *)
- usage
- ;;
+nm | network-maker)
+ stop_nm
+ ;;
+sn | supernode)
+ stop_sn
+ ;;
+nginx | ui)
+ stop_nginx
+ ;;
+lumera | lumerad | chain)
+ stop_lumera
+ ;;
+all | "")
+ stop_all
+ ;;
+*)
+ usage
+ ;;
esac
diff --git a/devnet/scripts/submit-param-proposal.sh b/devnet/scripts/submit-param-proposal.sh
index 374db295..696322b9 100755
--- a/devnet/scripts/submit-param-proposal.sh
+++ b/devnet/scripts/submit-param-proposal.sh
@@ -27,13 +27,13 @@ LUMERA_SHARED="/tmp/${CHAIN_ID}/shared"
FEES="5000${DENOM}"

if [ $# -lt 3 ]; then
- cat <<EOF
+ cat <<EOF
Usage: $0 <subspace> <key> <value> [title] [description]

Example: $0 action max_actions_per_block 25
EOF
- exit 1
+ exit 1
fi

SUBSPACE="$1"
@@ -48,11 +48,11 @@ mkdir -p "$LUMERA_SHARED"

# Helper to sanitize filenames
sanitize() {
- local s
- s="$(printf '%s' "$1" | tr '[:upper:]' '[:lower:]' | tr -c '[:alnum:]' '_')"
- # collapse duplicate underscores and trim leading/trailing ones
- s="$(echo "$s" | sed -e 's/_\{2,\}/_/g' -e 's/^_//;s/_$//')"
- echo "$s"
+ local s
+ s="$(printf '%s' "$1" | tr '[:upper:]' '[:lower:]' | tr -c '[:alnum:]' '_')"
+ # collapse duplicate underscores and trim leading/trailing ones
+ s="$(echo "$s" | sed -e 's/_\{2,\}/_/g' -e 's/^_//;s/_$//')"
+ echo "$s"
}

SAFE_SUBSPACE="$(sanitize "$SUBSPACE")"
@@ -61,79 +61,79 @@ HOST_PROPOSAL_FILE="${LUMERA_SHARED}/param_${SAFE_SUBSPACE}_${SAFE_KEY}.json"
CONTAINER_PROPOSAL_FILE="/shared/param_${SAFE_SUBSPACE}_${SAFE_KEY}.json"

get_min_deposit_from_gov_params() {
- docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \
- lumerad query gov params --output json 2>/dev/null \
- | jq -r '.params.min_deposit[0].amount + .params.min_deposit[0].denom'
+ docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \
+ lumerad query gov params --output json 2>/dev/null |
+ jq -r '.params.min_deposit[0].amount + .params.min_deposit[0].denom'
}

get_lumerad_version() {
- docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \
- lumerad version 2>/dev/null | tr -d '\r'
+ docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \
+ lumerad version 2>/dev/null | tr -d '\r'
}

fetch_current_params() {
- case "$SUBSPACE" in
- action)
- docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \
- lumerad query action params --output json 2>/dev/null
- ;;
- *)
- echo "❌ Unsupported subspace '$SUBSPACE' for automatic parameter retrieval" >&2
- exit 1
- ;;
- esac
+ case "$SUBSPACE" in
+ action)
+ docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \
+ lumerad query action params --output json 2>/dev/null
+ ;;
+ *)
+ echo "❌ Unsupported subspace '$SUBSPACE' for automatic parameter retrieval" >&2
+ exit 1
+ ;;
+ esac
}

update_params_value() {
- local params_json="$1"
- local updated
+ local params_json="$1"
+ local updated

- if jq -e -n --argjson tmp "$VALUE" 'true' >/dev/null 2>&1; then
- updated=$(echo "$params_json" | jq --arg key "$KEY" --argjson val "$VALUE" '.params[$key] = $val')
- else
- updated=$(echo "$params_json" | jq --arg key "$KEY" --arg val "$VALUE" '.params[$key] = $val')
- fi
+ if jq -e -n --argjson tmp "$VALUE" 'true' >/dev/null 2>&1; then
+ updated=$(echo "$params_json" | jq --arg key "$KEY" --argjson val "$VALUE" '.params[$key] = 
$val') + else + updated=$(echo "$params_json" | jq --arg key "$KEY" --arg val "$VALUE" '.params[$key] = $val') + fi - if [ -z "$updated" ] || [ "$updated" = "null" ]; then - echo "❌ Failed to update parameter $KEY" >&2 - exit 1 - fi + if [ -z "$updated" ] || [ "$updated" = "null" ]; then + echo "❌ Failed to update parameter $KEY" >&2 + exit 1 + fi - echo "$updated" + echo "$updated" } derive_msg_type() { - local version major minor - version="$(get_lumerad_version)" - IFS='.' read -r major minor _ <<< "$version" - if [[ -n "$major" && -n "$minor" && "$major" =~ ^[0-9]+$ && "$minor" =~ ^[0-9]+$ ]]; then - if (( major > 1 )) || (( major == 1 && minor >= 8 )); then - echo "/lumera.action.v1.MsgUpdateParams" - return - fi - fi - echo "/lumera.action.MsgUpdateParams" + local version major minor + version="$(get_lumerad_version)" + IFS='.' read -r major minor _ <<<"$version" + if [[ -n "$major" && -n "$minor" && "$major" =~ ^[0-9]+$ && "$minor" =~ ^[0-9]+$ ]]; then + if ((major > 1)) || ((major == 1 && minor >= 8)); then + echo "/lumera.action.v1.MsgUpdateParams" + return + fi + fi + echo "/lumera.action.MsgUpdateParams" } # Read the governance account address (set during devnet init) GOV_KEY_FILE="${LUMERA_SHARED}/governance_address" if [ ! -f "$GOV_KEY_FILE" ]; then - echo "❌ Governance key address file not found at $GOV_KEY_FILE" - exit 1 + echo "❌ Governance key address file not found at $GOV_KEY_FILE" + exit 1 fi PROPOSER_ADDRESS="$(cat "$GOV_KEY_FILE")" GOV_KEY_NAME="governance_key" get_gov_module_address() { - docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query auth module-accounts --output json 2>/dev/null \ - | jq -r '.accounts[] | select(.value.name=="gov") | .value.address' + docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query auth module-accounts --output json 2>/dev/null | + jq -r '.accounts[] | select(.value.name=="gov") | .value.address' } AUTHORITY_ADDRESS="$(get_gov_module_address)" if [[ -z "$AUTHORITY_ADDRESS" || "$AUTHORITY_ADDRESS" == "null" ]]; then - echo "❌ Unable to determine gov module account address" - exit 1 + echo "❌ Unable to determine gov module account address" + exit 1 fi echo "Proposer address: $PROPOSER_ADDRESS" @@ -141,8 +141,8 @@ echo "Gov module authority: $AUTHORITY_ADDRESS" MIN_DEPOSIT="$(get_min_deposit_from_gov_params)" if [[ -z "$MIN_DEPOSIT" ]]; then - echo "❌ Failed to determine min deposit from gov params" - exit 1 + echo "❌ Failed to determine min deposit from gov params" + exit 1 fi CURRENT_PARAMS_JSON="$(fetch_current_params)" @@ -152,12 +152,12 @@ UPDATED_PARAMS_JSON="$(update_params_value "$CURRENT_PARAMS_JSON")" MSG_TYPE="$(derive_msg_type)" echo "$UPDATED_PARAMS_JSON" | jq --arg title "$TITLE" \ - --arg description "$DESCRIPTION" \ - --arg summary "$SUMMARY" \ - --arg deposit "$MIN_DEPOSIT" \ - --arg type "$MSG_TYPE" \ - --arg authority "$AUTHORITY_ADDRESS" \ - '{ + --arg description "$DESCRIPTION" \ + --arg summary "$SUMMARY" \ + --arg deposit "$MIN_DEPOSIT" \ + --arg type "$MSG_TYPE" \ + --arg authority "$AUTHORITY_ADDRESS" \ + '{ title: $title, description: $description, summary: $summary, @@ -169,42 +169,42 @@ echo "$UPDATED_PARAMS_JSON" | jq --arg title "$TITLE" \ params: .params } ] - }' > "$HOST_PROPOSAL_FILE" + }' >"$HOST_PROPOSAL_FILE" echo "🔧 Generated proposal JSON at $HOST_PROPOSAL_FILE" cat "$HOST_PROPOSAL_FILE" | jq || cat "$HOST_PROPOSAL_FILE" SUBMIT_OUTPUT="$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad tx gov submit-proposal "$CONTAINER_PROPOSAL_FILE" \ - --from "$GOV_KEY_NAME" 
\ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --fees "$FEES" \ - --broadcast-mode sync \ - --output json \ - --yes 2>/dev/null)" + lumerad tx gov submit-proposal "$CONTAINER_PROPOSAL_FILE" \ + --from "$GOV_KEY_NAME" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --fees "$FEES" \ + --broadcast-mode sync \ + --output json \ + --yes 2>/dev/null)" echo "📝 Transaction response:" echo "$SUBMIT_OUTPUT" | jq || echo "$SUBMIT_OUTPUT" TXHASH="$(echo "$SUBMIT_OUTPUT" | jq -r '.txhash // empty')" if [[ -z "$TXHASH" ]]; then - echo "❌ No transaction hash returned. Submission may have failed." - exit 1 + echo "❌ No transaction hash returned. Submission may have failed." + exit 1 fi echo "⏳ Waiting for transaction to be included..." WAIT_TX_OUTPUT="$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query wait-tx "$TXHASH" --output json --timeout 60s 2>/dev/null)" + lumerad query wait-tx "$TXHASH" --output json --timeout 60s 2>/dev/null)" echo "$WAIT_TX_OUTPUT" | jq || echo "$WAIT_TX_OUTPUT" CODE="$(echo "$WAIT_TX_OUTPUT" | jq -r '.code // 0')" RAW_LOG="$(echo "$WAIT_TX_OUTPUT" | jq -r '.raw_log // empty')" if [[ "$CODE" != "0" ]]; then - echo "🚫 Transaction failed (code: $CODE)" - echo "$RAW_LOG" - exit 1 + echo "🚫 Transaction failed (code: $CODE)" + echo "$RAW_LOG" + exit 1 fi PROPOSAL_ID="$(echo "$WAIT_TX_OUTPUT" | jq -r ' @@ -212,13 +212,13 @@ PROPOSAL_ID="$(echo "$WAIT_TX_OUTPUT" | jq -r ' ')" if [[ -z "$PROPOSAL_ID" ]]; then - echo "⚠️ Unable to extract proposal ID from wait-tx output." - exit 1 + echo "⚠️ Unable to extract proposal ID from wait-tx output." + exit 1 fi STATUS="$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov proposal "$PROPOSAL_ID" --output json 2>/dev/null \ - | jq -r '.proposal.status // empty')" + lumerad query gov proposal "$PROPOSAL_ID" --output json 2>/dev/null | + jq -r '.proposal.status // empty')" echo "✅ Params proposal submitted with ID: $PROPOSAL_ID (status: $STATUS)" exit 0 diff --git a/devnet/scripts/submit-upgrade-proposal.sh b/devnet/scripts/submit-upgrade-proposal.sh index 0f79d923..25259d90 100755 --- a/devnet/scripts/submit-upgrade-proposal.sh +++ b/devnet/scripts/submit-upgrade-proposal.sh @@ -24,32 +24,32 @@ CONTAINER_PROPOSAL_FILE="/shared/upgrade_${VERSION}.json" COMPOSE_FILE="../docker-compose.yml" get_current_height() { - docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // empty' + docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // empty' } # Usage: # get_proposal_id_by_version_with_retry "v1.8.0" # default 1 attempt # get_proposal_id_by_version_with_retry "v1.8.0" 5 # 5 retry attempts get_proposal_id_by_version_with_retry() { - local version="$1" - local attempts="${2:-1}" # Default to 1 attempt if not provided - local height_filter="$3" - local sleep_interval=2 - local proposal_id="" - local proposals_json + local version="$1" + local attempts="${2:-1}" # Default to 1 attempt if not provided + local height_filter="$3" + local sleep_interval=2 + local proposal_id="" + local proposals_json - for ((i = 1; i <= attempts; i++)); do - echo "🔄 Checking for proposal with version: $version (attempt $i of $attempts)..." >&2 + for ((i = 1; i <= attempts; i++)); do + echo "🔄 Checking for proposal with version: $version (attempt $i of $attempts)..." 
>&2 - proposals_json=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov proposals --output json 2>/dev/null) + proposals_json=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query gov proposals --output json 2>/dev/null) - echo "Proposals JSON:" >&2 - echo "$proposals_json" | jq >&2 + echo "Proposals JSON:" >&2 + echo "$proposals_json" | jq >&2 - if echo "$proposals_json" | jq -e '.proposals | type == "array"' > /dev/null; then - proposal_id=$(echo "$proposals_json" | jq -r --arg version "$version" --arg height "$height_filter" ' + if echo "$proposals_json" | jq -e '.proposals | type == "array"' >/dev/null; then + proposal_id=$(echo "$proposals_json" | jq -r --arg version "$version" --arg height "$height_filter" ' .proposals | map(select( [ @@ -68,82 +68,81 @@ get_proposal_id_by_version_with_retry() { | last | .id // empty ') - fi + fi - if [[ -n "$proposal_id" ]]; then - echo "✅ Found proposal ID for version $version: $proposal_id" >&2 - echo "$proposal_id" - return 0 - fi + if [[ -n "$proposal_id" ]]; then + echo "✅ Found proposal ID for version $version: $proposal_id" >&2 + echo "$proposal_id" + return 0 + fi - [[ $i -lt $attempts ]] && sleep "$sleep_interval" -done + [[ $i -lt $attempts ]] && sleep "$sleep_interval" + done + if [[ -z "$proposals_json" ]] || [[ "$proposals_json" != *"proposals"* ]]; then + return 1 + fi - if [[ -z "$proposals_json" ]] || [[ "$proposals_json" != *"proposals"* ]]; then - return 1 - fi - - echo "❌ Could not find proposal for version $version after $attempts attempt(s)." >&2 - return 1 + echo "❌ Could not find proposal for version $version after $attempts attempt(s)." >&2 + return 1 } get_proposal_status_by_id() { - local proposal_id="$1" - docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov proposal "$proposal_id" --output json 2>/dev/null | - jq -r '.proposal.status' + local proposal_id="$1" + docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query gov proposal "$proposal_id" --output json 2>/dev/null | + jq -r '.proposal.status' } get_proposal_deposit_by_id() { - local proposal_id="$1" - docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov proposal "$proposal_id" --output json 2>/dev/null | - jq -r '.proposal.total_deposit[0].amount + .proposal.total_deposit[0].denom' + local proposal_id="$1" + docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query gov proposal "$proposal_id" --output json 2>/dev/null | + jq -r '.proposal.total_deposit[0].amount + .proposal.total_deposit[0].denom' } # Returns the planned upgrade height for the given proposal ID (or empty) get_proposal_height_by_id() { - local proposal_id="$1" - docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov proposal "$proposal_id" --output json 2>/dev/null | - jq -r '.proposal.messages[]?.value.plan.height // empty' + local proposal_id="$1" + docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query gov proposal "$proposal_id" --output json 2>/dev/null | + jq -r '.proposal.messages[]?.value.plan.height // empty' } validate_height_param() { - CURRENT_HEIGHT=$(get_current_height) - if ! [[ "$CURRENT_HEIGHT" =~ ^[0-9]+$ ]]; then - echo "⚠️ Could not retrieve current block height. Is the chain running?" - exit 1 - fi - echo "Lumera current height: $CURRENT_HEIGHT" - - # Sanity check: warn if upgrade height is in the past - if [ "$UPGRADE_HEIGHT" -lt "$CURRENT_HEIGHT" ]; then - echo "❗ UPGRADE_HEIGHT ($UPGRADE_HEIGHT) is less than CURRENT_HEIGHT ($CURRENT_HEIGHT)." 
- echo "⚠️ The upgrade proposal will be ineffective and ignored by the chain!" - echo "💡 Consider choosing a future block height." - exit 1 - fi + CURRENT_HEIGHT=$(get_current_height) + if ! [[ "$CURRENT_HEIGHT" =~ ^[0-9]+$ ]]; then + echo "⚠️ Could not retrieve current block height. Is the chain running?" + exit 1 + fi + echo "Lumera current height: $CURRENT_HEIGHT" + + # Sanity check: warn if upgrade height is in the past + if [ "$UPGRADE_HEIGHT" -lt "$CURRENT_HEIGHT" ]; then + echo "❗ UPGRADE_HEIGHT ($UPGRADE_HEIGHT) is less than CURRENT_HEIGHT ($CURRENT_HEIGHT)." + echo "⚠️ The upgrade proposal will be ineffective and ignored by the chain!" + echo "💡 Consider choosing a future block height." + exit 1 + fi } get_min_deposit_from_gov_params() { - local output - output=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query gov params --output json 2>/dev/null | - jq -r '.params.min_deposit[0].amount + .params.min_deposit[0].denom') - echo "$output" + local output + output=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query gov params --output json 2>/dev/null | + jq -r '.params.min_deposit[0].amount + .params.min_deposit[0].denom') + echo "$output" } submit_proposal() { - AUTHORITY_ADDRESS=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query auth module-accounts --output json | \ - jq -r '.accounts[] | select(.value.name=="gov") | .value.address') + AUTHORITY_ADDRESS=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query auth module-accounts --output json | + jq -r '.accounts[] | select(.value.name=="gov") | .value.address') - MIN_DEPOSIT=$(get_min_deposit_from_gov_params) + MIN_DEPOSIT=$(get_min_deposit_from_gov_params) - echo "Generating proposal file at $HOST_PROPOSAL_FILE..." >&2 - cat > "$HOST_PROPOSAL_FILE" <&2 + cat >"$HOST_PROPOSAL_FILE" <&2 - cat "$HOST_PROPOSAL_FILE" | jq || cat "$HOST_PROPOSAL_FILE" - - echo "Submitting software upgrade proposal for version $VERSION at height ${UPGRADE_HEIGHT}..." >&2 - - # Run inside supernova_validator_1 container - PROPOSAL_OUTPUT=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad tx gov submit-proposal "$CONTAINER_PROPOSAL_FILE" \ - --from "$GOV_ADDRESS" \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --fees 5000ulume \ - --broadcast-mode sync \ - --output json \ - --yes 2>/dev/null) - - echo "🔍 Proposal submission output:" >&2 - echo "$PROPOSAL_OUTPUT" | jq . >&2 - - TXHASH=$(echo "$PROPOSAL_OUTPUT" | jq -r '.txhash // empty') - LATEST_PROPOSAL_ID=$(get_proposal_id_by_version_with_retry "$VERSION" 5 "$UPGRADE_HEIGHT") - - if [[ -n "$LATEST_PROPOSAL_ID" ]]; then - echo "✅ Proposal submitted successfully with ID: $LATEST_PROPOSAL_ID" >&2 - else - echo "❌ Could not retrieve proposal ID. Attempting to fetch rejection reason from tx..." >&2 - if [[ -n "$TXHASH" ]]; then - RAW_LOG=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad query tx "$TXHASH" --output json 2>/dev/null | jq -r '.raw_log // empty') - echo "🚫 Rejection reason from tx $TXHASH:" >&2 - echo "$RAW_LOG" >&2 - else - echo "🚫 Proposal submission failed. No tx hash captured." >&2 - fi - exit 1 - fi + echo "🔍 Proposal JSON content at $HOST_PROPOSAL_FILE:" >&2 + cat "$HOST_PROPOSAL_FILE" | jq || cat "$HOST_PROPOSAL_FILE" + + echo "Submitting software upgrade proposal for version $VERSION at height ${UPGRADE_HEIGHT}..." 
>&2 + + # Run inside supernova_validator_1 container + PROPOSAL_OUTPUT=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad tx gov submit-proposal "$CONTAINER_PROPOSAL_FILE" \ + --from "$GOV_ADDRESS" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --fees 5000ulume \ + --broadcast-mode sync \ + --output json \ + --yes 2>/dev/null) + + echo "🔍 Proposal submission output:" >&2 + echo "$PROPOSAL_OUTPUT" | jq . >&2 + + TXHASH=$(echo "$PROPOSAL_OUTPUT" | jq -r '.txhash // empty') + LATEST_PROPOSAL_ID=$(get_proposal_id_by_version_with_retry "$VERSION" 5 "$UPGRADE_HEIGHT") + + if [[ -n "$LATEST_PROPOSAL_ID" ]]; then + echo "✅ Proposal submitted successfully with ID: $LATEST_PROPOSAL_ID" >&2 + else + echo "❌ Could not retrieve proposal ID. Attempting to fetch rejection reason from tx..." >&2 + if [[ -n "$TXHASH" ]]; then + RAW_LOG=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad query tx "$TXHASH" --output json 2>/dev/null | jq -r '.raw_log // empty') + echo "🚫 Rejection reason from tx $TXHASH:" >&2 + echo "$RAW_LOG" >&2 + else + echo "🚫 Proposal submission failed. No tx hash captured." >&2 + fi + exit 1 + fi } submit_proposal_deposit() { - echo "✅ Proposal is in deposit period. Funding it." >&2 - - # Get how much has already been deposited - CURRENT_DEPOSIT=$(get_proposal_deposit_by_id "$EXISTING_PROPOSAL_ID") - MIN_DEPOSIT=$(get_min_deposit_from_gov_params) - echo "Current deposit for proposal $EXISTING_PROPOSAL_ID: $CURRENT_DEPOSIT" >&2 - - CURRENT_AMOUNT=${CURRENT_DEPOSIT%ulume} - REQUIRED_AMOUNT=${MIN_DEPOSIT%ulume} - TO_DEPOSIT=$((REQUIRED_AMOUNT - CURRENT_AMOUNT)) - - if (( TO_DEPOSIT <= 0 )); then - echo "✅ Proposal already fully funded. No additional deposit required." >&2 - return - fi - - DEPOSIT_AMOUNT="${TO_DEPOSIT}ulume" - echo "➡️ Submitting deposit of $DEPOSIT_AMOUNT" >&2 - - DEPOSIT_OUTPUT=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad tx gov deposit "$EXISTING_PROPOSAL_ID" "$DEPOSIT_AMOUNT" \ - --from "$GOV_ADDRESS" \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING" \ - --fees 5000ulume \ - --broadcast-mode sync \ - --output json \ - --yes 2>/dev/null) - echo "🔍 Proposal deposit JSON content:" >&2 - echo "$DEPOSIT_OUTPUT" | jq - - DEPOSIT_TXHASH=$(echo "$DEPOSIT_OUTPUT" | jq -r '.txhash // empty') - DEPOSIT_CODE=$(echo "$DEPOSIT_OUTPUT" | jq -r '.code // 0') - DEPOSIT_RAW_LOG=$(echo "$DEPOSIT_OUTPUT" | jq -r '.raw_log // empty') - - if [[ "$DEPOSIT_CODE" != "0" || -n "$DEPOSIT_RAW_LOG" ]]; then - echo "🚫 Deposit failed (txhash: $DEPOSIT_TXHASH)" >&2 - echo "🔍 Rejection reason:" >&2 - echo "$DEPOSIT_RAW_LOG" >&2 - exit 1 - fi - - echo "✅ Deposit transaction succeeded (txhash: $DEPOSIT_TXHASH)" >&2 + echo "✅ Proposal is in deposit period. Funding it." >&2 + + # Get how much has already been deposited + CURRENT_DEPOSIT=$(get_proposal_deposit_by_id "$EXISTING_PROPOSAL_ID") + MIN_DEPOSIT=$(get_min_deposit_from_gov_params) + echo "Current deposit for proposal $EXISTING_PROPOSAL_ID: $CURRENT_DEPOSIT" >&2 + + CURRENT_AMOUNT=${CURRENT_DEPOSIT%ulume} + REQUIRED_AMOUNT=${MIN_DEPOSIT%ulume} + TO_DEPOSIT=$((REQUIRED_AMOUNT - CURRENT_AMOUNT)) + + if ((TO_DEPOSIT <= 0)); then + echo "✅ Proposal already fully funded. No additional deposit required." 
>&2 + return + fi + + DEPOSIT_AMOUNT="${TO_DEPOSIT}ulume" + echo "➡️ Submitting deposit of $DEPOSIT_AMOUNT" >&2 + + DEPOSIT_OUTPUT=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad tx gov deposit "$EXISTING_PROPOSAL_ID" "$DEPOSIT_AMOUNT" \ + --from "$GOV_ADDRESS" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING" \ + --fees 5000ulume \ + --broadcast-mode sync \ + --output json \ + --yes 2>/dev/null) + echo "🔍 Proposal deposit JSON content:" >&2 + echo "$DEPOSIT_OUTPUT" | jq + + DEPOSIT_TXHASH=$(echo "$DEPOSIT_OUTPUT" | jq -r '.txhash // empty') + DEPOSIT_CODE=$(echo "$DEPOSIT_OUTPUT" | jq -r '.code // 0') + DEPOSIT_RAW_LOG=$(echo "$DEPOSIT_OUTPUT" | jq -r '.raw_log // empty') + + if [[ "$DEPOSIT_CODE" != "0" || -n "$DEPOSIT_RAW_LOG" ]]; then + echo "🚫 Deposit failed (txhash: $DEPOSIT_TXHASH)" >&2 + echo "🔍 Rejection reason:" >&2 + echo "$DEPOSIT_RAW_LOG" >&2 + exit 1 + fi + + echo "✅ Deposit transaction succeeded (txhash: $DEPOSIT_TXHASH)" >&2 } show_proposal_status() { - local version="$1" - local proposal_id="$2" - - if [[ -z "$proposal_id" ]]; then - echo "ℹ️ No upgrade proposal found for version $version." - return 1 - fi - - local status=$(get_proposal_status_by_id "$proposal_id") - echo "❗ Proposal for version $version already exists with ID: $proposal_id and status: $status" - - case "$status" in - PROPOSAL_STATUS_REJECTED) - echo "✅ Previous proposal was rejected." - ;; - PROPOSAL_STATUS_REJECTED_WITH_VETO) - echo "❗ Previous proposal was rejected with veto." - return 1 - ;; - PROPOSAL_STATUS_FAILED) - echo "❗ Previous proposal failed." - return 0 - ;; - PROPOSAL_STATUS_PASSED) - echo "✅ Proposal already passed. No need to resubmit." - exit 0 - ;; - PROPOSAL_STATUS_DEPOSIT_PERIOD) - echo "📥 Proposal is in deposit period. Deposit may be required." - exit 0 - ;; - PROPOSAL_STATUS_VOTING_PERIOD) - echo "⏳ Proposal is still active. Please wait or vote on it." - exit 0 - ;; - *) - echo "⚠️ Proposal in unknown state: $status" - exit 1 - ;; - esac + local version="$1" + local proposal_id="$2" + + if [[ -z "$proposal_id" ]]; then + echo "ℹ️ No upgrade proposal found for version $version." + return 1 + fi + + local status=$(get_proposal_status_by_id "$proposal_id") + echo "❗ Proposal for version $version already exists with ID: $proposal_id and status: $status" + + case "$status" in + PROPOSAL_STATUS_REJECTED) + echo "✅ Previous proposal was rejected." + ;; + PROPOSAL_STATUS_REJECTED_WITH_VETO) + echo "❗ Previous proposal was rejected with veto." + return 1 + ;; + PROPOSAL_STATUS_FAILED) + echo "❗ Previous proposal failed." + return 0 + ;; + PROPOSAL_STATUS_PASSED) + echo "✅ Proposal already passed. No need to resubmit." + exit 0 + ;; + PROPOSAL_STATUS_DEPOSIT_PERIOD) + echo "📥 Proposal is in deposit period. Deposit may be required." + exit 0 + ;; + PROPOSAL_STATUS_VOTING_PERIOD) + echo "⏳ Proposal is still active. Please wait or vote on it." 
+ exit 0
+ ;;
+ *)
+ echo "⚠️ Proposal in unknown state: $status"
+ exit 1
+ ;;
+ esac
}

CURRENT_HEIGHT=$(get_current_height)

if [ $# -eq 1 ]; then
- EXISTING_PROPOSAL_ID=$(get_proposal_id_by_version_with_retry "$VERSION")
- if [[ -n "$EXISTING_PROPOSAL_ID" ]]; then
- show_proposal_status "$VERSION" "$EXISTING_PROPOSAL_ID"
- fi
+ EXISTING_PROPOSAL_ID=$(get_proposal_id_by_version_with_retry "$VERSION")
+ if [[ -n "$EXISTING_PROPOSAL_ID" ]]; then
+ show_proposal_status "$VERSION" "$EXISTING_PROPOSAL_ID"
+ fi
fi

if [ $# -ne 2 ]; then
- if [[ "$CURRENT_HEIGHT" =~ ^[0-9]+$ ]]; then
- echo "💡 Lumera chain is running, current height: $CURRENT_HEIGHT"
- fi
+ if [[ "$CURRENT_HEIGHT" =~ ^[0-9]+$ ]]; then
+ echo "💡 Lumera chain is running, current height: $CURRENT_HEIGHT"
+ fi

- echo "Usage: $0 <version> <upgrade_height>"
- exit 1
+ echo "Usage: $0 <version> <upgrade_height>"
+ exit 1
fi

# Read the governance address
GOV_ADDRESS_FILE="${LUMERA_SHARED}/governance_address"
if [ ! -f "$GOV_ADDRESS_FILE" ]; then
- echo "❌ Governance address file not found at $GOV_ADDRESS_FILE"
- exit 1
+ echo "❌ Governance address file not found at $GOV_ADDRESS_FILE"
+ exit 1
fi
GOV_ADDRESS=$(cat "$GOV_ADDRESS_FILE")
GOV_KEY_NAME="governance_key"
@@ -323,50 +322,50 @@ validate_height_param
EXISTING_PROPOSAL_ID=$(get_proposal_id_by_version_with_retry "$VERSION")

if [[ -n "$EXISTING_PROPOSAL_ID" ]]; then
- show_proposal_status "$VERSION" "$EXISTING_PROPOSAL_ID"
-
- STATUS=$(get_proposal_status_by_id "$EXISTING_PROPOSAL_ID")
- case "$STATUS" in
- PROPOSAL_STATUS_DEPOSIT_PERIOD)
- submit_proposal_deposit
- exit 0
- ;;
- PROPOSAL_STATUS_FAILED)
- # Only resubmit if the new requested height is greater than the failed one
- FAILED_HEIGHT=$(get_proposal_height_by_id "$EXISTING_PROPOSAL_ID")
- CURRENT_HEIGHT=$(get_current_height)
- if [[ -z "$FAILED_HEIGHT" ]]; then
- echo "⚠️ Could not read failed proposal height; refusing to auto-resubmit."
- exit 1
- fi
- echo "ℹ️ Failed proposal height: $FAILED_HEIGHT; requested new height: $UPGRADE_HEIGHT"
- # check that UPGRADE_HEIGHT is greater than CURRENT_HEIGHT
- if (( UPGRADE_HEIGHT <= CURRENT_HEIGHT )); then
- echo "🚫 New height ($UPGRADE_HEIGHT) must be greater than current height ($CURRENT_HEIGHT)."
- exit 1
- fi
- if (( UPGRADE_HEIGHT > FAILED_HEIGHT )); then
- echo "✅ New height is greater than failed one — submitting a new proposal…"
- submit_proposal
- exit 0
- else
- echo "🚫 New height ($UPGRADE_HEIGHT) must be greater than failed height ($FAILED_HEIGHT)."
- exit 1
- fi
- ;;
- PROPOSAL_STATUS_REJECTED)
- # Rejected (no veto): allow re-submit at caller’s chosen height
- echo "ℹ️ Previous proposal was rejected. Submitting a new proposal…"
- submit_proposal
- exit 0
- ;;
- PROPOSAL_STATUS_REJECTED_WITH_VETO)
- # Keep current safety behavior for veto
- echo "❗ Previous proposal was rejected with veto. Not resubmitting automatically."
- exit 1
- ;;
- esac
+ show_proposal_status "$VERSION" "$EXISTING_PROPOSAL_ID"
+
+ STATUS=$(get_proposal_status_by_id "$EXISTING_PROPOSAL_ID")
+ case "$STATUS" in
+ PROPOSAL_STATUS_DEPOSIT_PERIOD)
+ submit_proposal_deposit
+ exit 0
+ ;;
+ PROPOSAL_STATUS_FAILED)
+ # Only resubmit if the new requested height is greater than the failed one
+ FAILED_HEIGHT=$(get_proposal_height_by_id "$EXISTING_PROPOSAL_ID")
+ CURRENT_HEIGHT=$(get_current_height)
+ if [[ -z "$FAILED_HEIGHT" ]]; then
+ echo "⚠️ Could not read failed proposal height; refusing to auto-resubmit." 
+ exit 1 + fi + echo "ℹ️ Failed proposal height: $FAILED_HEIGHT; requested new height: $UPGRADE_HEIGHT" + # check that UPGRADE_HEIGHT is greater than CURRENT_HEIGHT + if ((UPGRADE_HEIGHT <= CURRENT_HEIGHT)); then + echo "🚫 New height ($UPGRADE_HEIGHT) must be greater than current height ($CURRENT_HEIGHT)." + exit 1 + fi + if ((UPGRADE_HEIGHT > FAILED_HEIGHT)); then + echo "✅ New height is greater than failed one — submitting a new proposal…" + submit_proposal + exit 0 + else + echo "🚫 New height ($UPGRADE_HEIGHT) must be greater than failed height ($FAILED_HEIGHT)." + exit 1 + fi + ;; + PROPOSAL_STATUS_REJECTED) + # Rejected (no veto): allow re-submit at caller’s chosen height + echo "ℹ️ Previous proposal was rejected. Submitting a new proposal…" + submit_proposal + exit 0 + ;; + PROPOSAL_STATUS_REJECTED_WITH_VETO) + # Keep current safety behavior for veto + echo "❗ Previous proposal was rejected with veto. Not resubmitting automatically." + exit 1 + ;; + esac else - echo "🔄 No existing proposal found for version $VERSION. Proceeding to submit a new upgrade proposal" - submit_proposal + echo "🔄 No existing proposal found for version $VERSION. Proceeding to submit a new upgrade proposal" + submit_proposal fi diff --git a/devnet/scripts/supernode-setup.sh b/devnet/scripts/supernode-setup.sh index 48a371d7..7d375f2e 100755 --- a/devnet/scripts/supernode-setup.sh +++ b/devnet/scripts/supernode-setup.sh @@ -4,16 +4,16 @@ set -euo pipefail # Require MONIKER env (compose already sets it) if [ -z "${MONIKER:-}" ]; then - echo "[SN] MONIKER is not set; skipping supernode setup." - exit 0 + echo "[SN] MONIKER is not set; skipping supernode setup." + exit 0 fi -if [ ! command -v jq >/dev/null 2>&1 ]; then - echo "[SN] jq is missing" +if ! command -v jq >/dev/null 2>&1; then + echo "[SN] jq is missing" fi -if [ ! command -v curl >/dev/null 2>&1 ]; then - echo "[SN] curl is missing" +if ! command -v curl >/dev/null 2>&1; then + echo "[SN] curl is missing" fi DAEMON="lumerad" @@ -65,502 +65,544 @@ SNCLI_MIN_AMOUNT=10000 SNCLI_KEY_NAME="sncli-account" if [[ "$KEY_NAME" == *validator* ]]; then - SN_KEY_NAME="${KEY_NAME/validator/supernode}" + SN_KEY_NAME="${KEY_NAME/validator/supernode}" else - SN_KEY_NAME="${KEY_NAME}_sn" + SN_KEY_NAME="${KEY_NAME}_sn" fi run() { - echo "+ $*" - "$@" + echo "+ $*" + "$@" } run_capture() { - echo "+ $*" >&2 # goes to stderr, not captured - "$@" + echo "+ $*" >&2 # goes to stderr, not captured + "$@" } require_crudini() { - if ! command -v crudini >/dev/null 2>&1; then - echo "[SN] ERROR: crudini not found. Please install it (e.g., apt-get update && apt-get install -y crudini) and re-run." - return 1 - fi + if ! command -v crudini >/dev/null 2>&1; then + echo "[SN] ERROR: crudini not found. Please install it (e.g., apt-get update && apt-get install -y crudini) and re-run." + return 1 + fi } # Wait for a transaction to be included in a block wait_for_tx() { - local txhash="$1" - local timeout="${2:-90}" - local interval="${3:-3}" - - if [[ -z "$txhash" ]]; then - echo "[SN] wait_for_tx: missing tx hash" - return 2 - fi - - echo "[SN] Waiting for tx $txhash (up to ${timeout}s) via WebSocket…" - local wait_args=(q wait-tx "$txhash" --output json --timeout "${timeout}s") - [[ -n "$LUMERA_RPC_ADDR" ]] && wait_args+=(--node "$LUMERA_RPC_ADDR") - - # Try WebSocket subscription first - local out rc=0 - out="$($DAEMON "${wait_args[@]}" 2>&1)"; rc=$? - if [[ $rc -eq 0 ]] && jq -e . 
>/dev/null 2>&1 <<<"$out"; then - local code height gas_used gas_wanted raw_log ts - code=$(jq -r 'try .code // "null"' <<<"$out") - height=$(jq -r 'try .height // "0"' <<<"$out") - gas_used=$(jq -r 'try .gas_used // ""' <<<"$out") - gas_wanted=$(jq -r 'try .gas_wanted // ""' <<<"$out") - raw_log=$(jq -r 'try .raw_log // ""' <<<"$out") - ts=$(jq -r 'try .timestamp // ""' <<<"$out") - - if [[ "$code" == "0" || "$code" == "null" ]]; then - echo "[SN] Tx $txhash confirmed at height $height (gas $gas_used/$gas_wanted) $ts" - return 0 - else - echo "[SN] Tx $txhash FAILED at height $height: code=$code" - [[ -n "$raw_log" ]] && echo "[SN] raw_log: $raw_log" - return 1 - fi - else - echo "[SN] WebSocket wait failed/timeout; falling back to RPC polling…" - fi - - # Fallback: poll q tx by hash (works even if indexer is null, once node surfaces it) - local deadline=$((SECONDS + timeout)) - while (( SECONDS < deadline )); do - local tx_args=(q tx "$txhash" --output json) - [[ -n "$NODE" ]] && tx_args+=(--node "$NODE") - - out="$($DAEMON "${tx_args[@]}" 2>&1)" || true - - # If it's valid JSON, try to read fields; otherwise keep waiting on common "not found" cases - if jq -e . >/dev/null 2>&1 <<<"$out"; then - local height code codespace raw_log gas_used gas_wanted - height=$(jq -r 'try .height // "0"' <<<"$out") - code=$(jq -r 'try .code // "null"' <<<"$out") - codespace=$(jq -r 'try .codespace // ""' <<<"$out") - raw_log=$(jq -r 'try .raw_log // ""' <<<"$out") - gas_used=$(jq -r 'try .gas_used // ""' <<<"$out") - gas_wanted=$(jq -r 'try .gas_wanted // ""' <<<"$out") - - if [[ "$height" != "0" && "$height" != "null" ]]; then - if [[ "$code" == "0" || "$code" == "null" ]]; then - echo "[SN] Tx $txhash confirmed at height $height (gas $gas_used/$gas_wanted)" - return 0 - else - echo "[SN] Tx $txhash FAILED at height $height: code=$code codespace=${codespace:-N/A}" - [[ -n "$raw_log" ]] && echo "[SN] raw_log: $raw_log" - return 1 - fi - fi - else - # Non-JSON or "not found" cases: keep polling - # Typical texts: "tx (...) not found", RPC -32603, or empty while indexing. - : - fi - - sleep "$interval" - done - - echo "[SN] Timeout: tx $txhash not found/committed after ${timeout}s." - echo "[SN] Hints: ensure RPC reachable (set \$NODE), and node is not lagging." - return 2 + local txhash="$1" + local timeout="${2:-90}" + local interval="${3:-3}" + + if [[ -z "$txhash" ]]; then + echo "[SN] wait_for_tx: missing tx hash" + return 2 + fi + + echo "[SN] Waiting for tx $txhash (up to ${timeout}s) via WebSocket…" + local wait_args=(q wait-tx "$txhash" --output json --timeout "${timeout}s") + [[ -n "$LUMERA_RPC_ADDR" ]] && wait_args+=(--node "$LUMERA_RPC_ADDR") + + # Try WebSocket subscription first + local out rc=0 + out="$($DAEMON "${wait_args[@]}" 2>&1)" + rc=$? + if [[ $rc -eq 0 ]] && jq -e . 
>/dev/null 2>&1 <<<"$out"; then + local code height gas_used gas_wanted raw_log ts + code=$(jq -r 'try .code // "null"' <<<"$out") + height=$(jq -r 'try .height // "0"' <<<"$out") + gas_used=$(jq -r 'try .gas_used // ""' <<<"$out") + gas_wanted=$(jq -r 'try .gas_wanted // ""' <<<"$out") + raw_log=$(jq -r 'try .raw_log // ""' <<<"$out") + ts=$(jq -r 'try .timestamp // ""' <<<"$out") + + if [[ "$code" == "0" || "$code" == "null" ]]; then + echo "[SN] Tx $txhash confirmed at height $height (gas $gas_used/$gas_wanted) $ts" + return 0 + else + echo "[SN] Tx $txhash FAILED at height $height: code=$code" + [[ -n "$raw_log" ]] && echo "[SN] raw_log: $raw_log" + return 1 + fi + else + echo "[SN] WebSocket wait failed/timeout; falling back to RPC polling…" + fi + + # Fallback: poll q tx by hash (works even if indexer is null, once node surfaces it) + local deadline=$((SECONDS + timeout)) + while ((SECONDS < deadline)); do + local tx_args=(q tx "$txhash" --output json) + [[ -n "$NODE" ]] && tx_args+=(--node "$NODE") + + out="$($DAEMON "${tx_args[@]}" 2>&1)" || true + + # If it's valid JSON, try to read fields; otherwise keep waiting on common "not found" cases + if jq -e . >/dev/null 2>&1 <<<"$out"; then + local height code codespace raw_log gas_used gas_wanted + height=$(jq -r 'try .height // "0"' <<<"$out") + code=$(jq -r 'try .code // "null"' <<<"$out") + codespace=$(jq -r 'try .codespace // ""' <<<"$out") + raw_log=$(jq -r 'try .raw_log // ""' <<<"$out") + gas_used=$(jq -r 'try .gas_used // ""' <<<"$out") + gas_wanted=$(jq -r 'try .gas_wanted // ""' <<<"$out") + + if [[ "$height" != "0" && "$height" != "null" ]]; then + if [[ "$code" == "0" || "$code" == "null" ]]; then + echo "[SN] Tx $txhash confirmed at height $height (gas $gas_used/$gas_wanted)" + return 0 + else + echo "[SN] Tx $txhash FAILED at height $height: code=$code codespace=${codespace:-N/A}" + [[ -n "$raw_log" ]] && echo "[SN] raw_log: $raw_log" + return 1 + fi + fi + else + # Non-JSON or "not found" cases: keep polling + # Typical texts: "tx (...) not found", RPC -32603, or empty while indexing. + : + fi + + sleep "$interval" + done + + echo "[SN] Timeout: tx $txhash not found/committed after ${timeout}s." + echo "[SN] Hints: ensure RPC reachable (set \$NODE), and node is not lagging." + return 2 } # Get current block height (integer), 0 if unknown current_height() { - curl -sf "${LUMERA_RPC_ADDR}/status" \ - | jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null \ - | awk '{print ($1 ~ /^[0-9]+$/) ? $1 : 0}' + curl -sf "${LUMERA_RPC_ADDR}/status" | + jq -r '.result.sync_info.latest_block_height // "0"' 2>/dev/null | + awk '{print ($1 ~ /^[0-9]+$/) ? $1 : 0}' } # Wait until height >= target (with timeout) wait_for_height_at_least() { - local target="$1" - local retries="${2:-180}" # ~180s - local delay="${3:-1}" - - echo "[SN] Waiting for block height >= ${target} ..." - for ((i=0; i= target )); then - echo "[SN] Height is ${h} (>= ${target}) — OK." - return 0 - fi - sleep "$delay" - done - echo "[SN] Timeout waiting for height >= ${target}." - return 1 + local target="$1" + local retries="${2:-180}" # ~180s + local delay="${3:-1}" + + echo "[SN] Waiting for block height >= ${target} ..." + for ((i = 0; i < retries; i++)); do + local h + h="$(current_height)" + if ((h >= target)); then + echo "[SN] Height is ${h} (>= ${target}) — OK." + return 0 + fi + sleep "$delay" + done + echo "[SN] Timeout waiting for height >= ${target}." 
+ return 1 } # Wait for N new blocks from the current height (default 5) wait_for_n_blocks() { - local n="${1:-5}" - local start - start="$(current_height)" - local target=$(( start + n )) - # If the chain hasn't started yet (start==0), still use +n (so target=n) - (( target < n )) && target="$n" - wait_for_height_at_least "$target" + local n="${1:-5}" + local start + start="$(current_height)" + local target=$((start + n)) + # If the chain hasn't started yet (start==0), still use +n (so target=n) + ((target < n)) && target="$n" + wait_for_height_at_least "$target" } wait_for_lumera() { - local rpc="${LUMERA_RPC_ADDR}/status" - echo "[SN] Waiting for lumerad RPC at ${rpc}..." - # Try up to 180s, 1s interval - for i in $(seq 1 180); do - if curl -sf "$rpc" >/dev/null 2>&1; then - echo "[SN] lumerad RPC is up." - return 0 - fi - sleep 1 - done - echo "[SN] lumerad RPC did not become ready in time." - return 1 + local rpc="${LUMERA_RPC_ADDR}/status" + echo "[SN] Waiting for lumerad RPC at ${rpc}..." + # Try up to 180s, 1s interval + for i in $(seq 1 180); do + if curl -sf "$rpc" >/dev/null 2>&1; then + echo "[SN] lumerad RPC is up." + return 0 + fi + sleep 1 + done + echo "[SN] lumerad RPC did not become ready in time." + return 1 } start_supernode() { - # Ensure only one supernode process runs - if pgrep -x ${SN} >/dev/null; then - echo "[SN] Supernode already running, skipping start." - else - echo "[SN] Waiting for at least one new block before starting supernode..." - wait_for_n_blocks 1 || { echo "[SN] Chain not progressing; cannot start supernode."; return 1; } - echo "[SN] Starting supernode..." - export P2P_USE_EXTERNAL_IP=false - run ${SN} start -d "$SN_BASEDIR" >"$SN_LOG" 2>&1 & - echo "[SN] Supernode started on ${SN_ENDPOINT}, logging to $SN_LOG" - fi + # Ensure only one supernode process runs + if pgrep -x ${SN} >/dev/null; then + echo "[SN] Supernode already running, skipping start." + else + echo "[SN] Waiting for at least one new block before starting supernode..." + wait_for_n_blocks 1 || { + echo "[SN] Chain not progressing; cannot start supernode." + return 1 + } + echo "[SN] Starting supernode..." + export P2P_USE_EXTERNAL_IP=false + run ${SN} start -d "$SN_BASEDIR" >"$SN_LOG" 2>&1 & + echo "[SN] Supernode started on ${SN_ENDPOINT}, logging to $SN_LOG" + fi } stop_supernode_if_running() { - if pgrep -x ${SN} >/dev/null; then - echo "[SN] Stopping supernode..." - run ${SN} stop -d "$SN_BASEDIR" >"$SN_LOG" 2>&1 & - echo "[SN] Supernode stopped." - else - echo "[SN] Supernode is not running." - fi + if pgrep -x ${SN} >/dev/null; then + echo "[SN] Stopping supernode..." + run ${SN} stop -d "$SN_BASEDIR" >"$SN_LOG" 2>&1 & + echo "[SN] Supernode stopped." + else + echo "[SN] Supernode is not running." + fi } install_supernode_binary() { - echo "[SN] Optional install: checking binaries at $SN_BIN_SRC or $SN_BIN_SRC_ALT" - - # 1) Pick source: prefer SN_BIN_SRC, else fallback to SN_BIN_SRC_ALT - local src="" - if [ -f "$SN_BIN_SRC" ]; then - src="$SN_BIN_SRC" - elif [ -f "$SN_BIN_SRC_ALT" ]; then - src="$SN_BIN_SRC_ALT" - else - echo "[SN] supernode binary not found in either location; skipping." - exit 0 - fi - echo "[SN] Using source: $src" - - # 2) Install to fixed destination name: $SN_BIN_DST (/usr/local/bin/supernode-linux-amd64) - if [ -f "$SN_BIN_DST" ]; then - if cmp -s "$src" "$SN_BIN_DST"; then - echo "[SN] supernode binary already installed and up-to-date." - else - echo "[SN] supernode binary is outdated; updating." 
- run cp -f "$src" "$SN_BIN_DST" - chmod +x "$SN_BIN_DST" - fi - else - echo "[SN] Installing supernode binary..." - run cp -f "$src" "$SN_BIN_DST" - chmod +x "$SN_BIN_DST" - fi - - # 3) Ensure /usr/local/bin/supernode -> supernode-linux-amd64 symlink - local link="/usr/local/bin/supernode" - if [ -e "$link" ] && [ ! -L "$link" ]; then - echo "[SN] Found regular file at $link; removing to create symlink." - rm -f "$link" - fi - - # Create/update symlink; ensure it points to supernode-linux-amd64 - ( - cd /usr/local/bin || exit 1 - if [ -L "supernode" ]; then - current_target="$(readlink supernode)" - if [ "$current_target" != "${SN}" ]; then - echo "[SN] Updating symlink supernode -> ${SN}" - ln -sfn "${SN}" "supernode" - else - echo "[SN] Symlink supernode already points to ${SN}" - fi - else - echo "[SN] Creating symlink supernode -> ${SN}" - ln -sfn "${SN}" "supernode" - fi - ) + echo "[SN] Optional install: checking binaries at $SN_BIN_SRC or $SN_BIN_SRC_ALT" + + # 1) Pick source: prefer SN_BIN_SRC, else fallback to SN_BIN_SRC_ALT + local src="" + if [ -f "$SN_BIN_SRC" ]; then + src="$SN_BIN_SRC" + elif [ -f "$SN_BIN_SRC_ALT" ]; then + src="$SN_BIN_SRC_ALT" + else + echo "[SN] supernode binary not found in either location; skipping." + exit 0 + fi + echo "[SN] Using source: $src" + + # 2) Install to fixed destination name: $SN_BIN_DST (/usr/local/bin/supernode-linux-amd64) + if [ -f "$SN_BIN_DST" ]; then + if cmp -s "$src" "$SN_BIN_DST"; then + echo "[SN] supernode binary already installed and up-to-date." + else + echo "[SN] supernode binary is outdated; updating." + run cp -f "$src" "$SN_BIN_DST" + chmod +x "$SN_BIN_DST" + fi + else + echo "[SN] Installing supernode binary..." + run cp -f "$src" "$SN_BIN_DST" + chmod +x "$SN_BIN_DST" + fi + + # 3) Ensure /usr/local/bin/supernode -> supernode-linux-amd64 symlink + local link="/usr/local/bin/supernode" + if [ -e "$link" ] && [ ! -L "$link" ]; then + echo "[SN] Found regular file at $link; removing to create symlink." + rm -f "$link" + fi + + # Create/update symlink; ensure it points to supernode-linux-amd64 + ( + cd /usr/local/bin || exit 1 + if [ -L "supernode" ]; then + current_target="$(readlink supernode)" + if [ "$current_target" != "${SN}" ]; then + echo "[SN] Updating symlink supernode -> ${SN}" + ln -sfn "${SN}" "supernode" + else + echo "[SN] Symlink supernode already points to ${SN}" + fi + else + echo "[SN] Creating symlink supernode -> ${SN}" + ln -sfn "${SN}" "supernode" + fi + ) } register_supernode() { - if is_sn_registered_active; then - echo "[SN] Supernode is already registered and in ACTIVE state; no action needed." - else - echo "[SN] Registering supernode..." - REG_TX_JSON="$(run_capture $DAEMON tx supernode register-supernode \ - "$VALOPER_ADDR" "$SN_ENDPOINT" "$SN_ADDR" \ - --from "$KEY_NAME" --chain-id "$CHAIN_ID" --keyring-backend "$KEYRING_BACKEND" \ - --gas auto --gas-adjustment 1.3 --fees "5000${DENOM}" -y --output json)" - REG_TX_HASH="$(echo "$REG_TX_JSON" | jq -r .txhash)" - if [[ -n "$REG_TX_HASH" && "$REG_TX_HASH" != "null" ]]; then - wait_for_tx "$REG_TX_HASH" || { echo "[SN] Registration tx failed/timeout"; exit 1; } - else - echo "[SN] Failed to obtain txhash for registration"; exit 1 - fi - if is_sn_registered_active; then - echo "[SN] Supernode registered successfully and is now ACTIVE." - else - echo "[SN] Supernode registration failed or not in ACTIVE state." - exit 1 - fi - fi + if is_sn_registered_active; then + echo "[SN] Supernode is already registered and in ACTIVE state; no action needed." 
+ elif is_sn_blocked_state; then + echo "[SN] Supernode is in ${SN_LAST_STATE} state; skipping registration." + else + echo "[SN] Registering supernode..." + REG_TX_JSON="$(run_capture $DAEMON tx supernode register-supernode \ + "$VALOPER_ADDR" "$SN_ENDPOINT" "$SN_ADDR" \ + --from "$KEY_NAME" --chain-id "$CHAIN_ID" --keyring-backend "$KEYRING_BACKEND" \ + --gas auto --gas-adjustment 1.3 --fees "5000${DENOM}" -y --output json)" + REG_TX_HASH="$(echo "$REG_TX_JSON" | jq -r .txhash)" + if [[ -n "$REG_TX_HASH" && "$REG_TX_HASH" != "null" ]]; then + wait_for_tx "$REG_TX_HASH" || { + echo "[SN] Registration tx failed/timeout" + exit 1 + } + else + echo "[SN] Failed to obtain txhash for registration" + exit 1 + fi + if is_sn_registered_active; then + echo "[SN] Supernode registered successfully and is now ACTIVE." + else + echo "[SN] Supernode registration failed or not in ACTIVE state." + exit 1 + fi + fi } configure_supernode_p2p_listen() { - local ip_addr="$1" - local config_file="$SN_CONFIG" + local ip_addr="$1" + local config_file="$SN_CONFIG" - if [ -z "$ip_addr" ]; then - echo "[SN] No IP address provided!" - return 1 - fi - if [ ! -f "$config_file" ]; then - echo "[SN] config.yml not found at $config_file" - return 1 - fi + if [ -z "$ip_addr" ]; then + echo "[SN] No IP address provided!" + return 1 + fi + if [ ! -f "$config_file" ]; then + echo "[SN] config.yml not found at $config_file" + return 1 + fi - echo "[SN] Setting p2p.listen_address: ${ip_addr} in $config_file" + echo "[SN] Setting p2p.listen_address: ${ip_addr} in $config_file" - # 1. Remove any existing listen_address lines inside the p2p: block - sed -i '/^[[:space:]]*p2p:[[:space:]]*$/,/^[^[:space:]]/ { /^[[:space:]]*listen_address:[[:space:]]*/d }' "$config_file" + # 1. Remove any existing listen_address lines inside the p2p: block + sed -i '/^[[:space:]]*p2p:[[:space:]]*$/,/^[^[:space:]]/ { /^[[:space:]]*listen_address:[[:space:]]*/d }' "$config_file" - # 2. Insert the new listen_address line after the "p2p:" line with 4-space indent - sed -i '/^[[:space:]]*p2p:[[:space:]]*$/a\ listen_address: '"${ip_addr}" "$config_file" + # 2. Insert the new listen_address line after the "p2p:" line with 4-space indent + sed -i '/^[[:space:]]*p2p:[[:space:]]*$/a\ listen_address: '"${ip_addr}" "$config_file" } configure_supernode() { - echo "[SN] Ensuring SN key exists..." - mkdir -p "$SN_BASEDIR" "${NODE_STATUS_DIR}" - if [ -f "$SN_MNEMONIC_FILE" ]; then - if ! 
run $DAEMON keys show "$SN_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" >/dev/null 2>&1; then - (cat "$SN_MNEMONIC_FILE") | run $DAEMON keys add "$SN_KEY_NAME" --recover --keyring-backend "$KEYRING_BACKEND" >/dev/null - fi - else - run $DAEMON keys delete "$SN_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" -y || true - MNEMONIC_JSON="$(run_capture $DAEMON keys add "$SN_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" --output json)" - echo "[SN] Generated new supernode key: $MNEMONIC_JSON" - echo "$MNEMONIC_JSON" | jq -r .mnemonic > "$SN_MNEMONIC_FILE" - fi - - SN_ADDR="$(run_capture $DAEMON keys show "$SN_KEY_NAME" -a --keyring-backend "$KEYRING_BACKEND")" - echo "[SN] Supernode address: $SN_ADDR" - echo "$SN_ADDR" > "$SN_ADDR_FILE" - VAL_ADDR="$(run_capture $DAEMON keys show "$KEY_NAME" -a --keyring-backend "$KEYRING_BACKEND")" - echo "[SN] Validator address: $VAL_ADDR" - VALOPER_ADDR="$(run_capture $DAEMON keys show "$KEY_NAME" --bech val -a --keyring-backend "$KEYRING_BACKEND")" - echo "[SN] Validator operator address: $VALOPER_ADDR" - - GENESIS_ADDR="$(cat ${NODE_STATUS_DIR}/genesis-address)" - echo "[SN] Genesis address: $GENESIS_ADDR" - - SN_ENDPOINT="${IP_ADDR}:${SN_PORT}" - - echo "[SN] Init config if missing..." - if [ ! -f "$SN_BASEDIR/config.yml" ]; then - run ${SN} init -y --force \ - --basedir "$SN_BASEDIR" \ - --keyring-backend "$KEYRING_BACKEND" \ - --key-name "$SN_KEY_NAME" \ - --supernode-addr "$IP_ADDR" \ - --supernode-port "$SN_PORT" \ - --recover \ - --mnemonic "$(cat "$SN_MNEMONIC_FILE")" \ - --lumera-grpc "localhost:${LUMERA_GRPC_PORT}" \ - --chain-id "$CHAIN_ID" - - printf "[SN] Generated config\n%s\n" "$(cat "$SN_CONFIG")" - configure_supernode_p2p_listen "${IP_ADDR}" - fi - - echo "[SN] Checking SN balance for $SN_ADDR..." - BAL_JSON="$(run_capture $DAEMON q bank balances "$SN_ADDR" --output json)" - echo "[SN] Balance output: $BAL_JSON" - BAL="$( - echo "$BAL_JSON" \ - | jq -r --arg denom "$DENOM" ' + echo "[SN] Ensuring SN key exists..." + mkdir -p "$SN_BASEDIR" "${NODE_STATUS_DIR}" + if [ -f "$SN_MNEMONIC_FILE" ]; then + if ! run $DAEMON keys show "$SN_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" >/dev/null 2>&1; then + (cat "$SN_MNEMONIC_FILE") | run $DAEMON keys add "$SN_KEY_NAME" --recover --keyring-backend "$KEYRING_BACKEND" >/dev/null + fi + else + run $DAEMON keys delete "$SN_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" -y || true + MNEMONIC_JSON="$(run_capture $DAEMON keys add "$SN_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" --output json)" + echo "[SN] Generated new supernode key: $MNEMONIC_JSON" + echo "$MNEMONIC_JSON" | jq -r .mnemonic >"$SN_MNEMONIC_FILE" + fi + + SN_ADDR="$(run_capture $DAEMON keys show "$SN_KEY_NAME" -a --keyring-backend "$KEYRING_BACKEND")" + echo "[SN] Supernode address: $SN_ADDR" + echo "$SN_ADDR" >"$SN_ADDR_FILE" + VAL_ADDR="$(run_capture $DAEMON keys show "$KEY_NAME" -a --keyring-backend "$KEYRING_BACKEND")" + echo "[SN] Validator address: $VAL_ADDR" + VALOPER_ADDR="$(run_capture $DAEMON keys show "$KEY_NAME" --bech val -a --keyring-backend "$KEYRING_BACKEND")" + echo "[SN] Validator operator address: $VALOPER_ADDR" + + GENESIS_ADDR="$(cat ${NODE_STATUS_DIR}/genesis-address)" + echo "[SN] Genesis address: $GENESIS_ADDR" + + SN_ENDPOINT="${IP_ADDR}:${SN_PORT}" + + echo "[SN] Init config if missing..." + if [ ! 
-f "$SN_BASEDIR/config.yml" ]; then + run ${SN} init -y --force \ + --basedir "$SN_BASEDIR" \ + --keyring-backend "$KEYRING_BACKEND" \ + --key-name "$SN_KEY_NAME" \ + --supernode-addr "$IP_ADDR" \ + --supernode-port "$SN_PORT" \ + --recover \ + --mnemonic "$(cat "$SN_MNEMONIC_FILE")" \ + --lumera-grpc "localhost:${LUMERA_GRPC_PORT}" \ + --chain-id "$CHAIN_ID" + + printf "[SN] Generated config\n%s\n" "$(cat "$SN_CONFIG")" + configure_supernode_p2p_listen "${IP_ADDR}" + fi + + echo "[SN] Checking SN balance for $SN_ADDR..." + BAL_JSON="$(run_capture $DAEMON q bank balances "$SN_ADDR" --output json)" + echo "[SN] Balance output: $BAL_JSON" + BAL="$( + echo "$BAL_JSON" | + jq -r --arg denom "$DENOM" ' ([.balances[]? | select(.denom == $denom) | .amount] | first) // "0" ' - )" - echo "[SN] Current SN balance: $BAL" - # Normalize and compare numerically - [[ -z "$BAL" ]] && BAL="0" - if (( BAL < 1000000 )); then - echo "[SN] Funding Supernode account..." - SEND_TX_JSON="$(run_capture $DAEMON tx bank send "$GENESIS_ADDR" "$SN_ADDR" "10000000${DENOM}" \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING_BACKEND" \ - --gas auto \ - --gas-adjustment 1.3 \ - --fees "3000$DENOM" \ - --output json --yes)" - echo "[SN] Send tx output: $SEND_TX_JSON" - SEND_TX_HASH="$(echo "$SEND_TX_JSON" | jq -r .txhash)" - if [ -n "$SEND_TX_HASH" ] && [ "$SEND_TX_HASH" != "null" ]; then - if ! wait_for_tx "$SEND_TX_HASH"; then - echo "[SN] Funding tx failed or not confirmed. Exiting." - exit 1 - fi - else - echo "[SN] Failed to get TXHASH for funding transaction." - exit 1 - fi - fi + )" + echo "[SN] Current SN balance: $BAL" + # Normalize and compare numerically + [[ -z "$BAL" ]] && BAL="0" + if ((BAL < 1000000)); then + echo "[SN] Funding Supernode account..." + SEND_TX_JSON="$(run_capture $DAEMON tx bank send "$GENESIS_ADDR" "$SN_ADDR" "10000000${DENOM}" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING_BACKEND" \ + --gas auto \ + --gas-adjustment 1.3 \ + --fees "3000$DENOM" \ + --output json --yes)" + echo "[SN] Send tx output: $SEND_TX_JSON" + SEND_TX_HASH="$(echo "$SEND_TX_JSON" | jq -r .txhash)" + if [ -n "$SEND_TX_HASH" ] && [ "$SEND_TX_HASH" != "null" ]; then + if ! wait_for_tx "$SEND_TX_HASH"; then + echo "[SN] Funding tx failed or not confirmed. Exiting." + exit 1 + fi + else + echo "[SN] Failed to get TXHASH for funding transaction." + exit 1 + fi + fi } # Returns 0 if registered to SN_ADDR and last state is SUPERNODE_STATE_ACTIVE, else 1 is_sn_registered_active() { - local info + local info - echo "[SN] Checking if supernode is registered..." - info="$(run_capture $DAEMON q supernode get-supernode "$VALOPER_ADDR" --output json)" - echo "[SN] Supernode info output: $info" + echo "[SN] Checking if supernode is registered..." 
+ info="$(run_capture $DAEMON q supernode get-supernode "$VALOPER_ADDR" --output json)" + echo "[SN] Supernode info output: $info" - # Extract the supernode account (empty string if missing) - local acct - acct="$(echo "$info" | jq -r '.supernode.supernode_account // ""')" + # Extract the supernode account (empty string if missing) + local acct + acct="$(echo "$info" | jq -r '.supernode.supernode_account // ""')" - # Extract the last state by highest height (empty string if none) - # sort_by is safe even if heights are strings - local last_state - last_state="$(echo "$info" | jq -r ' + # Extract the last state by highest height (empty string if none) + # sort_by is safe even if heights are strings + local last_state + last_state="$(echo "$info" | jq -r ' (.supernode.states // []) | sort_by(.height | tonumber) | (last // empty) | .state // "" ')" - echo "[SN] Supernode: account='${acct}', last_state='${last_state}'" - if [[ -n "$acct" && "$acct" == "$SN_ADDR" && "$last_state" == "SUPERNODE_STATE_ACTIVE" ]]; then - return 0 - fi + echo "[SN] Supernode: account='${acct}', last_state='${last_state}'" + if [[ -n "$acct" && "$acct" == "$SN_ADDR" && "$last_state" == "SUPERNODE_STATE_ACTIVE" ]]; then + return 0 + fi - echo "[SN] Status: not active and/or account mismatch" - return 1 + echo "[SN] Status: not active and/or account mismatch" + return 1 +} + +# Returns 0 if last state is a non-registrable state, else 1 +is_sn_blocked_state() { + local info + SN_LAST_STATE="" + + echo "[SN] Checking if supernode is postponed..." + info="$(run_capture $DAEMON q supernode get-supernode "$VALOPER_ADDR" --output json)" + echo "[SN] Supernode info output: $info" + + local acct + acct="$(echo "$info" | jq -r '.supernode.supernode_account // ""')" + + local last_state + last_state="$(echo "$info" | jq -r ' + (.supernode.states // []) + | sort_by(.height | tonumber) + | (last // empty) + | .state // "" + ')" + + SN_LAST_STATE="$last_state" + echo "[SN] Supernode: account='${acct}', last_state='${last_state}'" + case "$last_state" in + SUPERNODE_STATE_POSTPONED | SUPERNODE_STATE_DISABLED | SUPERNODE_STATE_STOPPED | SUPERNODE_STATE_PENALIZED) + return 0 + ;; + *) + return 1 + ;; + esac } install_sncli_binary() { - echo "[SNCLI] Optional install: checking binaries at $SNCLI_BIN_SRC" - if [ -f "$SNCLI_BIN_SRC" ]; then - if [ -f "$SNCLI_BIN_DST" ]; then - if [ "$SNCLI_BIN_SRC" -nt "$SNCLI_BIN_DST" ] || ! cmp -s "$SNCLI_BIN_SRC" "$SNCLI_BIN_DST"; then - echo "[SNCLI] sncli binary is outdated; updating." - run cp -f "$SNCLI_BIN_SRC" "$SNCLI_BIN_DST" - chmod +x "$SNCLI_BIN_DST" - else - echo "[SNCLI] sncli binary already installed and up-to-date." - fi - else - echo "[SNCLI] Installing sncli binary..." - run cp -f "$SNCLI_BIN_SRC" "$SNCLI_BIN_DST" - chmod +x "$SNCLI_BIN_DST" - fi - else - echo "[SNCLI] sncli binary not found at $SNCLI_BIN_SRC; skipping." - return 0 - fi + echo "[SNCLI] Optional install: checking binaries at $SNCLI_BIN_SRC" + if [ -f "$SNCLI_BIN_SRC" ]; then + if [ -f "$SNCLI_BIN_DST" ]; then + if [ "$SNCLI_BIN_SRC" -nt "$SNCLI_BIN_DST" ] || ! cmp -s "$SNCLI_BIN_SRC" "$SNCLI_BIN_DST"; then + echo "[SNCLI] sncli binary is outdated; updating." + run cp -f "$SNCLI_BIN_SRC" "$SNCLI_BIN_DST" + chmod +x "$SNCLI_BIN_DST" + else + echo "[SNCLI] sncli binary already installed and up-to-date." + fi + else + echo "[SNCLI] Installing sncli binary..." + run cp -f "$SNCLI_BIN_SRC" "$SNCLI_BIN_DST" + chmod +x "$SNCLI_BIN_DST" + fi + else + echo "[SNCLI] sncli binary not found at $SNCLI_BIN_SRC; skipping." 
+ return 0 + fi } configure_sncli() { - if [ ! -f "$SNCLI_BIN_DST" ]; then - echo "[SNCLI] sncli binary not found at $SNCLI_BIN_DST; skipping configuration." - return 0 - fi - - echo "[SNCLI] Configuring sncli..." - mkdir -p "$SNCLI_BASEDIR" - # Start from template if provided; otherwise ensure the file exists - if [ -f "${SNCLI_CFG_SRC}" ]; then - echo "[SNCLI] Using template ${SNCLI_CFG_SRC} -> ${SNCLI_CFG}" - cp -f "${SNCLI_CFG_SRC}" "${SNCLI_CFG}" - else - echo "[SNCLI] No sncli-config.toml template found; creating empty config at ${SNCLI_CFG}" - : > "${SNCLI_CFG}" - fi - - # Ensure sncli-account key exists - if [ -f "$SNCLI_MNEMONIC_FILE" ]; then - if ! run ${DAEMON} keys show "${SNCLI_KEY_NAME}" --keyring-backend "${KEYRING_BACKEND}" >/dev/null 2>&1; then - (cat "$SNCLI_MNEMONIC_FILE") | run $DAEMON keys add "$SNCLI_KEY_NAME" --recover --keyring-backend "$KEYRING_BACKEND" >/dev/null - fi - else - run $DAEMON keys delete "$SNCLI_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" -y || true - local mn_json - mn_json="$(run_capture $DAEMON keys add "$SNCLI_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" --output json)" - echo "[SNCLI] Generated new sncli key ${SNCLI_KEY_NAME}..." - echo "${mn_json}" | jq -r .mnemonic > "${SNCLI_MNEMONIC_FILE}" - fi - local addr - addr="$(${DAEMON} keys show "${SNCLI_KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" - echo "${addr}" > "${SNCLI_ADDR_FILE}" - echo "[SNCLI] sncli-account address: ${addr}" - - echo "[SNCLI] Checking ${SNCLI_KEY_NAME} balance for $addr..." - bal_json="$(run_capture $DAEMON q bank balances "$addr" --output json)" - echo "[SNCLI] Balance output: $bal_json" - bal="$( - echo "$bal_json" \ - | jq -r --arg denom "$DENOM" ' + if [ ! -f "$SNCLI_BIN_DST" ]; then + echo "[SNCLI] sncli binary not found at $SNCLI_BIN_DST; skipping configuration." + return 0 + fi + + echo "[SNCLI] Configuring sncli..." + mkdir -p "$SNCLI_BASEDIR" + # Start from template if provided; otherwise ensure the file exists + if [ -f "${SNCLI_CFG_SRC}" ]; then + echo "[SNCLI] Using template ${SNCLI_CFG_SRC} -> ${SNCLI_CFG}" + cp -f "${SNCLI_CFG_SRC}" "${SNCLI_CFG}" + else + echo "[SNCLI] No sncli-config.toml template found; creating empty config at ${SNCLI_CFG}" + : >"${SNCLI_CFG}" + fi + + # Ensure sncli-account key exists + if [ -f "$SNCLI_MNEMONIC_FILE" ]; then + if ! run ${DAEMON} keys show "${SNCLI_KEY_NAME}" --keyring-backend "${KEYRING_BACKEND}" >/dev/null 2>&1; then + (cat "$SNCLI_MNEMONIC_FILE") | run $DAEMON keys add "$SNCLI_KEY_NAME" --recover --keyring-backend "$KEYRING_BACKEND" >/dev/null + fi + else + run $DAEMON keys delete "$SNCLI_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" -y || true + local mn_json + mn_json="$(run_capture $DAEMON keys add "$SNCLI_KEY_NAME" --keyring-backend "$KEYRING_BACKEND" --output json)" + echo "[SNCLI] Generated new sncli key ${SNCLI_KEY_NAME}..." + echo "${mn_json}" | jq -r .mnemonic >"${SNCLI_MNEMONIC_FILE}" + fi + local addr + addr="$(${DAEMON} keys show "${SNCLI_KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" + echo "${addr}" >"${SNCLI_ADDR_FILE}" + echo "[SNCLI] sncli-account address: ${addr}" + + echo "[SNCLI] Checking ${SNCLI_KEY_NAME} balance for $addr..." + bal_json="$(run_capture $DAEMON q bank balances "$addr" --output json)" + echo "[SNCLI] Balance output: $bal_json" + bal="$( + echo "$bal_json" | + jq -r --arg denom "$DENOM" ' ([.balances[]? 
| select(.denom == $denom) | .amount] | first) // "0" ' - )" - echo "[SNCLI] Current ${SNCLI_KEY_NAME} balance: $bal" - # Normalize and compare numerically - [[ -z "$bal" ]] && bal="0" - if (( bal < ${SNCLI_MIN_AMOUNT} )); then - echo "[SNCLI] Funding ${SNCLI_KEY_NAME}..." - send_tx_json="$(run_capture $DAEMON tx bank send "$GENESIS_ADDR" "$addr" "${SNCLI_FUND_AMOUNT}${DENOM}" \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING_BACKEND" \ - --gas auto \ - --gas-adjustment 1.3 \ - --fees "3000${DENOM}" \ - --output json --yes)" - echo "[SNCLI] Send tx output: $send_tx_json" - send_tx_hash="$(echo "$send_tx_json" | jq -r .txhash)" - if [ -n "$send_tx_hash" ] && [ "$send_tx_hash" != "null" ]; then - if ! wait_for_tx "$send_tx_hash"; then - echo "[SNCLI] Funding tx failed or not confirmed. Exiting." - exit 1 - fi - else - echo "[SNCLI] Failed to get TXHASH for funding transaction." - exit 1 - fi - fi - - # --- [lumera] --- - crudini --set "${SNCLI_CFG}" lumera grpc_addr "\"localhost:${LUMERA_GRPC_PORT}\"" - crudini --set "${SNCLI_CFG}" lumera chain_id "\"${CHAIN_ID}\"" - - # --- [supernode] --- - if [ -n "${SN_ADDR:-}" ]; then - crudini --set "${SNCLI_CFG}" supernode address "\"${SN_ADDR}\"" - fi - crudini --set "${SNCLI_CFG}" supernode grpc_endpoint "\"${IP_ADDR}:${SN_PORT}\"" - crudini --set "${SNCLI_CFG}" supernode p2p_endpoint "\"${IP_ADDR}:${SN_P2P_PORT}\"" - - # --- [keyring] --- - crudini --set "${SNCLI_CFG}" keyring backend "\"${KEYRING_BACKEND}\"" - crudini --set "${SNCLI_CFG}" keyring key_name "\"${SNCLI_KEY_NAME}\"" - crudini --set "${SNCLI_CFG}" keyring local_address "\"$addr\"" + )" + echo "[SNCLI] Current ${SNCLI_KEY_NAME} balance: $bal" + # Normalize and compare numerically + [[ -z "$bal" ]] && bal="0" + if ((bal < ${SNCLI_MIN_AMOUNT})); then + echo "[SNCLI] Funding ${SNCLI_KEY_NAME}..." + send_tx_json="$(run_capture $DAEMON tx bank send "$GENESIS_ADDR" "$addr" "${SNCLI_FUND_AMOUNT}${DENOM}" \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING_BACKEND" \ + --gas auto \ + --gas-adjustment 1.3 \ + --fees "3000${DENOM}" \ + --output json --yes)" + echo "[SNCLI] Send tx output: $send_tx_json" + send_tx_hash="$(echo "$send_tx_json" | jq -r .txhash)" + if [ -n "$send_tx_hash" ] && [ "$send_tx_hash" != "null" ]; then + if ! wait_for_tx "$send_tx_hash"; then + echo "[SNCLI] Funding tx failed or not confirmed. Exiting." + exit 1 + fi + else + echo "[SNCLI] Failed to get TXHASH for funding transaction." 
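+			# Without a txhash the funding cannot be confirmed; abort.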
+ exit 1 + fi + fi + + # --- [lumera] --- + crudini --set "${SNCLI_CFG}" lumera grpc_addr "\"localhost:${LUMERA_GRPC_PORT}\"" + crudini --set "${SNCLI_CFG}" lumera chain_id "\"${CHAIN_ID}\"" + + # --- [supernode] --- + if [ -n "${SN_ADDR:-}" ]; then + crudini --set "${SNCLI_CFG}" supernode address "\"${SN_ADDR}\"" + fi + crudini --set "${SNCLI_CFG}" supernode grpc_endpoint "\"${IP_ADDR}:${SN_PORT}\"" + crudini --set "${SNCLI_CFG}" supernode p2p_endpoint "\"${IP_ADDR}:${SN_P2P_PORT}\"" + + # --- [keyring] --- + crudini --set "${SNCLI_CFG}" keyring backend "\"${KEYRING_BACKEND}\"" + crudini --set "${SNCLI_CFG}" keyring key_name "\"${SNCLI_KEY_NAME}\"" + crudini --set "${SNCLI_CFG}" keyring local_address "\"$addr\"" } @@ -570,9 +612,12 @@ stop_supernode_if_running install_supernode_binary install_sncli_binary # Ensure Lumera RPC is up before any chain ops -wait_for_lumera || exit 0 # don't fail the container if chain isn't ready; just skip SN +wait_for_lumera || exit 0 # don't fail the container if chain isn't ready; just skip SN # Wait for at least 5 blocks -wait_for_height_at_least 5 || { echo "[SN] Lumera chain not producing blocks in time; exiting."; exit 1; } +wait_for_height_at_least 5 || { + echo "[SN] Lumera chain not producing blocks in time; exiting." + exit 1 +} configure_supernode register_supernode diff --git a/devnet/scripts/upgrade-binaries.sh b/devnet/scripts/upgrade-binaries.sh index 2c9be30a..61be77cd 100755 --- a/devnet/scripts/upgrade-binaries.sh +++ b/devnet/scripts/upgrade-binaries.sh @@ -2,14 +2,14 @@ set -euo pipefail if [[ $# -ne 1 ]]; then - echo "Usage: $0 " >&2 - exit 1 + echo "Usage: $0 " >&2 + exit 1 fi BINARIES_DIR="$1" if [[ ! -d "${BINARIES_DIR}" ]]; then - echo "Binaries directory not found: ${BINARIES_DIR}" >&2 - exit 1 + echo "Binaries directory not found: ${BINARIES_DIR}" >&2 + exit 1 fi BINARIES_DIR="$(cd "${BINARIES_DIR}" && pwd)" @@ -18,8 +18,8 @@ DEVNET_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" COMPOSE_FILE="${DEVNET_ROOT}/docker-compose.yml" if [[ ! -f "${COMPOSE_FILE}" ]]; then - echo "docker-compose.yml not found at ${COMPOSE_FILE}" >&2 - exit 1 + echo "docker-compose.yml not found at ${COMPOSE_FILE}" >&2 + exit 1 fi DEVNET_RUNTIME_DIR="${DEVNET_DIR:-/tmp/lumera-devnet-1}" @@ -33,20 +33,20 @@ mkdir -p "${RELEASE_DIR}" shopt -s nullglob copied=0 for file in "${BINARIES_DIR}"/*; do - if [[ -f "${file}" ]]; then - cp -Sf "${file}" "${RELEASE_DIR}/" - copied=1 - fi + if [[ -f "${file}" ]]; then + cp -Sf "${file}" "${RELEASE_DIR}/" + copied=1 + fi done shopt -u nullglob if [[ "${copied}" -eq 0 ]]; then - echo "No files were copied from ${BINARIES_DIR}" >&2 - exit 1 + echo "No files were copied from ${BINARIES_DIR}" >&2 + exit 1 fi if [[ -f "${RELEASE_DIR}/lumerad" ]]; then - chmod +x "${RELEASE_DIR}/lumerad" + chmod +x "${RELEASE_DIR}/lumerad" fi echo "Restarting devnet containers..." diff --git a/devnet/scripts/upgrade.sh b/devnet/scripts/upgrade.sh index f8ac2883..7fc11321 100755 --- a/devnet/scripts/upgrade.sh +++ b/devnet/scripts/upgrade.sh @@ -2,8 +2,8 @@ set -euo pipefail if [[ $# -ne 3 ]]; then - echo "Usage: $0 " - exit 1 + echo "Usage: $0 " + exit 1 fi RELEASE_NAME="$1" @@ -17,34 +17,34 @@ SERVICE="${SERVICE_NAME:-supernova_validator_1}" AUTO_HEIGHT_OFFSET=100 if [[ ! -f "${COMPOSE_FILE}" ]]; then - echo "docker-compose.yml not found at ${COMPOSE_FILE}" >&2 - exit 1 + echo "docker-compose.yml not found at ${COMPOSE_FILE}" >&2 + exit 1 fi if [[ "${REQUESTED_HEIGHT}" == "auto-height" ]]; then - echo "Auto height requested. 
Determining current chain height from ${SERVICE}..." - CURRENT_HEIGHT="$(docker compose -f "${COMPOSE_FILE}" exec -T "${SERVICE}" \ - lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // empty' 2>/dev/null || true)" + echo "Auto height requested. Determining current chain height from ${SERVICE}..." + CURRENT_HEIGHT="$(docker compose -f "${COMPOSE_FILE}" exec -T "${SERVICE}" \ + lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // empty' 2>/dev/null || true)" - if ! [[ "${CURRENT_HEIGHT}" =~ ^[0-9]+$ ]]; then - echo "Failed to determine current block height for service ${SERVICE}." >&2 - exit 1 - fi + if ! [[ "${CURRENT_HEIGHT}" =~ ^[0-9]+$ ]]; then + echo "Failed to determine current block height for service ${SERVICE}." >&2 + exit 1 + fi - UPGRADE_HEIGHT=$((CURRENT_HEIGHT + AUTO_HEIGHT_OFFSET)) - echo "Current height is ${CURRENT_HEIGHT}. Scheduling upgrade at height ${UPGRADE_HEIGHT}." + UPGRADE_HEIGHT=$((CURRENT_HEIGHT + AUTO_HEIGHT_OFFSET)) + echo "Current height is ${CURRENT_HEIGHT}. Scheduling upgrade at height ${UPGRADE_HEIGHT}." else - UPGRADE_HEIGHT="${REQUESTED_HEIGHT}" + UPGRADE_HEIGHT="${REQUESTED_HEIGHT}" fi if ! [[ "${UPGRADE_HEIGHT}" =~ ^[0-9]+$ ]]; then - echo "Upgrade height must be a positive integer. Got: ${UPGRADE_HEIGHT}" >&2 - exit 1 + echo "Upgrade height must be a positive integer. Got: ${UPGRADE_HEIGHT}" >&2 + exit 1 fi if [[ ! -d "${BINARIES_DIR}" ]]; then - echo "Binaries directory not found: ${BINARIES_DIR}" >&2 - exit 1 + echo "Binaries directory not found: ${BINARIES_DIR}" >&2 + exit 1 fi BINARIES_DIR="$(cd "${BINARIES_DIR}" && pwd)" @@ -54,7 +54,7 @@ echo "Submitting software upgrade proposal for ${RELEASE_NAME} at height ${UPGRA echo "Retrieving proposal ID..." PROPOSAL_ID="$(docker compose -f "${COMPOSE_FILE}" exec -T supernova_validator_1 \ - lumerad query gov proposals --output json | jq -r --arg name "${RELEASE_NAME}" ' + lumerad query gov proposals --output json | jq -r --arg name "${RELEASE_NAME}" ' .proposals | map(select(.messages[]?.value.plan.name == $name)) | sort_by(.id | tonumber) @@ -63,17 +63,47 @@ PROPOSAL_ID="$(docker compose -f "${COMPOSE_FILE}" exec -T supernova_validator_1 ')" if [[ -z "${PROPOSAL_ID}" ]]; then - echo "Failed to determine proposal ID for ${RELEASE_NAME}" >&2 - exit 1 + echo "Failed to determine proposal ID for ${RELEASE_NAME}" >&2 + exit 1 fi echo "Found proposal ID: ${PROPOSAL_ID}" -echo "Casting votes for all validators..." -"${SCRIPT_DIR}/vote-all.sh" "${PROPOSAL_ID}" +# Determine proposal status and planned height +PROPOSAL_JSON="$(docker compose -f "${COMPOSE_FILE}" exec -T "${SERVICE}" \ + lumerad query gov proposal "${PROPOSAL_ID}" --output json 2>/dev/null || true)" +PROPOSAL_STATUS="" +PROPOSAL_HEIGHT="" +if [[ -n "${PROPOSAL_JSON}" ]]; then + PROPOSAL_STATUS="$(echo "${PROPOSAL_JSON}" | jq -r '.proposal.status // .status // empty' 2>/dev/null || true)" + PROPOSAL_HEIGHT="$(echo "${PROPOSAL_JSON}" | jq -r '.proposal.messages[]?.value.plan.height // empty' 2>/dev/null | head -n 1 || true)" +fi + +if [[ -n "${PROPOSAL_HEIGHT}" && "${PROPOSAL_HEIGHT}" =~ ^[0-9]+$ ]]; then + if [[ "${PROPOSAL_HEIGHT}" != "${UPGRADE_HEIGHT}" ]]; then + echo "⚠️ Proposal height (${PROPOSAL_HEIGHT}) differs from requested height (${UPGRADE_HEIGHT})." + echo "Using proposal height for wait/upgrade." + UPGRADE_HEIGHT="${PROPOSAL_HEIGHT}" + fi +else + echo "⚠️ Could not determine proposal height; continuing with ${UPGRADE_HEIGHT}." 
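+	# Fall back to the requested/auto-computed height when the plan cannot be inspected.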
+fi
+
+if [[ "${PROPOSAL_STATUS}" == "PROPOSAL_STATUS_VOTING_PERIOD" ]]; then
+	echo "Casting votes for all validators..."
+	"${SCRIPT_DIR}/vote-all.sh" "${PROPOSAL_ID}"
+else
+	echo "ℹ️ Skipping voting; proposal status is ${PROPOSAL_STATUS:-unknown}."
+fi
 
 echo "Waiting for chain to reach height ${UPGRADE_HEIGHT}..."
-"${SCRIPT_DIR}/wait-for-height.sh" "${UPGRADE_HEIGHT}"
+CURRENT_HEIGHT_NOW="$(docker compose -f "${COMPOSE_FILE}" exec -T "${SERVICE}" \
+	lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // empty' 2>/dev/null || true)"
+if [[ "${CURRENT_HEIGHT_NOW}" =~ ^[0-9]+$ ]] && ((CURRENT_HEIGHT_NOW >= UPGRADE_HEIGHT)); then
+	echo "ℹ️ Current height ${CURRENT_HEIGHT_NOW} is already at or above upgrade height ${UPGRADE_HEIGHT}; skipping wait."
+else
+	"${SCRIPT_DIR}/wait-for-height.sh" "${UPGRADE_HEIGHT}"
+fi
 
 echo "Upgrading binaries from ${BINARIES_DIR}..."
 "${SCRIPT_DIR}/upgrade-binaries.sh" "${BINARIES_DIR}"
diff --git a/devnet/scripts/validator-setup.sh b/devnet/scripts/validator-setup.sh
index 51896289..2f15a7c6 100755
--- a/devnet/scripts/validator-setup.sh
+++ b/devnet/scripts/validator-setup.sh
@@ -32,21 +32,23 @@ HERMES_STATUS_DIR="${STATUS_DIR}/hermes"
 HERMES_RELAYER_KEY="${HERMES_RELAYER_KEY:-hermes-relayer}"
 HERMES_RELAYER_FILE_NAME="${HERMES_RELAYER_KEY}"
 if [[ "${HERMES_RELAYER_FILE_NAME}" != lumera-* ]]; then
-    HERMES_RELAYER_FILE_NAME="lumera-${HERMES_RELAYER_FILE_NAME}"
+	HERMES_RELAYER_FILE_NAME="lumera-${HERMES_RELAYER_FILE_NAME}"
 fi
 HERMES_RELAYER_MNEMONIC_FILE="${HERMES_SHARED_DIR}/${HERMES_RELAYER_FILE_NAME}.mnemonic"
 HERMES_RELAYER_ADDR_FILE="${HERMES_SHARED_DIR}/${HERMES_RELAYER_FILE_NAME}.address"
 HERMES_RELAYER_GENESIS_AMOUNT="${HERMES_RELAYER_GENESIS_AMOUNT:-10000000}" # in bond denom units
 
 # ----- read config from config.json -----
-if [ ! command -v jq >/dev/null 2>&1 ]; then
-    echo "[CONFIGURE] jq is missing"
+if ! command -v jq >/dev/null 2>&1; then
+	echo "[SETUP] jq is missing"
 fi
 if [ ! -f "${CFG_CHAIN}" ]; then
-    echo "[SETUP] Missing ${CFG_CHAIN}"; exit 1
+    echo "[SETUP] Missing ${CFG_CHAIN}"
+    exit 1
 fi
 if [ !
-f "${CFG_VALS}" ]; then - echo "[SETUP] Missing ${CFG_VALS}"; exit 1 + echo "[SETUP] Missing ${CFG_VALS}" + exit 1 fi CHAIN_ID="$(jq -r '.chain.id' "${CFG_CHAIN}")" @@ -56,9 +58,10 @@ DAEMON="$(jq -r '.daemon.binary' "${CFG_CHAIN}")" DAEMON_HOME_BASE="$(jq -r '.paths.base.container' "${CFG_CHAIN}")" DAEMON_DIR="$(jq -r '.paths.directories.daemon' "${CFG_CHAIN}")" -if [ -z "${CHAIN_ID}" ] || [ -z "${DENOM}" ] || [ -z "${KEYRING_BACKEND}" ] || \ - [ -z "${DAEMON}" ] || [ -z "${DAEMON_HOME_BASE}" ] || [ -z "${DAEMON_DIR}" ]; then - echo "[SETUP] Invalid config.json (missing required fields)"; exit 1 +if [ -z "${CHAIN_ID}" ] || [ -z "${DENOM}" ] || [ -z "${KEYRING_BACKEND}" ] || + [ -z "${DAEMON}" ] || [ -z "${DAEMON_HOME_BASE}" ] || [ -z "${DAEMON_DIR}" ]; then + echo "[SETUP] Invalid config.json (missing required fields)" + exit 1 fi DAEMON_HOME="${DAEMON_HOME_BASE}/${DAEMON_DIR}" @@ -76,7 +79,8 @@ mkdir -p "${LOCKS_DIR}" # ----- load this validator record ----- VAL_REC_JSON="$(jq -c --arg m "$MONIKER" '[.[] | select(.moniker==$m)][0]' "${CFG_VALS}")" if [ -z "${VAL_REC_JSON}" ] || [ "${VAL_REC_JSON}" = "null" ]; then - echo "[SETUP] Validator with moniker=${MONIKER} not found in validators.json"; exit 1 + echo "[SETUP] Validator with moniker=${MONIKER} not found in validators.json" + exit 1 fi KEY_NAME="$(echo "${VAL_REC_JSON}" | jq -r '.key_name')" @@ -89,408 +93,412 @@ PRIMARY_NAME="$(jq -r ' (map(select(.primary==true)) | if length>0 then .[0].moniker else empty end) // (.[0].moniker) ' "${CFG_VALS}")" -IS_PRIMARY="0"; [ "${MONIKER}" = "${PRIMARY_NAME}" ] && IS_PRIMARY="1" +IS_PRIMARY="0" +[ "${MONIKER}" = "${PRIMARY_NAME}" ] && IS_PRIMARY="1" echo "[SETUP] MONIKER=${MONIKER} KEY_NAME=${KEY_NAME} PRIMARY=${IS_PRIMARY} CHAIN_ID=${CHAIN_ID}" mkdir -p "${DAEMON_HOME}/config" # ----- helpers ----- run() { - echo "+ $*" - "$@" + echo "+ $*" + "$@" } run_capture() { - echo "+ $*" >&2 # goes to stderr, not captured - "$@" + echo "+ $*" >&2 # goes to stderr, not captured + "$@" } with_lock() { - local name="$1" - shift - local lock_file="${LOCKS_DIR}/${name}.lock" - mkdir -p "${LOCKS_DIR}" - if ! command -v flock >/dev/null 2>&1; then - "$@" - return - fi - { - flock -x 200 - "$@" - } 200>"${lock_file}" + local name="$1" + shift + local lock_file="${LOCKS_DIR}/${name}.lock" + mkdir -p "${LOCKS_DIR}" + if ! command -v flock >/dev/null 2>&1; then + "$@" + return + fi + { + flock -x 200 + "$@" + } 200>"${lock_file}" } write_with_lock() { - local lock_name="$1" - local dest="$2" - local value="$3" - with_lock "${lock_name}" bash -c 'printf "%s\n" "$1" > "$2"' _ "${value}" "${dest}" + local lock_name="$1" + local dest="$2" + local value="$3" + with_lock "${lock_name}" bash -c 'printf "%s\n" "$1" > "$2"' _ "${value}" "${dest}" } copy_with_lock() { - local lock_name="$1" - shift - with_lock "${lock_name}" "$@" + local lock_name="$1" + shift + with_lock "${lock_name}" "$@" } verify_gentx_file() { - local file="$1" - if [ ! -f "${file}" ]; then - echo "[SETUP] ERROR: gentx file ${file} not found" - return 1 - fi - return 0 + local file="$1" + if [ ! 
-f "${file}" ]; then + echo "[SETUP] ERROR: gentx file ${file} not found" + return 1 + fi + return 0 } write_node_markers() { - local nodeid - # write fixed container P2P port - echo "${DEFAULT_P2P_PORT}" > "${NODE_STATUS_DIR}/port" + local nodeid + # write fixed container P2P port + echo "${DEFAULT_P2P_PORT}" >"${NODE_STATUS_DIR}/port" - if [ -f "${CONFIG_TOML}" ]; then - nodeid="$(${DAEMON} tendermint show-node-id || true)" - [ -n "${nodeid}" ] && echo "${nodeid}" > "${NODE_STATUS_DIR}/nodeid" - fi + if [ -f "${CONFIG_TOML}" ]; then + nodeid="$(${DAEMON} tendermint show-node-id || true)" + [ -n "${nodeid}" ] && echo "${nodeid}" >"${NODE_STATUS_DIR}/nodeid" + fi - echo "[SETUP] status files in ${NODE_STATUS_DIR}:" - ls -l "${NODE_STATUS_DIR}" || true + echo "[SETUP] status files in ${NODE_STATUS_DIR}:" + ls -l "${NODE_STATUS_DIR}" || true } build_persistent_peers() { - : > "${PEERS_SHARED}" - while IFS= read -r other; do - [ -z "${other}" ] && continue - [ "${other}" = "${MONIKER}" ] && continue - local od="${STATUS_DIR}/${other}" - # Use service DNS name (compose service == moniker) to avoid IP churn. - if [ -s "${od}/nodeid" ] && [ -s "${od}/port" ]; then - echo "$(cat "${od}/nodeid")@${other}:$(cat "${od}/port")" >> "${PEERS_SHARED}" - fi - done < <(jq -r '.[].moniker' "${CFG_VALS}") - echo "[SETUP] persistent_peers:" - cat "${PEERS_SHARED}" || true + : >"${PEERS_SHARED}" + while IFS= read -r other; do + [ -z "${other}" ] && continue + [ "${other}" = "${MONIKER}" ] && continue + local od="${STATUS_DIR}/${other}" + # Use service DNS name (compose service == moniker) to avoid IP churn. + if [ -s "${od}/nodeid" ] && [ -s "${od}/port" ]; then + echo "$(cat "${od}/nodeid")@${other}:$(cat "${od}/port")" >>"${PEERS_SHARED}" + fi + done < <(jq -r '.[].moniker' "${CFG_VALS}") + echo "[SETUP] persistent_peers:" + cat "${PEERS_SHARED}" || true } apply_persistent_peers() { - if [ -f "${PEERS_SHARED}" ] && [ -f "${CONFIG_TOML}" ]; then - local peers; peers="$(paste -sd, "${PEERS_SHARED}" || true)" - if [ -n "${peers}" ]; then - sed -i -E "s|^persistent_peers *=.*$|persistent_peers = \"${peers}\"|g" "${CONFIG_TOML}" - echo "[SETUP] Applied persistent_peers to ${CONFIG_TOML}" - fi - - # Treat all validators as private peers so CometBFT accepts their non-routable addresses. - local peer_ids - peer_ids="$(cut -d@ -f1 "${PEERS_SHARED}" | paste -sd, || true)" - if [ -n "${peer_ids}" ]; then - sed -i -E "s|^private_peer_ids *=.*$|private_peer_ids = \"${peer_ids}\"|g" "${CONFIG_TOML}" - echo "[SETUP] Applied private_peer_ids to ${CONFIG_TOML}" - fi - fi + if [ -f "${PEERS_SHARED}" ] && [ -f "${CONFIG_TOML}" ]; then + local peers + peers="$(paste -sd, "${PEERS_SHARED}" || true)" + if [ -n "${peers}" ]; then + sed -i -E "s|^persistent_peers *=.*$|persistent_peers = \"${peers}\"|g" "${CONFIG_TOML}" + echo "[SETUP] Applied persistent_peers to ${CONFIG_TOML}" + fi + + # Treat all validators as private peers so CometBFT accepts their non-routable addresses. + local peer_ids + peer_ids="$(cut -d@ -f1 "${PEERS_SHARED}" | paste -sd, || true)" + if [ -n "${peer_ids}" ]; then + sed -i -E "s|^private_peer_ids *=.*$|private_peer_ids = \"${peer_ids}\"|g" "${CONFIG_TOML}" + echo "[SETUP] Applied private_peer_ids to ${CONFIG_TOML}" + fi + fi } configure_node_config() { - local api_port="${LUMERA_API_PORT:-1317}" - local grpc_port="${LUMERA_GRPC_PORT:-9090}" - local rpc_port="${LUMERA_RPC_PORT:-26657}" - - if ! 
command -v crudini >/dev/null 2>&1; then - echo "[SETUP] ERROR: crudini not found; cannot update configs" - exit 1 - fi - - if [ -f "${APP_TOML}" ]; then - run crudini --set "${APP_TOML}" '' minimum-gas-prices "\"0.0025ulume\"" - run crudini --set "${APP_TOML}" api enable "true" - run crudini --set "${APP_TOML}" api swagger "true" - run crudini --set "${APP_TOML}" api address "\"tcp://0.0.0.0:${api_port}\"" - run crudini --set "${APP_TOML}" grpc enable "true" - run crudini --set "${APP_TOML}" grpc address "\"0.0.0.0:${grpc_port}\"" - run crudini --set "${APP_TOML}" grpc-web enable "true" - echo "[SETUP] Updated ${APP_TOML} with API/GRPC configuration." - else - echo "[SETUP] WARNING: ${APP_TOML} not found; skipping app.toml update" - fi - - if [ -f "${CONFIG_TOML}" ]; then - run crudini --set "${CONFIG_TOML}" rpc laddr "\"tcp://0.0.0.0:${rpc_port}\"" - echo "[SETUP] Updated ${CONFIG_TOML} RPC configuration." - else - echo "[SETUP] WARNING: ${CONFIG_TOML} not found; skipping config.toml update" - fi + local api_port="${LUMERA_API_PORT:-1317}" + local grpc_port="${LUMERA_GRPC_PORT:-9090}" + local rpc_port="${LUMERA_RPC_PORT:-26657}" + + if ! command -v crudini >/dev/null 2>&1; then + echo "[SETUP] ERROR: crudini not found; cannot update configs" + exit 1 + fi + + if [ -f "${APP_TOML}" ]; then + run crudini --set "${APP_TOML}" '' minimum-gas-prices "\"0.0025ulume\"" + run crudini --set "${APP_TOML}" api enable "true" + run crudini --set "${APP_TOML}" api swagger "true" + run crudini --set "${APP_TOML}" api address "\"tcp://0.0.0.0:${api_port}\"" + run crudini --set "${APP_TOML}" grpc enable "true" + run crudini --set "${APP_TOML}" grpc address "\"0.0.0.0:${grpc_port}\"" + run crudini --set "${APP_TOML}" grpc-web enable "true" + echo "[SETUP] Updated ${APP_TOML} with API/GRPC configuration." + else + echo "[SETUP] WARNING: ${APP_TOML} not found; skipping app.toml update" + fi + + if [ -f "${CONFIG_TOML}" ]; then + run crudini --set "${CONFIG_TOML}" rpc laddr "\"tcp://0.0.0.0:${rpc_port}\"" + echo "[SETUP] Updated ${CONFIG_TOML} RPC configuration." + else + echo "[SETUP] WARNING: ${CONFIG_TOML} not found; skipping config.toml update" + fi } ensure_hermes_relayer_account() { - echo "[SETUP] Ensuring Hermes relayer account..." 
- mkdir -p "${HERMES_SHARED_DIR}" "${HERMES_STATUS_DIR}" - - local mnemonic="" - if [ -s "${HERMES_RELAYER_MNEMONIC_FILE}" ]; then - mnemonic="$(cat "${HERMES_RELAYER_MNEMONIC_FILE}")" - fi - - local relayer_addr - relayer_addr="$(run_capture ${DAEMON} keys show "${HERMES_RELAYER_KEY}" -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" - relayer_addr="$(printf '%s' "${relayer_addr}" | tr -d '\r\n')" - if [ -z "${relayer_addr}" ]; then - if [ -n "${mnemonic}" ]; then - printf '%s\n' "${mnemonic}" | run ${DAEMON} keys add "${HERMES_RELAYER_KEY}" --recover --keyring-backend "${KEYRING_BACKEND}" >/dev/null - else - local key_json - key_json="$(run_capture ${DAEMON} keys add "${HERMES_RELAYER_KEY}" --keyring-backend "${KEYRING_BACKEND}" --output json)" - mnemonic="$(printf '%s' "${key_json}" | jq -r '.mnemonic // empty' 2>/dev/null || true)" - fi - fi - - if [ -n "${mnemonic}" ]; then - write_with_lock "hermes-mnemonic" "${HERMES_RELAYER_MNEMONIC_FILE}" "${mnemonic}" - fi - - relayer_addr="$(run_capture ${DAEMON} keys show "${HERMES_RELAYER_KEY}" -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" - relayer_addr="$(printf '%s' "${relayer_addr}" | tr -d '\r\n')" - if [ -z "${relayer_addr}" ]; then - echo "[SETUP] ERROR: Unable to obtain Hermes relayer address" - exit 1 - fi - write_with_lock "hermes-addr" "${HERMES_RELAYER_ADDR_FILE}" "${relayer_addr}" - - local need_add=1 - if [ -f "${GENESIS_LOCAL}" ]; then - if jq -e --arg addr "${relayer_addr}" '.app_state.bank.balances[]? | select(.address==$addr)' "${GENESIS_LOCAL}" >/dev/null 2>&1; then - need_add=0 - fi - fi - - if [ "${need_add}" -eq 1 ]; then - echo "[SETUP] Adding Hermes relayer genesis balance: ${HERMES_RELAYER_GENESIS_AMOUNT}${DENOM}" - set +e - run ${DAEMON} genesis add-genesis-account "${relayer_addr}" "${HERMES_RELAYER_GENESIS_AMOUNT}${DENOM}" - local status=$? - set -e - if [ ${status} -ne 0 ]; then - echo "[SETUP] Failed to add Hermes relayer genesis account." - exit ${status} - fi - else - echo "[SETUP] Hermes relayer genesis account already present." - fi + echo "[SETUP] Ensuring Hermes relayer account..." 
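+	# Recover the key from the shared mnemonic when one exists; otherwise generate it and publish mnemonic/address for the other nodes.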
+ mkdir -p "${HERMES_SHARED_DIR}" "${HERMES_STATUS_DIR}" + + local mnemonic="" + if [ -s "${HERMES_RELAYER_MNEMONIC_FILE}" ]; then + mnemonic="$(cat "${HERMES_RELAYER_MNEMONIC_FILE}")" + fi + + local relayer_addr + relayer_addr="$(run_capture ${DAEMON} keys show "${HERMES_RELAYER_KEY}" -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" + relayer_addr="$(printf '%s' "${relayer_addr}" | tr -d '\r\n')" + if [ -z "${relayer_addr}" ]; then + if [ -n "${mnemonic}" ]; then + printf '%s\n' "${mnemonic}" | run ${DAEMON} keys add "${HERMES_RELAYER_KEY}" --recover --keyring-backend "${KEYRING_BACKEND}" >/dev/null + else + local key_json + key_json="$(run_capture ${DAEMON} keys add "${HERMES_RELAYER_KEY}" --keyring-backend "${KEYRING_BACKEND}" --output json)" + mnemonic="$(printf '%s' "${key_json}" | jq -r '.mnemonic // empty' 2>/dev/null || true)" + fi + fi + + if [ -n "${mnemonic}" ]; then + write_with_lock "hermes-mnemonic" "${HERMES_RELAYER_MNEMONIC_FILE}" "${mnemonic}" + fi + + relayer_addr="$(run_capture ${DAEMON} keys show "${HERMES_RELAYER_KEY}" -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" + relayer_addr="$(printf '%s' "${relayer_addr}" | tr -d '\r\n')" + if [ -z "${relayer_addr}" ]; then + echo "[SETUP] ERROR: Unable to obtain Hermes relayer address" + exit 1 + fi + write_with_lock "hermes-addr" "${HERMES_RELAYER_ADDR_FILE}" "${relayer_addr}" + + local need_add=1 + if [ -f "${GENESIS_LOCAL}" ]; then + if jq -e --arg addr "${relayer_addr}" '.app_state.bank.balances[]? | select(.address==$addr)' "${GENESIS_LOCAL}" >/dev/null 2>&1; then + need_add=0 + fi + fi + + if [ "${need_add}" -eq 1 ]; then + echo "[SETUP] Adding Hermes relayer genesis balance: ${HERMES_RELAYER_GENESIS_AMOUNT}${DENOM}" + set +e + run ${DAEMON} genesis add-genesis-account "${relayer_addr}" "${HERMES_RELAYER_GENESIS_AMOUNT}${DENOM}" + local status=$? + set -e + if [ ${status} -ne 0 ]; then + echo "[SETUP] Failed to add Hermes relayer genesis account." + exit ${status} + fi + else + echo "[SETUP] Hermes relayer genesis account already present." + fi } wait_for_file() { - while [ ! -s "$1" ]; - do sleep 1; - done; + while [ ! -s "$1" ]; do + sleep 1 + done } init_if_needed() { - if [ -f "${GENESIS_LOCAL}" ]; then - echo "[SETUP] ${MONIKER} already initialized (genesis exists)." - else - echo "[SETUP] Initializing ${MONIKER}..." - run ${DAEMON} init "${MONIKER}" --chain-id "${CHAIN_ID}" --overwrite - fi - - # ensure validator key exists - local addr - addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -z "${addr}" ]; then - run ${DAEMON} keys add "${KEY_NAME}" --keyring-backend "${KEYRING_BACKEND}" - else - echo "[SETUP] Key ${KEY_NAME} already exists with address ${addr}" - fi + if [ -f "${GENESIS_LOCAL}" ]; then + echo "[SETUP] ${MONIKER} already initialized (genesis exists)." + else + echo "[SETUP] Initializing ${MONIKER}..." 
+ run ${DAEMON} init "${MONIKER}" --chain-id "${CHAIN_ID}" --overwrite + fi + + # ensure validator key exists + local addr + addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -z "${addr}" ]; then + run ${DAEMON} keys add "${KEY_NAME}" --keyring-backend "${KEYRING_BACKEND}" + else + echo "[SETUP] Key ${KEY_NAME} already exists with address ${addr}" + fi } # ----- primary validator ----- primary_validator_setup() { - init_if_needed - configure_node_config - - # must have external genesis + claims ready - if [ ! -f "${EXTERNAL_GENESIS}" ]; then - echo "ERROR: ${EXTERNAL_GENESIS} not found. Provide existing genesis."; exit 1 - fi - cp "${EXTERNAL_GENESIS}" "${GENESIS_LOCAL}" - [ -f "${CLAIMS_SHARED}" ] && cp "${CLAIMS_SHARED}" "${CLAIMS_LOCAL}" - - # unify denoms (bond/mint/crisis/gov) - tmp="${DAEMON_HOME}/config/tmp_genesis.json" - cat "${GENESIS_LOCAL}" | jq \ - --arg denom "${DENOM}" ' + init_if_needed + configure_node_config + + # must have external genesis + claims ready + if [ ! -f "${EXTERNAL_GENESIS}" ]; then + echo "ERROR: ${EXTERNAL_GENESIS} not found. Provide existing genesis." + exit 1 + fi + cp "${EXTERNAL_GENESIS}" "${GENESIS_LOCAL}" + [ -f "${CLAIMS_SHARED}" ] && cp "${CLAIMS_SHARED}" "${CLAIMS_LOCAL}" + + # unify denoms (bond/mint/crisis/gov) + tmp="${DAEMON_HOME}/config/tmp_genesis.json" + cat "${GENESIS_LOCAL}" | jq \ + --arg denom "${DENOM}" ' .app_state.staking.params.bond_denom = $denom | .app_state.mint.params.mint_denom = $denom | .app_state.crisis.constant_fee.denom = $denom | .app_state.gov.params.min_deposit[0].denom = $denom | .app_state.gov.params.expedited_min_deposit[0].denom = $denom - ' > "${tmp}" - mv "${tmp}" "${GENESIS_LOCAL}" - - # primary’s own account - echo "[SETUP] Creating key/account for ${KEY_NAME}..." - addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -z "${addr}" ]; then - echo "[SETUP] ERROR: Unable to obtain address for ${KEY_NAME}" - exit 1 - fi - run ${DAEMON} genesis add-genesis-account "${addr}" "${ACCOUNT_BAL}" - printf '%s\n' "${addr}" > "${NODE_STATUS_DIR}/genesis-address" - - # governance account - local gov_addr - gov_addr="$(run_capture ${DAEMON} keys show governance_key -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" - gov_addr="$(printf '%s' "${gov_addr}" | tr -d '\r\n')" - if [ -z "${gov_addr}" ]; then - run ${DAEMON} keys add governance_key --keyring-backend "${KEYRING_BACKEND}" >/dev/null - gov_addr="$(run_capture ${DAEMON} keys show governance_key -a --keyring-backend "${KEYRING_BACKEND}")" - gov_addr="$(printf '%s' "${gov_addr}" | tr -d '\r\n')" - fi - if [ -z "${gov_addr}" ]; then - echo "[SETUP] ERROR: Unable to obtain governance key address" - exit 1 - fi - printf '%s\n' "${gov_addr}" > ${SHARED_DIR}/governance_address - run ${DAEMON} genesis add-genesis-account "${gov_addr}" "1000000000000${DENOM}" - - ensure_hermes_relayer_account - - # share initial genesis to secondaries & flag - cp "${GENESIS_LOCAL}" "${GENESIS_SHARED}" - mkdir -p "${GENTX_DIR}" "${ADDR_DIR}" - echo "true" > "${GENESIS_READY_FLAG}" - - # write own markers before waiting for peers - write_node_markers - - # wait for all other nodes to publish nodeid/ip - total="$(jq -r 'length' "${CFG_VALS}")" - echo "[SETUP] Waiting for other node IDs/IPs..." 
- while true; do - found=0 - while IFS= read -r other; do - [ "${other}" = "${MONIKER}" ] && continue - od="${STATUS_DIR}/${other}" - [[ -s "${od}/nodeid" ]] && found=$((found+1)) - done < <(jq -r '.[].moniker' "${CFG_VALS}") - [ "${found}" -ge $((total-1)) ] && break - sleep 1 - done - - # collect gentx/addresses from secondaries - echo "[SETUP] Collecting addresses & gentx from secondaries..." - if compgen -G "${ADDR_DIR}/*" > /dev/null; then - while IFS= read -r file; do - [ -f "$file" ] || continue - bal="$(cat "$file")"; addr="$(basename "$file")" - run ${DAEMON} genesis add-genesis-account "${addr}" "${bal}" - done < <(find ${ADDR_DIR} -type f) - fi - - # primary gentx - run ${DAEMON} genesis gentx "${KEY_NAME}" "${STAKE_AMOUNT}" \ - --chain-id "${CHAIN_ID}" \ - --keyring-backend "${KEYRING_BACKEND}" - - for file in "${GENTX_LOCAL_DIR}"/gentx-*.json; do - [ -f "${file}" ] || continue - verify_gentx_file "${file}" || exit 1 - done - - # collect others' gentx - mkdir -p "${GENTX_LOCAL_DIR}" - if compgen -G "${GENTX_DIR}/*.json" > /dev/null; then - copy_with_lock "gentx" bash -c 'cp "$1"/*.json "$2"/' _ "${GENTX_DIR}" "${GENTX_LOCAL_DIR}" || true - for file in "${GENTX_LOCAL_DIR}"/gentx-*.json; do - [ -f "${file}" ] || continue - verify_gentx_file "${file}" || exit 1 - done - fi - run ${DAEMON} genesis collect-gentxs - - # publish final genesis - cp "${GENESIS_LOCAL}" "${FINAL_GENESIS_SHARED}" - echo "[SETUP] Final genesis published to ${FINAL_GENESIS_SHARED}" - - # build & apply persistent peers - build_persistent_peers - apply_persistent_peers - - echo "true" > "${SETUP_COMPLETE_FLAG}" - echo "true" > "${NODE_SETUP_COMPLETE_FLAG}" - echo "[SETUP] Primary setup complete." + ' >"${tmp}" + mv "${tmp}" "${GENESIS_LOCAL}" + + # primary’s own account + echo "[SETUP] Creating key/account for ${KEY_NAME}..." + addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -z "${addr}" ]; then + echo "[SETUP] ERROR: Unable to obtain address for ${KEY_NAME}" + exit 1 + fi + run ${DAEMON} genesis add-genesis-account "${addr}" "${ACCOUNT_BAL}" + printf '%s\n' "${addr}" >"${NODE_STATUS_DIR}/genesis-address" + + # governance account + local gov_addr + gov_addr="$(run_capture ${DAEMON} keys show governance_key -a --keyring-backend "${KEYRING_BACKEND}" 2>/dev/null || true)" + gov_addr="$(printf '%s' "${gov_addr}" | tr -d '\r\n')" + if [ -z "${gov_addr}" ]; then + run ${DAEMON} keys add governance_key --keyring-backend "${KEYRING_BACKEND}" >/dev/null + gov_addr="$(run_capture ${DAEMON} keys show governance_key -a --keyring-backend "${KEYRING_BACKEND}")" + gov_addr="$(printf '%s' "${gov_addr}" | tr -d '\r\n')" + fi + if [ -z "${gov_addr}" ]; then + echo "[SETUP] ERROR: Unable to obtain governance key address" + exit 1 + fi + printf '%s\n' "${gov_addr}" >${SHARED_DIR}/governance_address + run ${DAEMON} genesis add-genesis-account "${gov_addr}" "1000000000000${DENOM}" + + ensure_hermes_relayer_account + + # share initial genesis to secondaries & flag + cp "${GENESIS_LOCAL}" "${GENESIS_SHARED}" + mkdir -p "${GENTX_DIR}" "${ADDR_DIR}" + echo "true" >"${GENESIS_READY_FLAG}" + + # write own markers before waiting for peers + write_node_markers + + # wait for all other nodes to publish nodeid/ip + total="$(jq -r 'length' "${CFG_VALS}")" + echo "[SETUP] Waiting for other node IDs/IPs..." 
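+	# Poll the shared status dir until every other validator has published its node ID.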
+ while true; do + found=0 + while IFS= read -r other; do + [ "${other}" = "${MONIKER}" ] && continue + od="${STATUS_DIR}/${other}" + [[ -s "${od}/nodeid" ]] && found=$((found + 1)) + done < <(jq -r '.[].moniker' "${CFG_VALS}") + [ "${found}" -ge $((total - 1)) ] && break + sleep 1 + done + + # collect gentx/addresses from secondaries + echo "[SETUP] Collecting addresses & gentx from secondaries..." + if compgen -G "${ADDR_DIR}/*" >/dev/null; then + while IFS= read -r file; do + [ -f "$file" ] || continue + bal="$(cat "$file")" + addr="$(basename "$file")" + run ${DAEMON} genesis add-genesis-account "${addr}" "${bal}" + done < <(find ${ADDR_DIR} -type f) + fi + + # primary gentx + run ${DAEMON} genesis gentx "${KEY_NAME}" "${STAKE_AMOUNT}" \ + --chain-id "${CHAIN_ID}" \ + --keyring-backend "${KEYRING_BACKEND}" + + for file in "${GENTX_LOCAL_DIR}"/gentx-*.json; do + [ -f "${file}" ] || continue + verify_gentx_file "${file}" || exit 1 + done + + # collect others' gentx + mkdir -p "${GENTX_LOCAL_DIR}" + if compgen -G "${GENTX_DIR}/*.json" >/dev/null; then + copy_with_lock "gentx" bash -c 'cp "$1"/*.json "$2"/' _ "${GENTX_DIR}" "${GENTX_LOCAL_DIR}" || true + for file in "${GENTX_LOCAL_DIR}"/gentx-*.json; do + [ -f "${file}" ] || continue + verify_gentx_file "${file}" || exit 1 + done + fi + run ${DAEMON} genesis collect-gentxs + + # publish final genesis + cp "${GENESIS_LOCAL}" "${FINAL_GENESIS_SHARED}" + echo "[SETUP] Final genesis published to ${FINAL_GENESIS_SHARED}" + + # build & apply persistent peers + build_persistent_peers + apply_persistent_peers + + echo "true" >"${SETUP_COMPLETE_FLAG}" + echo "true" >"${NODE_SETUP_COMPLETE_FLAG}" + echo "[SETUP] Primary setup complete." } # ----- secondary validator ----- secondary_validator_setup() { - # wait for primary to publish accounts genesis - echo "[SETUP] Waiting for primary genesis_accounts_ready..." 
- wait_for_file "${GENESIS_READY_FLAG}" - wait_for_file "${GENESIS_SHARED}" - - init_if_needed - configure_node_config - - # copy initial genesis/claims - cp "${GENESIS_SHARED}" "${GENESIS_LOCAL}" - [ -f "${CLAIMS_SHARED}" ] && cp "${CLAIMS_SHARED}" "${CLAIMS_LOCAL}" - - # create key, add account, create gentx - addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -z "${addr}" ]; then - run ${DAEMON} keys add "${KEY_NAME}" --keyring-backend "${KEYRING_BACKEND}" >/dev/null - fi - addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" - addr="$(printf '%s' "${addr}" | tr -d '\r\n')" - if [ -z "${addr}" ]; then - echo "[SETUP] ERROR: Unable to obtain address for ${KEY_NAME}" - exit 1 - fi - run ${DAEMON} genesis add-genesis-account "${addr}" "${ACCOUNT_BAL}" - ensure_hermes_relayer_account - - mkdir -p "${GENTX_LOCAL_DIR}" "${GENTX_DIR}" "${ADDR_DIR}" - - if compgen -G "${GENTX_LOCAL_DIR}/gentx-*.json" > /dev/null; then - echo "[SETUP] gentx already exists in ${GENTX_LOCAL_DIR}, skipping generation" - else - run ${DAEMON} genesis gentx "${KEY_NAME}" "${STAKE_AMOUNT}" \ - --chain-id "${CHAIN_ID}" --keyring-backend "${KEYRING_BACKEND}" - fi - - local gentx_file - gentx_file="$(find "${GENTX_LOCAL_DIR}" -maxdepth 1 -type f -name 'gentx-*.json' -print | head -n1)" - if [ -z "${gentx_file}" ]; then - echo "[SETUP] ERROR: gentx generation failed for ${KEY_NAME} (no file produced)" - exit 1 - fi - verify_gentx_file "${gentx_file}" || exit 1 - - # share gentx & address - copy_with_lock "gentx" cp "${gentx_file}" "${GENTX_DIR}/${MONIKER}_gentx.json" - write_with_lock "addresses" "${ADDR_DIR}/${addr}" "${ACCOUNT_BAL}" - printf '%s\n' "${addr}" > "${NODE_STATUS_DIR}/genesis-address" - - # write own markers for peer discovery - write_node_markers - - # wait for persistent_peers and apply - wait_for_file "${PEERS_SHARED}" - apply_persistent_peers - - # wait for final genesis - echo "[SETUP] Waiting for final genesis from primary..." - wait_for_file "${FINAL_GENESIS_SHARED}" - cp "${FINAL_GENESIS_SHARED}" "${GENESIS_LOCAL}" - wait_for_file "${SETUP_COMPLETE_FLAG}" - - echo "[SETUP] Secondary setup complete." - echo "true" > "${NODE_SETUP_COMPLETE_FLAG}" + # wait for primary to publish accounts genesis + echo "[SETUP] Waiting for primary genesis_accounts_ready..." 
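+	# Block until the primary has published the account-stage genesis and its ready flag.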
+ wait_for_file "${GENESIS_READY_FLAG}" + wait_for_file "${GENESIS_SHARED}" + + init_if_needed + configure_node_config + + # copy initial genesis/claims + cp "${GENESIS_SHARED}" "${GENESIS_LOCAL}" + [ -f "${CLAIMS_SHARED}" ] && cp "${CLAIMS_SHARED}" "${CLAIMS_LOCAL}" + + # create key, add account, create gentx + addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -z "${addr}" ]; then + run ${DAEMON} keys add "${KEY_NAME}" --keyring-backend "${KEYRING_BACKEND}" >/dev/null + fi + addr="$(run_capture ${DAEMON} keys show "${KEY_NAME}" -a --keyring-backend "${KEYRING_BACKEND}")" + addr="$(printf '%s' "${addr}" | tr -d '\r\n')" + if [ -z "${addr}" ]; then + echo "[SETUP] ERROR: Unable to obtain address for ${KEY_NAME}" + exit 1 + fi + run ${DAEMON} genesis add-genesis-account "${addr}" "${ACCOUNT_BAL}" + ensure_hermes_relayer_account + + mkdir -p "${GENTX_LOCAL_DIR}" "${GENTX_DIR}" "${ADDR_DIR}" + + if compgen -G "${GENTX_LOCAL_DIR}/gentx-*.json" >/dev/null; then + echo "[SETUP] gentx already exists in ${GENTX_LOCAL_DIR}, skipping generation" + else + run ${DAEMON} genesis gentx "${KEY_NAME}" "${STAKE_AMOUNT}" \ + --chain-id "${CHAIN_ID}" --keyring-backend "${KEYRING_BACKEND}" + fi + + local gentx_file + gentx_file="$(find "${GENTX_LOCAL_DIR}" -maxdepth 1 -type f -name 'gentx-*.json' -print | head -n1)" + if [ -z "${gentx_file}" ]; then + echo "[SETUP] ERROR: gentx generation failed for ${KEY_NAME} (no file produced)" + exit 1 + fi + verify_gentx_file "${gentx_file}" || exit 1 + + # share gentx & address + copy_with_lock "gentx" cp "${gentx_file}" "${GENTX_DIR}/${MONIKER}_gentx.json" + write_with_lock "addresses" "${ADDR_DIR}/${addr}" "${ACCOUNT_BAL}" + printf '%s\n' "${addr}" >"${NODE_STATUS_DIR}/genesis-address" + + # write own markers for peer discovery + write_node_markers + + # wait for persistent_peers and apply + wait_for_file "${PEERS_SHARED}" + apply_persistent_peers + + # wait for final genesis + echo "[SETUP] Waiting for final genesis from primary..." + wait_for_file "${FINAL_GENESIS_SHARED}" + cp "${FINAL_GENESIS_SHARED}" "${GENESIS_LOCAL}" + wait_for_file "${SETUP_COMPLETE_FLAG}" + + echo "[SETUP] Secondary setup complete." 
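+	# Write the per-node completion marker.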
+ echo "true" >"${NODE_SETUP_COMPLETE_FLAG}" } # ----- main ----- if [ "${IS_PRIMARY}" = "1" ]; then - primary_validator_setup + primary_validator_setup else - secondary_validator_setup + secondary_validator_setup fi diff --git a/devnet/scripts/vote-all.sh b/devnet/scripts/vote-all.sh index 83c3169e..cbea4b2c 100755 --- a/devnet/scripts/vote-all.sh +++ b/devnet/scripts/vote-all.sh @@ -2,8 +2,8 @@ # Usage: ./vote_all.sh if [ -z "$1" ]; then - echo "Usage: $0 " - exit 1 + echo "Usage: $0 " + exit 1 fi # Configuration @@ -14,125 +14,133 @@ SERVICE_NAME="supernova_validator_1" LUMERA_SHARED="/tmp/lumera-devnet/shared" COMPOSE_FILE="../docker-compose.yml" FEES="5000ulume" +# Gas configuration +USE_GAS_AUTO="true" # "true" to use --gas auto with --gas-adjustment 1.3 +GAS_AMOUNT="120000" # Used when USE_GAS_AUTO="false" # Checking the votes with: # lumerad query gov votes --output json | jq check_votes() { - echo "🔍 Checking current votes for proposal ID: $PROPOSAL_ID" - VOTES_JSON=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE_NAME" \ - lumerad query gov votes "$PROPOSAL_ID" --output json) - echo "$VOTES_JSON" | jq - - if echo "$VOTES_JSON" | jq -e '.votes' > /dev/null; then - echo "ℹ️ Current Votes for Proposal $PROPOSAL_ID:" - echo "$VOTES_JSON" | jq '.votes[] | {voter, option: .options[0].option}' - else - echo "ℹ️ No votes available yet." - fi + echo "🔍 Checking current votes for proposal ID: $PROPOSAL_ID" + VOTES_JSON=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE_NAME" \ + lumerad query gov votes "$PROPOSAL_ID" --output json) + echo "$VOTES_JSON" | jq + + if echo "$VOTES_JSON" | jq -e '.votes' >/dev/null; then + echo "ℹ️ Current Votes for Proposal $PROPOSAL_ID:" + echo "$VOTES_JSON" | jq '.votes[] | {voter, option: .options[0].option}' + else + echo "ℹ️ No votes available yet." + fi } # Checking participation with: # lumerad query gov tally --output json | jq check_tally() { - echo "🔍 Checking current tally for proposal ID: $PROPOSAL_ID" - TALLY_JSON=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE_NAME" \ - lumerad query gov tally "$PROPOSAL_ID" --output json) - echo "$TALLY_JSON" | jq - - YES_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.yes_count // "0" | tonumber') - NO_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.no_count // "0" | tonumber') - ABSTAIN_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.abstain_count // "0" | tonumber') - NO_WITH_VETO_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.no_with_veto_count // "0" | tonumber') - - TOTAL_VOTES=$((YES_COUNT + NO_COUNT + ABSTAIN_COUNT + NO_WITH_VETO_COUNT)) - echo "📈 Total Votes Cast: $TOTAL_VOTES" + echo "🔍 Checking current tally for proposal ID: $PROPOSAL_ID" + TALLY_JSON=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE_NAME" \ + lumerad query gov tally "$PROPOSAL_ID" --output json) + echo "$TALLY_JSON" | jq + + YES_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.yes_count // "0" | tonumber') + NO_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.no_count // "0" | tonumber') + ABSTAIN_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.abstain_count // "0" | tonumber') + NO_WITH_VETO_COUNT=$(echo "$TALLY_JSON" | jq -r '.tally.no_with_veto_count // "0" | tonumber') + + TOTAL_VOTES=$((YES_COUNT + NO_COUNT + ABSTAIN_COUNT + NO_WITH_VETO_COUNT)) + echo "📈 Total Votes Cast: $TOTAL_VOTES" } vote_all() { - echo "🔍 Discovering validator services..." 
- - # Get all docker compose services and filter out the primary validator (_1) - VALIDATOR_SERVICES=$(docker compose -f "$COMPOSE_FILE" config --services | grep supernova_validator_ | grep -v '_1$') - - TX_HASHES=() - - for SERVICE in $VALIDATOR_SERVICES; do - echo "" - echo "🔍 Processing $SERVICE..." - - KEY_NAME="${SERVICE}_key" - VOTER_ADDRESS=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad keys show $KEY_NAME -a --keyring-backend "$KEYRING_BACKEND" 2>/dev/null) - - echo "🗳️ Voting YES on behalf of $SERVICE (address: $VOTER_ADDRESS)..." - - VOTE_JSON=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ - lumerad tx gov vote "$PROPOSAL_ID" yes \ - --from $VOTER_ADDRESS \ - --chain-id "$CHAIN_ID" \ - --keyring-backend "$KEYRING_BACKEND" \ - --gas auto \ - --gas-adjustment 1.3 \ - --fees "$FEES" \ - --output json \ - --broadcast-mode sync \ - --yes) - - if [ -z "$VOTE_JSON" ]; then - echo "❌ No JSON response received. The transaction command may have failed to execute." - else - echo "Vote transaction for $SERVICE:" - echo "$VOTE_JSON" | jq - fi - - TX_CODE_RAW=$(echo "$VOTE_JSON" | jq -r '.code // empty') - TX_HASH=$(echo "$VOTE_JSON" | jq -r '.txhash // ""') - - if [[ -z "$TX_CODE_RAW" ]]; then - TX_CODE=0 - else - TX_CODE=$TX_CODE_RAW - fi - - if ! [[ "$TX_CODE" =~ ^[0-9]+$ ]]; then - echo "⚠️ TX_CODE is not a valid number: $TX_CODE" - TX_CODE=1 - fi - - if [ "$TX_CODE" -ne 0 ] || [ -z "$TX_HASH" ]; then - RAW_LOG=$(echo "$VOTE_JSON" | jq -r '.raw_log // "unknown error"') - if [ -z "$TX_HASH" ]; then - echo "❌ Vote failed: $RAW_LOG" - else - echo "❌ Vote failed (txhash: $TX_HASH): $RAW_LOG" - fi - else - TX_HASHES+=("$TX_HASH") - fi - done - - # Wait before checking transaction results - echo "⏳ Waiting for transactions to be processed..." - sleep 5 - - echo "🔍 Verifying vote transactions..." - for TX_HASH in "${TX_HASHES[@]}"; do - RESULT=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE_NAME" \ - lumerad query tx "$TX_HASH" --output json 2>/dev/null) - - TX_CODE=$(echo "$RESULT" | jq -r '.code // 0') - RAW_LOG=$(echo "$RESULT" | jq -r '.raw_log // ""') - - if [[ "$TX_CODE" == "0" ]]; then - echo "✅ Transaction $TX_HASH succeeded" - else - echo "❌ Transaction $TX_HASH failed with code $TX_CODE: $RAW_LOG" - fi - done + echo "🔍 Discovering validator services..." + + # Get all docker compose services and filter out the primary validator (_1) + VALIDATOR_SERVICES=$(docker compose -f "$COMPOSE_FILE" config --services | grep supernova_validator_ | grep -v '_1$') + + TX_HASHES=() + + for SERVICE in $VALIDATOR_SERVICES; do + echo "" + echo "🔍 Processing $SERVICE..." + + KEY_NAME="${SERVICE}_key" + VOTER_ADDRESS=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad keys show $KEY_NAME -a --keyring-backend "$KEYRING_BACKEND" 2>/dev/null) + + echo "🗳️ Voting YES on behalf of $SERVICE (address: $VOTER_ADDRESS)..." + + if [ "$USE_GAS_AUTO" = "true" ]; then + GAS_FLAGS=(--gas auto --gas-adjustment 1.3) + else + GAS_FLAGS=(--gas "$GAS_AMOUNT") + fi + + VOTE_JSON=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE" \ + lumerad tx gov vote "$PROPOSAL_ID" yes \ + --from $VOTER_ADDRESS \ + --chain-id "$CHAIN_ID" \ + --keyring-backend "$KEYRING_BACKEND" \ + "${GAS_FLAGS[@]}" \ + --fees "$FEES" \ + --output json \ + --broadcast-mode sync \ + --yes) + + if [ -z "$VOTE_JSON" ]; then + echo "❌ No JSON response received. The transaction command may have failed to execute." 
+		else
+			echo "Vote transaction for $SERVICE:"
+			echo "$VOTE_JSON" | jq
+		fi
+
+		TX_CODE_RAW=$(echo "$VOTE_JSON" | jq -r '.code // empty')
+		TX_HASH=$(echo "$VOTE_JSON" | jq -r '.txhash // ""')
+
+		if [[ -z "$TX_CODE_RAW" ]]; then
+			TX_CODE=0
+		else
+			TX_CODE=$TX_CODE_RAW
+		fi
+
+		if ! [[ "$TX_CODE" =~ ^[0-9]+$ ]]; then
+			echo "⚠️ TX_CODE is not a valid number: $TX_CODE"
+			TX_CODE=1
+		fi
+
+		if [ "$TX_CODE" -ne 0 ] || [ -z "$TX_HASH" ]; then
+			RAW_LOG=$(echo "$VOTE_JSON" | jq -r '.raw_log // "unknown error"')
+			if [ -z "$TX_HASH" ]; then
+				echo "❌ Vote failed: $RAW_LOG"
+			else
+				echo "❌ Vote failed (txhash: $TX_HASH): $RAW_LOG"
+			fi
+		else
+			TX_HASHES+=("$TX_HASH")
+		fi
+	done
+
+	# Wait before checking transaction results
+	echo "⏳ Waiting for transactions to be processed..."
+	sleep 5
+
+	echo "🔍 Verifying vote transactions..."
+	for TX_HASH in "${TX_HASHES[@]}"; do
+		RESULT=$(docker compose -f "$COMPOSE_FILE" exec "$SERVICE_NAME" \
+			lumerad query tx "$TX_HASH" --output json 2>/dev/null)
+
+		TX_CODE=$(echo "$RESULT" | jq -r '.code // 0')
+		RAW_LOG=$(echo "$RESULT" | jq -r '.raw_log // ""')
+
+		if [[ "$TX_CODE" == "0" ]]; then
+			echo "✅ Transaction $TX_HASH succeeded"
+		else
+			echo "❌ Transaction $TX_HASH failed with code $TX_CODE: $RAW_LOG"
+		fi
+	done
 }
 
 check_votes
 check_tally
 vote_all
 check_votes
-check_tally
\ No newline at end of file
+check_tally
diff --git a/devnet/scripts/wait-for-height.sh b/devnet/scripts/wait-for-height.sh
index 168d8152..30d393f6 100755
--- a/devnet/scripts/wait-for-height.sh
+++ b/devnet/scripts/wait-for-height.sh
@@ -2,14 +2,14 @@
 set -euo pipefail
 
 if [[ $# -ne 1 ]]; then
-	echo "Usage: $0 <target-height>"
-	exit 1
+	echo "Usage: $0 <target-height>"
+	exit 1
 fi
 
 TARGET_HEIGHT="$1"
 
 if ! [[ "$TARGET_HEIGHT" =~ ^[0-9]+$ ]]; then
-	echo "Target height must be a positive integer. Got: $TARGET_HEIGHT" >&2
-	exit 1
+	echo "Target height must be a positive integer. Got: $TARGET_HEIGHT" >&2
+	exit 1
 fi
 
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -23,17 +23,17 @@ deadline=$((SECONDS + TIMEOUT_SECONDS))
 
 echo "Waiting for block height >= ${TARGET_HEIGHT} (service=${SERVICE}, timeout=${TIMEOUT_SECONDS}s)..."
 
-while (( SECONDS < deadline )); do
-	height="$(docker compose -f "${COMPOSE_FILE}" exec -T "${SERVICE}" \
-		lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0")"
+while ((SECONDS < deadline)); do
+	height="$(docker compose -f "${COMPOSE_FILE}" exec -T "${SERVICE}" \
+		lumerad status 2>/dev/null | jq -r '.sync_info.latest_block_height // "0"' 2>/dev/null || echo "0")"
 
-	if [[ "$height" =~ ^[0-9]+$ ]] && (( height >= TARGET_HEIGHT )); then
-		echo "Reached height ${height}."
-		exit 0
-	fi
+	if [[ "$height" =~ ^[0-9]+$ ]] && ((height >= TARGET_HEIGHT)); then
+		echo "Reached height ${height}."
+		exit 0
+	fi
 
-	echo "Current height ${height}."
-	sleep "${INTERVAL}"
+	echo "Current height ${height}."
+	sleep "${INTERVAL}"
 done
 
 echo "Timeout waiting for height ${TARGET_HEIGHT}." 
>&2 diff --git a/devnet/tests/validator/ibc_test.go b/devnet/tests/validator/ibc_test.go index efbad529..ce52e509 100644 --- a/devnet/tests/validator/ibc_test.go +++ b/devnet/tests/validator/ibc_test.go @@ -2,7 +2,9 @@ package validator import ( "encoding/json" + "fmt" "os" + "strings" "testing" "time" @@ -37,11 +39,11 @@ type ibcLumeraSuite struct { simdRecipient string simdREST string - info ibcutil.ChannelInfo - channels []ibcutil.Channel - channel *ibcutil.Channel - connections []ibcutil.Connection - connection *ibcutil.Connection + info ibcutil.ChannelInfo + channels []ibcutil.Channel + channel *ibcutil.Channel + connections []ibcutil.Connection + connection *ibcutil.Connection clientStatus string csClientID string csHeight int64 @@ -52,15 +54,12 @@ func (s *ibcLumeraSuite) SetupSuite() { // Load environment-driven configuration and shared channel metadata. s.channelInfoPath = getenv("CHANNEL_INFO_FILE", defaultChannelInfoPath) s.lumeraBin = getenv("LUMERA_BIN", defaultLumeraBin) - s.lumeraRPC = getenv("LUMERA_RPC_ADDR", defaultLumeraRPC) + s.lumeraRPC = resolveLumeraRPC() s.lumeraChainID = getenv("LUMERA_CHAIN_ID", defaultLumeraChainID) if val := os.Getenv("LUMERA_KEY_NAME"); val != "" { s.lumeraKeyName = val } else { - s.lumeraKeyName = defaultLumeraKeyName - if key := loadPrimaryValidatorKey(getenv("LUMERA_VALIDATORS_FILE", defaultValidatorsFile)); key != "" { - s.lumeraKeyName = key - } + s.lumeraKeyName = resolveLumeraKeyName() } s.lumeraGasPrices = getenv("LUMERA_GAS_PRICES", defaultLumeraGasPrices) s.lumeraDenom = getenv("LUMERA_DENOM", defaultLumeraDenom) @@ -226,3 +225,81 @@ func loadPrimaryValidatorKey(path string) string { } return "" } + +func resolveLumeraRPC() string { + if val := os.Getenv("LUMERA_RPC_ADDR"); val != "" { + return val + } + + if moniker := detectValidatorMoniker(); moniker != "" { + return fmt.Sprintf("http://%s:26657", moniker) + } + + return defaultLumeraRPC +} + +func resolveLumeraKeyName() string { + validatorsPath := getenv("LUMERA_VALIDATORS_FILE", defaultValidatorsFile) + if moniker := detectValidatorMoniker(); moniker != "" { + if key := loadValidatorKeyByMoniker(validatorsPath, moniker); key != "" { + return key + } + } + + if key := loadPrimaryValidatorKey(validatorsPath); key != "" { + return key + } + + return defaultLumeraKeyName +} + +func detectValidatorMoniker() string { + if val := strings.TrimSpace(os.Getenv("MONIKER")); val != "" { + return val + } + + if val := strings.TrimSpace(os.Getenv("HOSTNAME")); val != "" { + if moniker := normalizeMoniker(val); moniker != "" { + return moniker + } + } + + host, err := os.Hostname() + if err != nil { + return "" + } + return normalizeMoniker(host) +} + +func normalizeMoniker(host string) string { + host = strings.TrimSpace(host) + if host == "" { + return "" + } + host = strings.TrimPrefix(host, "lumera-") + if strings.HasPrefix(host, "supernova_validator_") { + return host + } + return "" +} + +func loadValidatorKeyByMoniker(path, moniker string) string { + data, err := os.ReadFile(path) + if err != nil { + return "" + } + var vals []struct { + Name string `json:"name"` + Moniker string `json:"moniker"` + KeyName string `json:"key_name"` + } + if err := json.Unmarshal(data, &vals); err != nil { + return "" + } + for _, v := range vals { + if v.Moniker == moniker || v.Name == moniker { + return v.KeyName + } + } + return "" +} diff --git a/tests/e2e/supernode/delegation.sh b/tests/e2e/supernode/delegation.sh index 05348ebb..d89c013d 100755 --- a/tests/e2e/supernode/delegation.sh +++ 
b/tests/e2e/supernode/delegation.sh @@ -8,6 +8,7 @@ CHAIN_ID="lumera-devnet-1" KEYRING_BACKEND="test" VALIDATOR_NUM=2 VALIDATOR_CONTAINER="lumera-validator${VALIDATOR_NUM}" +QUERY_CONTAINER="${QUERY_CONTAINER:-lumera-validator1}" # Delegation amount constants REDUCE_AMOUNT="10ulume" @@ -19,16 +20,16 @@ touch "$LOG_FILE" # Step 2: Setup Logging Function log() { - local msg="[$(date '+%Y-%m-%d %H:%M:%S')] $1" - echo "$msg" | tee -a "$LOG_FILE" + local msg="[$(date '+%Y-%m-%d %H:%M:%S')] $1" + echo "$msg" | tee -a "$LOG_FILE" } log_cmd() { - local cmd_output - log "Executing: $1" - cmd_output=$(eval "$1" 2>&1) - echo "$cmd_output" | tee -a "$LOG_FILE" - echo "----------------------------------------" | tee -a "$LOG_FILE" + local cmd_output + log "Executing: $1" + cmd_output=$(eval "$1" 2>&1) + echo "$cmd_output" | tee -a "$LOG_FILE" + echo "----------------------------------------" | tee -a "$LOG_FILE" } log "Starting stake test for validator ${VALIDATOR_NUM}" @@ -37,19 +38,19 @@ log "Starting stake test for validator ${VALIDATOR_NUM}" log "Step 3: Getting validator addresses..." VALIDATOR_ACCOUNT=$(docker exec "$VALIDATOR_CONTAINER" lumerad keys show validator${VALIDATOR_NUM}_key \ - --keyring-backend "$KEYRING_BACKEND" -a) + --keyring-backend "$KEYRING_BACKEND" -a) log "Validator Account: $VALIDATOR_ACCOUNT" VALIDATOR_OPERATOR=$(docker exec "$VALIDATOR_CONTAINER" lumerad keys show validator${VALIDATOR_NUM}_key \ - --keyring-backend "$KEYRING_BACKEND" --bech val -a) + --keyring-backend "$KEYRING_BACKEND" --bech val -a) log "Validator Operator: $VALIDATOR_OPERATOR" # Step 4: Check Initial Status log "Step 4: Checking initial validator status..." -log_cmd "docker exec lumera-validator1 lumerad query staking validator $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query staking validator $VALIDATOR_OPERATOR" log "Checking initial supernode status..." -log_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode $VALIDATOR_OPERATOR" # Step 5: Reduce Stake log "Step 5: Reducing stake by ${REDUCE_AMOUNT}..." @@ -69,12 +70,12 @@ sleep 10 # Step 6: Check Status After Reduction log "Step 6: Checking validator status after stake reduction..." -log_cmd "docker exec lumera-validator1 lumerad query staking validator $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query staking validator $VALIDATOR_OPERATOR" log "Checking supernode status after stake reduction..." -log_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode $VALIDATOR_OPERATOR" -# Step 7: Restore Stake +# Step 7: Restore Stake log "Step 7: Restoring stake by delegating ${RESTORE_AMOUNT} back..." log_cmd "docker exec $VALIDATOR_CONTAINER lumerad tx staking delegate \ $VALIDATOR_OPERATOR \ @@ -93,10 +94,10 @@ sleep 10 # Step 8: Final Status Check log "Step 8: Performing final status check..." log "Checking final validator status..." -log_cmd "docker exec lumera-validator1 lumerad query staking validator $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query staking validator $VALIDATOR_OPERATOR" log "Checking final supernode status..." 
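+# status queries go through ${QUERY_CONTAINER} so the reporting node can be overridden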
-log_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode $VALIDATOR_OPERATOR" # Step 9: Complete -log "Test completed successfully. All output has been saved to $LOG_FILE" \ No newline at end of file +log "Test completed successfully. All output has been saved to $LOG_FILE" diff --git a/tests/e2e/supernode/jaling.sh b/tests/e2e/supernode/jaling.sh index e8587a6f..ae9c160c 100755 --- a/tests/e2e/supernode/jaling.sh +++ b/tests/e2e/supernode/jaling.sh @@ -8,6 +8,7 @@ CHAIN_ID="lumera-devnet-1" KEYRING_BACKEND="test" VALIDATOR_NUM=5 VALIDATOR_CONTAINER="lumera-validator${VALIDATOR_NUM}" +QUERY_CONTAINER="${QUERY_CONTAINER:-lumera-validator1}" SLEEP_FOR_JAIL=60 SLEEP_FOR_UNJAIL=90 @@ -17,16 +18,16 @@ touch "$LOG_FILE" # Step 2: Setup Logging Function log() { - local msg="[$(date '+%Y-%m-%d %H:%M:%S')] $1" - echo "$msg" | tee -a "$LOG_FILE" + local msg="[$(date '+%Y-%m-%d %H:%M:%S')] $1" + echo "$msg" | tee -a "$LOG_FILE" } log_cmd() { - local cmd_output - log "Executing: $1" - cmd_output=$(eval "$1" 2>&1) - echo "$cmd_output" | tee -a "$LOG_FILE" - echo "----------------------------------------" | tee -a "$LOG_FILE" + local cmd_output + log "Executing: $1" + cmd_output=$(eval "$1" 2>&1) + echo "$cmd_output" | tee -a "$LOG_FILE" + echo "----------------------------------------" | tee -a "$LOG_FILE" } log "Starting jail test for validator ${VALIDATOR_NUM}" @@ -35,19 +36,19 @@ log "Starting jail test for validator ${VALIDATOR_NUM}" log "Step 3: Getting validator addresses..." VALIDATOR_ACCOUNT=$(docker exec "$VALIDATOR_CONTAINER" lumerad keys show validator${VALIDATOR_NUM}_key \ - --keyring-backend "$KEYRING_BACKEND" -a) + --keyring-backend "$KEYRING_BACKEND" -a) log "Validator Account: $VALIDATOR_ACCOUNT" VALIDATOR_OPERATOR=$(docker exec "$VALIDATOR_CONTAINER" lumerad keys show validator${VALIDATOR_NUM}_key \ - --keyring-backend "$KEYRING_BACKEND" --bech val -a) + --keyring-backend "$KEYRING_BACKEND" --bech val -a) log "Validator Operator: $VALIDATOR_OPERATOR" # Step 4: Check Initial Status log "Step 4: Checking initial validator status..." -log_cmd "docker exec lumera-validator1 lumerad query staking validator $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query staking validator $VALIDATOR_OPERATOR" log "Checking initial supernode status..." -log_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode $VALIDATOR_OPERATOR" # Step 5: Stop Validator to Force Jailing log "Step 5: Stopping validator container to force jailing..." @@ -58,10 +59,10 @@ sleep "${SLEEP_FOR_JAIL}" # Step 6: Check Status After Jailing log "Step 6: Checking validator status after jailing..." -log_cmd "docker exec lumera-validator1 lumerad query staking validator $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query staking validator $VALIDATOR_OPERATOR" log "Checking supernode status after jailing..." -log_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode $VALIDATOR_OPERATOR" # Step 7: Restart Validator log "Step 7: Restarting validator container..." @@ -87,10 +88,10 @@ sleep 10 # Step 9: Final Status Check log "Step 9: Performing final status check..." log "Checking final validator status..." 
-log_cmd "docker exec lumera-validator1 lumerad query staking validator $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query staking validator $VALIDATOR_OPERATOR" log "Checking final supernode status..." -log_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode $VALIDATOR_OPERATOR" +log_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode $VALIDATOR_OPERATOR" # Step 10: Complete -log "Test completed successfully. All output has been saved to $LOG_FILE" \ No newline at end of file +log "Test completed successfully. All output has been saved to $LOG_FILE" diff --git a/tests/e2e/supernode/setup_five_supernodes.sh b/tests/e2e/supernode/setup_five_supernodes.sh index c6e9e0e2..7efd9fcd 100755 --- a/tests/e2e/supernode/setup_five_supernodes.sh +++ b/tests/e2e/supernode/setup_five_supernodes.sh @@ -9,28 +9,28 @@ CONTAINER_PREFIX="lumera-validator" # IP address mapping declare -A VALIDATOR_IPS=( - [1]="192.168.1.1" - [2]="192.168.1.2" - [3]="192.168.1.3" - [4]="192.168.1.4" - [5]="192.168.1.5" + [1]="192.168.1.1" + [2]="192.168.1.2" + [3]="192.168.1.3" + [4]="192.168.1.4" + [5]="192.168.1.5" ) # Logging Setup LOG_FILE="./setup_supernodes.log" -> "$LOG_FILE" +>"$LOG_FILE" # General log function log() { - local timestamp - timestamp="$(date '+%Y-%m-%d %H:%M:%S')" - echo -e "[$timestamp] [INFO] $*" | tee -a "$LOG_FILE" + local timestamp + timestamp="$(date '+%Y-%m-%d %H:%M:%S')" + echo -e "[$timestamp] [INFO] $*" | tee -a "$LOG_FILE" } # Function to run commands and log output run_cmd() { - log "Running: $*" - eval "$*" 2>&1 | tee -a "$LOG_FILE" + log "Running: $*" + eval "$*" 2>&1 | tee -a "$LOG_FILE" } # Helper Functions @@ -40,29 +40,29 @@ declare -A VAL_ACCOUNT declare -A VAL_OPERATOR query_addresses() { - local i="$1" - local container="${CONTAINER_PREFIX}${i}" - - VAL_ACCOUNT["$i"]="$( - docker exec "$container" lumerad keys show validator${i}_key \ - --keyring-backend "$KEYRING_BACKEND" -a - )" - - VAL_OPERATOR["$i"]="$( - docker exec "$container" lumerad keys show validator${i}_key \ - --keyring-backend "$KEYRING_BACKEND" --bech val -a - )" + local i="$1" + local container="${CONTAINER_PREFIX}${i}" + + VAL_ACCOUNT["$i"]="$( + docker exec "$container" lumerad keys show validator${i}_key \ + --keyring-backend "$KEYRING_BACKEND" -a + )" + + VAL_OPERATOR["$i"]="$( + docker exec "$container" lumerad keys show validator${i}_key \ + --keyring-backend "$KEYRING_BACKEND" --bech val -a + )" } register_supernode() { - local i="$1" - local container="${CONTAINER_PREFIX}${i}" - local valop="${VAL_OPERATOR[$i]}" - local valacct="${VAL_ACCOUNT[$i]}" - local ip="${VALIDATOR_IPS[$i]}" - - log "Registering supernode on ${container} (ValOp: ${valop}, Account: ${valacct}, IP: ${ip})" - run_cmd "docker exec ${container} lumerad tx supernode register-supernode \ + local i="$1" + local container="${CONTAINER_PREFIX}${i}" + local valop="${VAL_OPERATOR[$i]}" + local valacct="${VAL_ACCOUNT[$i]}" + local ip="${VALIDATOR_IPS[$i]}" + + log "Registering supernode on ${container} (ValOp: ${valop}, Account: ${valacct}, IP: ${ip})" + run_cmd "docker exec ${container} lumerad tx supernode register-supernode \ ${valop} \ ${ip} \ 1.0 \ @@ -79,20 +79,20 @@ log "===== Starting Supernode Registration Script =====" # Query addresses for all validators for i in {1..5}; do - log "Querying addresses for validator ${i} ..." 
- query_addresses "$i" - log " Validator ${i} Account: ${VAL_ACCOUNT[$i]}" - log " Validator ${i} Operator: ${VAL_OPERATOR[$i]}" + log "Querying addresses for validator ${i} ..." + query_addresses "$i" + log " Validator ${i} Account: ${VAL_ACCOUNT[$i]}" + log " Validator ${i} Operator: ${VAL_OPERATOR[$i]}" done # Register all validators as supernodes for i in {1..5}; do - register_supernode "$i" - log "Sleeping 3 seconds after registering validator ${i}..." - sleep 3 + register_supernode "$i" + log "Sleeping 3 seconds after registering validator ${i}..." + sleep 3 done log "Querying the list of registered supernodes (via lumera-validator1) ..." run_cmd "docker exec lumera-validator1 lumerad query supernode list-super-nodes" -log "===== Supernode Registration Complete =====" \ No newline at end of file +log "===== Supernode Registration Complete =====" diff --git a/tests/e2e/supernode/supernode.sh b/tests/e2e/supernode/supernode.sh index f9ce90ce..de8bf25f 100755 --- a/tests/e2e/supernode/supernode.sh +++ b/tests/e2e/supernode/supernode.sh @@ -1,39 +1,38 @@ #!/usr/bin/env bash # - set -euo pipefail # Configuration CHAIN_ID="lumera-devnet-1" KEYRING_BACKEND="test" CONTAINER_PREFIX="lumera-validator" +QUERY_CONTAINER="${QUERY_CONTAINER:-${CONTAINER_PREFIX}1}" -# Test validator number +# Test validator number TEST_VALIDATOR_NUM=5 - SLEEP_BETWEEN_OPS=5 # Logging Setup LOG_FILE="./test_supernode_state_transitions.log" -> "$LOG_FILE" +>"$LOG_FILE" log() { - local timestamp - timestamp="$(date '+%Y-%m-%d %H:%M:%S')" - echo -e "[$timestamp] [INFO] $*" | tee -a "$LOG_FILE" + local timestamp + timestamp="$(date '+%Y-%m-%d %H:%M:%S')" + echo -e "[$timestamp] [INFO] $*" | tee -a "$LOG_FILE" } error() { - local timestamp - timestamp="$(date '+%Y-%m-%d %H:%M:%S')" - echo -e "[$timestamp] [ERROR] $*" | tee -a "$LOG_FILE" >&2 + local timestamp + timestamp="$(date '+%Y-%m-%d %H:%M:%S')" + echo -e "[$timestamp] [ERROR] $*" | tee -a "$LOG_FILE" >&2 } run_cmd() { - log "Running: $*" - eval "$*" 2>&1 | tee -a "$LOG_FILE" + log "Running: $*" + eval "$*" 2>&1 | tee -a "$LOG_FILE" } ################################################################# @@ -44,26 +43,26 @@ declare -A VAL_ACCOUNT declare -A VAL_OPERATOR query_addresses() { - local i="$1" - local container="${CONTAINER_PREFIX}${i}" - - VAL_ACCOUNT["$i"]="$( - docker exec "$container" lumerad keys show validator${i}_key \ - --keyring-backend "$KEYRING_BACKEND" -a - )" - - VAL_OPERATOR["$i"]="$( - docker exec "$container" lumerad keys show validator${i}_key \ - --keyring-backend "$KEYRING_BACKEND" --bech val -a - )" + local i="$1" + local container="${CONTAINER_PREFIX}${i}" + + VAL_ACCOUNT["$i"]="$( + docker exec "$container" lumerad keys show validator${i}_key \ + --keyring-backend "$KEYRING_BACKEND" -a + )" + + VAL_OPERATOR["$i"]="$( + docker exec "$container" lumerad keys show validator${i}_key \ + --keyring-backend "$KEYRING_BACKEND" --bech val -a + )" } check_supernode_status() { - local i="$1" - local valop="${VAL_OPERATOR[$i]}" - - log "Checking supernode status for validator ${i} (${valop})" - run_cmd "docker exec lumera-validator1 lumerad query supernode get-supernode ${valop}" + local i="$1" + local valop="${VAL_OPERATOR[$i]}" + + log "Checking supernode status for validator ${i} (${valop})" + run_cmd "docker exec ${QUERY_CONTAINER} lumerad query supernode get-supernode ${valop}" } ################################################################# @@ -145,4 +144,4 @@ run_cmd "docker exec ${CONTAINER_PREFIX}${TEST_VALIDATOR_NUM} lumerad tx superno 
sleep "$SLEEP_BETWEEN_OPS" check_supernode_status "$TEST_VALIDATOR_NUM" -log "===== Completed Supernode State Transitions Test =====" \ No newline at end of file +log "===== Completed Supernode State Transitions Test ====="