diff --git a/.changeset/slow-deer-walk.md b/.changeset/slow-deer-walk.md new file mode 100644 index 00000000000..68e7b9c4158 --- /dev/null +++ b/.changeset/slow-deer-walk.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Expanded `admin profile` to collect PPROF profiles from LOOP Plugins. Added `-vitals` flag for more granular profiling. diff --git a/core/cmd/admin_commands.go b/core/cmd/admin_commands.go index 7faa388842f..7d6bbbe2bbd 100644 --- a/core/cmd/admin_commands.go +++ b/core/cmd/admin_commands.go @@ -2,6 +2,7 @@ package cmd import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -9,6 +10,7 @@ import ( "net/http" "os" "path/filepath" + "slices" "strings" "sync" "time" @@ -20,6 +22,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/sessions" "github.com/smartcontractkit/chainlink/v2/core/utils" + "github.com/smartcontractkit/chainlink/v2/core/web" "github.com/smartcontractkit/chainlink/v2/core/web/presenters" ) @@ -65,6 +68,10 @@ func initAdminSubCmds(s *Shell) []cli.Command { Usage: "output directory of the captured profile", Value: "/tmp/", }, + cli.StringSliceFlag{ + Name: "vitals, v", + Usage: "vitals to collect, can be specified multiple times. Options: 'allocs', 'block', 'cmdline', 'goroutine', 'heap', 'mutex', 'profile', 'threadcreate', 'trace'", + }, }, }, { @@ -319,16 +326,13 @@ func (s *Shell) Status(c *cli.Context) error { // Profile will collect pprof metrics and store them in a folder. 
func (s *Shell) Profile(c *cli.Context) error { ctx := s.ctx() - seconds := c.Uint("seconds") + seconds := c.Int("seconds") baseDir := c.String("output_dir") genDir := filepath.Join(baseDir, "debuginfo-"+time.Now().Format(time.RFC3339)) - if err := os.Mkdir(genDir, 0o755); err != nil { - return s.errorOut(err) - } - var wgPprof sync.WaitGroup - vitals := []string{ + vitals := c.StringSlice("vitals") + allVitals := []string{ "allocs", // A sampling of all past memory allocations "block", // Stack traces that led to blocking on synchronization primitives "cmdline", // The command line invocation of the current program @@ -339,18 +343,103 @@ func (s *Shell) Profile(c *cli.Context) error { "threadcreate", // Stack traces that led to the creation of new OS threads "trace", // A trace of execution of the current program. } - wgPprof.Add(len(vitals)) - s.Logger.Infof("Collecting profiles: %v", vitals) + if len(vitals) == 0 { + vitals = slices.Clone(allVitals) + } else if slices.ContainsFunc(vitals, func(s string) bool { return !slices.Contains(allVitals, s) }) { + return fmt.Errorf("invalid vitals: must be from the set: %v", allVitals) + } + + plugins, err := s.discoverPlugins(ctx) + if err != nil { + return s.errorOut(err) + } + var names []string + for _, group := range plugins { + if name := group.Labels[web.LabelMetaPluginName]; name != "" { + names = append(names, name) + } + } + + if len(names) == 0 { + s.Logger.Infof("Collecting profiles: %v", vitals) + } else { + s.Logger.Infof("Collecting profiles from host and %d plugins: %v", len(names), vitals) + } s.Logger.Infof("writing debug info to %s", genDir) + var wg sync.WaitGroup + errs := make([]error, len(names)+1) + wg.Add(len(names) + 1) + go func() { + defer wg.Done() + errs[0] = s.profile(ctx, genDir, "", vitals, seconds) + }() + for i, name := range names { + go func() { + defer wg.Done() + errs[i+1] = s.profile(ctx, genDir, name, vitals, seconds) + }() + } + wg.Wait() + + err = errors.Join(errs...)
+ if err != nil { + return s.errorOut(err) + } + return nil +} +func (s *Shell) discoverPlugins(ctx context.Context) ( + got []struct { + Targets []string `yaml:"targets"` + Labels map[string]string `yaml:"labels"` + }, + err error, +) { + resp, err := s.HTTP.Get(ctx, "/discovery") + if err != nil { + return + } + defer func() { + if resp.Body != nil { + resp.Body.Close() + } + }() + data, err := io.ReadAll(resp.Body) + if err != nil { + return + } + + if err = json.Unmarshal(data, &got); err != nil { + s.Logger.Errorf("failed to unmarshal discovery response: %s", string(data)) + return + } + return +} + +func (s *Shell) profile(ctx context.Context, genDir string, name string, vitals []string, seconds int) error { + lggr := s.Logger + path := "/v2" + if name != "" { + genDir = filepath.Join(genDir, "plugins", name) + path = "/plugins/" + name + lggr = lggr.With("plugin", name) + } + if err := os.MkdirAll(genDir, 0o755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + errs := make(chan error, len(vitals)) + var wgPprof sync.WaitGroup + wgPprof.Add(len(vitals)) for _, vt := range vitals { - go func(vt string) { + go func(ctx context.Context, vt string) { defer wgPprof.Done() - uri := fmt.Sprintf("/v2/debug/pprof/%s?seconds=%d", vt, seconds) + ctx, cancel := context.WithTimeout(ctx, time.Duration(max(seconds, 0)+web.PPROFOverheadSeconds)*time.Second) + defer cancel() + uri := fmt.Sprintf(path+"/debug/pprof/%s?seconds=%d", vt, seconds) resp, err := s.HTTP.Get(ctx, uri) if err != nil { - errs <- fmt.Errorf("error collecting %s: %w", vt, err) + errs <- fmt.Errorf("error collecting %s: %w", uri, err) return } defer func() { @@ -358,17 +447,16 @@ func (s *Shell) Profile(c *cli.Context) error { resp.Body.Close() } }() - if resp.StatusCode == http.StatusUnauthorized { - errs <- fmt.Errorf("error collecting %s: %w", vt, errUnauthorized) + switch { + case resp.StatusCode == http.StatusUnauthorized: + errs <- fmt.Errorf("error collecting %s: %w", 
uri, errUnauthorized) return - } - if resp.StatusCode == http.StatusBadRequest { - // best effort to interpret the underlying problem + case resp.StatusCode == http.StatusBadRequest: pprofVersion := resp.Header.Get("X-Go-Pprof") if pprofVersion == "1" { b, err2 := io.ReadAll(resp.Body) if err2 != nil { - errs <- fmt.Errorf("error collecting %s: %w", vt, err2) + errs <- fmt.Errorf("error collecting %s: %w", uri, err2) return } respContent := string(b) @@ -377,17 +465,25 @@ func (s *Shell) Profile(c *cli.Context) error { if strings.Contains(respContent, "profile duration exceeds server's WriteTimeout") { errs <- fmt.Errorf("%w: %s", ErrProfileTooLong, respContent) } else { - errs <- fmt.Errorf("error collecting %s: %w: %s", vt, errBadRequest, respContent) + errs <- fmt.Errorf("error collecting %s: %w: %s", uri, errBadRequest, respContent) } } else { - errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest) + errs <- fmt.Errorf("error collecting %s: %w", uri, errBadRequest) + } + return + case resp.StatusCode < 200 || resp.StatusCode > 299: + body, rerr := io.ReadAll(resp.Body) + if rerr != nil { + errs <- fmt.Errorf("error collecting %s: status %d: error reading response: %w", uri, resp.StatusCode, rerr) + } else { + errs <- fmt.Errorf("error collecting %s: status %d: %s", uri, resp.StatusCode, string(body)) } return } // write to file f, err := os.Create(filepath.Join(genDir, vt)) if err != nil { - errs <- fmt.Errorf("error creating file for %s: %w", vt, err) + errs <- fmt.Errorf("error creating file for %s: %w", uri, err) return } wc := utils.NewDeferableWriteCloser(f) @@ -395,20 +491,20 @@ func (s *Shell) Profile(c *cli.Context) error { _, err = io.Copy(wc, resp.Body) if err != nil { - errs <- fmt.Errorf("error writing to file for %s: %w", vt, err) + errs <- fmt.Errorf("error writing to file for %s: %w", uri, err) return } err = wc.Close() if err != nil { - errs <- fmt.Errorf("error closing file for %s: %w", vt, err) + errs <- fmt.Errorf("error closing 
file for %s: %w", uri, err) return } - }(vt) + }(ctx, vt) } wgPprof.Wait() close(errs) - // Atmost one err is emitted per vital. - s.Logger.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals)) + // At most one err is emitted per vital. + lggr.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals)) if len(errs) > 0 { var merr error for err := range errs { diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go index 3a69cd7f8c6..ee7009f9bde 100644 --- a/core/cmd/shell_local.go +++ b/core/cmd/shell_local.go @@ -148,6 +148,10 @@ func initLocalSubCmds(s *Shell, safe bool) []cli.Command { Usage: "output directory of the captured profile", Value: "/tmp/", }, + cli.StringSliceFlag{ + Name: "vitals, v", + Usage: "vitals to collect, can be specified multiple times. Options: 'allocs', 'block', 'cmdline', 'goroutine', 'heap', 'mutex', 'profile', 'threadcreate', 'trace'", + }, }, Hidden: true, Before: func(_ *cli.Context) error { diff --git a/core/web/loop_registry.go b/core/web/loop_registry.go index 4a17f36d95b..0cc7e1ffbcc 100644 --- a/core/web/loop_registry.go +++ b/core/web/loop_registry.go @@ -1,12 +1,16 @@ package web import ( + "bytes" + "context" "encoding/json" "fmt" "html" "io" "net/http" + "net/url" "os" + "strconv" "time" "github.com/gin-gonic/gin" @@ -19,13 +23,15 @@ import ( "github.com/smartcontractkit/chainlink/v2/plugins" ) +const LabelMetaPluginName = "__meta_plugin_name" + type LoopRegistryServer struct { exposedPromPort int discoveryHostName string // discovery endpoint hostname. must be accessible to external prom for scraping loopHostName string // internal hostname of loopps. 
used by node to forward external prom requests registry *plugins.LoopRegistry logger logger.SugaredLogger - client *http.Client + promClient *http.Client jsonMarshalFn func(any) ([]byte, error) } @@ -39,7 +45,7 @@ func NewLoopRegistryServer(app chainlink.Application) *LoopRegistryServer { jsonMarshalFn: json.Marshal, discoveryHostName: discoveryHostName, loopHostName: loopHostName, - client: &http.Client{Timeout: 1 * time.Second}, // some value much less than the prometheus poll interval will do there + promClient: &http.Client{Timeout: 1 * time.Second}, // some value much less than the prometheus poll interval will do there } } @@ -49,11 +55,13 @@ func (l *LoopRegistryServer) discoveryHandler(w http.ResponseWriter, req *http.R var groups []*targetgroup.Group // add node metrics to service discovery - groups = append(groups, metricTarget(l.discoveryHostName, l.exposedPromPort, "/metrics")) + groups = append(groups, pluginGroup(l.discoveryHostName, l.exposedPromPort, "/metrics")) // add all the plugins for _, registeredPlugin := range l.registry.List() { - groups = append(groups, metricTarget(l.discoveryHostName, l.exposedPromPort, pluginMetricPath(registeredPlugin.Name))) + group := pluginGroup(l.discoveryHostName, l.exposedPromPort, pluginMetricPath(registeredPlugin.Name)) + group.Labels[LabelMetaPluginName] = model.LabelValue(registeredPlugin.Name) + groups = append(groups, group) } b, err := l.jsonMarshalFn(groups) @@ -72,7 +80,7 @@ func (l *LoopRegistryServer) discoveryHandler(w http.ResponseWriter, req *http.R } } -func metricTarget(hostName string, port int, path string) *targetgroup.Group { +func pluginGroup(hostName string, port int, path string) *targetgroup.Group { return &targetgroup.Group{ Targets: []model.LabelSet{ // target address will be called by external prometheus @@ -95,7 +103,12 @@ func (l *LoopRegistryServer) pluginMetricHandler(gc *gin.Context) { // unlike discovery, this endpoint is internal btw the node and plugin pluginURL := 
fmt.Sprintf("http://%s:%d/metrics", l.loopHostName, p.EnvCfg.PrometheusPort) - res, err := l.client.Get(pluginURL) //nolint + req, err := http.NewRequestWithContext(gc.Request.Context(), "GET", pluginURL, nil) + if err != nil { + gc.Data(http.StatusInternalServerError, "text/plain", fmt.Appendf(nil, "error creating plugin metrics request: %s", err)) + return + } + res, err := l.promClient.Do(req) if err != nil { msg := "plugin metric handler failed to get plugin url " + html.EscapeString(pluginURL) l.logger.Errorw(msg, "err", err) @@ -114,6 +127,93 @@ func (l *LoopRegistryServer) pluginMetricHandler(gc *gin.Context) { gc.Data(http.StatusOK, "text/plain", b) } +const PPROFOverheadSeconds = 30 + +func pprofURLVals(gc *gin.Context) (urlVals url.Values, timeout time.Duration) { + urlVals = make(url.Values) + if db, ok := gc.GetQuery("debug"); ok { + urlVals.Set("debug", db) + } + if gc, ok := gc.GetQuery("gc"); ok { + urlVals.Set("gc", gc) + } + timeout = PPROFOverheadSeconds * time.Second + if sec, ok := gc.GetQuery("seconds"); ok { + urlVals.Set("seconds", sec) + if i, err := strconv.Atoi(sec); err == nil { + timeout = time.Duration(i+PPROFOverheadSeconds) * time.Second + } + } + return +} + +func (l *LoopRegistryServer) pluginPPROFHandler(gc *gin.Context) { + pluginName := gc.Param("name") + p, ok := l.registry.Get(pluginName) + if !ok { + gc.Data(http.StatusNotFound, "text/plain", fmt.Appendf(nil, "plugin %q does not exist", html.EscapeString(pluginName))) + return + } + + // unlike discovery, this endpoint is internal btw the node and plugin + pluginURL := fmt.Sprintf("http://%s:%d/debug/pprof/"+gc.Param("profile"), l.loopHostName, p.EnvCfg.PrometheusPort) + urlVals, timeout := pprofURLVals(gc) + if s := urlVals.Encode(); s != "" { + pluginURL += "?" 
+ s + } + l.logger.Infow("Forwarding plugin pprof request", "plugin", pluginName, "url", pluginURL) + l.doRequest(gc, "GET", pluginURL, nil, timeout, pluginName) +} + +func (l *LoopRegistryServer) pluginPPROFPOSTSymbolHandler(gc *gin.Context) { + pluginName := gc.Param("name") + p, ok := l.registry.Get(pluginName) + if !ok { + gc.Data(http.StatusNotFound, "text/plain", fmt.Appendf(nil, "plugin %q does not exist", html.EscapeString(pluginName))) + return + } + + // unlike discovery, this endpoint is internal btw the node and plugin + pluginURL := fmt.Sprintf("http://%s:%d/debug/pprof/symbol", l.loopHostName, p.EnvCfg.PrometheusPort) + urlVals, timeout := pprofURLVals(gc) + if s := urlVals.Encode(); s != "" { + pluginURL += "?" + s + } + body, err := io.ReadAll(gc.Request.Body) + if err != nil { + gc.Data(http.StatusInternalServerError, "text/plain", fmt.Appendf(nil, "error reading plugin pprof request body: %s", err)) + return + } + l.doRequest(gc, "POST", pluginURL, bytes.NewReader(body), timeout, pluginName) +} + +func (l *LoopRegistryServer) doRequest(gc *gin.Context, method string, url string, body io.Reader, timeout time.Duration, pluginName string) { + ctx, cancel := context.WithTimeout(gc.Request.Context(), timeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, method, url, body) + if err != nil { + gc.Data(http.StatusInternalServerError, "text/plain", fmt.Appendf(nil, "error creating plugin pprof request: %s", err)) + return + } + res, err := http.DefaultClient.Do(req) + if err != nil { + msg := "plugin pprof handler failed to post plugin url " + html.EscapeString(url) + l.logger.Errorw(msg, "err", err) + gc.Data(http.StatusInternalServerError, "text/plain", fmt.Appendf(nil, "%s: %s", msg, err)) + return + } + defer res.Body.Close() + b, err := io.ReadAll(res.Body) + if err != nil { + msg := fmt.Sprintf("error reading plugin %q pprof", html.EscapeString(pluginName)) + l.logger.Errorw(msg, "err", err) + gc.Data(http.StatusInternalServerError, 
"text/plain", fmt.Appendf(nil, "%s: %s", msg, err)) + return + } + + gc.Data(http.StatusOK, "text/plain", b) +} + func initHostNames() (discoveryHost, loopHost string) { var exists bool discoveryHost, exists = env.PrometheusDiscoveryHostName.Lookup() diff --git a/core/web/loop_registry_test.go b/core/web/loop_registry_test.go index b62ae9cb341..e8ae4c09459 100644 --- a/core/web/loop_registry_test.go +++ b/core/web/loop_registry_test.go @@ -17,16 +17,19 @@ import ( "github.com/smartcontractkit/freeport" "github.com/smartcontractkit/chainlink-common/pkg/loop" + "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + "github.com/smartcontractkit/chainlink/v2/core/web" ) type mockLoopImpl struct { t *testing.T - *loop.PromServer + services.Service } // test prom var to avoid collision with real chainlink metrics @@ -45,19 +48,11 @@ func configurePromRegistry() { func newMockLoopImpl(t *testing.T, port int) *mockLoopImpl { return &mockLoopImpl{ - t: t, - PromServer: loop.PromServerOpts{Handler: testHandler}.New(port, logger.TestLogger(t).Named("mock-loop")), + t: t, + Service: loop.WebServerOpts{Handler: testHandler}.New(logger.TestLogger(t).Named("mock-loop"), port), } } -func (m *mockLoopImpl) start() { - require.NoError(m.t, m.Start()) -} - -func (m *mockLoopImpl) close() { - require.NoError(m.t, m.Close()) -} - func (m *mockLoopImpl) run() { testMetric.Inc() } @@ -79,7 +74,10 @@ func TestLoopRegistry(t *testing.T) { // note we expect this to be an ordered result expectedLabels := []model.LabelSet{ model.LabelSet{"__metrics_path__": 
model.LabelValue(expectedCoreEndPoint)}, - model.LabelSet{"__metrics_path__": model.LabelValue(expectedLooppEndPoint)}, + model.LabelSet{ + "__metrics_path__": model.LabelValue(expectedLooppEndPoint), + web.LabelMetaPluginName: model.LabelValue("mockLoopImpl"), + }, } require.NoError(t, app.KeyStore.OCR().Add(ctx, cltest.DefaultOCRKey)) @@ -95,8 +93,7 @@ func TestLoopRegistry(t *testing.T) { // our mock loop impl and isolated from the default prom register configurePromRegistry() mockLoop := newMockLoopImpl(t, loop.EnvCfg.PrometheusPort) - mockLoop.start() - defer mockLoop.close() + servicetest.Run(t, mockLoop) mockLoop.run() client := app.NewHTTPClient(nil) diff --git a/core/web/router.go b/core/web/router.go index 04e67e9ae6a..0e1636c0f2e 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -74,7 +74,6 @@ func NewRouter(app chainlink.Application, prometheus *ginprom.Prometheus) (*gin. engine.Use(prometheus.Instrument()) } engine.Use(helmet.Default()) - rl := config.WebServer().RateLimit() api := engine.Group( "/", @@ -233,6 +232,8 @@ func loopRoutes(app chainlink.Application, r *gin.RouterGroup) { loopRegistry := NewLoopRegistryServer(app) r.GET("/discovery", ginHandlerFromHTTP(loopRegistry.discoveryHandler)) r.GET("/plugins/:name/metrics", loopRegistry.pluginMetricHandler) + r.GET("/plugins/:name/debug/pprof/*profile", loopRegistry.pluginPPROFHandler) + r.POST("/plugins/:name/debug/pprof/symbol", loopRegistry.pluginPPROFPOSTSymbolHandler) } func v2Routes(app chainlink.Application, r *gin.RouterGroup) { diff --git a/main_test.go b/main_test.go index 92a810cf490..7735b7de53f 100644 --- a/main_test.go +++ b/main_test.go @@ -63,6 +63,8 @@ func TestScripts(t *testing.T) { } t.Parallel() + require.NoError(t, os.Setenv("TMPDIR", "/tmp")) // osx default is too long for go-plugin sockets + visitor := txtar.NewDirVisitor("testdata/scripts", txtar.Recurse, func(path string) error { t.Run(strings.TrimPrefix(path, "testdata/scripts/"), func(t *testing.T) { 
t.Parallel() diff --git a/plugins/cmd/chainlink-evm/main.go b/plugins/cmd/chainlink-evm/main.go index cd467ddf4ab..810526afa0b 100644 --- a/plugins/cmd/chainlink-evm/main.go +++ b/plugins/cmd/chainlink-evm/main.go @@ -116,6 +116,9 @@ func (c *pluginRelayer) NewRelayer(ctx context.Context, configTOML string, keyst evmKeystore := keys.NewChainStore(keystore, cfg.EVM.ChainID.ToInt()) mailMon := mailbox.NewMonitor(c.AppID, logger.Named(c.Logger, "Mailbox")) + if err := mailMon.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start mailbox monitor: %w", err) + } c.SubService(mailMon) chain, err := legacyevm.NewTOMLChain(&cfg.EVM, legacyevm.ChainRelayOpts{ diff --git a/plugins/plugins.private.yaml b/plugins/plugins.private.yaml index 62e82d313ee..1640d7cdfbc 100644 --- a/plugins/plugins.private.yaml +++ b/plugins/plugins.private.yaml @@ -1,5 +1,4 @@ # This file defines private plugins to be installed via `loopinstall`. - # Common plugin configuration defaults: # The `-s` flag is added to strip debug information from the binary to reduce @@ -10,17 +9,17 @@ defaults: plugins: cron: - moduleURI: "github.com/smartcontractkit/capabilities/cron" - gitRef: "17f6545c0ff1742b774e0b87860563cde0ad14bb" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." flags: "-tags timetzdata" kvstore: - enabled: false moduleURI: "github.com/smartcontractkit/capabilities/kvstore" - gitRef: "17f6545c0ff1742b774e0b87860563cde0ad14bb" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." readcontract: - moduleURI: "github.com/smartcontractkit/capabilities/readcontract" - gitRef: "17f6545c0ff1742b774e0b87860563cde0ad14bb" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." 
consensus: - moduleURI: "github.com/smartcontractkit/capabilities/consensus" @@ -29,15 +28,15 @@ plugins: workflowevent: - enabled: false moduleURI: "github.com/smartcontractkit/capabilities/workflowevent" - gitRef: "17f6545c0ff1742b774e0b87860563cde0ad14bb" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." httpaction: - moduleURI: "github.com/smartcontractkit/capabilities/http_action" - gitRef: "e925fcc435c163eebfaf149d1dd49ad71562d5de" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." httptrigger: - moduleURI: "github.com/smartcontractkit/capabilities/http_trigger" - gitRef: "17f6545c0ff1742b774e0b87860563cde0ad14bb" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." evm: - moduleURI: "github.com/smartcontractkit/capabilities/chain_capabilities/evm" @@ -49,7 +48,7 @@ plugins: installPath: "." mock: - moduleURI: "github.com/smartcontractkit/capabilities/mock" - gitRef: "17f6545c0ff1742b774e0b87860563cde0ad14bb" + gitRef: "50c56e227d2bb8bfe656ec2d4288ce8336c5fa9f" installPath: "." confidential-http: - moduleURI: "github.com/smartcontractkit/confidential-compute/enclave/apps/confidential-http/capability" diff --git a/plugins/plugins.public.yaml b/plugins/plugins.public.yaml index aadcb78b2cf..1af031cf074 100644 --- a/plugins/plugins.public.yaml +++ b/plugins/plugins.public.yaml @@ -22,8 +22,6 @@ plugins: cosmos: - moduleURI: "github.com/smartcontractkit/chainlink-cosmos" - # Git reference - can be a tag, branch, or commit hash - # If not specified, uses the latest version. gitRef: "v0.5.2-0.20260219133256-b46d473fd6f5" installPath: "./pkg/cosmos/cmd/chainlink-cosmos" # These will be copied into /usr/lib in the container. 
diff --git a/testdata/scripts/admin/profile/help.txtar b/testdata/scripts/admin/profile/help.txtar index 8838e0b4dcf..4dafdbaf04b 100644 --- a/testdata/scripts/admin/profile/help.txtar +++ b/testdata/scripts/admin/profile/help.txtar @@ -11,4 +11,5 @@ USAGE: OPTIONS: --seconds value, -s value duration of profile capture (default: 8) --output_dir value, -o value output directory of the captured profile (default: "/tmp/") + --vitals value, -v value vitals to collect, can be specified multiple times. Options: 'allocs', 'block', 'cmdline', 'goroutine', 'heap', 'mutex', 'profile', 'threadcreate', 'trace' diff --git a/testdata/scripts/admin/profile/multi-chain-loopp.txtar b/testdata/scripts/admin/profile/multi-chain-loopp.txtar new file mode 100644 index 00000000000..ad6ec0abc7b --- /dev/null +++ b/testdata/scripts/admin/profile/multi-chain-loopp.txtar @@ -0,0 +1,106 @@ +env CL_EVM_CMD=chainlink-evm +env CL_SOLANA_CMD=chainlink-solana + +# start node +exec sh -c 'eval "echo \"$(cat config.toml.tmpl)\" > config.toml"' +exec chainlink node -c config.toml start -p password -a creds & + +# initialize client +env NODEURL=http://localhost:$PORT +exec curl --retry 10 --retry-max-time 60 --retry-connrefused $NODEURL +exec chainlink --remote-node-url $NODEURL admin login -file creds --bypass-version-check + +exec chainlink --remote-node-url $NODEURL admin profile -seconds 1 -output_dir ./profiles +stderr 'Collecting profiles from host and 8 plugins: \[allocs block cmdline goroutine heap mutex profile threadcreate trace\]' + +exec sh -c 'eval "ls -R $WORK"' + +# Ensure we have a profile dir for host and each plugin +stdout $WORK/profiles/debuginfo-.*:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/aptos.42:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/cosmos.Foo:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/evm.1:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/solana.Bar:$ +stdout 
$WORK/profiles/debuginfo-[^/]*/plugins/starknet.Baz:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/sui.67:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/ton.-217:$ +stdout $WORK/profiles/debuginfo-[^/]*/plugins/tron.1001:$ + +-- go:build.integration -- +-- testdb.txt -- +CL_DATABASE_URL +-- testport.txt -- +PORT + +-- password -- +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ +-- creds -- +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs + +-- config.toml.tmpl -- +InsecurePPROFHeap = true + +[Webserver] +HTTPPort = $PORT + +[[Aptos]] +ChainID = '42' + +[[Aptos.Nodes]] +Name = 'primary' +URL = 'http://aptos.blockchain' + +[[Cosmos]] +ChainID = 'Foo' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[[Solana]] +ChainID = 'Bar' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Starknet]] +ChainID = 'Baz' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' + +[[Sui]] +ChainID = '67' +NetworkName = 'foo' +NetworkNameFull = 'foobar' + +[[Sui.Nodes]] +Name = 'node' +URL = 'https://sui.rpc' + +[[Ton]] +ChainID = '-217' + +[[Ton.Nodes]] +Name = 'node' +URL = 'liteserver://dummypublickey@127.0.0.1:4443' + +[[Tron]] +ChainID = '1001' + +[[Tron.Nodes]] +Name = 'example' +URL = 'http://tron.org' +SolidityURL = 'https://solidity.evm' diff --git a/testdata/scripts/health/multi-chain-loopp.txtar b/testdata/scripts/health/multi-chain-loopp.txtar index ba02124f246..fee61cd072d 100644 --- a/testdata/scripts/health/multi-chain-loopp.txtar +++ b/testdata/scripts/health/multi-chain-loopp.txtar @@ -126,8 +126,7 @@ ok EVM.1.RelayerService.PluginRelayerClient.PluginEVM.HeadTracker ! EVM.1.RelayerService.PluginRelayerClient.PluginEVM.HeadTracker.HeadListener Listener connected = false, receiving heads = false ok EVM.1.RelayerService.PluginRelayerClient.PluginEVM.LogBroadcaster -! 
EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor - service is "Unstarted", not started +ok EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor ok EVM.1.RelayerService.PluginRelayerClient.PluginEVM.PluginRelayerConfigEmitter ok EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Relayer ok EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Txm @@ -189,8 +188,6 @@ ok WorkflowStore -- out-unhealthy.txt -- ! EVM.1.RelayerService.PluginRelayerClient.PluginEVM.HeadTracker.HeadListener Listener connected = false, receiving heads = false -! EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor - service is "Unstarted", not started -- out.json -- { @@ -398,8 +395,8 @@ ok WorkflowStore "id": "EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor", "attributes": { "name": "EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor", - "status": "failing", - "output": "service is \"Unstarted\", not started" + "status": "passing", + "output": "" } }, { @@ -928,15 +925,6 @@ ok WorkflowStore "status": "failing", "output": "Listener connected = false, receiving heads = false" } - }, - { - "type": "checks", - "id": "EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor", - "attributes": { - "name": "EVM.1.RelayerService.PluginRelayerClient.PluginEVM.Mailbox.Monitor", - "status": "failing", - "output": "service is \"Unstarted\", not started" - } } ] } diff --git a/testdata/scripts/node/profile/help.txtar b/testdata/scripts/node/profile/help.txtar index 02bc54ab114..ee198fa1b47 100644 --- a/testdata/scripts/node/profile/help.txtar +++ b/testdata/scripts/node/profile/help.txtar @@ -11,4 +11,5 @@ USAGE: OPTIONS: --seconds value, -s value duration of profile capture (default: 8) --output_dir value, -o value output directory of the captured profile (default: "/tmp/") + --vitals value, -v value vitals to collect, can be specified multiple times. 
Options: 'allocs', 'block', 'cmdline', 'goroutine', 'heap', 'mutex', 'profile', 'threadcreate', 'trace'