diff --git a/cmd/cluster/context.go b/cmd/cluster/context.go index 18c0dfa18..3937ff3b8 100644 --- a/cmd/cluster/context.go +++ b/cmd/cluster/context.go @@ -1,15 +1,12 @@ package cluster import ( - "encoding/json" "fmt" "io" "math" "net" "os" "os/exec" - "sort" - "strconv" "strings" "sync" "time" @@ -26,7 +23,6 @@ import ( "github.com/openshift/osdctl/cmd/dynatrace" "github.com/openshift/osdctl/pkg/osdCloud" "github.com/openshift/osdctl/pkg/osdctlConfig" - "github.com/openshift/osdctl/pkg/printer" "github.com/openshift/osdctl/pkg/provider/pagerduty" "github.com/openshift/osdctl/pkg/utils" "github.com/spf13/cobra" @@ -46,32 +42,61 @@ const ( delimiter = ">> " ) -type contextOptions struct { +// ContextOptions is a pure configuration struct containing all the parameters +// needed to query cluster context information. It has no methods except validation. +type ContextOptions struct { + ClusterID string + Days int + Pages int + FullScan bool + Verbose bool + Output string + AWSProfile string + OAuthToken string + UserToken string + JiraToken string + TeamIDs []string +} + +// Validate ensures the query options are valid +func (o ContextOptions) Validate() error { + if o.Days < 1 { + return fmt.Errorf("cannot have a days value lower than 1") + } + switch o.Output { + case shortOutputConfigValue: + return nil + case longOutputConfigValue: + return nil + case jsonOutputConfigValue: + return nil + default: + return fmt.Errorf("unknown Output Format: %s", o.Output) + } +} + +// ContextCache holds the runtime state and cluster information needed during execution. 
+type ContextCache struct { cluster *cmv1.Cluster - output string - verbose bool - full bool clusterID string externalClusterID string baseDomain string organizationID string - days int - pages int - oauthtoken string - usertoken string infraID string - awsProfile string - jiratoken string - team_ids []string regionID string + + // Query options - configuration for the context query + queryOpts ContextOptions } type contextData struct { // Cluster info - ClusterName string - ClusterVersion string - ClusterID string + ClusterName string + ClusterVersion string + ClusterID string + ExternalClusterID string + InfraID string // Current OCM environment (e.g., "production" or "stage") OCMEnv string @@ -79,6 +104,9 @@ type contextData struct { // RegionID (used for region-locked clusters) RegionID string + // Cluster object for advanced queries + Cluster *cmv1.Cluster + // Dynatrace Environment URL and Logs URL DyntraceEnvURL string DyntraceLogsURL string @@ -126,14 +154,21 @@ type contextData struct { // newCmdContext implements the context command to show the current context of a cluster func newCmdContext() *cobra.Command { - ops := newContextOptions() + var queryOpts ContextOptions + contextCmd := &cobra.Command{ Use: "context --cluster-id ", Short: "Shows the context of a specified cluster", Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - // Instead of passing args to setup, we now rely on the flag + // Validate query options + if err := queryOpts.Validate(); err != nil { + return err + } + + // Create and setup context options with query configuration + ops := newContextOptions(queryOpts) err := ops.setup() if err != nil { return err @@ -143,33 +178,31 @@ func newCmdContext() *cobra.Command { }, } - contextCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Provide internal ID of the cluster") + contextCmd.Flags().StringVarP(&queryOpts.ClusterID, "cluster-id", "C", "", "Provide internal ID of the 
cluster") _ = contextCmd.MarkFlagRequired("cluster-id") - contextCmd.Flags().StringVarP(&ops.output, "output", "o", "long", "Valid formats are ['long', 'short', 'json']. Output is set to 'long' by default") - contextCmd.Flags().StringVarP(&ops.awsProfile, "profile", "p", "", "AWS Profile") - contextCmd.Flags().BoolVarP(&ops.verbose, "verbose", "", false, "Verbose output") - contextCmd.Flags().BoolVar(&ops.full, "full", false, "Run full suite of checks.") - contextCmd.Flags().IntVarP(&ops.days, "days", "d", 30, "Command will display X days of Error SLs sent to the cluster. Days is set to 30 by default") - contextCmd.Flags().IntVar(&ops.pages, "pages", 40, "Command will display X pages of Cloud Trail logs for the cluster. Pages is set to 40 by default") - contextCmd.Flags().StringVar(&ops.oauthtoken, "oauthtoken", "", fmt.Sprintf("Pass in PD oauthtoken directly. If not passed in, by default will read `pd_oauth_token` from ~/.config/%s.\nPD OAuth tokens can be generated by visiting %s", osdctlConfig.ConfigFileName, PagerDutyTokenRegistrationUrl)) - contextCmd.Flags().StringVar(&ops.usertoken, "usertoken", "", fmt.Sprintf("Pass in PD usertoken directly. If not passed in, by default will read `pd_user_token` from ~/config/%s", osdctlConfig.ConfigFileName)) - contextCmd.Flags().StringVar(&ops.jiratoken, "jiratoken", "", fmt.Sprintf("Pass in the Jira access token directly. If not passed in, by default will read `jira_token` from ~/.config/%s.\nJira access tokens can be registered by visiting %s/%s", osdctlConfig.ConfigFileName, JiraBaseURL, JiraTokenRegistrationPath)) - contextCmd.Flags().StringArrayVarP(&ops.team_ids, "team-ids", "t", []string{}, fmt.Sprintf("Pass in PD team IDs directly to filter the PD Alerts by team. 
Can also be defined as `team_ids` in ~/.config/%s\nWill show all PD Alerts for all PD service IDs if none is defined", osdctlConfig.ConfigFileName)) + contextCmd.Flags().StringVarP(&queryOpts.Output, "output", "o", "long", "Valid formats are ['long', 'short', 'json']. Output is set to 'long' by default") + contextCmd.Flags().StringVarP(&queryOpts.AWSProfile, "profile", "p", "", "AWS Profile") + contextCmd.Flags().BoolVarP(&queryOpts.Verbose, "verbose", "", false, "Verbose output") + contextCmd.Flags().BoolVar(&queryOpts.FullScan, "full", false, "Run full suite of checks.") + contextCmd.Flags().IntVarP(&queryOpts.Days, "days", "d", 30, "Command will display X days of Error SLs sent to the cluster. Days is set to 30 by default") + contextCmd.Flags().IntVar(&queryOpts.Pages, "pages", 40, "Command will display X pages of Cloud Trail logs for the cluster. Pages is set to 40 by default") + contextCmd.Flags().StringVar(&queryOpts.OAuthToken, "oauthtoken", "", fmt.Sprintf("Pass in PD oauthtoken directly. If not passed in, by default will read `pd_oauth_token` from ~/.config/%s.\nPD OAuth tokens can be generated by visiting %s", osdctlConfig.ConfigFileName, PagerDutyTokenRegistrationUrl)) + contextCmd.Flags().StringVar(&queryOpts.UserToken, "usertoken", "", fmt.Sprintf("Pass in PD usertoken directly. If not passed in, by default will read `pd_user_token` from ~/config/%s", osdctlConfig.ConfigFileName)) + contextCmd.Flags().StringVar(&queryOpts.JiraToken, "jiratoken", "", fmt.Sprintf("Pass in the Jira access token directly. If not passed in, by default will read `jira_token` from ~/.config/%s.\nJira access tokens can be registered by visiting %s/%s", osdctlConfig.ConfigFileName, JiraBaseURL, JiraTokenRegistrationPath)) + contextCmd.Flags().StringArrayVarP(&queryOpts.TeamIDs, "team-ids", "t", []string{}, fmt.Sprintf("Pass in PD team IDs directly to filter the PD Alerts by team. 
Can also be defined as `team_ids` in ~/.config/%s\nWill show all PD Alerts for all PD service IDs if none is defined", osdctlConfig.ConfigFileName)) return contextCmd } -func newContextOptions() *contextOptions { - return &contextOptions{} -} - -func (o *contextOptions) setup() error { - if o.days < 1 { - return fmt.Errorf("cannot have a days value lower than 1") +func newContextOptions(queryOpts ContextOptions) *ContextCache { + return &ContextCache{ + queryOpts: queryOpts, } +} +func (o *ContextCache) setup() error { // Create OCM client to talk to cluster API - defer utils.StartDelayTracker(o.verbose, "OCM Clusters").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "OCM Clusters").End() ocmClient, err := utils.CreateConnection() if err != nil { return err @@ -181,7 +214,7 @@ func (o *contextOptions) setup() error { }() // Use the clusterID flag value instead of args - clusterArgs := []string{o.clusterID} + clusterArgs := []string{o.queryOpts.ClusterID} clusters := utils.GetClusters(ocmClient, clusterArgs) if len(clusters) != 1 { return fmt.Errorf("unexpected number of clusters matched input. 
Expected 1 got %d", len(clusters)) @@ -193,12 +226,12 @@ func (o *contextOptions) setup() error { o.baseDomain = o.cluster.DNS().BaseDomain() o.infraID = o.cluster.InfraID() - if o.usertoken == "" { - o.usertoken = viper.GetString(pagerduty.PagerDutyUserTokenConfigKey) + if o.queryOpts.UserToken == "" { + o.queryOpts.UserToken = viper.GetString(pagerduty.PagerDutyUserTokenConfigKey) } - if o.oauthtoken == "" { - o.oauthtoken = viper.GetString(pagerduty.PagerDutyOauthTokenConfigKey) + if o.queryOpts.OAuthToken == "" { + o.queryOpts.OAuthToken = viper.GetString(pagerduty.PagerDutyOauthTokenConfigKey) } sub, err := utils.GetSubFromClusterID(ocmClient, *o.cluster) @@ -212,19 +245,7 @@ func (o *contextOptions) setup() error { return nil } -func (o *contextOptions) run() error { - var printFunc func(*contextData, io.Writer) - switch o.output { - case shortOutputConfigValue: - printFunc = o.printShortOutput - case longOutputConfigValue: - printFunc = o.printLongOutput - case jsonOutputConfigValue: - printFunc = o.printJsonOutput - default: - return fmt.Errorf("unknown Output Format: %s", o.output) - } - +func (o *ContextCache) run() error { currentData, dataErrors := o.generateContextData() if currentData == nil { fmt.Fprintf(os.Stderr, "Failed to query cluster info: %+v", dataErrors) @@ -238,117 +259,35 @@ func (o *contextOptions) run() error { } } - printFunc(currentData, os.Stdout) - - return nil + // Use the presenter to render output + presenter := NewClusterContextPresenter(os.Stdout) + return presenter.Render(currentData, o.queryOpts) } -func (o *contextOptions) printLongOutput(data *contextData, w io.Writer) { - data.printClusterHeader(w) - - fmt.Fprintln(w, strings.TrimSpace(data.Description)) - fmt.Println() - printNetworkInfo(data, w) - fmt.Println() - utils.PrintHandoverAnnouncements(data.HandoverAnnouncements) - fmt.Println() - utils.PrintLimitedSupportReasons(data.LimitedSupportReasons) - fmt.Println() - printJIRASupportExceptions(data.SupportExceptions, w) 
- fmt.Println() - utils.PrintServiceLogs(data.ServiceLogs, o.verbose, o.days) - fmt.Println() - utils.PrintJiraIssues(data.JiraIssues) - fmt.Println() - utils.PrintPDAlerts(data.PdAlerts, data.pdServiceID) - fmt.Println() - - if o.full { - printHistoricalPDAlertSummary(data.HistoricalAlerts, data.pdServiceID, o.days, w) - fmt.Println() - - printCloudTrailLogs(data.CloudtrailEvents, w) - fmt.Println() - } - - // Print other helpful links - o.printOtherLinks(data, w) - fmt.Println() - - // Print Dynatrace URL - printDynatraceResources(data, w) - - // Print User Banned Details - printUserBannedStatus(data, w) - - // Print SDNtoOVN Migration Status - printSDNtoOVNMigrationStatus(data, w) -} - -func (o *contextOptions) printShortOutput(data *contextData, w io.Writer) { - data.printClusterHeader(w) - - highAlertCount := 0 - lowAlertCount := 0 - for _, alerts := range data.PdAlerts { - for _, alert := range alerts { - if strings.ToLower(alert.Urgency) == "high" { - highAlertCount++ - } else { - lowAlertCount++ - } - } - } - - historicalAlertsString := "N/A" - historicalAlertsCount := 0 - if data.HistoricalAlerts != nil { - for _, histAlerts := range data.HistoricalAlerts { - for _, histAlert := range histAlerts { - historicalAlertsCount += histAlert.Count - } - } - historicalAlertsString = fmt.Sprintf("%d", historicalAlertsCount) - } - - var numInternalServiceLogs int - for _, serviceLog := range data.ServiceLogs { - if serviceLog.InternalOnly() { - numInternalServiceLogs++ - } +func GenerateContextData(clusterId string) (string, []error) { + queryOpts := ContextOptions{ + ClusterID: clusterId, + Days: 30, + Pages: 40, + FullScan: false, + Verbose: false, + Output: jsonOutputConfigValue, + } + contextOptions := newContextOptions(queryOpts) + err := contextOptions.setup() + if err != nil { + return "", []error{err} } - table := printer.NewTablePrinter(w, 20, 1, 2, ' ') - table.AddRow([]string{ - "Version", - "Supported?", - fmt.Sprintf("SLs (last %d d)", o.days), - "Jira 
Tickets", - "Current Alerts", - fmt.Sprintf("Historical Alerts (last %d d)", o.days), - }) - table.AddRow([]string{ - data.ClusterVersion, - fmt.Sprintf("%t", len(data.LimitedSupportReasons) == 0), - fmt.Sprintf("%d (%d internal)", len(data.ServiceLogs), numInternalServiceLogs), - fmt.Sprintf("%d", len(data.JiraIssues)), - fmt.Sprintf("H: %d | L: %d", highAlertCount, lowAlertCount), - historicalAlertsString, - }) - - if err := table.Flush(); err != nil { - fmt.Fprintf(w, "Error printing Short Output: %v\n", err) - } -} + contextData, errs := contextOptions.generateContextData() -func (o *contextOptions) printJsonOutput(data *contextData, w io.Writer) { - jsonOut, err := json.MarshalIndent(data, "", " ") + builder := &strings.Builder{} + presenter := NewClusterContextPresenter(builder) + err = presenter.Render(contextData, queryOpts) if err != nil { - fmt.Fprintf(os.Stderr, "Can't marshal results to json: %v\n", err) - return + errs = append(errs, err) } - - fmt.Fprintln(w, string(jsonOut)) + return builder.String(), errs } // generateContextData Creates a contextData struct that contains all the @@ -358,7 +297,7 @@ func (o *contextOptions) printJsonOutput(data *contextData, w io.Writer) { // value will only be nil, if this function fails to get basic cluster // information. The second return value will *never* be nil, but instead have a // length of 0 if no errors occurred -func (o *contextOptions) generateContextData() (*contextData, []error) { +func (o *ContextCache) generateContextData() (*contextData, []error) { data := &contextData{} errors := []error{} @@ -368,8 +307,8 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { pdwg := sync.WaitGroup{} var skipPagerDutyCollection bool pdProvider, err := pagerduty.NewClient(). - WithUserToken(o.usertoken). - WithOauthToken(o.oauthtoken). + WithUserToken(o.queryOpts.UserToken). + WithOauthToken(o.queryOpts.OAuthToken). WithBaseDomain(o.baseDomain). 
WithTeamIdList(viper.GetStringSlice(pagerduty.PagerDutyTeamIDsKey)). Init() @@ -396,6 +335,10 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { data.ClusterName = o.cluster.Name() data.ClusterID = o.clusterID + data.ExternalClusterID = o.externalClusterID + data.InfraID = o.infraID + data.RegionID = o.regionID + data.Cluster = o.cluster data.ClusterVersion = o.cluster.Version().RawID() data.OCMEnv = utils.GetCurrentOCMEnv(ocmClient) @@ -440,7 +383,7 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetLimitedSupport := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Limited Support reasons").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Limited Support reasons").End() limitedSupportReasons, err := utils.GetClusterLimitedSupportReasons(ocmClient, o.clusterID) if err != nil { errors = append(errors, fmt.Errorf("error while getting Limited Support status reasons: %v", err)) @@ -451,8 +394,8 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetServiceLogs := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Service Logs").End() - timeToCheckSvcLogs := time.Now().AddDate(0, 0, -o.days) + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Service Logs").End() + timeToCheckSvcLogs := time.Now().AddDate(0, 0, -o.queryOpts.Days) data.ServiceLogs, err = servicelog.GetServiceLogsSince(o.clusterID, timeToCheckSvcLogs, false, false) if err != nil { errors = append(errors, fmt.Errorf("error while getting the service logs: %v", err)) @@ -461,7 +404,7 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetBannedUser := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Check Banned User").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Check Banned User").End() subscription, err := utils.GetSubscription(ocmClient, data.ClusterID) if err != nil { errors = append(errors, fmt.Errorf("error while 
getting subscripton %v", err)) @@ -477,8 +420,8 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetJiraIssues := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Jira Issues").End() - data.JiraIssues, err = utils.GetJiraIssuesForCluster(o.clusterID, o.externalClusterID, o.jiratoken) + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Jira Issues").End() + data.JiraIssues, err = utils.GetJiraIssuesForCluster(o.clusterID, o.externalClusterID, o.queryOpts.JiraToken) if err != nil { errors = append(errors, fmt.Errorf("error while getting the open jira tickets: %v", err)) } @@ -486,14 +429,14 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetHandoverAnnouncements := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Handover Announcements").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Handover Announcements").End() org, err := utils.GetOrganization(ocmClient, o.clusterID) if err != nil { fmt.Printf("Failed to get Subscription for cluster %s - err: %q", o.clusterID, err) } productID := o.cluster.Product().ID() - data.HandoverAnnouncements, err = utils.GetRelatedHandoverAnnouncements(o.clusterID, o.externalClusterID, o.jiratoken, org.Name(), productID, o.cluster.Hypershift().Enabled(), o.cluster.Version().RawID()) + data.HandoverAnnouncements, err = utils.GetRelatedHandoverAnnouncements(o.clusterID, o.externalClusterID, o.queryOpts.JiraToken, org.Name(), productID, o.cluster.Hypershift().Enabled(), o.cluster.Version().RawID()) if err != nil { errors = append(errors, fmt.Errorf("error while getting the open jira tickets: %v", err)) } @@ -501,8 +444,8 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetSupportExceptions := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Support Exceptions").End() - data.SupportExceptions, err = utils.GetJiraSupportExceptionsForOrg(o.organizationID, o.jiratoken) + defer 
utils.StartDelayTracker(o.queryOpts.Verbose, "Support Exceptions").End() + data.SupportExceptions, err = utils.GetJiraSupportExceptionsForOrg(o.organizationID, o.queryOpts.JiraToken) if err != nil { errors = append(errors, fmt.Errorf("error while getting support exceptions: %v", err)) } @@ -511,7 +454,7 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetDynatraceDetails := func() { var clusterID string = o.clusterID defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Dynatrace URL").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Dynatrace URL").End() hcpCluster, err := dynatrace.FetchClusterDetails(clusterID) if err != nil { @@ -547,14 +490,14 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { return } - delayTracker := utils.StartDelayTracker(o.verbose, "PagerDuty Service") + delayTracker := utils.StartDelayTracker(o.queryOpts.Verbose, "PagerDuty Service") data.pdServiceID, err = pdProvider.GetPDServiceIDs() if err != nil { errors = append(errors, fmt.Errorf("error getting PD Service ID: %v", err)) } delayTracker.End() - defer utils.StartDelayTracker(o.verbose, "current PagerDuty Alerts").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "current PagerDuty Alerts").End() data.PdAlerts, err = pdProvider.GetFiringAlertsForCluster(data.pdServiceID) if err != nil { errors = append(errors, fmt.Errorf("error while getting current PD Alerts: %v", err)) @@ -563,7 +506,7 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetMigrationInfo := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Migration Info").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Migration Info").End() migrationResponse, err := utils.GetMigration(ocmClient, o.clusterID) if err != nil { @@ -596,11 +539,11 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetMigrationInfo, ) - if o.output == longOutputConfigValue { + if o.queryOpts.Output 
== longOutputConfigValue { GetDescription := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "Cluster Description").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "Cluster Description").End() cmd := "ocm describe cluster " + o.clusterID output, err := exec.Command("bash", "-c", cmd).Output() @@ -617,11 +560,11 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { ) } - if o.full { + if o.queryOpts.FullScan { GetHistoricalPagerDutyAlerts := func() { pdwg.Wait() defer wg.Done() - defer utils.StartDelayTracker(o.verbose, "historical PagerDuty Alerts").End() + defer utils.StartDelayTracker(o.queryOpts.Verbose, "historical PagerDuty Alerts").End() data.HistoricalAlerts, err = pdProvider.GetHistoricalAlertsForCluster(data.pdServiceID) if err != nil { errors = append(errors, fmt.Errorf("error while getting historical PD Alert Data: %v", err)) @@ -630,8 +573,8 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { GetCloudTrailLogs := func() { defer wg.Done() - defer utils.StartDelayTracker(o.verbose, fmt.Sprintf("past %d pages of Cloudtrail data", o.pages)).End() - data.CloudtrailEvents, err = GetCloudTrailLogsForCluster(o.awsProfile, o.clusterID, o.pages) + defer utils.StartDelayTracker(o.queryOpts.Verbose, fmt.Sprintf("past %d pages of Cloudtrail data", o.queryOpts.Pages)).End() + data.CloudtrailEvents, err = GetCloudTrailLogsForCluster(o.queryOpts.AWSProfile, o.clusterID, o.queryOpts.Pages) if err != nil { errors = append(errors, fmt.Errorf("error getting cloudtrail logs for cluster: %v", err)) } @@ -693,144 +636,6 @@ func GetCloudTrailLogsForCluster(awsProfile string, clusterID string, maxPages i return filteredEvents, nil } -func printHistoricalPDAlertSummary(incidentCounters map[string][]*pagerduty.IncidentOccurrenceTracker, serviceIDs []string, sinceDays int, w io.Writer) { - var name string = "PagerDuty Historical Alerts" - fmt.Fprintln(w, delimiter+name) - - for _, serviceID := range 
serviceIDs { - - if len(incidentCounters[serviceID]) == 0 { - fmt.Fprintln(w, "Service: https://redhat.pagerduty.com/service-directory/"+serviceID+": None") - continue - } - - fmt.Fprintln(w, "Service: https://redhat.pagerduty.com/service-directory/"+serviceID+":") - table := printer.NewTablePrinter(w, 20, 1, 3, ' ') - table.AddRow([]string{"Type", "Count", "Last Occurrence"}) - totalIncidents := 0 - for _, incident := range incidentCounters[serviceID] { - table.AddRow([]string{incident.IncidentName, strconv.Itoa(incident.Count), incident.LastOccurrence}) - totalIncidents += incident.Count - } - - // Add empty row for readability - table.AddRow([]string{}) - if err := table.Flush(); err != nil { - fmt.Fprintf(w, "Error printing %s: %v\n", name, err) - } - - fmt.Fprintln(w, "\tTotal number of incidents [", totalIncidents, "] in [", sinceDays, "] days") - } -} - -func printJIRASupportExceptions(issues []jira.Issue, w io.Writer) { - var name string = "Support Exceptions" - fmt.Fprintln(w, delimiter+name) - - for _, i := range issues { - fmt.Fprintf(w, "[%s](%s/%s): %+v [Status: %s]\n", i.Key, i.Fields.Type.Name, i.Fields.Priority.Name, i.Fields.Summary, i.Fields.Status.Name) - fmt.Fprintf(w, "- Link: %s/browse/%s\n\n", JiraBaseURL, i.Key) - } - - if len(issues) == 0 { - fmt.Fprintln(w, "None") - } -} - -func (o *contextOptions) printOtherLinks(data *contextData, w io.Writer) { - var name string = "External resources" - fmt.Fprintln(w, delimiter+name) - var ohssQueryURL = fmt.Sprintf("%[1]s/issues/?jql=project%%20%%3D%%22OpenShift%%20Hosted%%20SRE%%20Support%%22and%%20(%%22Cluster%%20ID%%22%%20~%%20%%20%%22%[2]s%%22OR%%22Cluster%%20ID%%22~%%22%[3]s%%22OR%%22description%%22~%%22%[2]s%%22OR%%22description%%22~%%22%[3]s%%22)", - JiraBaseURL, - o.clusterID, - o.externalClusterID) - links := map[string]string{ - "OHSS Cards": ohssQueryURL, - "CCX dashboard": fmt.Sprintf("https://kraken.psi.redhat.com/clusters/%s", o.externalClusterID), - "Splunk Audit Logs": 
o.buildSplunkURL(data), - } - - if data.pdServiceID != nil { - for _, id := range data.pdServiceID { - links[fmt.Sprintf("PagerDuty Service %s", id)] = fmt.Sprintf("https://redhat.pagerduty.com/service-directory/%s", id) - } - } - - // Sort, so it's always a predictable order - var keys []string - for k := range links { - keys = append(keys, k) - } - sort.Strings(keys) - - table := printer.NewTablePrinter(w, 20, 1, 3, ' ') - for _, link := range keys { - table.AddRow([]string{link, strings.TrimSpace(links[link])}) - } - - if err := table.Flush(); err != nil { - fmt.Fprintf(w, "Error printing %s: %v\n", name, err) - } -} - -func (o *contextOptions) buildSplunkURL(data *contextData) string { - // Determine the relevant Splunk URL - // at the time of this writing, the only region we will support in the near future will be the ap-southeast-1 - // region. Additionally, region-based clusters will ONLY be supported for HCP. Therefore, if we see a region - // at all, we can assume that it's ap-southeast-1 and use that URL. 
- if o.regionID != "" { - return buildHCPSplunkURL(SGPSplunkURL, data.OCMEnv, o.cluster) - } - if o.cluster.Hypershift().Enabled() { - return buildHCPSplunkURL(HCPSplunkURL, data.OCMEnv, o.cluster) - } else { - switch data.OCMEnv { - case "production": - return fmt.Sprintf(ClassicSplunkURL, "openshift_managed_audit", o.infraID) - case "stage": - return fmt.Sprintf(ClassicSplunkURL, "openshift_managed_audit_stage", o.infraID) - default: - return "" - } - } -} - -func buildHCPSplunkURL(baseURL string, environment string, cluster *cmv1.Cluster) string { - switch environment { - case "production": - return fmt.Sprintf(baseURL, "openshift_managed_hypershift_audit", "production", cluster.ID(), cluster.Name()) - case "stage": - return fmt.Sprintf(baseURL, "openshift_managed_hypershift_audit_stage", "staging", cluster.ID(), cluster.Name()) - default: - return "" - } -} - -func printCloudTrailLogs(events []*types.Event, w io.Writer) { - var name string = "Potentially interesting CloudTrail events" - fmt.Fprintln(w, delimiter+name) - - if events == nil { - fmt.Fprintln(w, "None") - return - } - - table := printer.NewTablePrinter(w, 20, 1, 3, ' ') - table.AddRow([]string{"EventId", "EventName", "Username", "EventTime"}) - for _, event := range events { - if event.Username == nil { - table.AddRow([]string{*event.EventId, *event.EventName, "", event.EventTime.String()}) - } else { - table.AddRow([]string{*event.EventId, *event.EventName, *event.Username, event.EventTime.String()}) - } - } - // Add empty row for readability - table.AddRow([]string{}) - if err := table.Flush(); err != nil { - fmt.Fprintf(w, "Error printing %s: %v\n", name, err) - } -} - // These are a list of skippable aws event types, as they won't indicate any modification on the customer's side. 
func skippableEvent(eventName string) bool { skippableList := []string{ @@ -852,87 +657,9 @@ func skippableEvent(eventName string) bool { return false } -func printNetworkInfo(data *contextData, w io.Writer) { - var name = "Network Info" - fmt.Fprintln(w, delimiter+name) - - table := printer.NewTablePrinter(w, 20, 1, 3, ' ') - table.AddRow([]string{"Network Type", data.NetworkType}) - table.AddRow([]string{"MachineCIDR", data.NetworkMachineCIDR}) - table.AddRow([]string{"ServiceCIDR", data.NetworkServiceCIDR}) - table.AddRow([]string{"Max Services", strconv.Itoa(data.NetworkMaxServices)}) - table.AddRow([]string{"PodCIDR", data.NetworkPodCIDR}) - table.AddRow([]string{"Host Prefix", strconv.Itoa(data.NetworkHostPrefix)}) - table.AddRow([]string{"Max Nodes (based on PodCIDR)", strconv.Itoa(data.NetworkMaxNodesFromPodCIDR)}) - table.AddRow([]string{"Max pods per node", strconv.Itoa(data.NetworkMaxPodsPerNode)}) - - if err := table.Flush(); err != nil { - fmt.Fprintf(w, "Error printing %s: %v\n", name, err) - } -} - -func printDynatraceResources(data *contextData, w io.Writer) { - var name string = "Dynatrace Details" - fmt.Fprintln(w, delimiter+name) - - links := map[string]string{ - "Dynatrace Tenant URL": data.DyntraceEnvURL, - "Logs App URL": data.DyntraceLogsURL, - } - - // Sort, so it's always a predictable order - var keys []string - for k := range links { - keys = append(keys, k) - } - sort.Strings(keys) - - table := printer.NewTablePrinter(w, 20, 1, 3, ' ') - for _, link := range keys { - url := strings.TrimSpace(links[link]) - if url == dynatrace.ErrUnsupportedCluster.Error() { - fmt.Fprintln(w, dynatrace.ErrUnsupportedCluster.Error()) - break - } else if url != "" { - table.AddRow([]string{link, url}) - } - } - - if err := table.Flush(); err != nil { - fmt.Fprintf(w, "Error printing %s: %v\n", name, err) - } -} - -func printUserBannedStatus(data *contextData, w io.Writer) { - var name string = "User Ban Details" - fmt.Fprintln(w, "\n"+delimiter+name) - if 
data.UserBanned { - fmt.Fprintln(w, "User is banned") - fmt.Fprintf(w, "Ban code = %v\n", data.BanCode) - fmt.Fprintf(w, "Ban description = %v\n", data.BanDescription) - if data.BanCode == BanCodeExportControlCompliance { - fmt.Fprintln(w, "User banned due to export control compliance.\nPlease follow the steps detailed here: https://github.com/openshift/ops-sop/blob/master/v4/alerts/UpgradeConfigSyncFailureOver4HrSRE.md#user-banneddisabled-due-to-export-control-compliance .") - } - } else { - fmt.Fprintln(w, "User is not banned") - } -} - func (data *contextData) printClusterHeader(w io.Writer) { clusterHeader := fmt.Sprintf("%s -- %s", data.ClusterName, data.ClusterID) fmt.Fprintln(w, strings.Repeat("=", len(clusterHeader))) fmt.Fprintln(w, clusterHeader) fmt.Fprintln(w, strings.Repeat("=", len(clusterHeader))) } - -func printSDNtoOVNMigrationStatus(data *contextData, w io.Writer) { - name := "SDN to OVN Migration Status" - fmt.Fprintln(w, "\n"+delimiter+name) - - if data.SdnToOvnMigration != nil && data.MigrationStateValue == cmv1.ClusterMigrationStateValueInProgress { - fmt.Fprintln(w, "SDN to OVN migration is in progress") - return - } - - fmt.Fprintln(w, "No active SDN to OVN migrations") -} diff --git a/cmd/cluster/context_presenter.go b/cmd/cluster/context_presenter.go new file mode 100644 index 000000000..1affd6c41 --- /dev/null +++ b/cmd/cluster/context_presenter.go @@ -0,0 +1,388 @@ +package cluster + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/andygrunwald/go-jira" + "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift/osdctl/cmd/dynatrace" + "github.com/openshift/osdctl/pkg/printer" + "github.com/openshift/osdctl/pkg/provider/pagerduty" + "github.com/openshift/osdctl/pkg/utils" +) + +// ClusterContextPresenter handles all output formatting for cluster context data. 
+// It separates presentation logic from data gathering and business logic. +type ClusterContextPresenter struct { + writer io.Writer +} + +// NewClusterContextPresenter creates a new presenter that writes to the given writer +func NewClusterContextPresenter(w io.Writer) *ClusterContextPresenter { + return &ClusterContextPresenter{writer: w} +} + +// Render renders the context data in the specified format +func (p *ClusterContextPresenter) Render(data *contextData, opts ContextOptions) error { + switch opts.Output { + case shortOutputConfigValue: + return p.RenderShort(data, opts) + case longOutputConfigValue: + return p.RenderLong(data, opts) + case jsonOutputConfigValue: + return p.RenderJSON(data) + default: + return fmt.Errorf("unknown output format: %s", opts.Output) + } +} + +// RenderLong renders the full detailed output +func (p *ClusterContextPresenter) RenderLong(data *contextData, opts ContextOptions) error { + data.printClusterHeader(p.writer) + + fmt.Fprintln(p.writer, strings.TrimSpace(data.Description)) + fmt.Fprintln(p.writer) + p.printNetworkInfo(data) + fmt.Fprintln(p.writer) + utils.PrintHandoverAnnouncements(data.HandoverAnnouncements) + fmt.Fprintln(p.writer) + utils.PrintLimitedSupportReasons(data.LimitedSupportReasons) + fmt.Fprintln(p.writer) + p.printJIRASupportExceptions(data.SupportExceptions) + fmt.Fprintln(p.writer) + utils.PrintServiceLogs(data.ServiceLogs, opts.Verbose, opts.Days) + fmt.Fprintln(p.writer) + utils.PrintJiraIssues(data.JiraIssues) + fmt.Fprintln(p.writer) + utils.PrintPDAlerts(data.PdAlerts, data.pdServiceID) + fmt.Fprintln(p.writer) + + if opts.FullScan { + p.printHistoricalPDAlertSummary(data.HistoricalAlerts, data.pdServiceID, opts.Days) + fmt.Fprintln(p.writer) + + p.printCloudTrailLogs(data.CloudtrailEvents) + fmt.Fprintln(p.writer) + } + + // Print other helpful links + p.printOtherLinks(data, opts) + fmt.Fprintln(p.writer) + + // Print Dynatrace URL + p.printDynatraceResources(data) + + // Print User Banned 
Details + p.printUserBannedStatus(data) + + // Print SDNtoOVN Migration Status + p.printSDNtoOVNMigrationStatus(data) + + return nil +} + +// RenderShort renders the compact summary output +func (p *ClusterContextPresenter) RenderShort(data *contextData, opts ContextOptions) error { + data.printClusterHeader(p.writer) + + highAlertCount := 0 + lowAlertCount := 0 + for _, alerts := range data.PdAlerts { + for _, alert := range alerts { + if strings.ToLower(alert.Urgency) == "high" { + highAlertCount++ + } else { + lowAlertCount++ + } + } + } + + historicalAlertsString := "N/A" + historicalAlertsCount := 0 + if data.HistoricalAlerts != nil { + for _, histAlerts := range data.HistoricalAlerts { + for _, histAlert := range histAlerts { + historicalAlertsCount += histAlert.Count + } + } + historicalAlertsString = fmt.Sprintf("%d", historicalAlertsCount) + } + + var numInternalServiceLogs int + for _, serviceLog := range data.ServiceLogs { + if serviceLog.InternalOnly() { + numInternalServiceLogs++ + } + } + + table := printer.NewTablePrinter(p.writer, 20, 1, 2, ' ') + table.AddRow([]string{ + "Version", + "Supported?", + fmt.Sprintf("SLs (last %d d)", opts.Days), + "Jira Tickets", + "Current Alerts", + fmt.Sprintf("Historical Alerts (last %d d)", opts.Days), + }) + table.AddRow([]string{ + data.ClusterVersion, + fmt.Sprintf("%t", len(data.LimitedSupportReasons) == 0), + fmt.Sprintf("%d (%d internal)", len(data.ServiceLogs), numInternalServiceLogs), + fmt.Sprintf("%d", len(data.JiraIssues)), + fmt.Sprintf("H: %d | L: %d", highAlertCount, lowAlertCount), + historicalAlertsString, + }) + + if err := table.Flush(); err != nil { + return fmt.Errorf("error printing short output: %v", err) + } + + return nil +} + +// RenderJSON renders the output as JSON +func (p *ClusterContextPresenter) RenderJSON(data *contextData) error { + jsonOut, err := json.MarshalIndent(data, "", " ") + if err != nil { + return fmt.Errorf("can't marshal results to json: %v", err) + } + + 
fmt.Fprintln(p.writer, string(jsonOut)) + return nil +} + +// printOtherLinks prints external resource links +func (p *ClusterContextPresenter) printOtherLinks(data *contextData, opts ContextOptions) { + name := "External resources" + fmt.Fprintln(p.writer, delimiter+name) + + var ohssQueryURL = fmt.Sprintf("%[1]s/issues/?jql=project%%20%%3D%%22OpenShift%%20Hosted%%20SRE%%20Support%%22and%%20(%%22Cluster%%20ID%%22%%20~%%20%%20%%22%[2]s%%22OR%%22Cluster%%20ID%%22~%%22%[3]s%%22OR%%22description%%22~%%22%[2]s%%22OR%%22description%%22~%%22%[3]s%%22)", + JiraBaseURL, + data.ClusterID, + data.ExternalClusterID) + + links := map[string]string{ + "OHSS Cards": ohssQueryURL, + "CCX dashboard": fmt.Sprintf("https://kraken.psi.redhat.com/clusters/%s", data.ExternalClusterID), + "Splunk Audit Logs": buildSplunkURL(data), + } + + if data.pdServiceID != nil { + for _, id := range data.pdServiceID { + links[fmt.Sprintf("PagerDuty Service %s", id)] = fmt.Sprintf("https://redhat.pagerduty.com/service-directory/%s", id) + } + } + + // Sort, so it's always a predictable order + var keys []string + for k := range links { + keys = append(keys, k) + } + sort.Strings(keys) + + table := printer.NewTablePrinter(p.writer, 20, 1, 3, ' ') + for _, link := range keys { + table.AddRow([]string{link, strings.TrimSpace(links[link])}) + } + + if err := table.Flush(); err != nil { + fmt.Fprintf(p.writer, "Error printing %s: %v\n", name, err) + } +} + +// buildSplunkURL constructs the appropriate Splunk URL based on cluster configuration +func buildSplunkURL(data *contextData) string { + // Determine the relevant Splunk URL + // at the time of this writing, the only region we will support in the near future will be the ap-southeast-1 + // region. Additionally, region-based clusters will ONLY be supported for HCP. Therefore, if we see a region + // at all, we can assume that it's ap-southeast-1 and use that URL. 
+ if data.RegionID != "" { + return buildHCPSplunkURL(SGPSplunkURL, data.OCMEnv, data.Cluster) + } + if data.Cluster != nil && data.Cluster.Hypershift().Enabled() { + return buildHCPSplunkURL(HCPSplunkURL, data.OCMEnv, data.Cluster) + } else { + switch data.OCMEnv { + case "production": + return fmt.Sprintf(ClassicSplunkURL, "openshift_managed_audit", data.InfraID) + case "stage": + return fmt.Sprintf(ClassicSplunkURL, "openshift_managed_audit_stage", data.InfraID) + default: + return "" + } + } +} + +func buildHCPSplunkURL(baseURL string, environment string, cluster *cmv1.Cluster) string { + if cluster == nil { + return "" + } + switch environment { + case "production": + return fmt.Sprintf(baseURL, "openshift_managed_hypershift_audit", "production", cluster.ID(), cluster.Name()) + case "stage": + return fmt.Sprintf(baseURL, "openshift_managed_hypershift_audit_stage", "staging", cluster.ID(), cluster.Name()) + default: + return "" + } +} + +// printHistoricalPDAlertSummary prints a summary of historical PagerDuty alerts +func (p *ClusterContextPresenter) printHistoricalPDAlertSummary(incidentCounters map[string][]*pagerduty.IncidentOccurrenceTracker, serviceIDs []string, sinceDays int) { + name := "PagerDuty Historical Alerts" + fmt.Fprintln(p.writer, delimiter+name) + + for _, serviceID := range serviceIDs { + + if len(incidentCounters[serviceID]) == 0 { + fmt.Fprintln(p.writer, "Service: https://redhat.pagerduty.com/service-directory/"+serviceID+": None") + continue + } + + fmt.Fprintln(p.writer, "Service: https://redhat.pagerduty.com/service-directory/"+serviceID+":") + table := printer.NewTablePrinter(p.writer, 20, 1, 3, ' ') + table.AddRow([]string{"Type", "Count", "Last Occurrence"}) + totalIncidents := 0 + for _, incident := range incidentCounters[serviceID] { + table.AddRow([]string{incident.IncidentName, strconv.Itoa(incident.Count), incident.LastOccurrence}) + totalIncidents += incident.Count + } + + // Add empty row for readability + 
table.AddRow([]string{}) + if err := table.Flush(); err != nil { + fmt.Fprintf(p.writer, "Error printing %s: %v\n", name, err) + } + + fmt.Fprintln(p.writer, "\tTotal number of incidents [", totalIncidents, "] in [", sinceDays, "] days") + } +} + +// printJIRASupportExceptions prints JIRA support exception tickets +func (p *ClusterContextPresenter) printJIRASupportExceptions(issues []jira.Issue) { + name := "Support Exceptions" + fmt.Fprintln(p.writer, delimiter+name) + + for _, i := range issues { + fmt.Fprintf(p.writer, "[%s](%s/%s): %+v [Status: %s]\n", i.Key, i.Fields.Type.Name, i.Fields.Priority.Name, i.Fields.Summary, i.Fields.Status.Name) + fmt.Fprintf(p.writer, "- Link: %s/browse/%s\n\n", JiraBaseURL, i.Key) + } + + if len(issues) == 0 { + fmt.Fprintln(p.writer, "None") + } +} + +// printCloudTrailLogs prints potentially interesting CloudTrail events +func (p *ClusterContextPresenter) printCloudTrailLogs(events []*types.Event) { + name := "Potentially interesting CloudTrail events" + fmt.Fprintln(p.writer, delimiter+name) + + if events == nil { + fmt.Fprintln(p.writer, "None") + return + } + + table := printer.NewTablePrinter(p.writer, 20, 1, 3, ' ') + table.AddRow([]string{"EventId", "EventName", "Username", "EventTime"}) + for _, event := range events { + if event.Username == nil { + table.AddRow([]string{*event.EventId, *event.EventName, "", event.EventTime.String()}) + } else { + table.AddRow([]string{*event.EventId, *event.EventName, *event.Username, event.EventTime.String()}) + } + } + // Add empty row for readability + table.AddRow([]string{}) + if err := table.Flush(); err != nil { + fmt.Fprintf(p.writer, "Error printing %s: %v\n", name, err) + } +} + +// printNetworkInfo prints network configuration details +func (p *ClusterContextPresenter) printNetworkInfo(data *contextData) { + var name = "Network Info" + fmt.Fprintln(p.writer, delimiter+name) + + table := printer.NewTablePrinter(p.writer, 20, 1, 3, ' ') + table.AddRow([]string{"Network Type", 
data.NetworkType}) + table.AddRow([]string{"MachineCIDR", data.NetworkMachineCIDR}) + table.AddRow([]string{"ServiceCIDR", data.NetworkServiceCIDR}) + table.AddRow([]string{"Max Services", strconv.Itoa(data.NetworkMaxServices)}) + table.AddRow([]string{"PodCIDR", data.NetworkPodCIDR}) + table.AddRow([]string{"Host Prefix", strconv.Itoa(data.NetworkHostPrefix)}) + table.AddRow([]string{"Max Nodes (based on PodCIDR)", strconv.Itoa(data.NetworkMaxNodesFromPodCIDR)}) + table.AddRow([]string{"Max pods per node", strconv.Itoa(data.NetworkMaxPodsPerNode)}) + + if err := table.Flush(); err != nil { + fmt.Fprintf(p.writer, "Error printing %s: %v\n", name, err) + } +} + +// printDynatraceResources prints Dynatrace-related URLs and information +func (p *ClusterContextPresenter) printDynatraceResources(data *contextData) { + name := "Dynatrace Details" + fmt.Fprintln(p.writer, delimiter+name) + + links := map[string]string{ + "Dynatrace Tenant URL": data.DyntraceEnvURL, + "Logs App URL": data.DyntraceLogsURL, + } + + // Sort, so it's always a predictable order + var keys []string + for k := range links { + keys = append(keys, k) + } + sort.Strings(keys) + + table := printer.NewTablePrinter(p.writer, 20, 1, 3, ' ') + for _, link := range keys { + url := strings.TrimSpace(links[link]) + if url == dynatrace.ErrUnsupportedCluster.Error() { + fmt.Fprintln(p.writer, dynatrace.ErrUnsupportedCluster.Error()) + break + } else if url != "" { + table.AddRow([]string{link, url}) + } + } + + if err := table.Flush(); err != nil { + fmt.Fprintf(p.writer, "Error printing %s: %v\n", name, err) + } +} + +// printUserBannedStatus prints user ban status and details +func (p *ClusterContextPresenter) printUserBannedStatus(data *contextData) { + name := "User Ban Details" + fmt.Fprintln(p.writer, "\n"+delimiter+name) + if data.UserBanned { + fmt.Fprintln(p.writer, "User is banned") + fmt.Fprintf(p.writer, "Ban code = %v\n", data.BanCode) + fmt.Fprintf(p.writer, "Ban description = %v\n", 
data.BanDescription) + if data.BanCode == BanCodeExportControlCompliance { + fmt.Fprintln(p.writer, "User banned due to export control compliance.\nPlease follow the steps detailed here: https://github.com/openshift/ops-sop/blob/master/v4/alerts/UpgradeConfigSyncFailureOver4HrSRE.md#user-banneddisabled-due-to-export-control-compliance .") + } + } else { + fmt.Fprintln(p.writer, "User is not banned") + } +} + +// printSDNtoOVNMigrationStatus prints the status of SDN to OVN migration +func (p *ClusterContextPresenter) printSDNtoOVNMigrationStatus(data *contextData) { + name := "SDN to OVN Migration Status" + fmt.Fprintln(p.writer, "\n"+delimiter+name) + + if data.SdnToOvnMigration != nil && data.MigrationStateValue == cmv1.ClusterMigrationStateValueInProgress { + fmt.Fprintln(p.writer, "SDN to OVN migration is in progress") + return + } + + fmt.Fprintln(p.writer, "No active SDN to OVN migrations") +} diff --git a/cmd/cluster/context_test.go b/cmd/cluster/context_test.go index daab1d396..6223215f9 100644 --- a/cmd/cluster/context_test.go +++ b/cmd/cluster/context_test.go @@ -83,7 +83,8 @@ func TestPrintDynatraceResources(t *testing.T) { } var buf bytes.Buffer - printDynatraceResources(data, &buf) + p := NewClusterContextPresenter(&buf) + p.printDynatraceResources(data) output := buf.String() expectedHeader := "Dynatrace Details" @@ -156,7 +157,8 @@ func TestPrintCloudTrailLogs(t *testing.T) { } var buf bytes.Buffer - printCloudTrailLogs(events, &buf) + p := NewClusterContextPresenter(&buf) + p.printCloudTrailLogs(events) outputStr := buf.String() if !strings.Contains(outputStr, "Potentially interesting CloudTrail events") { @@ -260,17 +262,14 @@ func TestBuildSplunkURL(t *testing.T) { CreationTimestamp: time.Now(), } - o := &contextOptions{ - cluster: mockCluster.ToV1Cluster(), - infraID: tc.infraID, - regionID: tc.regionID, - } - data := &contextData{ - OCMEnv: tc.ocmEnv, + Cluster: mockCluster.ToV1Cluster(), + OCMEnv: tc.ocmEnv, + InfraID: tc.infraID, + RegionID: 
tc.regionID, } - actualURL := o.buildSplunkURL(data) + actualURL := buildSplunkURL(data) assert.Equal(t, tc.expectedURL, actualURL, "Generated Splunk URL does not match expected value") }) } @@ -282,18 +281,16 @@ func TestPrintOtherLinks(t *testing.T) { mockExternalClusterID := "mock-external-cluster-id" mockPDServiceID := []string{"PD12345"} - o := &contextOptions{ - clusterID: mockClusterID, - externalClusterID: mockExternalClusterID, - } - data := &contextData{ - pdServiceID: mockPDServiceID, + ClusterID: mockClusterID, + ExternalClusterID: mockExternalClusterID, + pdServiceID: mockPDServiceID, } - var buf bytes.Buffer - o.printOtherLinks(data, &buf) - output := buf.String() + buffer := strings.Builder{} + p := NewClusterContextPresenter(&buffer) + p.printOtherLinks(data, ContextOptions{}) + output := buffer.String() expectedLinks := []string{ "OHSS Cards", @@ -322,7 +319,8 @@ func TestPrintJIRASupportExceptions(t *testing.T) { } var buf bytes.Buffer - printJIRASupportExceptions(mockIssues, &buf) + p := NewClusterContextPresenter(&buf) + p.printJIRASupportExceptions(mockIssues) output := buf.String() expectedStrings := []string{ @@ -346,7 +344,8 @@ func TestPrintHistoricalPDAlertSummary(t *testing.T) { mockSinceDays := 7 var buf bytes.Buffer - printHistoricalPDAlertSummary(mockIncidentCounters, mockServiceIDs, mockSinceDays, &buf) + p := NewClusterContextPresenter(&buf) + p.printHistoricalPDAlertSummary(mockIncidentCounters, mockServiceIDs, mockSinceDays) output := buf.String() expectedStrings := []string{ @@ -364,7 +363,7 @@ func TestPrintHistoricalPDAlertSummary(t *testing.T) { } func TestPrintShortOutput(t *testing.T) { - opts := &contextOptions{days: 7} + opts := ContextOptions{Days: 7} limitedSupportReason, _ := v1.NewLimitedSupportReason().Build() serviceLog1, _ := v2.NewLogEntry(). 
@@ -394,9 +393,11 @@ func TestPrintShortOutput(t *testing.T) { HistoricalAlerts: map[string][]*pagerduty.IncidentOccurrenceTracker{"service-2": {historicalAlert}}, } - var buf bytes.Buffer - opts.printShortOutput(data, &buf) - output := buf.String() + buffer := strings.Builder{} + p := NewClusterContextPresenter(&buffer) + err := p.RenderShort(data, opts) + assert.NoError(t, err) + output := buffer.String() assert.Contains(t, output, "Version") assert.Contains(t, output, "Supported?") @@ -408,7 +409,6 @@ func TestPrintShortOutput(t *testing.T) { } func TestPrintJsonOutput(t *testing.T) { - opts := &contextOptions{} jiraIssue := jira.Issue{Key: "JIRA-999"} data := &contextData{ @@ -417,12 +417,14 @@ func TestPrintJsonOutput(t *testing.T) { JiraIssues: []jira.Issue{jiraIssue}, } - var buf bytes.Buffer - opts.printJsonOutput(data, &buf) - output := buf.String() + buffer := strings.Builder{} + p := NewClusterContextPresenter(&buffer) + err := p.RenderJSON(data) + assert.NoError(t, err) + output := buffer.String() var result map[string]interface{} - err := json.Unmarshal([]byte(output), &result) + err = json.Unmarshal([]byte(output), &result) assert.NoError(t, err) assert.Contains(t, output, `"JSON Test Cluster"`) assert.Contains(t, output, `"4.9"`) @@ -510,15 +512,17 @@ func TestPrintLongOutput(t *testing.T) { *mockData.CloudtrailEvents[0].EventId = "evt-1234567890" *mockData.CloudtrailEvents[0].Username = "mockUser" - o := &contextOptions{ - verbose: true, - days: 30, - full: true, + o := &ContextOptions{ + Verbose: true, + Days: 30, + FullScan: true, } - var buf bytes.Buffer - o.printLongOutput(mockData, &buf) - output := buf.String() + buffer := strings.Builder{} + p := NewClusterContextPresenter(&buffer) + err := p.RenderLong(mockData, *o) + assert.NoError(t, err) + output := buffer.String() assert.Contains(t, output, "ClusterABC") assert.Contains(t, output, "cluster-123") @@ -529,11 +533,12 @@ func TestPrintLongOutput(t *testing.T) { } func TestRun_UnknownOutput(t 
*testing.T) { - contextOptions := &contextOptions{ - output: "invalidOutputFormat", + contextOptions := ContextOptions{ + Days: 1, + Output: "invalidOutputFormat", } - err := contextOptions.run() + err := contextOptions.Validate() if err == nil || err.Error() != "unknown Output Format: invalidOutputFormat" { t.Errorf("Expected unknown output format error, got: %v", err) @@ -578,7 +583,8 @@ func TestPrintUserBannedStatus(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var buf bytes.Buffer - printUserBannedStatus(&tt.data, &buf) + p := NewClusterContextPresenter(&buf) + p.printUserBannedStatus(&tt.data) actualOutput := buf.String() expected := strings.TrimSpace(tt.expectedOutput) @@ -613,7 +619,8 @@ func TestPrintSDNtoOVNMigrationStatus(t *testing.T) { } var buf bytes.Buffer - printSDNtoOVNMigrationStatus(data, &buf) + p := NewClusterContextPresenter(&buf) + p.printSDNtoOVNMigrationStatus(data) assert.Contains(t, buf.String(), tt.expectedOutput) }) diff --git a/cmd/cmd.go b/cmd/cmd.go index 5b703d06a..434c84a95 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -32,6 +32,7 @@ import ( "github.com/openshift/osdctl/cmd/jira" "github.com/openshift/osdctl/cmd/jumphost" "github.com/openshift/osdctl/cmd/mc" + "github.com/openshift/osdctl/cmd/mcp" "github.com/openshift/osdctl/cmd/network" "github.com/openshift/osdctl/cmd/org" "github.com/openshift/osdctl/cmd/promote" @@ -107,6 +108,7 @@ func NewCmdRoot(streams genericclioptions.IOStreams) *cobra.Command { rootCmd.AddCommand(swarm.Cmd) rootCmd.AddCommand(iampermissions.NewCmdIamPermissions()) rootCmd.AddCommand(dynatrace.NewCmdDynatrace()) + rootCmd.AddCommand(mcp.MCPCmd) // Add cost command to use AWS Cost Manager rootCmd.AddCommand(cost.NewCmdCost(streams, globalOpts)) diff --git a/cmd/mcp/cmd.go b/cmd/mcp/cmd.go new file mode 100644 index 000000000..34d22d065 --- /dev/null +++ b/cmd/mcp/cmd.go @@ -0,0 +1,126 @@ +package mcp + +import ( + "context" + "fmt" + "net/http" + "time" + + 
"github.com/google/jsonschema-go/jsonschema" + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/openshift/osdctl/cmd/cluster" + "github.com/openshift/osdctl/cmd/servicelog" + "github.com/spf13/cobra" +) + +// This is reusable Input type for commands that only need the cluster id and nothing else. +type ClusterIdInput struct { + ClusterId string `json:"cluster_id" jsonschema:"ID of the cluster to retrieve information for"` +} + +var ClusterIdInputSchema, _ = jsonschema.For[ClusterIdInput](&jsonschema.ForOptions{}) + +// This is just the most generic type of output. Used for e.g. the context command that already provides a JSON output +// option, but the data types that make it up can't be converted to a JSONSCHEMA because Jira types are +// self-referential. +type MCPStringOutput struct { + Context string `json:"context"` +} + +var MCPStringOutputSchema, _ = jsonschema.For[MCPStringOutput](&jsonschema.ForOptions{}) + +type MCPServiceLogInput struct { + ClusterId string `json:"cluster_id" jsonschema:"ID of the cluster to retrieve information for"` + Internal bool `json:"internal" jsonschema:"Include internal servicelogs"` + All bool `json:"all" jsonschema:"List all servicelogs"` +} + +var MCPServiceLogInputSchema, _ = jsonschema.For[MCPServiceLogInput](&jsonschema.ForOptions{}) + +type MCPServiceLogOutput struct { + ServiceLogs servicelog.LogEntryResponseView `json:"service_logs"` +} + +var MCPServiceLogOutputSchema, _ = jsonschema.For[MCPServiceLogOutput](&jsonschema.ForOptions{}) + +var MCPCmd = &cobra.Command{ + Use: "mcp", + Short: "Start osdctl in MCP server mode", + Long: "Start osdctl as model-context-protocol server for integration with AI assistants.", + Args: cobra.ExactArgs(0), + RunE: runMCP, +} + +func init() { + MCPCmd.Flags().Bool("http", false, "Use an HTTP server instead of stdio") + MCPCmd.Flags().Int("port", 8080, "HTTP Server port to use when running in HTTP mode") +} + +func runMCP(cmd *cobra.Command, argv []string) error { + useHttp, _ 
:= cmd.Flags().GetBool("http")
+	httpPort, _ := cmd.Flags().GetInt("port")
+	if useHttp {
+		fmt.Println("HTTP mode selected")
+	}
+	server := mcp.NewServer(&mcp.Implementation{Name: "osdctl", Version: "v0.0.1"}, nil)
+	mcp.AddTool(server, &mcp.Tool{
+		Name:         "context",
+		Description:  "Retrieve cluster context for a given cluster id",
+		InputSchema:  ClusterIdInputSchema,
+		OutputSchema: MCPStringOutputSchema,
+		Title:        "cluster context",
+	}, GenerateContext)
+	mcp.AddTool(server, &mcp.Tool{
+		Name:         "service_logs",
+		Description:  "Retrieve cluster service logs for a given cluster id",
+		InputSchema:  MCPServiceLogInputSchema,
+		OutputSchema: MCPServiceLogOutputSchema,
+		Title:        "cluster service logs",
+	}, ListServiceLogs)
+	if useHttp {
+		// Create the streamable HTTP handler.
+		handler := mcp.NewStreamableHTTPHandler(func(req *http.Request) *mcp.Server {
+			return server
+		}, nil)
+		httpServer := &http.Server{
+			Addr:              fmt.Sprintf("localhost:%d", httpPort),
+			Handler:           handler,
+			ReadHeaderTimeout: 3 * time.Second,
+		}
+		if err := httpServer.ListenAndServe(); err != nil {
+			return err
+		}
+	}
+	if err := server.Run(context.Background(), &mcp.StdioTransport{}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func GenerateContext(ctx context.Context, req *mcp.CallToolRequest, input ClusterIdInput) (*mcp.CallToolResult, MCPStringOutput, error) {
+	contextData, err := cluster.GenerateContextData(input.ClusterId)
+	return nil, MCPStringOutput{
+		Context: contextData,
+	}, err
+}
+
+func ListServiceLogs(ctx context.Context, req *mcp.CallToolRequest, input MCPServiceLogInput) (*mcp.CallToolResult, MCPServiceLogOutput, error) {
+	output := MCPServiceLogOutput{}
+	serviceLogs, err := servicelog.FetchServiceLogs(input.ClusterId, input.All, input.Internal)
+	if err != nil {
+		return &mcp.CallToolResult{
+			Meta:              mcp.Meta{},
+			Content:           []mcp.Content{},
+			StructuredContent: nil,
+			IsError:           true,
+		}, output, err
+	}
+	view := servicelog.ConvertOCMSlToLogEntryView(serviceLogs)
+	
output.ServiceLogs = view + return &mcp.CallToolResult{ + Meta: mcp.Meta{}, + Content: []mcp.Content{}, + StructuredContent: output, + IsError: false, + }, output, nil +} diff --git a/cmd/servicelog/list.go b/cmd/servicelog/list.go index c1804af93..f2b0b1502 100644 --- a/cmd/servicelog/list.go +++ b/cmd/servicelog/list.go @@ -63,6 +63,17 @@ func listServiceLogs(clusterID string, opts *listCmdOptions) error { } func printServiceLogResponse(response *slv1.ClustersClusterLogsListResponse) error { + view := ConvertOCMSlToLogEntryView(response) + + viewBytes, err := json.Marshal(view) + if err != nil { + return fmt.Errorf("failed to marshal response for output: %w", err) + } + + return dump.Pretty(os.Stdout, viewBytes) +} + +func ConvertOCMSlToLogEntryView(response *slv1.ClustersClusterLogsListResponse) LogEntryResponseView { entryViews := logEntryToView(response.Items().Slice()) slices.Reverse(entryViews) view := LogEntryResponseView{ @@ -72,13 +83,7 @@ func printServiceLogResponse(response *slv1.ClustersClusterLogsListResponse) err Size: response.Size(), Total: response.Total(), } - - viewBytes, err := json.Marshal(view) - if err != nil { - return fmt.Errorf("failed to marshal response for output: %w", err) - } - - return dump.Pretty(os.Stdout, viewBytes) + return view } type LogEntryResponseView struct { @@ -110,15 +115,24 @@ type LogEntryView struct { } func logEntryToView(entries []*slv1.LogEntry) []*LogEntryView { + // Forces an empty array to actual be [] when Marshalled and not null - this is a JSONSCHEMA error that is + // configurable json v2: https://pkg.go.dev/encoding/json/v2#FormatNilSliceAsNull + emptyDocReference := []string{} entryViews := make([]*LogEntryView, 0, len(entries)) for _, entry := range entries { + var docRef []string + if len(entry.DocReferences()) > 0 { + docRef = entry.DocReferences() + } else { + docRef = emptyDocReference + } entryView := &LogEntryView{ ClusterID: entry.ClusterID(), ClusterUUID: entry.ClusterUUID(), CreatedAt: 
entry.CreatedAt(), CreatedBy: entry.CreatedBy(), Description: entry.Description(), - DocReferences: entry.DocReferences(), + DocReferences: docRef, EventStreamID: entry.EventStreamID(), Href: entry.HREF(), ID: entry.ID(), diff --git a/docs/README.md b/docs/README.md index 5791cdc3b..bc3d579b6 100644 --- a/docs/README.md +++ b/docs/README.md @@ -101,6 +101,7 @@ - `delete` - Delete a jumphost created by `osdctl jumphost create` - `mc` - - `list` - List ROSA HCP Management Clusters +- `mcp` - Start osdctl in MCP server mode - `network` - network related utilities - `packet-capture` - Start packet capture - `verify-egress` - Verify an AWS OSD/ROSA cluster can reach all required external URLs necessary for full support. @@ -3026,6 +3027,32 @@ osdctl mc list [flags] -S, --skip-version-check skip checking to see if this is the most recent release ``` +### osdctl mcp + +Start osdctl as model-context-protocol server for integration with AI assistants. + +``` +osdctl mcp [flags] +``` + +#### Flags + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + -h, --help help for mcp + --http Use an HTTP server instead of stdio + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] + --port int HTTP Server port to use when running in HTTP mode (default 8080) + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + -S, --skip-version-check skip checking to see if this is the most recent release +``` + ### osdctl network network related utilities diff --git a/docs/osdctl.md b/docs/osdctl.md index 209867412..1fbed1f39 100644 --- a/docs/osdctl.md +++ b/docs/osdctl.md @@ -38,6 +38,7 @@ CLI tool to provide OSD related utilities * [osdctl jira](osdctl_jira.md) - Provides a set of commands for interacting with Jira * [osdctl jumphost](osdctl_jumphost.md) - * [osdctl mc](osdctl_mc.md) - +* [osdctl mcp](osdctl_mcp.md) - Start osdctl in MCP server mode * [osdctl network](osdctl_network.md) - network related utilities * [osdctl org](osdctl_org.md) - Provides information for a specified organization * [osdctl promote](osdctl_promote.md) - Utilities to promote services/operators diff --git a/docs/osdctl_mcp.md b/docs/osdctl_mcp.md new file mode 100644 index 000000000..8ed90a1b6 --- /dev/null +++ b/docs/osdctl_mcp.md @@ -0,0 +1,39 @@ +## osdctl mcp + +Start osdctl in MCP server mode + +### Synopsis + +Start osdctl as model-context-protocol server for integration with AI assistants. + +``` +osdctl mcp [flags] +``` + +### Options + +``` + -h, --help help for mcp + --http Use an HTTP server instead of stdio + --port int HTTP Server port to use when running in HTTP mode (default 8080) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
+ -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + -S, --skip-version-check skip checking to see if this is the most recent release +``` + +### SEE ALSO + +* [osdctl](osdctl.md) - OSD CLI + diff --git a/go.mod b/go.mod index 7b9063cd9..e4131a92c 100644 --- a/go.mod +++ b/go.mod @@ -30,9 +30,11 @@ require ( github.com/fatih/color v1.18.0 github.com/golang-jwt/jwt/v5 v5.2.2 github.com/google/go-github/v63 v63.0.0 + github.com/google/jsonschema-go v0.3.0 github.com/google/uuid v1.6.0 github.com/hashicorp/hcl/v2 v2.23.0 github.com/manifoldco/promptui v0.9.0 + github.com/modelcontextprotocol/go-sdk v1.0.0 github.com/olekukonko/tablewriter v0.0.5 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.38.0 @@ -249,6 +251,7 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/yosssi/ace v0.0.5 // indirect github.com/zalando/go-keyring v0.2.6 // indirect gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect @@ -269,7 +272,7 @@ require ( golang.org/x/sys v0.35.0 // indirect golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.33.0 // indirect + golang.org/x/tools v0.34.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect diff --git a/go.sum b/go.sum index 
83da2dd13..3d3fbbade 100644 --- a/go.sum +++ b/go.sum @@ -324,6 +324,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q= +github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= @@ -476,6 +478,8 @@ github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modelcontextprotocol/go-sdk v1.0.0 h1:Z4MSjLi38bTgLrd/LjSmofqRqyBiVKRyQSJgw8q8V74= +github.com/modelcontextprotocol/go-sdk v1.0.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -669,6 +673,8 @@ github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZ github.com/xyproto/randomstring v1.0.5/go.mod 
h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA= github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0= github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= @@ -843,8 +849,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=