From bb4990010944418b45fd58f19e92bb7dc01a33b9 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Tue, 25 Mar 2025 18:45:07 -0700 Subject: [PATCH 01/40] OSD-26415: Allow pull-secret to be updated w/o transferring ownership. --- cmd/cluster/transferowner.go | 370 ++++++++++++++++++++++++++++------- 1 file changed, 296 insertions(+), 74 deletions(-) diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go index 477e39a44..2af54cb44 100644 --- a/cmd/cluster/transferowner.go +++ b/cmd/cluster/transferowner.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/fatih/color" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" hiveapiv1 "github.com/openshift/hive/apis/hive/v1" hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" @@ -38,31 +39,47 @@ import ( const ( CheckSyncMaxAttempts = 24 - SL_TRANSFER_INITIATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_starting.json" - SL_TRANSFER_COMPLETE = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_completed.json" + SL_TRANSFER_INITIATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_starting.json" + SL_TRANSFER_COMPLETE = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_completed.json" + SL_PULL_SECRET_ROTATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/pull_secret_rotated.json" ) // transferOwnerOptions defines the struct for running transferOwner command type transferOwnerOptions struct { - output string - clusterID string - newOwnerName string - reason string - dryrun bool - hypershift bool - cluster *cmv1.Cluster + output string + clusterID string + newOwnerName string + reason string + dryrun bool + hypershift bool + doPullSecretOnly bool + cluster *cmv1.Cluster genericclioptions.IOStreams GlobalOptions *globalflags.GlobalOptions } +var red *color.Color +var blue *color.Color +var green *color.Color + +const transferOwnerCmdExample = ` + # Transfer ownership + osdctl cluster transfer-owner --new-owner "new_OCM_userName" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id" + + # Update pull secret without transfering ownership + osdctl cluster transfer-owner --pull-secret-only --cluster-id 1kfmyclusteristhebesteverp8m --reason "update pull secret per jira-id" +` + func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command { ops := newTransferOwnerOptions(streams, globalOpts) transferOwnerCmd := &cobra.Command{ Use: "transfer-owner", Short: "Transfer cluster ownership to a new user (to be done by Region Lead)", Args: cobra.NoArgs, + Example: transferOwnerCmdExample, DisableAutoGenTag: true, + PreRun: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(ops.preRun()) }, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(ops.run()) }, @@ -71,11 +88,14 @@ func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *global transferOwnerCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The Internal Cluster ID/External Cluster ID/ Cluster Name") transferOwnerCmd.Flags().StringVar(&ops.newOwnerName, "new-owner", ops.newOwnerName, "The new owners username to transfer the cluster to") transferOwnerCmd.Flags().BoolVarP(&ops.dryrun, "dry-run", "d", false, "Dry-run - show all changes but do not apply 
them") + transferOwnerCmd.Flags().BoolVar(&ops.doPullSecretOnly, "pull-secret-only", false, "Update cluster pull secret from current OCM AccessToken data without ownership transfer") transferOwnerCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") _ = transferOwnerCmd.MarkFlagRequired("cluster-id") - _ = transferOwnerCmd.MarkFlagRequired("new-owner") _ = transferOwnerCmd.MarkFlagRequired("reason") + transferOwnerCmd.MarkFlagsOneRequired("new-owner", "pull-secret-only") + // Avoid user errors, typos, etc.. Dont allow 'new-owner' to be provided when rotating 'pull-secret-only' + transferOwnerCmd.MarkFlagsMutuallyExclusive("new-owner", "pull-secret-only") return transferOwnerCmd } @@ -96,6 +116,22 @@ type serviceLogParameters struct { IsExternalOrgTransfer bool } +// SL params for pull-secret updates w/o owner transfer +type psOnlyServiceLogParameters struct { + ClusterID string + OwnerName string + OwnerID string + PullSecretUpdated bool +} + +func (o *transferOwnerOptions) preRun() error { + // Initialize the color formats... + red = color.New(color.FgHiRed, color.BgBlack) + green = color.New(color.FgHiGreen, color.BgBlack) + blue = color.New(color.FgHiBlue, color.BgBlack) + return nil +} + func generateInternalServiceLog(params serviceLogParameters) servicelog.PostCmdOptions { return servicelog.PostCmdOptions{ ClusterId: params.ClusterID, @@ -267,7 +303,34 @@ func rolloutTelemeterClientPods(clientset *kubernetes.Clientset, namespace, sele return nil } -func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret string) error { +func comparePullSecretAuths(pullSecret *corev1.Secret, expectedAuths map[string]*amv1.AccessTokenAuth) error { + var errString strings.Builder + blue.Println("\nComparing pull-secret to expected auth sections...") + for akey, auth := range expectedAuths { + // Find the matching auth entry for this registry name in the cluster pull_secret data... + psTokenAuth, err := getPullSecretTokenAuth(akey, pullSecret) + if err != nil { + errString.WriteString(fmt.Sprintf("Failed to fetch expected auth['%s'] from cluster pull-secret, err:'%s'.\n", akey, err)) + } + if auth.Auth() != psTokenAuth.Auth() { + errString.WriteString(fmt.Sprintf("Expected auth['%s'] does not match authToken found in cluster pull-secret.\n", akey)) + } else { + green.Printf("Auth '%s' - tokens match\n", akey) + } + if auth.Email() != psTokenAuth.Email() { + errString.WriteString(fmt.Sprintf("Expected auth['%s'] does not match email found in cluster pull-secret.\n", akey)) + } else { + green.Printf("Auth '%s' - emails match\n", akey) + } + } + if errString.Len() <= 0 { + return nil + } else { + return fmt.Errorf("%s", errString.String()) + } +} + +func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret string, expectedAuths map[string]*amv1.AccessTokenAuth) error { // Retrieve the pull secret from the "openshift-config" namespace pullSecret, err := clientset.CoreV1().Secrets("openshift-config").Get(context.TODO(), "pull-secret", metav1.GetOptions{}) if err != nil { @@ -279,14 +342,30 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret if !ok { return fmt.Errorf("pull secret data not found in the secret") } + err = comparePullSecretAuths(pullSecret, expectedAuths) + if err != nil { + red.Printf("\nFound mis-matching auth values during compare. 
Please review:\n%s", err) + fmt.Printf("Would you like to continue?") + if !utils.ConfirmPrompt() { + return fmt.Errorf("operation aborted by the user") + } + } else { + //fmt.Printf("\nComparison shows subset of Auths from OCM AuthToken have matching tokens + emails in cluster pull-secret. PASS\n") + green.Println("\nComparison shows subset of Auths from OCM AuthToken have matching tokens + emails in cluster pull-secret. PASS") + } - fmt.Println("Actual Cluster Pull Secret:") + blue.Println("Actual Cluster Pull Secret:") fmt.Println(string(pullSecretData)) // Print the expected pull secret - fmt.Println("\nExpected Cluster Pull Secret:") + blue.Println("\nExpected Auths from OCM AccessToken expected to be present in Pull Secret (note this can be a subset):") fmt.Println(expectedPullSecret) + // TODO: Consider confirming that the email and token values of the 'subset' of Auths + // contained in the OCM AccessToken actually matches email/token values in the cluster's + // openshift-config/pull-secret. Provide any descrepencies to the user here before + // prompting to visually evaluate. + // // Ask the user to confirm if the actual pull secret matches their expectation reader := bufio.NewReader(os.Stdin) fmt.Print("\nDoes the actual pull secret match your expectation? (yes/no): ") @@ -300,13 +379,13 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret return fmt.Errorf("operation aborted by the user") } - fmt.Println("Pull secret verification successful.") + green.Println("Pull secret verification (by user) successful.") return nil } func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID, mgmtClusterName string, pullsecret []byte) error { - + fmt.Printf("updateManifestwork begin...\n") if err := workv1.AddToScheme(kubeCli.Scheme()); err != nil { return fmt.Errorf("failed to add scheme: %w", err) } @@ -322,6 +401,7 @@ func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID, secretNamePrefix := hostedCluster.DomainPrefix() + "-pull" // Generate a random new secret name based on the existing pull secret name + // A new secret 'name' is used here to trigger the update(?) randomSuffix := func(chars string, length int) string { rand.Seed(time.Now().UnixNano()) result := make([]byte, length) @@ -332,6 +412,7 @@ func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID, } newSecretName := secretNamePrefix + "-" + randomSuffix("0123456789abcdef", 6) + fmt.Printf("get() Manifestwork...\n") manifestWork := &workv1.ManifestWork{} err = kubeCli.Get(context.TODO(), types.NamespacedName{Name: manifestWorkName, Namespace: manifestWorkNamespace}, manifestWork) if err != nil { @@ -391,13 +472,15 @@ func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID, } } + fmt.Printf("update() Manifestwork...\n") err = kubeCli.Update(context.TODO(), manifestWork, &client.UpdateOptions{}) if err != nil { return fmt.Errorf("cannot update the pull-secret within manifestwork: %w", err) } // The secret will be synced to the management cluster and guest cluster in a few seconds, wait here - fmt.Println("sleep 60 seconds here to make sure secret gets synced on guest cluster") + fmt.Println("Manifest work updated. ") + fmt.Println("Sleeping 60 seconds here to allow secret to be synced on guest cluster") time.Sleep(time.Second * 60) return nil @@ -442,6 +525,7 @@ func (o *transferOwnerOptions) run() error { // Create an OCM client to talk to the cluster API // the user has to be logged in (e.g. 
'ocm login')
+	var err error
 	ocm, err := utils.CreateConnection()
 	if err != nil {
 		return fmt.Errorf("failed to create OCM client: %w", err)
@@ -456,11 +540,36 @@ func (o *transferOwnerOptions) run() error {
 	cluster, err := utils.GetClusterAnyStatus(ocm, o.clusterID)
 	o.cluster = cluster
 	o.clusterID = cluster.ID()
-
-	userDetails, err := ocm.AccountsMgmt().V1().Accounts().Account(o.newOwnerName).Get().Send()
-	userName, ok := userDetails.Body().GetUsername()
-	if !ok {
-		return fmt.Errorf("Failed to get username from new user id")
+	var userName string
+	var subscription *amv1.Subscription = nil
+	var oldOwnerAccount *amv1.Account = nil
+	var userDetails *amv1.AccountGetResponse
+	var ok bool
+	if o.doPullSecretOnly {
+		// This is updating the pull secret and not an ownership transfer.
+		// Use existing subscription, account, and userName value...
+		subscription, err = utils.GetSubscription(ocm, o.clusterID)
+		if err != nil {
+			return fmt.Errorf("Failed to get subscription info for cluster:'%s', err: '%v'", o.clusterID, err)
+		}
+		oldOwnerAccount, err = utils.GetAccount(ocm, subscription.Creator().ID())
+		if err != nil {
+			return fmt.Errorf("Failed to get account info from subscription, err:'%v'", err)
+		}
+		userName = oldOwnerAccount.Username()
+		fmt.Printf("Old username:'%s'\n", userName)
+	} else {
+		// This is an ownership transfer.
+		// Lookup New Owner Name...
+		userDetails, err = ocm.AccountsMgmt().V1().Accounts().Account(o.newOwnerName).Get().Send()
+		if err != nil {
+			return fmt.Errorf("failed to fetch Account info, err:'%v'", err)
+		}
+		ok := false
+		userName, ok = userDetails.Body().GetUsername()
+		if !ok {
+			return fmt.Errorf("Failed to get username from new user id")
+		}
 	}
 
 	var mgmtCluster, svcCluster, hiveCluster, masterCluster *cmv1.Cluster
@@ -490,9 +599,12 @@ func (o *transferOwnerOptions) run() error {
 
 	elevationReasons := []string{
 		o.reason,
-		fmt.Sprintf("Updating pull secret using osdctl to tranfert owner to %s", o.newOwnerName),
 	}
-
+	if o.doPullSecretOnly {
+		elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl"))
+	} else {
+		elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl to tranfert owner to %s", o.newOwnerName))
+	}
 	// Gather all required information
 	fmt.Println("Gathering all required information for the cluster transfer...")
 	cluster, err = utils.GetCluster(ocm, o.clusterID)
@@ -505,19 +617,22 @@ func (o *transferOwnerOptions) run() error {
 		return fmt.Errorf("cluster has no external id")
 	}
 
-	subscription, err := utils.GetSubscription(ocm, o.clusterID)
-	if err != nil {
-		return fmt.Errorf("could not get subscription: %w", err)
+	if subscription == nil {
+		subscription, err = utils.GetSubscription(ocm, o.clusterID)
+		if err != nil {
+			return fmt.Errorf("could not get subscription: %w", err)
+		}
 	}
 
 	subscriptionID, ok := subscription.GetID()
 	if !ok {
 		return fmt.Errorf("Could not get subscription id")
 	}
-
-	oldOwnerAccount, ok := subscription.GetCreator()
-	if !ok {
-		return fmt.Errorf("cluster has no owner account")
+	if oldOwnerAccount == nil {
+		oldOwnerAccount, ok = subscription.GetCreator()
+		if !ok {
+			return fmt.Errorf("cluster has no owner account")
+		}
 	}
 
 	oldOrganizationId, ok := subscription.GetOrganizationID()
@@ -532,21 +647,39 @@ func (o *transferOwnerOptions) run() error {
 		fmt.Printf("Error: %s", err)
 		return fmt.Errorf("could not get current owner organization")
 	}
-
-	newAccount, err := utils.GetAccount(ocm, o.newOwnerName)
-	if err != nil {
-		return fmt.Errorf("could not get new owners account: %w", err)
-	}
-
-	newOrganization, ok := newAccount.GetOrganization()
-	if !ok {
-		return fmt.Errorf("new account has no organization")
+	var newAccount *amv1.Account = nil
+	var newOrganization *amv1.Organization = nil
+	var oldUsername string
+	if o.doPullSecretOnly {
+		// This is not an ownership transfer, just a pull-secret update,
+		// new info == old info.
+		// TODO: Can likely skip most of the next set of checks, but why not?
+		oldUsername, ok = oldOwnerAccount.GetUsername()
+		if !ok {
+			fmt.Printf("Warning: old username not found\n")
+		}
+		fmt.Printf("Using old account values. OwnerAccount:'%s'\n", oldUsername)
+		newAccount = oldOwnerAccount
+		newOrganization = oldOrganization
+	} else {
+		newAccount, err = utils.GetAccount(ocm, o.newOwnerName)
+		if err != nil {
+			return fmt.Errorf("could not get new owners account: %w", err)
+		}
+		newOrganization, ok = newAccount.GetOrganization()
+		if !ok {
+			return fmt.Errorf("new account has no organization")
+		}
 	}
 
 	newOrganizationId, ok := newOrganization.GetID()
 	if !ok {
 		return fmt.Errorf("new organization has no ID")
 	}
+	fmt.Printf("old orgID:'%s', new orgID:'%s'\n", oldOrganizationId, newOrganizationId)
+	if o.doPullSecretOnly && newOrganizationId != oldOrganizationId {
+		return fmt.Errorf("new org != old org. Ownership transfer not expected with pull-secret-only flag")
+	}
 
 	accountID, ok := newAccount.GetID()
 	if !ok {
@@ -610,22 +743,44 @@ func (o *transferOwnerOptions) run() error {
 		IsExternalOrgTransfer: orgChanged,
 	}
 
-	// Send a SL saying we're about to start
-	fmt.Println("Notify the customer before ownership transfer commences. Sending service log.")
-	postCmd := generateServiceLog(slParams, SL_TRANSFER_INITIATED)
-	if err := postCmd.Run(); err != nil {
-		fmt.Println("Failed to POST customer service log. Please manually send a service log to notify the customer before ownership transfer commences:")
-		fmt.Printf("osdctl servicelog post %v -t %v -p %v\n",
-			o.clusterID, SL_TRANSFER_INITIATED, strings.Join(postCmd.TemplateParams, " -p "))
+	var postCmd servicelog.PostCmdOptions
+	//TODO: If only updating the pull-secret, and not transfering ownership
+	// should we send a SL both before and after the rotate operation?
+	// Currently only sending an after the pull-secret update is completed
+	// when not also transfering ownership.
+	if !o.doPullSecretOnly {
+		// Send a SL saying we're about to start ownership transfer
+		fmt.Println("Notify the customer before ownership transfer commences. Sending service log.")
+		postCmd = generateServiceLog(slParams, SL_TRANSFER_INITIATED)
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST customer service log. Please manually send a service log to notify the customer before ownership transfer commences:")
+			fmt.Printf("osdctl servicelog post %v -t %v -p %v\n",
+				o.clusterID, SL_TRANSFER_INITIATED, strings.Join(postCmd.TemplateParams, " -p "))
+		}
 	}
 
 	// Send internal SL to cluster with additional details in case we
 	// need them later. This prevents leaking PII to customers.
-	postCmd = generateInternalServiceLog(slParams)
-	fmt.Println("Internal SL Being Sent")
-	if err := postCmd.Run(); err != nil {
-		fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
-		fmt.Println(fmt.Sprintf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID))
+	if o.doPullSecretOnly {
+		postCmd = servicelog.PostCmdOptions{
+			ClusterId: slParams.ClusterID,
+			TemplateParams: []string{
+				"MESSAGE=" + fmt.Sprintf("Pull-secret update initiated. UserName:'%s', OwnerID:'%s'", slParams.OldOwnerName, slParams.OldOwnerID),
+			},
+			InternalOnly: true,
+		}
+		fmt.Println("Internal SL Being Sent")
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
+			fmt.Printf("osdctl servicelog post -i -p MESSAGE=\"Pull-secret update. UserName:'%s', OwnerID:'%s'.\" %s \n", slParams.OldOwnerName, slParams.OldOwnerID, slParams.ClusterID)
		}
+	} else {
+		postCmd = generateInternalServiceLog(slParams)
+		fmt.Println("Internal SL Being Sent")
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
+			fmt.Println(fmt.Sprintf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID))
+		}
 	}
 
 	masterKubeCli, _, masterKubeClientSet, err := common.GetKubeConfigAndClient(masterCluster.ID(), elevationReasons...)
 	if err != nil {
 		return fmt.Errorf("failed to retrieve Kubernetes configuration and client for Hive cluster ID %s: %w", masterCluster.ID(), err)
 	}
 
-	// Fetch the pull secret with the given new username
-	response, err := ocm.AccountsMgmt().V1().AccessToken().Post().Impersonate(userName).Parameter("body", nil).Send()
+	// Get account running this command to compare against cluster's account for
+	// impersonation purposes.
+	currentAccountResp, err := ocm.AccountsMgmt().V1().CurrentAccount().Get().Send()
+	var currentOCMAccount *amv1.Account = nil
 	if err != nil {
-		return fmt.Errorf("Can't send request: %w", err)
+		//Ignore this error and continue with an attempt to use 'impersonate' instead...
+		fmt.Fprintf(os.Stderr, "Failed to fetch currentAccount info, err:'%v'\n", err)
+		currentAccountResp = nil
+	} else {
+		currentOCMAccount = currentAccountResp.Body()
+	}
+
+	// Fetch the current Access Token for pull secret with the given new username from OCM
+	var response *amv1.AccessTokenPostResponse = nil
+	err = nil
+	if currentOCMAccount == nil || currentOCMAccount.Username() != userName {
+		// This account is not owned by the OCM account running this command, so impersonate...
+		// Impersonate requires region-lead permissions at this time.
+		response, err = ocm.AccountsMgmt().V1().AccessToken().Post().Impersonate(userName).Parameter("body", nil).Send()
+	} else {
+		// This account is owned by the OCM account running this command, no need to impersonate
+		// This allows non-region leads to test this utility against their own test clusters.
+ response, err = ocm.AccountsMgmt().V1().AccessToken().Post().Send() + } + if err != nil { + return fmt.Errorf("failed to fetch OCM AccessToken: %w", err) } auths, ok := response.Body().GetAuths() @@ -658,13 +835,24 @@ func (o *transferOwnerOptions) run() error { return fmt.Errorf("failed to marshal pull secret data: %w", err) } - // Print the pull secret - fmt.Println("Pull Secret:") + //Attempt to pretty print the json for easier user initial review... + prettySecret, err := json.MarshalIndent(map[string]map[string]map[string]string{ + "auths": authsMap, + }, "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "Error Marshalling data for pretty print. Err:'%v'", err) + } else { + blue.Println("Pull Secret data(Indented)...") + blue.Printf("\n%s\n", prettySecret) + } + + // Print the pull secret in it's actual form for user to confirm (ie no go, json, formatting errors, etc) + green.Print("\nPlease review Pull Secret data to be used for update(after formatting):\n") fmt.Println(string(pullSecret)) // Ask the user if they would like to continue var continueConfirmation string - fmt.Print("Do you want to continue? (yes/no): ") + fmt.Print("\nDo you want to continue? (yes/no): ") _, err = fmt.Scanln(&continueConfirmation) if err != nil { return fmt.Errorf("failed to read user input: %w", err) @@ -687,10 +875,12 @@ func (o *transferOwnerOptions) run() error { } } + fmt.Println("Create cluster kubecli...") _, _, targetClientSet, err := common.GetKubeConfigAndClient(o.clusterID, elevationReasons...) if err != nil { return fmt.Errorf("failed to retrieve Kubernetes configuration and client for cluster with ID %s: %w", o.clusterID, err) } + fmt.Println("Cluster kubecli created") // Rollout the telemeterClient pod for non HCP clusters if !o.hypershift { @@ -700,11 +890,34 @@ func (o *transferOwnerOptions) run() error { } } - err = verifyClusterPullSecret(targetClientSet, string(pullSecret)) + err = verifyClusterPullSecret(targetClientSet, string(pullSecret), auths) if err != nil { return fmt.Errorf("error verifying cluster pull secret: %w", err) } + if o.doPullSecretOnly { + // User has chosen to update pull secret w/o ownership transfer. + // Send SL to notify customer this is completed, then return the command. + fmt.Println("Notify the customer the pull-secret update is completed. Sending service log.") + //postCmd = generateServiceLog(slParams, SL_PULL_SECRET_ROTATED) + postCmd = servicelog.PostCmdOptions{ + Template: SL_PULL_SECRET_ROTATED, + ClusterId: o.clusterID, + TemplateParams: []string{fmt.Sprintf("ACCOUNT=%s", oldOwnerAccountID)}, + } + + if err := postCmd.Run(); err != nil { + fmt.Println("Failed to POST service log. Please manually send a service log to notify the customer the pull-secrete update completed:") + fmt.Printf("osdctl servicelog post %v -t %v -p %v\n", + o.clusterID, SL_PULL_SECRET_ROTATED, strings.Join(postCmd.TemplateParams, " -p ")) + } + + fmt.Printf("Pull secret update complete, exiting successfully\n") + return nil + } + + // Transfer ownership specific operations... 
+ fmt.Printf("Transfer cluster: \t\t'%v' (%v)\n", externalClusterID, cluster.Name()) fmt.Printf("from user \t\t\t'%v' to '%v'\n", oldOwnerAccount.ID(), accountID) if !utils.ConfirmPrompt() { @@ -760,7 +973,7 @@ func (o *transferOwnerOptions) run() error { response, err := subscriptionClient.Update().Body(subscriptionOrgPatch).Send() if err != nil || response.Status() != 200 { - return fmt.Errorf("request failed with status: %d, '%w'", response.Status(), err) + return fmt.Errorf("Update Subscription request to patch org failed with status: %d, err:'%w'", response.Status(), err) } fmt.Printf("Patched organization on subscription\n") } @@ -769,7 +982,16 @@ func (o *transferOwnerOptions) run() error { patchRes, err := subscriptionCreatorPatchRequest.Send() if err != nil || patchRes.Status() != 200 { - return fmt.Errorf("request failed with status: %d, '%w'", patchRes.Status(), err) + // err var is not always set to something meaningful here. + // Instead the response body usually contains the err info... + red.Fprintf(os.Stderr, "Error, Patch Request Response: '%s'\n", patchRes.String()) + var errString string + if err != nil { + errString = fmt.Sprintf("%v", err) + } else { + errString = patchRes.String() + } + return fmt.Errorf("Subscription request to patch creator failed with status: %d, err: '%s'", patchRes.Status(), errString) } fmt.Printf("Patched creator on subscription\n") @@ -777,7 +999,7 @@ func (o *transferOwnerOptions) run() error { err = deleteOldRoleBinding(ocm, subscriptionID) if err != nil { - fmt.Printf("can't delete old rolebinding %v \n", err) + fmt.Printf("Warning, can't delete old rolebinding, err: %v \n", err) } // create new rolebinding @@ -786,13 +1008,13 @@ func (o *transferOwnerOptions) run() error { // don't fail if the rolebinding already exists, could be rerun if err != nil { - return fmt.Errorf("request failed '%w'", err) + return fmt.Errorf("Account new roleBinding request failed, err: '%w'", err) } else if postRes.Status() == 201 { fmt.Printf("Created new role binding.\n") } else if postRes.Status() == 409 { fmt.Printf("can't add new rolebinding, rolebinding already exists\n") } else { - return fmt.Errorf("request failed with status: %d, '%w'", postRes.Status(), err) + return fmt.Errorf("Account new roleBinding request failed with status: %d, err: '%w'", postRes.Status(), err) } // If the organization id has changed, re-register the cluster with CS with the new organization id @@ -800,19 +1022,19 @@ func (o *transferOwnerOptions) run() error { request, err := createNewRegisterClusterRequest(ocm, externalClusterID, subscriptionID, newOrganizationId, clusterURL, displayName) if err != nil { - return fmt.Errorf("can't create RegisterClusterRequest with CS, '%w'", err) + return fmt.Errorf("can't create RegisterClusterRequest with CS, err:'%w'", err) } response, err := request.Send() if err != nil || (response.Status() != 200 && response.Status() != 201) { - return fmt.Errorf("request failed with status: %d, '%w'", response.Status(), err) + return fmt.Errorf("NewRegisterClusterRequest failed with status: %d, err:'%w'", response.Status(), err) } fmt.Print("Re-registered cluster\n") } err = validateTransfer(ocm, subscription.ClusterID(), newOrganizationId) if err != nil { - return fmt.Errorf("error while validating transfer %w", err) + return fmt.Errorf("error while validating transfer. 
%w", err) } fmt.Print("Transfer complete\n") @@ -834,7 +1056,7 @@ func getRoleBinding(ocm *sdk.Connection, subscriptionID string) (*amv1.RoleBindi Send() if err != nil { - return nil, fmt.Errorf("can't send request: %v", err) + return nil, fmt.Errorf("RoleBindings list, can't send request: %v", err) } if response.Total() == 0 { @@ -855,19 +1077,19 @@ func deleteOldRoleBinding(ocm *sdk.Connection, subscriptionID string) error { oldRoleBinding, err := getRoleBinding(ocm, subscriptionID) if err != nil { - return fmt.Errorf("can't get old owners rolebinding %w", err) + return fmt.Errorf("can't get old owners rolebinding: %w", err) } oldRoleBindingID, ok := oldRoleBinding.GetID() if !ok { - return fmt.Errorf("old rolebinding has no id %w", err) + return fmt.Errorf("old rolebinding has no id, err: %w", err) } oldRoleBindingClient := ocm.AccountsMgmt().V1().RoleBindings().RoleBinding(oldRoleBindingID) response, err := oldRoleBindingClient.Delete().Send() if err != nil { - return fmt.Errorf("request failed '%w'", err) + return fmt.Errorf("request failed, err: '%w'", err) } if response.Status() == 204 { fmt.Printf("Deleted old rolebinding: %v\n", oldRoleBindingID) @@ -877,7 +1099,7 @@ func deleteOldRoleBinding(ocm *sdk.Connection, subscriptionID string) error { fmt.Printf("can't find old rolebinding: %v\n", oldRoleBindingID) return nil } - fmt.Printf("request failed with status: %d\n", response.Status()) + fmt.Printf("delete RoleBindingrequest failed with status: %d\n", response.Status()) return nil } @@ -898,7 +1120,7 @@ func createSubscriptionCreatorPatchRequest(ocm *sdk.Connection, subscriptionID s body, err := json.Marshal(CreatorPatch{accountID}) if err != nil { - return nil, fmt.Errorf("cannot create body for request '%w'", err) + return nil, fmt.Errorf("cannot create body for request, err: '%w'", err) } request.Bytes(body) @@ -932,7 +1154,7 @@ func createNewRegisterClusterRequest(ocm *sdk.Connection, externalClusterID stri body, err := json.Marshal(RegisterCluster{externalClusterID, subscriptionID, organizationID, consoleURL, displayName}) if err != nil { - return nil, fmt.Errorf("cannot create body for request '%w'", err) + return nil, fmt.Errorf("cannot create body for request, err: '%w'", err) } request.Bytes(body) @@ -949,7 +1171,7 @@ func validateTransfer(ocm *sdk.Connection, clusterID string, newOrgID string) er Send() if err != nil || response.Status() != 200 { - return fmt.Errorf("request failed with status: %d, '%w'", response.Status(), err) + return fmt.Errorf("list clusters request failed with status: %d, err: '%w'", response.Status(), err) } if response.Total() == 0 { From 2314441f2eb1d15418eff007a1cc0c02e686dcdc Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Wed, 23 Apr 2025 16:28:01 -0700 Subject: [PATCH 02/40] OSD-26415: Update with generate-docs content --- docs/README.md | 1 + docs/osdctl_cluster_transfer-owner.md | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/docs/README.md b/docs/README.md index ab278971c..31a65b54b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1968,6 +1968,7 @@ osdctl cluster transfer-owner [flags] --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
--new-owner string The new owners username to transfer the cluster to -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] + --pull-secret-only Update cluster pull secret from current OCM AccessToken data without ownership transfer --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") -s, --server string The address and port of the Kubernetes API server diff --git a/docs/osdctl_cluster_transfer-owner.md b/docs/osdctl_cluster_transfer-owner.md index 5cdfb686e..75e9c30eb 100644 --- a/docs/osdctl_cluster_transfer-owner.md +++ b/docs/osdctl_cluster_transfer-owner.md @@ -6,6 +6,18 @@ Transfer cluster ownership to a new user (to be done by Region Lead) osdctl cluster transfer-owner [flags] ``` +### Examples + +``` + + # Transfer ownership + osdctl cluster transfer-owner --new-owner "new_OCM_userName" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id" + + # Update pull secret without transfering ownership + osdctl cluster transfer-owner --pull-secret-only --cluster-id 1kfmyclusteristhebesteverp8m --reason "update pull secret per jira-id" + +``` + ### Options ``` @@ -13,6 +25,7 @@ osdctl cluster transfer-owner [flags] -d, --dry-run Dry-run - show all changes but do not apply them -h, --help help for transfer-owner --new-owner string The new owners username to transfer the cluster to + --pull-secret-only Update cluster pull secret from current OCM AccessToken data without ownership transfer --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) ``` From 086e0ba0221d8a1eb45d26f391dd6959ec906dfe Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Wed, 21 May 2025 12:26:12 -0700 Subject: [PATCH 03/40] OSD-26415: New update-pullsecret wrapper cmd separate from transfer-owner. Updates per PR review. 
--- cmd/cluster/cmd.go | 1 + cmd/cluster/transferowner.go | 85 +++++++++++------------- cmd/cluster/updatepullsecret.go | 42 ++++++++++++ docs/README.md | 29 +++++++- docs/osdctl_cluster.md | 1 + docs/osdctl_cluster_transfer-owner.md | 4 -- docs/osdctl_cluster_update-pullsecret.md | 45 +++++++++++++ 7 files changed, 156 insertions(+), 51 deletions(-) create mode 100644 cmd/cluster/updatepullsecret.go create mode 100644 docs/osdctl_cluster_update-pullsecret.md diff --git a/cmd/cluster/cmd.go b/cmd/cluster/cmd.go index 35e5e1a5e..46b6b065e 100644 --- a/cmd/cluster/cmd.go +++ b/cmd/cluster/cmd.go @@ -31,6 +31,7 @@ func NewCmdCluster(streams genericclioptions.IOStreams, client *k8s.LazyClient, clusterCmd.AddCommand(newCmdResync()) clusterCmd.AddCommand(newCmdContext()) clusterCmd.AddCommand(newCmdTransferOwner(streams, globalOpts)) + clusterCmd.AddCommand(newCmdUpdatePullSecret(streams, globalOpts)) clusterCmd.AddCommand(access.NewCmdAccess(streams, client)) clusterCmd.AddCommand(newCmdCpd()) clusterCmd.AddCommand(newCmdCheckBannedUser()) diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go index 2af54cb44..e1335b88e 100644 --- a/cmd/cluster/transferowner.go +++ b/cmd/cluster/transferowner.go @@ -4,6 +4,7 @@ import ( "bufio" "context" "encoding/json" + "errors" "fmt" "math/rand" "os" @@ -66,9 +67,6 @@ var green *color.Color const transferOwnerCmdExample = ` # Transfer ownership osdctl cluster transfer-owner --new-owner "new_OCM_userName" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id" - - # Update pull secret without transfering ownership - osdctl cluster transfer-owner --pull-secret-only --cluster-id 1kfmyclusteristhebesteverp8m --reason "update pull secret per jira-id" ` func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command { @@ -93,10 +91,8 @@ func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *global _ = transferOwnerCmd.MarkFlagRequired("cluster-id") _ = transferOwnerCmd.MarkFlagRequired("reason") - transferOwnerCmd.MarkFlagsOneRequired("new-owner", "pull-secret-only") - // Avoid user errors, typos, etc.. Dont allow 'new-owner' to be provided when rotating 'pull-secret-only' - transferOwnerCmd.MarkFlagsMutuallyExclusive("new-owner", "pull-secret-only") - + _ = transferOwnerCmd.Flags().MarkHidden("pull-secret-only") + transferOwnerCmd.MarkFlagRequired("new-owner") return transferOwnerCmd } @@ -304,30 +300,26 @@ func rolloutTelemeterClientPods(clientset *kubernetes.Clientset, namespace, sele } func comparePullSecretAuths(pullSecret *corev1.Secret, expectedAuths map[string]*amv1.AccessTokenAuth) error { - var errString strings.Builder + var err error = nil blue.Println("\nComparing pull-secret to expected auth sections...") for akey, auth := range expectedAuths { // Find the matching auth entry for this registry name in the cluster pull_secret data... 
		psTokenAuth, err := getPullSecretTokenAuth(akey, pullSecret)
 		if err != nil {
-			errString.WriteString(fmt.Sprintf("Failed to fetch expected auth['%s'] from cluster pull-secret, err:'%s'.\n", akey, err))
+			err = errors.Join(err, fmt.Errorf("Failed to fetch expected auth['%s'] from cluster pull-secret, err:'%s'.\n", akey, err))
 		}
 		if auth.Auth() != psTokenAuth.Auth() {
-			errString.WriteString(fmt.Sprintf("Expected auth['%s'] does not match authToken found in cluster pull-secret.\n", akey))
+			err = errors.Join(err, fmt.Errorf("Expected auth['%s'] does not match authToken found in cluster pull-secret.\n", akey))
 		} else {
 			green.Printf("Auth '%s' - tokens match\n", akey)
 		}
 		if auth.Email() != psTokenAuth.Email() {
-			errString.WriteString(fmt.Sprintf("Expected auth['%s'] does not match email found in cluster pull-secret.\n", akey))
+			err = errors.Join(err, fmt.Errorf("Expected auth['%s'] does not match email found in cluster pull-secret.\n", akey))
 		} else {
 			green.Printf("Auth '%s' - emails match\n", akey)
 		}
 	}
-	if errString.Len() <= 0 {
-		return nil
-	} else {
-		return fmt.Errorf("%s", errString.String())
-	}
+	return err
 }
 
 func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret string, expectedAuths map[string]*amv1.AccessTokenAuth) error {
@@ -336,8 +328,6 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret
 	if err != nil {
 		return fmt.Errorf("failed to get pull secret: %w", err)
 	}
-
-	// Print the actual pull secret data
 	pullSecretData, ok := pullSecret.Data[".dockerconfigjson"]
 	if !ok {
 		return fmt.Errorf("pull secret data not found in the secret")
@@ -345,42 +335,45 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret
 	err = comparePullSecretAuths(pullSecret, expectedAuths)
 	if err != nil {
 		red.Printf("\nFound mis-matching auth values during compare. Please review:\n%s", err)
-		fmt.Printf("Would you like to continue?")
+		fmt.Print("Would you like to continue?")
 		if !utils.ConfirmPrompt() {
 			return fmt.Errorf("operation aborted by the user")
 		}
 	} else {
-		//fmt.Printf("\nComparison shows subset of Auths from OCM AuthToken have matching tokens + emails in cluster pull-secret. PASS\n")
 		green.Println("\nComparison shows subset of Auths from OCM AuthToken have matching tokens + emails in cluster pull-secret. PASS")
 	}
+	// This step was in the original utility, so leaving the option to print data to terminal here,
+	// but making it optional and prompting the user instead. The new programmatic
+	// comparisons per comparePullSecretAuths() may negate the need for a visual inspection in most cases...
+	fmt.Print("(Optional. WARNING: This will print sensitive data to the terminal) Would you like to print pull secret content to screen for additional visual comparison?")
+	if utils.ConfirmPrompt() {
+		// Print the actual pull secret data
+		blue.Println("Actual Cluster Pull Secret:")
+		fmt.Println(string(pullSecretData))
+
+		// Print the expected pull secret
+		blue.Println("\nExpected Auths from OCM AccessToken expected to be present in Pull Secret (note this can be a subset):")
+		fmt.Println(expectedPullSecret)
+
+		// TODO: Consider confirming that the email and token values of the 'subset' of Auths
+		// contained in the OCM AccessToken actually matches email/token values in the cluster's
+		// openshift-config/pull-secret. Provide any discrepancies to the user here before
+		// prompting to visually evaluate.
+ // + // Ask the user to confirm if the actual pull secret matches their expectation + reader := bufio.NewReader(os.Stdin) + fmt.Print("\nDoes the actual pull secret match your expectation? (yes/no): ") + response, err := reader.ReadString('\n') + if err != nil { + return fmt.Errorf("failed to read user input: %w", err) + } + response = strings.ToLower(strings.TrimSpace(response)) + if response != "yes" { + return fmt.Errorf("operation aborted by the user") + } - blue.Println("Actual Cluster Pull Secret:") - fmt.Println(string(pullSecretData)) - - // Print the expected pull secret - blue.Println("\nExpected Auths from OCM AccessToken expected to be present in Pull Secret (note this can be a subset):") - fmt.Println(expectedPullSecret) - - // TODO: Consider confirming that the email and token values of the 'subset' of Auths - // contained in the OCM AccessToken actually matches email/token values in the cluster's - // openshift-config/pull-secret. Provide any descrepencies to the user here before - // prompting to visually evaluate. - // - // Ask the user to confirm if the actual pull secret matches their expectation - reader := bufio.NewReader(os.Stdin) - fmt.Print("\nDoes the actual pull secret match your expectation? (yes/no): ") - response, err := reader.ReadString('\n') - if err != nil { - return fmt.Errorf("failed to read user input: %w", err) - } - - response = strings.ToLower(strings.TrimSpace(response)) - if response != "yes" { - return fmt.Errorf("operation aborted by the user") + green.Println("Pull secret verification (by user) successful.") } - - green.Println("Pull secret verification (by user) successful.") - return nil } diff --git a/cmd/cluster/updatepullsecret.go b/cmd/cluster/updatepullsecret.go new file mode 100644 index 000000000..2dd5f1730 --- /dev/null +++ b/cmd/cluster/updatepullsecret.go @@ -0,0 +1,42 @@ +package cluster + +import ( + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + + "github.com/openshift/osdctl/internal/utils/globalflags" +) + +const updatePullSecretCmdExample = ` + # Update Pull Secret's OCM access token data + osdctl cluster update-pullsecret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" +` + +func newCmdUpdatePullSecret(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command { + ops := newTransferOwnerOptions(streams, globalOpts) + updatePullSecretCmd := &cobra.Command{ + Use: "update-pullsecret", + Short: "Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead)", + Args: cobra.NoArgs, + Example: updatePullSecretCmdExample, + DisableAutoGenTag: true, + PreRun: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(ops.preRun()) }, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(ops.run()) + }, + } + // can we get cluster-id from some context maybe? 
+ updatePullSecretCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The Internal Cluster ID/External Cluster ID/ Cluster Name") + updatePullSecretCmd.Flags().BoolVarP(&ops.dryrun, "dry-run", "d", false, "Dry-run - show all changes but do not apply them") + updatePullSecretCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") + + _ = updatePullSecretCmd.MarkFlagRequired("cluster-id") + _ = updatePullSecretCmd.MarkFlagRequired("reason") + // This arg is used as part of this wrapper command to instruct the transfer op to exit after + // updating the pull secret, and before doing the ownership transfer... + updatePullSecretCmd.Flags().BoolVar(&ops.doPullSecretOnly, "pull-secret-only", true, "Update cluster pull secret from current OCM AccessToken data without ownership transfer") + _ = updatePullSecretCmd.Flags().MarkHidden("pull-secret-only") + + return updatePullSecretCmd +} diff --git a/docs/README.md b/docs/README.md index 31a65b54b..fea66e64e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -68,6 +68,7 @@ - `post --cluster-id ` - Send limited support reason to a given cluster - `status --cluster-id ` - Shows the support status of a specified cluster - `transfer-owner` - Transfer cluster ownership to a new user (to be done by Region Lead) + - `update-pullsecret` - Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) - `validate-pull-secret --cluster-id ` - Checks if the pull secret email matches the owner email - `validate-pull-secret-ext [CLUSTER_ID]` - Extended checks to confirm pull-secret data is synced with current OCM data - `cost` - Cost Management related utilities @@ -1968,7 +1969,33 @@ osdctl cluster transfer-owner [flags] --kubeconfig string Path to the kubeconfig file to use for CLI requests. --new-owner string The new owners username to transfer the cluster to -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] - --pull-secret-only Update cluster pull secret from current OCM AccessToken data without ownership transfer + --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + -S, --skip-version-check skip checking to see if this is the most recent release +``` + +### osdctl cluster update-pullsecret + +Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) + +``` +osdctl cluster update-pullsecret [flags] +``` + +#### Flags + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --cluster string The name of the kubeconfig cluster to use + -C, --cluster-id string The Internal Cluster ID/External Cluster ID/ Cluster Name + --context string The name of the kubeconfig context to use + -d, --dry-run Dry-run - show all changes but do not apply them + -h, --help help for update-pullsecret + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") -s, --server string The address and port of the Kubernetes API server diff --git a/docs/osdctl_cluster.md b/docs/osdctl_cluster.md index 312cf7f10..3f5e85757 100644 --- a/docs/osdctl_cluster.md +++ b/docs/osdctl_cluster.md @@ -45,6 +45,7 @@ Provides information for a specified cluster * [osdctl cluster ssh](osdctl_cluster_ssh.md) - utilities for accessing cluster via ssh * [osdctl cluster support](osdctl_cluster_support.md) - Cluster Support * [osdctl cluster transfer-owner](osdctl_cluster_transfer-owner.md) - Transfer cluster ownership to a new user (to be done by Region Lead) +* [osdctl cluster update-pullsecret](osdctl_cluster_update-pullsecret.md) - Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) * [osdctl cluster validate-pull-secret](osdctl_cluster_validate-pull-secret.md) - Checks if the pull secret email matches the owner email * [osdctl cluster validate-pull-secret-ext](osdctl_cluster_validate-pull-secret-ext.md) - Extended checks to confirm pull-secret data is synced with current OCM data diff --git a/docs/osdctl_cluster_transfer-owner.md b/docs/osdctl_cluster_transfer-owner.md index 75e9c30eb..db15cc3ef 100644 --- a/docs/osdctl_cluster_transfer-owner.md +++ b/docs/osdctl_cluster_transfer-owner.md @@ -13,9 +13,6 @@ osdctl cluster transfer-owner [flags] # Transfer ownership osdctl cluster transfer-owner --new-owner "new_OCM_userName" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id" - # Update pull secret without transfering ownership - osdctl cluster transfer-owner --pull-secret-only --cluster-id 1kfmyclusteristhebesteverp8m --reason "update pull secret per jira-id" - ``` ### Options @@ -25,7 +22,6 @@ osdctl cluster transfer-owner [flags] -d, --dry-run Dry-run - show all changes but do not apply them -h, --help help for transfer-owner --new-owner string The new owners username to transfer the cluster to - --pull-secret-only Update cluster pull secret from current OCM AccessToken data without ownership transfer --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) ``` diff --git a/docs/osdctl_cluster_update-pullsecret.md b/docs/osdctl_cluster_update-pullsecret.md new file mode 100644 index 000000000..c90501714 --- /dev/null +++ b/docs/osdctl_cluster_update-pullsecret.md @@ -0,0 +1,45 @@ +## osdctl cluster update-pullsecret + +Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) + +``` +osdctl cluster update-pullsecret [flags] +``` + +### Examples + +``` + + # Update Pull Secret's OCM access token data + osdctl cluster update-pullsecret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" + +``` + +### Options + +``` + -C, --cluster-id string The Internal Cluster ID/External Cluster ID/ Cluster Name + -d, --dry-run Dry-run - show all changes but do not apply them + -h, --help help for update-pullsecret + --reason string The reason for this command, which 
requires elevation, to be run (usualy an OHSS or PD ticket) +``` + +### Options inherited from parent commands + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --cluster string The name of the kubeconfig cluster to use + --context string The name of the kubeconfig context to use + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + -S, --skip-version-check skip checking to see if this is the most recent release +``` + +### SEE ALSO + +* [osdctl cluster](osdctl_cluster.md) - Provides information for a specified cluster + From 970f0013f496337b89f9f2184d5ad1885c9d9ff0 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Wed, 21 May 2025 13:51:14 -0700 Subject: [PATCH 04/40] OSD-26415: Lint updates --- cmd/cluster/transferowner.go | 60 +++++++++++++++------------------ cmd/cluster/updatepullsecret.go | 6 ++-- 2 files changed, 31 insertions(+), 35 deletions(-) diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go index e1335b88e..d649e7d34 100644 --- a/cmd/cluster/transferowner.go +++ b/cmd/cluster/transferowner.go @@ -40,14 +40,13 @@ import ( const ( CheckSyncMaxAttempts = 24 - SL_TRANSFER_INITIATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_starting.json" - SL_TRANSFER_COMPLETE = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_completed.json" - SL_PULL_SECRET_ROTATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/pull_secret_rotated.json" + SL_TRANSFER_INITIATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_starting.json" + SL_TRANSFER_COMPLETE = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_completed.json" + SL_PULLSEC_ROTATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/pull_secret_rotated.json" ) // transferOwnerOptions defines the struct for running transferOwner command type transferOwnerOptions struct { - output string clusterID string newOwnerName string reason string @@ -92,7 +91,7 @@ func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *global _ = transferOwnerCmd.MarkFlagRequired("cluster-id") _ = transferOwnerCmd.MarkFlagRequired("reason") _ = transferOwnerCmd.Flags().MarkHidden("pull-secret-only") - transferOwnerCmd.MarkFlagRequired("new-owner") + _ = transferOwnerCmd.MarkFlagRequired("new-owner") return transferOwnerCmd } @@ -112,14 +111,6 @@ type serviceLogParameters struct { IsExternalOrgTransfer bool } -// SL params for pull-secret updates w/o owner transfer -type psOnlyServiceLogParameters struct { - ClusterID string - OwnerName string - OwnerID string - PullSecretUpdated bool -} - func (o *transferOwnerOptions) preRun() error { 
// Initialize the color formats... red = color.New(color.FgHiRed, color.BgBlack) @@ -301,20 +292,26 @@ func rolloutTelemeterClientPods(clientset *kubernetes.Clientset, namespace, sele func comparePullSecretAuths(pullSecret *corev1.Secret, expectedAuths map[string]*amv1.AccessTokenAuth) error { var err error = nil + var psTokenAuth *amv1.AccessTokenAuth = nil blue.Println("\nComparing pull-secret to expected auth sections...") for akey, auth := range expectedAuths { // Find the matching auth entry for this registry name in the cluster pull_secret data... - psTokenAuth, err := getPullSecretTokenAuth(akey, pullSecret) + psTokenAuth, err = getPullSecretTokenAuth(akey, pullSecret) if err != nil { - err = errors.Join(err, fmt.Errorf("Failed to fetch expected auth['%s'] from cluster pull-secret, err:'%s'.\n", akey, err)) + err = errors.Join(err, fmt.Errorf("failed to fetch expected auth['%s'] from cluster pull-secret, err:'%s'", akey, err)) + continue + } + if psTokenAuth == nil { + err = errors.Join(err, fmt.Errorf("failed to fetch expected auth['%s'] from cluster pull-secret, err: (nil authToken)", akey)) + continue } if auth.Auth() != psTokenAuth.Auth() { - err = errors.Join(err, fmt.Errorf("Expected auth['%s'] does not match authToken found in cluster pull-secret.\n", akey)) + err = errors.Join(err, fmt.Errorf("expected auth['%s'] does not match authToken found in cluster pull-secret", akey)) } else { green.Printf("Auth '%s' - tokens match\n", akey) } if auth.Email() != psTokenAuth.Email() { - err = errors.Join(err, fmt.Errorf("Expected auth['%s'] does not match email found in cluster pull-secret.\n", akey)) + err = errors.Join(err, fmt.Errorf("expected auth['%s'] does not match email found in cluster pull-secret", akey)) } else { green.Printf("Auth '%s' - emails match\n", akey) } @@ -543,11 +540,11 @@ func (o *transferOwnerOptions) run() error { // Use existing subscription, account, and userName value... 
subscription, err = utils.GetSubscription(ocm, o.clusterID) if err != nil { - return fmt.Errorf("Failed to get subscription info for cluster:'%s', err: '%v'", o.clusterID, err) + return fmt.Errorf("failed to get subscription info for cluster:'%s', err: '%v'", o.clusterID, err) } oldOwnerAccount, err = utils.GetAccount(ocm, subscription.Creator().ID()) if err != nil { - return fmt.Errorf("Failed to get account info from subscription, err:'%v'", err) + return fmt.Errorf("failed to get account info from subscription, err:'%v'", err) } userName = oldOwnerAccount.Username() fmt.Printf("Old username:'%s'\n", userName) @@ -561,7 +558,7 @@ func (o *transferOwnerOptions) run() error { ok := false userName, ok = userDetails.Body().GetUsername() if !ok { - return fmt.Errorf("Failed to get username from new user id") + return fmt.Errorf("failed to get username from new user id") } } @@ -594,7 +591,7 @@ func (o *transferOwnerOptions) run() error { o.reason, } if o.doPullSecretOnly { - elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl")) + elevationReasons = append(elevationReasons, "Updating pull secret using osdctl") } else { elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl to tranfert owner to %s", o.newOwnerName)) } @@ -737,10 +734,10 @@ func (o *transferOwnerOptions) run() error { } var postCmd servicelog.PostCmdOptions - //TODO: If only updating the pull-secret, and not transfering ownership + //TODO: If only updating the pull-secret, and not transferring ownership // should we send a SL both before and after the rotate operation? // Currently only sending an after the pull-secret update is completed - // when not also transfering ownership. + // when not also transferring ownership. if !o.doPullSecretOnly { // Send a SL saying we're about to start ownership transfer fmt.Println("Notify the customer before ownership transfer commences. Sending service log.") @@ -772,7 +769,7 @@ func (o *transferOwnerOptions) run() error { fmt.Println("Internal SL Being Sent") if err := postCmd.Run(); err != nil { fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:") - fmt.Println(fmt.Sprintf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID)) + fmt.Printf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s \n", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID) } } @@ -795,7 +792,6 @@ func (o *transferOwnerOptions) run() error { // Fetch the current Access Token for pull secret with the given new username from OCM var response *amv1.AccessTokenPostResponse = nil - err = nil if currentOCMAccount == nil || currentOCMAccount.Username() != userName { // This account is not owned by the OCM account running this command, so impersonate... // Impersonate requires region-lead permissions at this time. @@ -894,7 +890,7 @@ func (o *transferOwnerOptions) run() error { fmt.Println("Notify the customer the pull-secret update is completed. 
Sending service log.") //postCmd = generateServiceLog(slParams, SL_PULL_SECRET_ROTATED) postCmd = servicelog.PostCmdOptions{ - Template: SL_PULL_SECRET_ROTATED, + Template: SL_PULLSEC_ROTATED, ClusterId: o.clusterID, TemplateParams: []string{fmt.Sprintf("ACCOUNT=%s", oldOwnerAccountID)}, } @@ -902,7 +898,7 @@ func (o *transferOwnerOptions) run() error { if err := postCmd.Run(); err != nil { fmt.Println("Failed to POST service log. Please manually send a service log to notify the customer the pull-secrete update completed:") fmt.Printf("osdctl servicelog post %v -t %v -p %v\n", - o.clusterID, SL_PULL_SECRET_ROTATED, strings.Join(postCmd.TemplateParams, " -p ")) + o.clusterID, SL_PULLSEC_ROTATED, strings.Join(postCmd.TemplateParams, " -p ")) } fmt.Printf("Pull secret update complete, exiting successfully\n") @@ -966,7 +962,7 @@ func (o *transferOwnerOptions) run() error { response, err := subscriptionClient.Update().Body(subscriptionOrgPatch).Send() if err != nil || response.Status() != 200 { - return fmt.Errorf("Update Subscription request to patch org failed with status: %d, err:'%w'", response.Status(), err) + return fmt.Errorf("update Subscription request to patch org failed with status: %d, err:'%w'", response.Status(), err) } fmt.Printf("Patched organization on subscription\n") } @@ -984,7 +980,7 @@ func (o *transferOwnerOptions) run() error { } else { errString = patchRes.String() } - return fmt.Errorf("Subscription request to patch creator failed with status: %d, err: '%s'", patchRes.Status(), errString) + return fmt.Errorf("subscription request to patch creator failed with status: %d, err: '%s'", patchRes.Status(), errString) } fmt.Printf("Patched creator on subscription\n") @@ -1001,13 +997,13 @@ func (o *transferOwnerOptions) run() error { // don't fail if the rolebinding already exists, could be rerun if err != nil { - return fmt.Errorf("Account new roleBinding request failed, err: '%w'", err) + return fmt.Errorf("account new roleBinding request failed, err: '%w'", err) } else if postRes.Status() == 201 { fmt.Printf("Created new role binding.\n") } else if postRes.Status() == 409 { fmt.Printf("can't add new rolebinding, rolebinding already exists\n") } else { - return fmt.Errorf("Account new roleBinding request failed with status: %d, err: '%w'", postRes.Status(), err) + return fmt.Errorf("account new roleBinding request failed with status: %d, err: '%w'", postRes.Status(), err) } // If the organization id has changed, re-register the cluster with CS with the new organization id @@ -1020,7 +1016,7 @@ func (o *transferOwnerOptions) run() error { response, err := request.Send() if err != nil || (response.Status() != 200 && response.Status() != 201) { - return fmt.Errorf("NewRegisterClusterRequest failed with status: %d, err:'%w'", response.Status(), err) + return fmt.Errorf("newRegisterClusterRequest failed with status: %d, err:'%w'", response.Status(), err) } fmt.Print("Re-registered cluster\n") } diff --git a/cmd/cluster/updatepullsecret.go b/cmd/cluster/updatepullsecret.go index 2dd5f1730..dcf4268a8 100644 --- a/cmd/cluster/updatepullsecret.go +++ b/cmd/cluster/updatepullsecret.go @@ -8,7 +8,7 @@ import ( "github.com/openshift/osdctl/internal/utils/globalflags" ) -const updatePullSecretCmdExample = ` +const updatePullSecCmdExample = ` # Update Pull Secret's OCM access token data osdctl cluster update-pullsecret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" ` @@ -19,7 +19,7 @@ func newCmdUpdatePullSecret(streams 
genericclioptions.IOStreams, globalOpts *glo Use: "update-pullsecret", Short: "Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead)", Args: cobra.NoArgs, - Example: updatePullSecretCmdExample, + Example: updatePullSecCmdExample, DisableAutoGenTag: true, PreRun: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(ops.preRun()) }, Run: func(cmd *cobra.Command, args []string) { @@ -29,7 +29,7 @@ func newCmdUpdatePullSecret(streams genericclioptions.IOStreams, globalOpts *glo // can we get cluster-id from some context maybe? updatePullSecretCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The Internal Cluster ID/External Cluster ID/ Cluster Name") updatePullSecretCmd.Flags().BoolVarP(&ops.dryrun, "dry-run", "d", false, "Dry-run - show all changes but do not apply them") - updatePullSecretCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") + updatePullSecretCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)") _ = updatePullSecretCmd.MarkFlagRequired("cluster-id") _ = updatePullSecretCmd.MarkFlagRequired("reason") From bc3f52821da6c6a6e8cb400d62876da4267ea498 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Wed, 21 May 2025 14:31:39 -0700 Subject: [PATCH 05/40] OSD-26415: Update Docs --- docs/README.md | 2 +- docs/osdctl_cluster_update-pullsecret.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README.md b/docs/README.md index fea66e64e..f6b31fd2f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1996,7 +1996,7 @@ osdctl cluster update-pullsecret [flags] --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] - --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) + --reason string The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket) --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") -s, --server string The address and port of the Kubernetes API server --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value diff --git a/docs/osdctl_cluster_update-pullsecret.md b/docs/osdctl_cluster_update-pullsecret.md index c90501714..7692cb866 100644 --- a/docs/osdctl_cluster_update-pullsecret.md +++ b/docs/osdctl_cluster_update-pullsecret.md @@ -21,7 +21,7 @@ osdctl cluster update-pullsecret [flags] -C, --cluster-id string The Internal Cluster ID/External Cluster ID/ Cluster Name -d, --dry-run Dry-run - show all changes but do not apply them -h, --help help for update-pullsecret - --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) + --reason string The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket) ``` ### Options inherited from parent commands From 00623704eed723ae63d7561e16f2d7a5532db3a5 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Wed, 21 May 2025 14:39:14 -0700 Subject: [PATCH 06/40] Forcing an empty commit. 
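An empty commit like this one carries no diff and is typically pushed just to retrigger CI. For reference, a sketch of the usual commands (the commit message here simply mirrors the subject above and is illustrative):

    git commit --allow-empty -m "Forcing an empty commit."
    git push origin HEAD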
From be702b3c5be9d9ef2ec390c35ec118c91f84b676 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Thu, 22 May 2025 15:48:22 -0700 Subject: [PATCH 07/40] OSD-26415: Update messaging, format when prompting user. --- cmd/cluster/transferowner.go | 94 +++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 33 deletions(-) diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go index d649e7d34..540fbfdda 100644 --- a/cmd/cluster/transferowner.go +++ b/cmd/cluster/transferowner.go @@ -35,6 +35,8 @@ import ( "github.com/openshift/osdctl/internal/utils/globalflags" "github.com/openshift/osdctl/pkg/utils" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) const ( @@ -53,6 +55,7 @@ type transferOwnerOptions struct { dryrun bool hypershift bool doPullSecretOnly bool + opDescription string cluster *cmv1.Cluster genericclioptions.IOStreams @@ -116,6 +119,11 @@ func (o *transferOwnerOptions) preRun() error { red = color.New(color.FgHiRed, color.BgBlack) green = color.New(color.FgHiGreen, color.BgBlack) blue = color.New(color.FgHiBlue, color.BgBlack) + if o.doPullSecretOnly { + o.opDescription = "update pull-secret" + } else { + o.opDescription = "transfer ownership" + } return nil } @@ -138,6 +146,11 @@ func generateServiceLog(params serviceLogParameters, template string) servicelog func updatePullSecret(conn *sdk.Connection, kubeCli client.Client, clientset *kubernetes.Clientset, clusterID string, pullsecret []byte) error { currentEnv := utils.GetCurrentOCMEnv(conn) + if currentEnv == "stage" { + // stage hive cluster namespaces are prefixed 'uhc-staging' although the ocm url is currently using 'stage' + // This may be better as a loop over all the namespaces looking for a clusterid match instead? + currentEnv = "staging" + } secretName := "pull" hiveNamespace := "uhc-" + currentEnv + "-" + clusterID @@ -147,7 +160,7 @@ func updatePullSecret(conn *sdk.Connection, kubeCli client.Client, clientset *ku } if len(clusterDeployments.Items) == 0 { - return fmt.Errorf("failed to retreive cluster deployments") + return fmt.Errorf("error, found '0' cluster deployments in hive namespace:'%s'", hiveNamespace) } cdName := clusterDeployments.Items[0].ObjectMeta.Name @@ -342,7 +355,9 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret // This step was in the original utlity so leaving the option to print data to terminal here, // but making it optional and prompting the user instead. The new programatic // comparisons per comparePullSecretAuths() may negate the need for a visual inspection in most cases... - fmt.Print("(Optional. WARNING: This will print sensitive data to the terminal) Would you like to print pull secret content to screen for additional visual comparison?") + red.Print("\nWARNING: This will print sensitive data to the terminal!\n") + fmt.Print("Would you like to print pull secret content to screen for additional visual comparison?\n") + fmt.Print("Choose 'N' to skip, 'Y' to display secret. ") if utils.ConfirmPrompt() { // Print the actual pull secret data blue.Println("Actual Cluster Pull Secret:") @@ -370,6 +385,8 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret } green.Println("Pull secret verification (by user) successful.") + } else { + fmt.Println("(Skipping display)") } return nil } @@ -516,6 +533,8 @@ func (o *transferOwnerOptions) run() error { // Create an OCM client to talk to the cluster API // the user has to be logged in (e.g. 
'ocm login')
 	var err error
+	// To avoid warnings/backtrace, if k8s controller-runtime logger is not yet set, do it now...
+	log.SetLogger(zap.New(zap.WriteTo(os.Stderr)))
 	ocm, err := utils.CreateConnection()
 	if err != nil {
 		return fmt.Errorf("failed to create OCM client: %w", err)
 	}
@@ -571,7 +590,7 @@ func (o *transferOwnerOptions) run() error {
 	// Find and setup all resources that are needed
 	if o.hypershift {
-		fmt.Println("Given cluster is HCP, start to proceed the HCP owner transfer")
+		fmt.Printf("Given cluster is HCP, proceeding with the HCP '%s' operation\n", o.opDescription)
 		mgmtCluster, err = utils.GetManagementCluster(o.clusterID)
 		svcCluster, err = utils.GetServiceCluster(o.clusterID)
 		if err != nil {
@@ -579,7 +598,7 @@
 		}
 		masterCluster = svcCluster
 	} else {
-		fmt.Println("Given cluster is OSD/ROSA classic, start to proceed the classic owner transfer")
+		fmt.Printf("Given cluster is OSD/ROSA classic, proceeding with the classic '%s' operation\n", o.opDescription)
 		hiveCluster, err = utils.GetHiveCluster(o.clusterID)
 		if err != nil {
 			return err
 		}
@@ -593,10 +612,10 @@
 	if o.doPullSecretOnly {
 		elevationReasons = append(elevationReasons, "Updating pull secret using osdctl")
 	} else {
-		elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl to tranfert owner to %s", o.newOwnerName))
+		elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl to transfer owner to %s", o.newOwnerName))
 	}
 	// Gather all required information
-	fmt.Println("Gathering all required information for the cluster transfer...")
+	fmt.Printf("Gathering all required information for the cluster '%s'...\n", o.opDescription)
 	cluster, err = utils.GetCluster(ocm, o.clusterID)
 	if err != nil {
 		return fmt.Errorf("failed to get cluster information for cluster with ID %s: %w", o.clusterID, err)
 	}
@@ -751,6 +770,7 @@ func (o *transferOwnerOptions) run() error {
 	// Send internal SL to cluster with additional details in case we
 	// need them later. This prevents leaking PII to customers.
+	fmt.Print("\nPlease review the following 'Internal' ServiceLog. (Choose 'Y' to send, or 'N' to skip sending this SL...)\n")
 	if o.doPullSecretOnly {
 		postCmd = servicelog.PostCmdOptions{
 			ClusterId: slParams.ClusterID,
 			TemplateParams: []string{
 				fmt.Sprintf("MESSAGE=Pull-secret update. UserName:'%s', OwnerID:'%s'.", slParams.OldOwnerName, slParams.OldOwnerID),
 			},
 			InternalOnly: true,
 		}
-		fmt.Println("Internal SL Being Sent")
 		if err := postCmd.Run(); err != nil {
 			fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
 			fmt.Printf("osdctl servicelog post -i -p MESSAGE=\"Pull-secret update. UserName:'%s', OwnerID:'%s'.\" %s \n", slParams.OldOwnerName, slParams.OldOwnerID, slParams.ClusterID)
 		}
 	} else {
 		postCmd = generateInternalServiceLog(slParams)
-		fmt.Println("Internal SL Being Sent")
 		if err := postCmd.Run(); err != nil {
 			fmt.Println("Failed to POST internal service log.
Please manually send a service log to persist details of the customer transfer before proceeding:")
 			fmt.Printf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s \n", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID)
@@ -824,32 +842,41 @@ func (o *transferOwnerOptions) run() error {
 		return fmt.Errorf("failed to marshal pull secret data: %w", err)
 	}
-	//Attempt to pretty print the json for easier user initial review...
-	prettySecret, err := json.MarshalIndent(map[string]map[string]map[string]string{
-		"auths": authsMap,
-	}, "", " ")
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error Marshalling data for pretty print. Err:'%v'", err)
-	} else {
-		blue.Println("Pull Secret data(Indented)...")
-		blue.Printf("\n%s\n", prettySecret)
-	}
+	// This step was in the original utility so leaving the option to print data to terminal here,
+	// but making it optional and prompting the user instead. The new programmatic
+	// comparisons per comparePullSecretAuths() may negate the need for a visual inspection in most cases...
+	red.Print("\nWARNING: This will print sensitive data to the terminal!\n")
+	fmt.Print("Would you like to print pull secret content to screen for visual review?\nDisplay pullsecret data (choose 'N' to skip, 'Y' to display)? ")
+	if utils.ConfirmPrompt() {
+		//Attempt to pretty print the json for easier user initial review...
+		prettySecret, err := json.MarshalIndent(map[string]map[string]map[string]string{
+			"auths": authsMap,
+		}, "", " ")
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error marshalling data for pretty print. Err:'%v'", err)
+		} else {
+			blue.Println("Pull Secret data (indented)...")
+			blue.Printf("\n%s\n", prettySecret)
+		}
-	// Print the pull secret in it's actual form for user to confirm (ie no go, json, formatting errors, etc)
-	green.Print("\nPlease review Pull Secret data to be used for update(after formatting):\n")
-	fmt.Println(string(pullSecret))
+		// Print the pull secret in its actual form for user to confirm (ie no go, json, formatting errors, etc)
+		green.Print("\nPlease review Pull Secret data to be used for update (after formatting):\n")
+		fmt.Println(string(pullSecret))
-	// Ask the user if they would like to continue
-	var continueConfirmation string
-	fmt.Print("\nDo you want to continue? (yes/no): ")
-	_, err = fmt.Scanln(&continueConfirmation)
-	if err != nil {
-		return fmt.Errorf("failed to read user input: %w", err)
-	}
+		// Ask the user if they would like to continue
+		var continueConfirmation string
+		fmt.Print("\nDo you want to continue?
(yes/no): ") + _, err = fmt.Scanln(&continueConfirmation) + if err != nil { + return fmt.Errorf("failed to read user input: %w", err) + } - // Check the user's response - if continueConfirmation != "yes" { - return fmt.Errorf("operation aborted by the user") + // Check the user's response + if continueConfirmation != "yes" { + return fmt.Errorf("operation aborted by the user") + } + } else { + fmt.Println("(Skipping display)") } if o.hypershift { @@ -858,7 +885,7 @@ func (o *transferOwnerOptions) run() error { return fmt.Errorf("failed to update pull secret for service cluster with ID %s: %w", o.clusterID, err) } } else { - err = updatePullSecret(ocm, masterKubeCli, masterKubeClientSet, o.clusterID, pullSecret) + err = updatePullSecret(ocm, masterKubeCli, masterKubeClientSet, o.cluster.ID(), pullSecret) if err != nil { return fmt.Errorf("failed to update pull secret for Hive cluster with ID %s: %w", o.clusterID, err) } @@ -907,8 +934,9 @@ func (o *transferOwnerOptions) run() error { // Transfer ownership specific operations... - fmt.Printf("Transfer cluster: \t\t'%v' (%v)\n", externalClusterID, cluster.Name()) + fmt.Printf("\nTransfer cluster: \t\t'%v' (%v)\n", externalClusterID, cluster.Name()) fmt.Printf("from user \t\t\t'%v' to '%v'\n", oldOwnerAccount.ID(), accountID) + fmt.Print("Is the above correct? Proceed with transfer? ") if !utils.ConfirmPrompt() { return nil } From abf4aac66e7718d0c72bb7d4700c2f43eb4a23aa Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Mon, 9 Jun 2025 13:16:18 -0700 Subject: [PATCH 08/40] OSD-26415: Allow setting service log dryrun flag externally --- cmd/servicelog/post.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/servicelog/post.go b/cmd/servicelog/post.go index 341fce546..9b6f76799 100644 --- a/cmd/servicelog/post.go +++ b/cmd/servicelog/post.go @@ -113,6 +113,10 @@ func (o *PostCmdOptions) Validate() error { return nil } +func (o *PostCmdOptions) SetDryRun(dryRun bool) { + o.isDryRun = dryRun +} + // CheckServiceLogsLastHour returns true if there were servicelogs sent in the past hour, otherwise false func CheckServiceLogsLastHour(clusterId string) bool { timeStampToCompare := time.Now().Add(-time.Hour) From 9b45c08c3478eb4d54950287c1db17dd4dc74c70 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Mon, 9 Jun 2025 13:30:34 -0700 Subject: [PATCH 09/40] OSD-26415: Update docs --- docs/osdctl_cluster_transfer-owner.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/osdctl_cluster_transfer-owner.md b/docs/osdctl_cluster_transfer-owner.md index 4e2816c50..c6068674c 100644 --- a/docs/osdctl_cluster_transfer-owner.md +++ b/docs/osdctl_cluster_transfer-owner.md @@ -11,7 +11,7 @@ osdctl cluster transfer-owner [flags] ``` # Transfer ownership - osdctl cluster transfer-owner --new-owner "new_OCM_userName" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id" + osdctl cluster transfer-owner --new-owner "$NEW_ACCOUNT" --old-owner "$OLD_ACCOUNT" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id" ``` From ce1fcd7edb2a30674e906313a44cf1aa2de02471 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Mon, 9 Jun 2025 14:42:20 -0700 Subject: [PATCH 10/40] OSD-26415: Update comments --- cmd/cluster/transferowner.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go index e3c0e27eb..9b7920896 100644 --- a/cmd/cluster/transferowner.go +++ b/cmd/cluster/transferowner.go @@ -58,7 +58,7 @@ type 
transferOwnerOptions struct { oldOwnerName string newOwnerName string reason string - dryrun bool //TODO: This is misleading. Currently+historically dryrun still rotates the pull secret before exiting. + dryrun bool hypershift bool doPullSecretOnly bool opDescription string @@ -94,7 +94,6 @@ func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *global transferOwnerCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The Internal Cluster ID/External Cluster ID/ Cluster Name") transferOwnerCmd.Flags().StringVar(&ops.oldOwnerName, "old-owner", ops.oldOwnerName, "The old owner's username to transfer the cluster from") transferOwnerCmd.Flags().StringVar(&ops.newOwnerName, "new-owner", ops.newOwnerName, "The new owner's username to transfer the cluster to") - //TODO: dryrun is misleading. Currently+historically dryrun still rotates the pull secret before exiting prior to transfer of ownership. transferOwnerCmd.Flags().BoolVarP(&ops.dryrun, "dry-run", "d", false, "Dry-run - show all changes but do not apply them") transferOwnerCmd.Flags().BoolVar(&ops.doPullSecretOnly, "pull-secret-only", false, "Update cluster pull secret from current OCM AccessToken data without ownership transfer") transferOwnerCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") @@ -398,11 +397,6 @@ func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret blue.Println("\nExpected Auths from OCM AccessToken expected to be present in Pull Secret (note this can be a subset):") fmt.Println(expectedPullSecret) - // TODO: Consider confirming that the email and token values of the 'subset' of Auths - // contained in the OCM AccessToken actually matches email/token values in the cluster's - // openshift-config/pull-secret. Provide any descrepencies to the user here before - // prompting to visually evaluate. - // // Ask the user to confirm if the actual pull secret matches their expectation reader := bufio.NewReader(os.Stdin) fmt.Print("\nDoes the actual pull secret match your expectation? 
(yes/no): ") From 071486846406722e8132e26d031790357ea6df08 Mon Sep 17 00:00:00 2001 From: Matt Clark Date: Mon, 16 Jun 2025 21:42:08 -0700 Subject: [PATCH 11/40] OSD-26415: Use existing pull-secret naming for cmd --- cmd/cluster/updatepullsecret.go | 4 ++-- docs/README.md | 8 ++++---- docs/osdctl_cluster.md | 2 +- ...pullsecret.md => osdctl_cluster_update-pull-secret.md} | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) rename docs/{osdctl_cluster_update-pullsecret.md => osdctl_cluster_update-pull-secret.md} (87%) diff --git a/cmd/cluster/updatepullsecret.go b/cmd/cluster/updatepullsecret.go index dcf4268a8..0c1a6a400 100644 --- a/cmd/cluster/updatepullsecret.go +++ b/cmd/cluster/updatepullsecret.go @@ -10,13 +10,13 @@ import ( const updatePullSecCmdExample = ` # Update Pull Secret's OCM access token data - osdctl cluster update-pullsecret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" + osdctl cluster update-pull-secret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" ` func newCmdUpdatePullSecret(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command { ops := newTransferOwnerOptions(streams, globalOpts) updatePullSecretCmd := &cobra.Command{ - Use: "update-pullsecret", + Use: "update-pull-secret", Short: "Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead)", Args: cobra.NoArgs, Example: updatePullSecCmdExample, diff --git a/docs/README.md b/docs/README.md index 3708a4907..956c44b78 100644 --- a/docs/README.md +++ b/docs/README.md @@ -68,7 +68,7 @@ - `post --cluster-id ` - Send limited support reason to a given cluster - `status --cluster-id ` - Shows the support status of a specified cluster - `transfer-owner` - Transfer cluster ownership to a new user (to be done by Region Lead) - - `update-pullsecret` - Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) + - `update-pull-secret` - Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) - `validate-pull-secret --cluster-id ` - Checks if the pull secret email matches the owner email - `validate-pull-secret-ext [CLUSTER_ID]` - Extended checks to confirm pull-secret data is synced with current OCM data - `cost` - Cost Management related utilities @@ -1977,12 +1977,12 @@ osdctl cluster transfer-owner [flags] -S, --skip-version-check skip checking to see if this is the most recent release ``` -### osdctl cluster update-pullsecret +### osdctl cluster update-pull-secret Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) ``` -osdctl cluster update-pullsecret [flags] +osdctl cluster update-pull-secret [flags] ``` #### Flags @@ -1993,7 +1993,7 @@ osdctl cluster update-pullsecret [flags] -C, --cluster-id string The Internal Cluster ID/External Cluster ID/ Cluster Name --context string The name of the kubeconfig context to use -d, --dry-run Dry-run - show all changes but do not apply them - -h, --help help for update-pullsecret + -h, --help help for update-pull-secret --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
-o, --output string Valid formats are ['', 'json', 'yaml', 'env'] diff --git a/docs/osdctl_cluster.md b/docs/osdctl_cluster.md index 3f5e85757..49dd65e9b 100644 --- a/docs/osdctl_cluster.md +++ b/docs/osdctl_cluster.md @@ -45,7 +45,7 @@ Provides information for a specified cluster * [osdctl cluster ssh](osdctl_cluster_ssh.md) - utilities for accessing cluster via ssh * [osdctl cluster support](osdctl_cluster_support.md) - Cluster Support * [osdctl cluster transfer-owner](osdctl_cluster_transfer-owner.md) - Transfer cluster ownership to a new user (to be done by Region Lead) -* [osdctl cluster update-pullsecret](osdctl_cluster_update-pullsecret.md) - Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) +* [osdctl cluster update-pull-secret](osdctl_cluster_update-pull-secret.md) - Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) * [osdctl cluster validate-pull-secret](osdctl_cluster_validate-pull-secret.md) - Checks if the pull secret email matches the owner email * [osdctl cluster validate-pull-secret-ext](osdctl_cluster_validate-pull-secret-ext.md) - Extended checks to confirm pull-secret data is synced with current OCM data diff --git a/docs/osdctl_cluster_update-pullsecret.md b/docs/osdctl_cluster_update-pull-secret.md similarity index 87% rename from docs/osdctl_cluster_update-pullsecret.md rename to docs/osdctl_cluster_update-pull-secret.md index 7692cb866..a550bdb8e 100644 --- a/docs/osdctl_cluster_update-pullsecret.md +++ b/docs/osdctl_cluster_update-pull-secret.md @@ -1,9 +1,9 @@ -## osdctl cluster update-pullsecret +## osdctl cluster update-pull-secret Update cluster pullsecret with current OCM accessToken data(to be done by Region Lead) ``` -osdctl cluster update-pullsecret [flags] +osdctl cluster update-pull-secret [flags] ``` ### Examples @@ -11,7 +11,7 @@ osdctl cluster update-pullsecret [flags] ``` # Update Pull Secret's OCM access token data - osdctl cluster update-pullsecret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" + osdctl cluster update-pull-secret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id" ``` @@ -20,7 +20,7 @@ osdctl cluster update-pullsecret [flags] ``` -C, --cluster-id string The Internal Cluster ID/External Cluster ID/ Cluster Name -d, --dry-run Dry-run - show all changes but do not apply them - -h, --help help for update-pullsecret + -h, --help help for update-pull-secret --reason string The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket) ``` From c5c557eaae2e0436c710b4846cdd6e3bc827312e Mon Sep 17 00:00:00 2001 From: devppratik Date: Mon, 2 Jun 2025 22:48:18 +0530 Subject: [PATCH 12/40] Add Handover Announcements to Cluster Context Command Improve search logic for Handover Announcements Fix minor issues Fix Lint Fix Lint Add logic to print unique announcements Fix validMatch logic --- cmd/cluster/context.go | 23 +++++++- pkg/utils/jira.go | 44 ++++++++++++++- pkg/utils/print.go | 14 +++++ pkg/utils/utils.go | 121 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 199 insertions(+), 3 deletions(-) diff --git a/cmd/cluster/context.go b/cmd/cluster/context.go index e8c3673d1..e1fb3ff06 100644 --- a/cmd/cluster/context.go +++ b/cmd/cluster/context.go @@ -87,8 +87,9 @@ type contextData struct { ServiceLogs []*v1.LogEntry // Jira Cards - JiraIssues []jira.Issue - SupportExceptions []jira.Issue + JiraIssues []jira.Issue + HandoverAnnouncements []jira.Issue + 
SupportExceptions     []jira.Issue
 	// PD Alerts
 	pdServiceID []string
@@ -231,6 +232,8 @@ func (o *contextOptions) printLongOutput(data *contextData, w io.Writer) {
 	fmt.Fprintln(w, strings.TrimSpace(data.Description))
 	fmt.Println()
+	utils.PrintHandoverAnnouncements(data.HandoverAnnouncements)
+	fmt.Println()
 	utils.PrintLimitedSupportReasons(data.LimitedSupportReasons)
 	fmt.Println()
 	printJIRASupportExceptions(data.SupportExceptions, w)
@@ -421,6 +424,21 @@ func (o *contextOptions) generateContextData() (*contextData, []error) {
 		}
 	}
+	GetHandoverAnnouncements := func() {
+		defer wg.Done()
+		defer utils.StartDelayTracker(o.verbose, "Handover Announcements").End()
+		org, err := utils.GetOrganization(ocmClient, o.clusterID)
+		if err != nil {
+			fmt.Printf("Failed to get Organization for cluster %s - err: %q\n", o.clusterID, err)
+		}
+
+		productID := o.cluster.Product().ID()
+		data.HandoverAnnouncements, err = utils.GetRelatedHandoverAnnouncements(o.clusterID, o.externalClusterID, o.jiratoken, org.Name(), productID, o.cluster.Hypershift().Enabled(), o.cluster.Version().RawID())
+		if err != nil {
+			errors = append(errors, fmt.Errorf("error while getting the handover announcements: %v", err))
+		}
+	}
+
 	GetSupportExceptions := func() {
 		defer wg.Done()
 		defer utils.StartDelayTracker(o.verbose, "Support Exceptions").End()
@@ -488,6 +506,7 @@ func (o *contextOptions) generateContextData() (*contextData, []error) {
 		GetLimitedSupport,
 		GetServiceLogs,
 		GetJiraIssues,
+		GetHandoverAnnouncements,
 		GetSupportExceptions,
 		GetPagerDutyAlerts,
 		GetDynatraceDetails,
diff --git a/pkg/utils/jira.go b/pkg/utils/jira.go
index 367103893..1041f51a0 100644
--- a/pkg/utils/jira.go
+++ b/pkg/utils/jira.go
@@ -10,7 +10,6 @@ import (
 const (
 	JiraTokenConfigKey = "jira_token"
-	JiraBaseURL        = "https://issues.redhat.com"
 )
 // GetJiraClient creates a jira client that connects to
@@ -57,6 +56,49 @@ func GetJiraIssuesForCluster(clusterID string, externalClusterID string, jiratok
 	return issues, nil
 }
+func GetRelatedHandoverAnnouncements(clusterID string, externalClusterID string, jiraToken string, orgName string, product string, isHCP bool, version string) ([]jira.Issue, error) {
+	jiraClient, err := GetJiraClient(jiraToken)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create project service: %v", err)
+	}
+
+	projectKey := JiraHandoverAnnouncementProjectKey
+	productName := determineClusterProduct(product, isHCP)
+	baseQueries := []fieldQuery{
+		{Field: "Cluster ID", Value: clusterID, Operator: "~"},
+		{Field: "Cluster ID", Value: externalClusterID, Operator: "~"},
+	}
+	jql := buildJQL(projectKey, baseQueries)
+	issues, _, err := jiraClient.Issue.Search(jql, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to search for jira issues: %w", err)
+	}
+	extededQueries := []fieldQuery{
+		{Field: "Cluster ID", Value: "None,N/A,All", Operator: "~*"},
+		{Field: "Customer Name", Value: orgName, Operator: "~"},
+		{Field: "Products", Value: productName, Operator: "="},
+		{Field: "affectedVersion", Value: formatVersion(version), Operator: "~"},
+	}
+
+	jql = buildJQL(projectKey, extededQueries)
+	otherIssues, _, err := jiraClient.Issue.Search(jql, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to search for jira issues: %w", err)
+	}
+	seenKeys := make(map[string]bool)
+	for _, i := range issues {
+		seenKeys[i.Key] = true
+	}
+	for _, i := range otherIssues {
+		if isValidMatch(i, orgName, productName, version) && !seenKeys[i.Key] {
+			issues = append(issues, i)
+			seenKeys[i.Key] = true
+		}
+	}
+
+	return issues, nil
+}
+
 func
GetJiraSupportExceptionsForOrg(organizationID string, jiratoken string) ([]jira.Issue, error) { jiraClient, err := GetJiraClient(jiratoken) if err != nil { diff --git a/pkg/utils/print.go b/pkg/utils/print.go index 6ba4b3e7c..caef975fe 100644 --- a/pkg/utils/print.go +++ b/pkg/utils/print.go @@ -99,6 +99,20 @@ func PrintJiraIssues(issues []jira.Issue) { } } +func PrintHandoverAnnouncements(issues []jira.Issue) { + var name = "Related Handover Announcements" + fmt.Println(delimiter + name) + + for _, i := range issues { + fmt.Printf("[%s]: %+v\n", i.Key, i.Fields.Summary) + fmt.Printf("- Link: %s/browse/%s\n\n", JiraBaseURL, i.Key) + } + + if len(issues) == 0 { + fmt.Println("None") + } +} + func PrintLimitedSupportReasons(limitedSupportReasons []*cmv1.LimitedSupportReason) { var name = "Limited Support Status" fmt.Println(delimiter + name) diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index eda89b7bc..75f8dbeab 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -8,12 +8,26 @@ import ( "runtime/debug" "strings" + "github.com/andygrunwald/go-jira" sdk "github.com/openshift-online/ocm-sdk-go" amv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" "k8s.io/cli-runtime/pkg/genericclioptions" ) +type fieldQuery struct { + Field string + Operator string + Value string +} + +const ( + JiraHandoverAnnouncementProjectKey = "SRE Platform HandOver Announcements" + JiraBaseURL = "https://issues.redhat.com" + productCustomField = "customfield_12319040" + customerNameCustomField = "customfield_12310160" +) + var clusterKeyRE = regexp.MustCompile(`^(\w|-)+$`) func IsValidKey(clusterKey string) bool { @@ -301,3 +315,110 @@ func GetDependencyVersion(dependencyPath string) (string, error) { return "", fmt.Errorf("unable to find version for %v", dependencyPath) } + +func determineClusterProduct(productID string, isHCP bool) (productName string) { + if productID == "rosa" && isHCP { + productName = "Red Hat OpenShift on AWS with Hosted Control Planes" + } else if productID == "rosa" { + productName = "Red Hat OpenShift on AWS" + } else if productID == "osd" { + productName = "OpenShift Dedicated" + } + return productName +} +func buildJQL(projectKey string, filters []fieldQuery) string { + var conditions []string + for _, q := range filters { + switch q.Operator { + case "~*": + values := strings.Split(q.Value, ",") + var orParts []string + for _, v := range values { + orParts = append(orParts, + fmt.Sprintf(`(project = "%s" AND "%s" ~ "%s")`, projectKey, q.Field, strings.TrimSpace(v))) + } + conditions = append(conditions, "("+strings.Join(orParts, " OR ")+")") + + case "in": + conditions = append(conditions, + fmt.Sprintf(`(project = "%s" AND "%s" in (%s))`, projectKey, q.Field, q.Value), + ) + + default: + conditions = append(conditions, + fmt.Sprintf(`(project = "%s" AND "%s" %s "%s")`, projectKey, q.Field, q.Operator, q.Value), + ) + } + } + return "(" + strings.Join(conditions, " OR ") + ") AND status != Closed ORDER BY created DESC" +} + +func formatVersion(version string) string { + versionParts := strings.Split(version, ".") + versionPrefix := version + if len(versionParts) >= 2 { + versionPrefix = fmt.Sprintf("%s.%s", versionParts[0], versionParts[1]) + } + return versionPrefix +} + +func isValidMatch(i jira.Issue, orgName string, product string, version string) bool { + isIgnored := func(val string) bool { + val = strings.ToLower(strings.TrimSpace(val)) + return val == "none" || val == "n/a" || val == "all" || val == 
"" + } + + hasMatchingValue := func(items []interface{}, expected string) bool { + expected = strings.ToLower(strings.TrimSpace(expected)) + for _, item := range items { + if m, ok := item.(map[string]interface{}); ok { + if val, ok := m["value"].(string); ok { + val = strings.ToLower(strings.TrimSpace(val)) + if val == expected { + return true + } + } + } + } + return false + } + + productRaw := i.Fields.Unknowns[productCustomField] + versionRaw := i.Fields.AffectsVersions + nameRaw := i.Fields.Unknowns[customerNameCustomField] + + productMatch := false + if items, ok := productRaw.([]interface{}); ok { + productMatch = hasMatchingValue(items, product) + } + if !productMatch { + return false + } + + versionMatch := false + clusterFormattedVersion := formatVersion(version) + + for _, v := range versionRaw { + if v != nil { + vFormatted := formatVersion(v.Name) + if vFormatted == clusterFormattedVersion || isIgnored(v.Name) { + versionMatch = true + break + } + } + } + + nameMatch := false + if nameStr, ok := nameRaw.(string); ok { + parts := strings.Split(nameStr, ";") + for _, part := range parts { + val := strings.TrimSpace(part) + if val == orgName || isIgnored(val) { + nameMatch = true + break + } + } + } + + return versionMatch || (nameMatch && versionMatch) +} From 572d9aca0fd22aa3c36e68dc23780f539df962dc Mon Sep 17 00:00:00 2001 From: devppratik Date: Fri, 13 Jun 2025 13:05:28 +0530 Subject: [PATCH 13/40] Update DTP Labels to v3 Update to a generic label --- pkg/utils/ocm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/ocm.go b/pkg/utils/ocm.go index 734e97dd9..8c60d1211 100644 --- a/pkg/utils/ocm.go +++ b/pkg/utils/ocm.go @@ -28,7 +28,7 @@ const ( integrationGovURL = "https://api-admin.int.openshiftusgov.com" stagingGovURL = "https://api-admin.stage.openshiftusgov.com" HypershiftClusterTypeLabel = "ext-hypershift.openshift.io/cluster-type" - DynatraceTenantKeyLabel = "sre-capabilities.dtp.v2.tenant" + DynatraceTenantKeyLabel = "dynatrace.regional-tenant" ) var urlAliases = map[string]string{ From cad7060e6dabe91989835baadf82b4f2f9d9ca34 Mon Sep 17 00:00:00 2001 From: devppratik Date: Fri, 13 Jun 2025 17:47:17 +0530 Subject: [PATCH 14/40] Add Test Cases for the utils/jira Add go generate to jira mock Update the correct mock --- cmd/jira/quick-task.go | 43 +++++++----- cmd/swarm/secondary.go | 4 +- go.mod | 1 + go.sum | 1 + pkg/utils/jira.go | 131 +++++++++++++++++++++-------------- pkg/utils/jira_test.go | 101 +++++++++++++++++++++++++++ pkg/utils/mocks/jira_mock.go | 121 ++++++++++++++++++++++++++++++++ pkg/utils/utils_test.go | 102 +++++++++++++++++++++++++++ 8 files changed, 433 insertions(+), 71 deletions(-) create mode 100644 pkg/utils/jira_test.go create mode 100644 pkg/utils/mocks/jira_mock.go diff --git a/cmd/jira/quick-task.go b/cmd/jira/quick-task.go index 70af5aff2..519c36280 100644 --- a/cmd/jira/quick-task.go +++ b/cmd/jira/quick-task.go @@ -55,19 +55,19 @@ osdctl jira quick-task "Update command to take new flag" --add-to-sprint } boardId := viper.GetInt(BoardIdLabel) - jiraClient, err := utils.GetJiraClient("") + jiraClient, err := utils.NewJiraClient("") if err != nil { return fmt.Errorf("failed to get Jira client: %w", err) } - issue, err := CreateQuickTicket(jiraClient.User, jiraClient.Issue, args[0], teamLabel) + issue, err := CreateQuickTicket(jiraClient, args[0], teamLabel) if err != nil { return fmt.Errorf("error creating ticket: %w", err) } fmt.Printf("Successfully created ticket:\n%v/browse/%v\n", utils.JiraBaseURL, issue.Key) if 
addToSprint { - err = addTicketToCurrentSprint(jiraClient.Board, jiraClient.Sprint, issue, boardId, teamName) + err = addTicketToCurrentSprint(jiraClient, issue, boardId, teamName) if err != nil { return fmt.Errorf("failed to add ticket to current sprint: %w", err) } @@ -78,24 +78,27 @@ osdctl jira quick-task "Update command to take new flag" --add-to-sprint } func init() { - quickTaskCmd.Flags().Bool("add-to-sprint", false, "whether or not to add the created Jira task to the SRE's current sprint.") + quickTaskCmd.Flags().Bool(AddToSprintFlag, false, "whether or not to add the created Jira task to the SRE's current sprint.") } -func CreateQuickTicket(userService *jira.UserService, issueService *jira.IssueService, summary string, teamLabel string) (*jira.Issue, error) { - user, _, err := userService.GetSelf() +func CreateQuickTicket(client utils.JiraClientInterface, summary string, teamLabel string) (*jira.Issue, error) { + user, _, err := client.User().GetSelf() if err != nil { return nil, fmt.Errorf("failed to get jira user for self: %w", err) } - issue, err := utils.CreateIssue( - issueService, - summary, - DefaultDescription, - DefaultTicketType, - DefaultProject, - user, - user, - []string{teamLabel}, + issue, err := client.CreateIssue( + &jira.Issue{ + Fields: &jira.IssueFields{ + Summary: summary, + Description: DefaultDescription, + Type: jira.IssueType{Name: DefaultTicketType}, + Project: jira.Project{Key: DefaultProject}, + Reporter: user, + Assignee: user, + Labels: []string{teamLabel}, + }, + }, ) if err != nil { return nil, fmt.Errorf("failed to create issue: %w", err) @@ -104,8 +107,8 @@ func CreateQuickTicket(userService *jira.UserService, issueService *jira.IssueSe return issue, nil } -func addTicketToCurrentSprint(boardService *jira.BoardService, sprintService *jira.SprintService, issue *jira.Issue, boardId int, teamName string) error { - sprints, _, err := boardService.GetAllSprintsWithOptions(boardId, &jira.GetAllSprintsOptions{State: SprintState}) +func addTicketToCurrentSprint(client utils.JiraClientInterface, issue *jira.Issue, boardId int, teamName string) error { + sprints, _, err := client.Board().GetAllSprintsWithOptions(boardId, &jira.GetAllSprintsOptions{State: SprintState}) if err != nil { return fmt.Errorf("failed to get active sprints for board %v: %w", boardId, err) } @@ -118,7 +121,11 @@ func addTicketToCurrentSprint(boardService *jira.BoardService, sprintService *ji } } - _, err = sprintService.MoveIssuesToSprint(activeSprint.ID, []string{issue.ID}) + if activeSprint.ID == 0 { + return fmt.Errorf("no active sprint found for team '%s'", teamName) + } + + _, err = client.Sprint().MoveIssuesToSprint(activeSprint.ID, []string{issue.ID}) if err != nil { return fmt.Errorf("issue %v was not moved to active sprint: %w", issue.Key, err) } diff --git a/cmd/swarm/secondary.go b/cmd/swarm/secondary.go index a9577dadf..6a190d319 100644 --- a/cmd/swarm/secondary.go +++ b/cmd/swarm/secondary.go @@ -35,7 +35,7 @@ var secondaryCmd = &cobra.Command{ osdctl swarm secondary`, RunE: func(cmd *cobra.Command, args []string) error { - jiraClient, err := utils.GetJiraClient("") + jiraClient, err := utils.NewJiraClient("") if err != nil { return fmt.Errorf("failed to get Jira client: %w", err) } @@ -50,7 +50,7 @@ var secondaryCmd = &cobra.Command{ jql := buildJQL() // Search jira issues - issues, _, err := jiraClient.Issue.Search(jql, nil) + issues, err := jiraClient.SearchIssues(jql) if err != nil { return fmt.Errorf("error fetching JIRA issues: %w", err) diff --git a/go.mod 
b/go.mod index 2a14908bb..a2f40b5a3 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/deckarep/golang-set v1.8.0 github.com/fatih/color v1.18.0 github.com/golang-jwt/jwt/v5 v5.2.2 + github.com/golang/mock v1.6.0 github.com/google/go-github/v63 v63.0.0 github.com/google/uuid v1.6.0 github.com/hashicorp/hcl/v2 v2.23.0 diff --git a/go.sum b/go.sum index 6ae25f062..a4fa60cd9 100644 --- a/go.sum +++ b/go.sum @@ -824,6 +824,7 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= diff --git a/pkg/utils/jira.go b/pkg/utils/jira.go index 1041f51a0..467cbd51b 100644 --- a/pkg/utils/jira.go +++ b/pkg/utils/jira.go @@ -12,10 +12,57 @@ const ( JiraTokenConfigKey = "jira_token" ) -// GetJiraClient creates a jira client that connects to -// https://issues.redhat.com. To work, the jiraToken needs to be set in the -// config -func GetJiraClient(jiratoken string) (*jira.Client, error) { +// JiraClientInterface defines the methods we use from go-jira +// +//go:generate mockgen -source=jira.go -destination=./mocks/jira_mock.go -package=utils +type JiraClientInterface interface { + SearchIssues(jql string) ([]jira.Issue, error) + CreateIssue(issue *jira.Issue) (*jira.Issue, error) + User() *jira.UserService + Issue() *jira.IssueService + Board() *jira.BoardService + Sprint() *jira.SprintService +} + +// jiraClientWrapper wraps the actual go-jira client +type jiraClientWrapper struct { + client *jira.Client +} + +// Full implementation of the interface + +func (j *jiraClientWrapper) SearchIssues(jql string) ([]jira.Issue, error) { + issues, _, err := j.client.Issue.Search(jql, nil) + return issues, err +} + +func (j *jiraClientWrapper) CreateIssue(issue *jira.Issue) (*jira.Issue, error) { + created, _, err := j.client.Issue.Create(issue) + return created, err +} + +func (j *jiraClientWrapper) User() *jira.UserService { + return j.client.User +} + +func (j *jiraClientWrapper) Issue() *jira.IssueService { + return j.client.Issue +} + +func (j *jiraClientWrapper) Board() *jira.BoardService { + return j.client.Board +} + +func (j *jiraClientWrapper) Sprint() *jira.SprintService { + return j.client.Sprint +} + +// Factory function +var NewJiraClient = func(jiraToken string) (JiraClientInterface, error) { + return getJiraClient(jiraToken) +} + +func getJiraClient(jiratoken string) (JiraClientInterface, error) { if jiratoken == "" { if viper.IsSet(JiraTokenConfigKey) { jiratoken = viper.GetString(JiraTokenConfigKey) @@ -27,18 +74,15 @@ func GetJiraClient(jiratoken string) (*jira.Client, error) { return nil, fmt.Errorf("JIRA token is not defined") } } - tp := jira.PATAuthTransport{ - Token: jiratoken, - } - return jira.NewClient(tp.Client(), JiraBaseURL) -} - -func GetJiraIssuesForCluster(clusterID string, externalClusterID string, jiratoken string) ([]jira.Issue, error) { - jiraClient, err := 
GetJiraClient(jiratoken) + tp := jira.PATAuthTransport{Token: jiratoken} + client, err := jira.NewClient(tp.Client(), JiraBaseURL) if err != nil { - return nil, fmt.Errorf("error connecting to jira: %v", err) + return nil, err } + return &jiraClientWrapper{client: client}, nil +} +func GetJiraIssuesForClusterWithClient(jiraClient JiraClientInterface, clusterID, externalClusterID string) ([]jira.Issue, error) { jql := fmt.Sprintf( `project = "OpenShift Hosted SRE Support" AND ( "Cluster ID" ~ "%[1]s" OR "Cluster ID" ~ "%[2]s" @@ -48,43 +92,46 @@ func GetJiraIssuesForCluster(clusterID string, externalClusterID string, jiratok externalClusterID, clusterID, ) - issues, _, err := jiraClient.Issue.Search(jql, nil) + return jiraClient.SearchIssues(jql) +} + +func GetJiraIssuesForCluster(clusterID, externalClusterID, jiratoken string) ([]jira.Issue, error) { + client, err := NewJiraClient(jiratoken) if err != nil { - return nil, fmt.Errorf("failed to search for jira issues: %w\n", err) + return nil, fmt.Errorf("error connecting to jira: %v", err) } - - return issues, nil + return GetJiraIssuesForClusterWithClient(client, clusterID, externalClusterID) } -func GetRelatedHandoverAnnouncements(clusterID string, externalClusterID string, jiraToken string, orgName string, product string, isHCP bool, version string) ([]jira.Issue, error) { - jiraClient, err := GetJiraClient(jiraToken) +func GetRelatedHandoverAnnouncements(clusterID, externalClusterID, jiraToken, orgName, product string, isHCP bool, version string) ([]jira.Issue, error) { + client, err := NewJiraClient(jiraToken) if err != nil { return nil, fmt.Errorf("failed to create project service: %v", err) } - projectKey := JiraHandoverAnnouncementProjectKey productName := determineClusterProduct(product, isHCP) baseQueries := []fieldQuery{ {Field: "Cluster ID", Value: clusterID, Operator: "~"}, {Field: "Cluster ID", Value: externalClusterID, Operator: "~"}, } - jql := buildJQL(projectKey, baseQueries) - issues, _, err := jiraClient.Issue.Search(jql, nil) + jql := buildJQL(JiraHandoverAnnouncementProjectKey, baseQueries) + issues, err := client.SearchIssues(jql) if err != nil { return nil, fmt.Errorf("failed to search for jira issues: %w", err) } - extededQueries := []fieldQuery{ + + extendedQueries := []fieldQuery{ {Field: "Cluster ID", Value: "None,N/A,All", Operator: "~*"}, {Field: "Customer Name", Value: orgName, Operator: "~"}, {Field: "Products", Value: productName, Operator: "="}, {Field: "affectedVersion", Value: formatVersion(version), Operator: "~"}, } - - jql = buildJQL(projectKey, extededQueries) - otherIssues, _, err := jiraClient.Issue.Search(jql, nil) + jql = buildJQL(JiraHandoverAnnouncementProjectKey, extendedQueries) + otherIssues, err := client.SearchIssues(jql) if err != nil { return nil, fmt.Errorf("failed to search for jira issues: %w", err) } + seenKeys := make(map[string]bool) for _, i := range issues { seenKeys[i.Key] = true @@ -95,38 +142,26 @@ func GetRelatedHandoverAnnouncements(clusterID string, externalClusterID string, seenKeys[i.Key] = true } } - return issues, nil } -func GetJiraSupportExceptionsForOrg(organizationID string, jiratoken string) ([]jira.Issue, error) { - jiraClient, err := GetJiraClient(jiratoken) +func GetJiraSupportExceptionsForOrg(organizationID, jiratoken string) ([]jira.Issue, error) { + client, err := NewJiraClient(jiratoken) if err != nil { return nil, fmt.Errorf("error connecting to jira: %v", err) } - jql := fmt.Sprintf( `project = "Support Exceptions" AND type = Story AND Status = Approved AND 
Resolution = Unresolved AND ("Customer Name" ~ "%[1]s" OR "Organization ID" ~ "%[1]s")`, organizationID, ) - - issues, _, err := jiraClient.Issue.Search(jql, nil) - if err != nil { - return nil, fmt.Errorf("failed to search for jira issues %w", err) - } - - return issues, nil + return client.SearchIssues(jql) } func CreateIssue( - service *jira.IssueService, - summary string, - description string, - ticketType string, - project string, - reporter *jira.User, - assignee *jira.User, + client JiraClientInterface, + summary, description, ticketType, project string, + reporter, assignee *jira.User, labels []string, ) (*jira.Issue, error) { issue := &jira.Issue{ @@ -140,11 +175,5 @@ func CreateIssue( Labels: labels, }, } - - createdIssue, _, err := service.Create(issue) - if err != nil { - return nil, fmt.Errorf("failed to create issue: %w", err) - } - - return createdIssue, nil + return client.CreateIssue(issue) } diff --git a/pkg/utils/jira_test.go b/pkg/utils/jira_test.go new file mode 100644 index 000000000..402767426 --- /dev/null +++ b/pkg/utils/jira_test.go @@ -0,0 +1,101 @@ +package utils + +import ( + "errors" + "testing" + + "github.com/andygrunwald/go-jira" + "github.com/golang/mock/gomock" + mocks "github.com/openshift/osdctl/pkg/utils/mocks" + "github.com/stretchr/testify/assert" +) + +func TestGetJiraIssuesForClusterWithClient(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockJiraClientInterface(ctrl) + clusterID := "abc123" + externalClusterID := "ext-abc123" + + expectedIssues := []jira.Issue{{Key: "ISSUE-1"}, {Key: "ISSUE-2"}} + jql := `project = "OpenShift Hosted SRE Support" AND ( + "Cluster ID" ~ "ext-abc123" OR "Cluster ID" ~ "abc123" + OR description ~ "ext-abc123" + OR description ~ "abc123") + ORDER BY created DESC` + + mockClient.EXPECT(). + SearchIssues(jql). + Return(expectedIssues, nil) + + issues, err := GetJiraIssuesForClusterWithClient(mockClient, clusterID, externalClusterID) + assert.NoError(t, err) + assert.Equal(t, expectedIssues, issues) +} + +func TestGetJiraSupportExceptionsForOrg(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockJiraClientInterface(ctrl) + + NewJiraClient = func(string) (JiraClientInterface, error) { + return mockClient, nil + } + + orgID := "123456" + jql := `project = "Support Exceptions" AND type = Story AND Status = Approved AND + Resolution = Unresolved AND ("Customer Name" ~ "123456" OR "Organization ID" ~ "123456")` + + expectedIssues := []jira.Issue{{Key: "EXC-1"}} + mockClient.EXPECT(). + SearchIssues(jql). + Return(expectedIssues, nil) + + issues, err := GetJiraSupportExceptionsForOrg(orgID, "fake-token") + assert.NoError(t, err) + assert.Equal(t, expectedIssues, issues) +} + +func TestCreateIssue(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockJiraClientInterface(ctrl) + + mockClient.EXPECT(). + CreateIssue(gomock.Any()). + DoAndReturn(func(i *jira.Issue) (*jira.Issue, error) { + i.Key = "PROJ-123" + return i, nil + }) + + createdIssue, err := CreateIssue( + mockClient, + "Test summary", + "Test description", + "Bug", + "PROJ", + nil, nil, + []string{"label1"}, + ) + + assert.NoError(t, err) + assert.Equal(t, "PROJ-123", createdIssue.Key) +} + +func TestSearchIssues_Error(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockJiraClientInterface(ctrl) + errExpected := errors.New("Jira search failed") + + mockClient.EXPECT(). 
+ SearchIssues(gomock.Any()). + Return(nil, errExpected) + + _, err := mockClient.SearchIssues("dummy jql") + assert.Equal(t, errExpected, err) +} diff --git a/pkg/utils/mocks/jira_mock.go b/pkg/utils/mocks/jira_mock.go new file mode 100644 index 000000000..ff15dc562 --- /dev/null +++ b/pkg/utils/mocks/jira_mock.go @@ -0,0 +1,121 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: jira.go + +// Package utils is a generated GoMock package. +package utils + +import ( + reflect "reflect" + + jira "github.com/andygrunwald/go-jira" + gomock "github.com/golang/mock/gomock" +) + +// MockJiraClientInterface is a mock of JiraClientInterface interface. +type MockJiraClientInterface struct { + ctrl *gomock.Controller + recorder *MockJiraClientInterfaceMockRecorder +} + +// MockJiraClientInterfaceMockRecorder is the mock recorder for MockJiraClientInterface. +type MockJiraClientInterfaceMockRecorder struct { + mock *MockJiraClientInterface +} + +// NewMockJiraClientInterface creates a new mock instance. +func NewMockJiraClientInterface(ctrl *gomock.Controller) *MockJiraClientInterface { + mock := &MockJiraClientInterface{ctrl: ctrl} + mock.recorder = &MockJiraClientInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockJiraClientInterface) EXPECT() *MockJiraClientInterfaceMockRecorder { + return m.recorder +} + +// Board mocks base method. +func (m *MockJiraClientInterface) Board() *jira.BoardService { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Board") + ret0, _ := ret[0].(*jira.BoardService) + return ret0 +} + +// Board indicates an expected call of Board. +func (mr *MockJiraClientInterfaceMockRecorder) Board() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Board", reflect.TypeOf((*MockJiraClientInterface)(nil).Board)) +} + +// CreateIssue mocks base method. +func (m *MockJiraClientInterface) CreateIssue(issue *jira.Issue) (*jira.Issue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateIssue", issue) + ret0, _ := ret[0].(*jira.Issue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateIssue indicates an expected call of CreateIssue. +func (mr *MockJiraClientInterfaceMockRecorder) CreateIssue(issue interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIssue", reflect.TypeOf((*MockJiraClientInterface)(nil).CreateIssue), issue) +} + +// Issue mocks base method. +func (m *MockJiraClientInterface) Issue() *jira.IssueService { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Issue") + ret0, _ := ret[0].(*jira.IssueService) + return ret0 +} + +// Issue indicates an expected call of Issue. +func (mr *MockJiraClientInterfaceMockRecorder) Issue() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Issue", reflect.TypeOf((*MockJiraClientInterface)(nil).Issue)) +} + +// SearchIssues mocks base method. +func (m *MockJiraClientInterface) SearchIssues(jql string) ([]jira.Issue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SearchIssues", jql) + ret0, _ := ret[0].([]jira.Issue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SearchIssues indicates an expected call of SearchIssues. 
+func (mr *MockJiraClientInterfaceMockRecorder) SearchIssues(jql interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchIssues", reflect.TypeOf((*MockJiraClientInterface)(nil).SearchIssues), jql) +} + +// Sprint mocks base method. +func (m *MockJiraClientInterface) Sprint() *jira.SprintService { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sprint") + ret0, _ := ret[0].(*jira.SprintService) + return ret0 +} + +// Sprint indicates an expected call of Sprint. +func (mr *MockJiraClientInterfaceMockRecorder) Sprint() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sprint", reflect.TypeOf((*MockJiraClientInterface)(nil).Sprint)) +} + +// User mocks base method. +func (m *MockJiraClientInterface) User() *jira.UserService { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "User") + ret0, _ := ret[0].(*jira.UserService) + return ret0 +} + +// User indicates an expected call of User. +func (mr *MockJiraClientInterfaceMockRecorder) User() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "User", reflect.TypeOf((*MockJiraClientInterface)(nil).User)) +} diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index a20848914..5efc247ca 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -2,7 +2,10 @@ package utils import ( "runtime/debug" + "strings" "testing" + + "github.com/andygrunwald/go-jira" ) func mockReadBuildInfo(parseBuildInfoError bool) func() (info *debug.BuildInfo, ok bool) { @@ -66,3 +69,102 @@ func TestGetDependencyVersion(t *testing.T) { }) } } + +func TestDetermineClusterProduct(t *testing.T) { + tests := []struct { + productID string + isHCP bool + expected string + }{ + {"rosa", true, "Red Hat OpenShift on AWS with Hosted Control Planes"}, + {"rosa", false, "Red Hat OpenShift on AWS"}, + {"osd", false, "OpenShift Dedicated"}, + {"unknown", false, ""}, + } + + for _, tt := range tests { + got := determineClusterProduct(tt.productID, tt.isHCP) + if got != tt.expected { + t.Errorf("determineClusterProduct(%q, %v) = %q; want %q", tt.productID, tt.isHCP, got, tt.expected) + } + } +} + +func TestBuildJQL(t *testing.T) { + filters := []fieldQuery{ + {"Summary", "~*", "foo,bar"}, + {"Component", "in", `"UI","Backend"`}, + {"Severity", "=", "High"}, + } + + expectedSubstrs := []string{ + `"Summary" ~ "foo"`, + `"Summary" ~ "bar"`, + `"Component" in ("UI","Backend")`, + `"Severity" = "High"`, + `status != Closed`, + } + + jql := buildJQL("TEST", filters) + + for _, substr := range expectedSubstrs { + if !strings.Contains(jql, substr) { + t.Errorf("JQL missing expected substring: %q", substr) + } + } +} + +func TestFormatVersion(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"4.15.3", "4.15"}, + {"4.11", "4.11"}, + {"5", "5"}, + {"4.12.0.1", "4.12"}, + } + + for _, tt := range tests { + got := formatVersion(tt.input) + if got != tt.expected { + t.Errorf("formatVersion(%q) = %q; want %q", tt.input, got, tt.expected) + } + } +} + +func TestIsValidMatch(t *testing.T) { + mockIssue := func(productVal, versionName, customerName string) jira.Issue { + return jira.Issue{ + Fields: &jira.IssueFields{ + AffectsVersions: []*jira.AffectsVersion{{Name: versionName}}, + Unknowns: map[string]interface{}{ + productCustomField: []interface{}{ + map[string]interface{}{"value": productVal}, + }, + customerNameCustomField: customerName, + }, + }, + } + } + + tests := []struct { + issue jira.Issue + org 
string
+		product    string
+		version    string
+		shouldPass bool
+	}{
+		{mockIssue("Red Hat OpenShift on AWS", "4.15.3", "Acme Corp"), "Acme Corp", "Red Hat OpenShift on AWS", "4.15", true},
+		{mockIssue("Red Hat OpenShift on AWS", "none", "Acme Corp"), "Acme Corp", "Red Hat OpenShift on AWS", "4.15", true},
+		{mockIssue("Red Hat OpenShift on AWS", "4.15.3", "N/A"), "Acme Corp", "Red Hat OpenShift on AWS", "4.15", true},
+		{mockIssue("Wrong Product", "4.15.3", "Acme Corp"), "Acme Corp", "Red Hat OpenShift on AWS", "4.15", false},
+	}
+
+	for i, tt := range tests {
+		got := isValidMatch(tt.issue, tt.org, tt.product, tt.version)
+		if got != tt.shouldPass {
+			t.Errorf("Test %d failed: isValidMatch() = %v; want %v", i, got, tt.shouldPass)
+		}
+	}
+}

From b2abef1e8eb6a0cd0c3754b0be11979291ca9013 Mon Sep 17 00:00:00 2001
From: devppratik
Date: Fri, 13 Jun 2025 15:45:04 +0530
Subject: [PATCH 15/40] Add initial command for creating handover announcements

Update the command and test

Fix pipelines

Update to PromptUI.

Rebase and Fix Merge

Remove survey
---
 cmd/jira/cmd.go                               |  22 ++-
 cmd/jira/handover.go                          | 179 ++++++++++++++++++
 cmd/jira/handover_test.go                     |  67 +++++++
 docs/README.md                                |  30 +++
 docs/osdctl_jira.md                           |   1 +
 ...sdctl_jira_create-handover-announcement.md |  38 ++++
 go.mod                                        |   2 +
 go.sum                                        |   6 +
 pkg/utils/jira.go                             |  16 +-
 pkg/utils/mocks/jira_mock.go                  |  15 ++
 pkg/utils/utils.go                            |  13 +-
 pkg/utils/utils_test.go                       |   4 +-
 12 files changed, 382 insertions(+), 11 deletions(-)
 create mode 100644 cmd/jira/handover.go
 create mode 100644 cmd/jira/handover_test.go
 create mode 100644 docs/osdctl_jira_create-handover-announcement.md

diff --git a/cmd/jira/cmd.go b/cmd/jira/cmd.go
index 6e0dea473..7610f9fc7 100644
--- a/cmd/jira/cmd.go
+++ b/cmd/jira/cmd.go
@@ -1,6 +1,11 @@
 package jira
 
-import "github.com/spf13/cobra"
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
 
 var Cmd = &cobra.Command{
 	Use:   "jira",
@@ -10,4 +15,19 @@ var Cmd = &cobra.Command{
 
 func init() {
 	Cmd.AddCommand(quickTaskCmd)
+	Cmd.AddCommand(createHandoverAnnouncmentCmd)
+
+	createHandoverAnnouncmentCmd.Flags().String("summary", "", "Enter Summary/Title for the Announcement")
+	createHandoverAnnouncmentCmd.Flags().String("description", "", "Enter Description for the Announcement")
+	createHandoverAnnouncmentCmd.Flags().String("products", "", "Comma-separated list of products (e.g. 'Product A,Product B')")
+	createHandoverAnnouncmentCmd.Flags().String("customer", "", "Customer name")
+	createHandoverAnnouncmentCmd.Flags().String("cluster", "", "Cluster ID")
+	createHandoverAnnouncmentCmd.Flags().String("version", "", "Affects version")
+
+	flags := []string{"summary", "description", "products", "customer", "cluster", "version"}
+	for _, flag := range flags {
+		if err := viper.BindPFlag(flag, createHandoverAnnouncmentCmd.Flags().Lookup(flag)); err != nil {
+			log.Printf("Failed to bind flag '%s': %v", flag, err)
+		}
+	}
 }
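The init wiring above binds every create-handover-announcement flag to a viper key, so the handover command can read values through viper and fall back to an interactive prompt when a flag was not supplied. A minimal, self-contained sketch of that flag-or-prompt pattern follows; the command and flag names here are illustrative only, not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// exampleCmd is a hypothetical command showing the same pattern used
// above: define a flag, bind it to a viper key, and read it back via
// viper so the handler can decide whether to prompt interactively.
var exampleCmd = &cobra.Command{
	Use: "example",
	Run: func(cmd *cobra.Command, args []string) {
		// viper.GetString returns the flag value if it was set,
		// or the empty string if the user omitted the flag.
		summary := viper.GetString("summary")
		if summary == "" {
			fmt.Println("no --summary given; the real command would prompt here")
			return
		}
		fmt.Println("summary from flag:", summary)
	},
}

func main() {
	exampleCmd.Flags().String("summary", "", "Summary for the announcement")
	if err := viper.BindPFlag("summary", exampleCmd.Flags().Lookup("summary")); err != nil {
		fmt.Println("failed to bind flag:", err)
		return
	}
	_ = exampleCmd.Execute()
}
```

Worth noting: viper keys are global to the process, so two subcommands binding the same bare key name (for example "version") would share state.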
'Product A,Product B')") + createHandoverAnnouncmentCmd.Flags().String("customer", "", "Customer name") + createHandoverAnnouncmentCmd.Flags().String("cluster", "", "Cluster ID") + createHandoverAnnouncmentCmd.Flags().String("version", "", "Affects version") + + flags := []string{"summary", "description", "products", "customer", "cluster", "version"} + for _, flag := range flags { + if err := viper.BindPFlag(flag, createHandoverAnnouncmentCmd.Flags().Lookup(flag)); err != nil { + log.Printf("Failed to bind flag '%s': %v", flag, err) + } + } } diff --git a/cmd/jira/handover.go b/cmd/jira/handover.go new file mode 100644 index 000000000..cc0b5005d --- /dev/null +++ b/cmd/jira/handover.go @@ -0,0 +1,179 @@ +package jira + +import ( + "fmt" + "log" + "strings" + + jira "github.com/andygrunwald/go-jira" + "github.com/manifoldco/promptui" + "github.com/openshift/osdctl/pkg/utils" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const handoverAnnoucementsProjectID = 12351820 + +var createHandoverAnnouncmentCmd = &cobra.Command{ + Use: "create-handover-announcement", + Short: "Create a new Handover announcement for SREPHOA Project", + Run: func(cmd *cobra.Command, args []string) { + CreateHandoverAnnouncment() + }, +} + +var allowedProducts = []string{ + "OpenShift Dedicated", + "OpenShift Dedicated on AWS", + "OpenShift Dedicated on GCP", + "Red Hat Openshift on AWS", + "Red Hat Openshift on AWS with Hosted Control Planes", +} + +func CreateHandoverAnnouncment() { + jiraClient, err := utils.NewJiraClient("") + if err != nil { + log.Fatalf("Failed to create Jira client: %v", err) + } + + summary := promptInput("summary", "Enter Summary/Title for the Announcment:") + description := promptInput("description", "Enter Description for the Announcment:") + products, err := getProducts() + if err != nil { + log.Fatalf("Product validation failed: %v", err) + } + customer := promptInput("customer", "Enter Customer Name:") + clusterID := promptInput("cluster", "Enter Cluster ID:") + version := promptInput("version", "Enter Affects Version (e.g. 4.16 or 4.15.32):") + + affectsVersion, err := createVersionIfNotExists(jiraClient, version) + if err != nil { + log.Fatalf("Could not ensure version: %v", err) + } + + issue := jira.Issue{ + Fields: &jira.IssueFields{ + Summary: summary, + Description: description, + Project: jira.Project{Key: "SREPHOA"}, + Type: jira.IssueType{Name: "Story"}, + AffectsVersions: []*jira.AffectsVersion{ + {Name: affectsVersion.Name}, + }, + }, + } + + // Add custom fields + issue.Fields.Unknowns = map[string]interface{}{ + utils.ProductCustomField: mapProducts(products), + utils.CustomerNameCustomField: customer, + utils.ClusterIDCustomField: clusterID, + } + + created, err := jiraClient.CreateIssue(&issue) + if err != nil { + log.Fatalf("Failed to create issue: %s", err) + } + + fmt.Printf("Issue created successfully: %v/browse/%s\nPlease update the announcment accordingly if required\n", utils.JiraBaseURL, created.Key) +} + +func getProducts() ([]string, error) { + productInput := viper.GetString("products") + var selected []string + + if productInput == "" { + fmt.Println("Available products:") + for _, p := range allowedProducts { + fmt.Printf(" - %s\n", p) + } + fmt.Println() + + prompt := promptui.Prompt{ + Label: "Enter product(s), comma-separated (e.g. 
Product A, Product B)", + Validate: func(input string) error { + if strings.TrimSpace(input) == "" { + return fmt.Errorf("input cannot be empty") + } + return nil + }, + } + + var err error + productInput, err = prompt.Run() + if err != nil { + log.Fatalf("Prompt failed: %v", err) + } + } + + raw := strings.Split(productInput, ",") + for _, p := range raw { + clean := strings.TrimSpace(p) + if clean == "" { + continue + } + if !containsIgnoreCase(allowedProducts, clean) { + return nil, fmt.Errorf("invalid product: %q (must be one of: %v)", clean, allowedProducts) + } + selected = append(selected, clean) + } + + return selected, nil +} + +func mapProducts(products []string) []map[string]string { + var result []map[string]string + for _, p := range products { + result = append(result, map[string]string{"value": p}) + } + return result +} + +func containsIgnoreCase(list []string, val string) bool { + for _, item := range list { + if strings.EqualFold(item, val) { + return true + } + } + return false +} + +func createVersionIfNotExists(jiraClient utils.JiraClientInterface, versionName string) (*jira.AffectsVersion, error) { + newVersion := &jira.Version{ + Name: versionName, + ProjectID: handoverAnnoucementsProjectID, + } + createdVersion, err := jiraClient.CreateVersion(newVersion) + if err != nil { + + return nil, fmt.Errorf("failed to create version %q: %w", versionName, err) + } + return &jira.AffectsVersion{ + Name: createdVersion.Name, + ID: createdVersion.ID, + }, nil +} + +func promptInput(flagName, promptMsg string) string { + val := viper.GetString(flagName) + if val != "" { + return val + } + + prompt := promptui.Prompt{ + Label: promptMsg, + Validate: func(input string) error { + if strings.TrimSpace(input) == "" { + return fmt.Errorf("input cannot be empty") + } + return nil + }, + } + + result, err := prompt.Run() + if err != nil { + log.Fatalf("Prompt for %q failed: %v", flagName, err) + } + + return strings.TrimSpace(result) +} diff --git a/cmd/jira/handover_test.go b/cmd/jira/handover_test.go new file mode 100644 index 000000000..338648d71 --- /dev/null +++ b/cmd/jira/handover_test.go @@ -0,0 +1,67 @@ +package jira + +import ( + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" +) + +func TestMapProducts(t *testing.T) { + input := []string{"Product A", "Product B"} + expected := []map[string]string{ + {"value": "Product A"}, + {"value": "Product B"}, + } + result := mapProducts(input) + assert.Equal(t, expected, result) +} + +func TestContainsIgnoreCase(t *testing.T) { + list := []string{"OpenShift Dedicated", + "OpenShift Dedicated on AWS", + "OpenShift Dedicated on GCP", + "Red Hat Openshift on AWS", + "Red Hat Openshift on AWS with Hosted Control Planes", + } + assert.True(t, containsIgnoreCase(list, "OpenShift Dedicated")) + assert.True(t, containsIgnoreCase(list, "red hat openshift on aws")) + assert.False(t, containsIgnoreCase(list, "EKS")) +} + +func TestGetFieldInput_FromFlag(t *testing.T) { + viper.Set("customer", "ACME Corp") + val := promptInput("customer", "Enter Customer Name:") + assert.Equal(t, "ACME Corp", val) + viper.Set("customer", "") +} + +func TestGetProducts_FromFlag(t *testing.T) { + viper.Set("products", "OpenShift Dedicated on GCP") + products, _ := getProducts() + assert.ElementsMatch(t, []string{"OpenShift Dedicated on GCP"}, products) + viper.Set("products", "") +} + +func TestGetProducts_InvalidProduct(t *testing.T) { + viper.Set("products", "InvalidProduct") + _, err := getProducts() + assert.Error(t, err) + 
viper.Set("products", "") +} + +func TestGetProducts_TrimmedAndDeduplicated(t *testing.T) { + viper.Set("products", " OpenShift Dedicated, OpenShift Dedicated on GCP ") + products, _ := getProducts() + assert.ElementsMatch(t, []string{"OpenShift Dedicated", "OpenShift Dedicated on GCP"}, products) + viper.Set("products", "") +} + +func TestMapProducts_EmptyInput(t *testing.T) { + result := mapProducts([]string{}) + assert.Empty(t, result) +} + +func TestContainsIgnoreCase_EmptyList(t *testing.T) { + assert.False(t, containsIgnoreCase([]string{}, "Anything")) +} diff --git a/docs/README.md b/docs/README.md index 956c44b78..c1f19e91c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -94,6 +94,7 @@ - `get` - Get OCP CredentialsRequests - `save` - Save iam permissions for use in mcc - `jira` - Provides a set of commands for interacting with Jira + - `create-handover-announcement` - Create a new Handover announcement for SREPHOA Project - `quick-task ` - creates a new ticket with the given name - `jumphost` - - `create` - Create a jumphost for emergency SSH access to a cluster's VMs @@ -2747,6 +2748,35 @@ osdctl jira [flags] -S, --skip-version-check skip checking to see if this is the most recent release ``` +### osdctl jira create-handover-announcement + +Create a new Handover announcement for SREPHOA Project + +``` +osdctl jira create-handover-announcement [flags] +``` + +#### Flags + +``` + --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. + --cluster string Cluster ID + --context string The name of the kubeconfig context to use + --customer string Customer name + --description string Enter Description for the Announcment + -h, --help help for create-handover-announcement + --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure + --kubeconfig string Path to the kubeconfig file to use for CLI requests. + -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] + --products string Comma-separated list of products (e.g. 'Product A,Product B') + --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") + -s, --server string The address and port of the Kubernetes API server + --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + -S, --skip-version-check skip checking to see if this is the most recent release + --summary string Enter Summary/Title for the Announcment + --version string Affects version +``` + ### osdctl jira quick-task Creates a new ticket with the given name and a label specified by "jira_team_label" from the osdctl config. The flags "jira_board_id" and "jira_team" are also required for running this command. 
diff --git a/docs/osdctl_jira.md b/docs/osdctl_jira.md
index a7ef42e10..262fdb884 100644
--- a/docs/osdctl_jira.md
+++ b/docs/osdctl_jira.md
@@ -26,5 +26,6 @@ Provides a set of commands for interacting with Jira
 ### SEE ALSO
 
 * [osdctl](osdctl.md)	 - OSD CLI
+* [osdctl jira create-handover-announcement](osdctl_jira_create-handover-announcement.md)	 - Create a new Handover announcement for SREPHOA Project
 * [osdctl jira quick-task](osdctl_jira_quick-task.md)	 - creates a new ticket with the given name
 
diff --git a/docs/osdctl_jira_create-handover-announcement.md b/docs/osdctl_jira_create-handover-announcement.md
new file mode 100644
index 000000000..7729d603f
--- /dev/null
+++ b/docs/osdctl_jira_create-handover-announcement.md
@@ -0,0 +1,38 @@
+## osdctl jira create-handover-announcement
+
+Create a new Handover announcement for SREPHOA Project
+
+```
+osdctl jira create-handover-announcement [flags]
+```
+
+### Options
+
+```
+      --cluster string       Cluster ID
+      --customer string      Customer name
+      --description string   Enter Description for the Announcement
+  -h, --help                 help for create-handover-announcement
+      --products string      Comma-separated list of products (e.g. 'Product A,Product B')
+      --summary string       Enter Summary/Title for the Announcement
+      --version string       Affects version
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                   Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --context string              The name of the kubeconfig context to use
+      --insecure-skip-tls-verify    If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string           Path to the kubeconfig file to use for CLI requests.
+  -o, --output string               Valid formats are ['', 'json', 'yaml', 'env']
+      --request-timeout string      The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. 
(default "0") + -s, --server string The address and port of the Kubernetes API server + --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + -S, --skip-version-check skip checking to see if this is the most recent release +``` + +### SEE ALSO + +* [osdctl jira](osdctl_jira.md) - Provides a set of commands for interacting with Jira + diff --git a/go.mod b/go.mod index a2f40b5a3..9ec1342cd 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/google/go-github/v63 v63.0.0 github.com/google/uuid v1.6.0 github.com/hashicorp/hcl/v2 v2.23.0 + github.com/manifoldco/promptui v0.9.0 github.com/olekukonko/tablewriter v0.0.5 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.37.0 @@ -118,6 +119,7 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.3 // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cloudwego/base64x v0.1.5 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/creack/pty v1.1.20 // indirect diff --git a/go.sum b/go.sum index a4fa60cd9..8ee9fff4b 100644 --- a/go.sum +++ b/go.sum @@ -144,8 +144,11 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= @@ -445,6 +448,8 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= @@ -764,6 +769,7 @@ golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/pkg/utils/jira.go b/pkg/utils/jira.go
index 467cbd51b..144371eeb 100644
--- a/pkg/utils/jira.go
+++ b/pkg/utils/jira.go
@@ -18,6 +18,7 @@ const (
 type JiraClientInterface interface {
 	SearchIssues(jql string) ([]jira.Issue, error)
 	CreateIssue(issue *jira.Issue) (*jira.Issue, error)
+	CreateVersion(version *jira.Version) (*jira.Version, error)
 	User() *jira.UserService
 	Issue() *jira.IssueService
 	Board() *jira.BoardService
@@ -41,6 +42,19 @@ func (j *jiraClientWrapper) CreateIssue(issue *jira.Issue) (*jira.Issue, error)
 	return created, err
 }
 
+func (j *jiraClientWrapper) CreateVersion(version *jira.Version) (*jira.Version, error) {
+	createdVersion, resp, err := j.client.Version.Create(version)
+	if err != nil {
+		// A 400 response from Jira typically means a version with this
+		// name already exists, so fall back to referencing it by name.
+		if resp != nil && resp.StatusCode == 400 {
+			return &jira.Version{Name: version.Name}, nil
+		}
+		return nil, fmt.Errorf("failed to create version %q: %w", version.Name, err)
+	}
+	return createdVersion, nil
+}
+
 func (j *jiraClientWrapper) User() *jira.UserService {
 	return j.client.User
 }
@@ -114,7 +126,7 @@ func GetRelatedHandoverAnnouncements(clusterID, externalClusterID, jiraToken, or
 		{Field: "Cluster ID", Value: clusterID, Operator: "~"},
 		{Field: "Cluster ID", Value: externalClusterID, Operator: "~"},
 	}
-	jql := buildJQL(JiraHandoverAnnouncementProjectKey, baseQueries)
+	jql := buildJQL(JiraHandoverAnnouncementProjectName, baseQueries)
 	issues, err := client.SearchIssues(jql)
 	if err != nil {
 		return nil, fmt.Errorf("failed to search for jira issues: %w", err)
@@ -126,7 +138,7 @@ func GetRelatedHandoverAnnouncements(clusterID, externalClusterID, jiraToken, or
 		{Field: "Products", Value: productName, Operator: "="},
 		{Field: "affectedVersion", Value: formatVersion(version), Operator: "~"},
 	}
-	jql = buildJQL(JiraHandoverAnnouncementProjectKey, extendedQueries)
+	jql = buildJQL(JiraHandoverAnnouncementProjectName, extendedQueries)
 	otherIssues, err := client.SearchIssues(jql)
 	if err != nil {
 		return nil, fmt.Errorf("failed to search for jira issues: %w", err)
diff --git a/pkg/utils/mocks/jira_mock.go b/pkg/utils/mocks/jira_mock.go
index ff15dc562..30ec91381 100644
--- a/pkg/utils/mocks/jira_mock.go
+++ b/pkg/utils/mocks/jira_mock.go
@@ -63,6 +63,21 @@ func (mr *MockJiraClientInterfaceMockRecorder) CreateIssue(issue interface{}) *g
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIssue", reflect.TypeOf((*MockJiraClientInterface)(nil).CreateIssue), issue)
 }
 
+// CreateVersion mocks base method.
+func (m *MockJiraClientInterface) CreateVersion(version *jira.Version) (*jira.Version, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "CreateVersion", version)
+	ret0, _ := ret[0].(*jira.Version)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// CreateVersion indicates an expected call of CreateVersion.
+func (mr *MockJiraClientInterfaceMockRecorder) CreateVersion(version interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVersion", reflect.TypeOf((*MockJiraClientInterface)(nil).CreateVersion), version) +} + // Issue mocks base method. func (m *MockJiraClientInterface) Issue() *jira.IssueService { m.ctrl.T.Helper() diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 75f8dbeab..1effb6f89 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -22,10 +22,11 @@ type fieldQuery struct { } const ( - JiraHandoverAnnouncementProjectKey = "SRE Platform HandOver Announcements" - JiraBaseURL = "https://issues.redhat.com" - productCustomField = "customfield_12319040" - customerNameCustomField = "customfield_12310160" + JiraHandoverAnnouncementProjectName = "SRE Platform HandOver Announcements" + JiraBaseURL = "https://issues.redhat.com" + ProductCustomField = "customfield_12319040" + CustomerNameCustomField = "customfield_12310160" + ClusterIDCustomField = "customfield_12316349" ) var clusterKeyRE = regexp.MustCompile(`^(\w|-)+$`) @@ -383,9 +384,9 @@ func isValidMatch(i jira.Issue, orgName string, product string, version string) return false } - productRaw := i.Fields.Unknowns[productCustomField] + productRaw := i.Fields.Unknowns[ProductCustomField] versionRaw := i.Fields.AffectsVersions - nameRaw := i.Fields.Unknowns[customerNameCustomField] + nameRaw := i.Fields.Unknowns[CustomerNameCustomField] productMatch := false if items, ok := productRaw.([]interface{}); ok { diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index 5efc247ca..0f117f45f 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -139,10 +139,10 @@ func TestIsValidMatch(t *testing.T) { Fields: &jira.IssueFields{ AffectsVersions: []*jira.AffectsVersion{{Name: versionName}}, Unknowns: map[string]interface{}{ - productCustomField: []interface{}{ + ProductCustomField: []interface{}{ map[string]interface{}{"value": productVal}, }, - customerNameCustomField: customerName, + CustomerNameCustomField: customerName, }, }, } From 94ad358d1c3a10349d3c5aa90713e0424315a603 Mon Sep 17 00:00:00 2001 From: feichashao <siwu@redhat.com> Date: Fri, 27 Jun 2025 11:54:36 +0800 Subject: [PATCH 16/40] add more supported instance types for down scale --- cmd/cluster/resize/cmd.go | 3 +++ cmd/cluster/resize/infra_node_test.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/cluster/resize/cmd.go b/cmd/cluster/resize/cmd.go index 8b1745e38..6dd4555d7 100644 --- a/cmd/cluster/resize/cmd.go +++ b/cmd/cluster/resize/cmd.go @@ -6,6 +6,7 @@ import ( var supportedInstanceTypes = map[string][]string{ "controlplane": { + "m5.2xlarge", "m5.4xlarge", "m5.8xlarge", "m5.12xlarge", @@ -19,6 +20,8 @@ var supportedInstanceTypes = map[string][]string{ "n2-standard-32", }, "infra": { + "r5.xlarge", + "r5.2xlarge", "r5.4xlarge", "r5.8xlarge", "r5.12xlarge", diff --git a/cmd/cluster/resize/infra_node_test.go b/cmd/cluster/resize/infra_node_test.go index 40e315aa0..75d784378 100644 --- a/cmd/cluster/resize/infra_node_test.go +++ b/cmd/cluster/resize/infra_node_test.go @@ -114,7 +114,7 @@ func TestValidateInstanceSize(t *testing.T) { { instanceSize: "r5.2xlarge", nodeType: "infra", - expectErr: true, + expectErr: false, }, { instanceSize: "m5.4xlarge", @@ -129,7 +129,7 @@ func TestValidateInstanceSize(t *testing.T) { { instanceSize: "m5.2xlarge", nodeType: "controlplane", - expectErr: true, + expectErr: false, }, { instanceSize: "r5.4xlarge", From 
0bf51480ada59efd199bdd227473ad5c8bc9a8b6 Mon Sep 17 00:00:00 2001 From: Draksha Khan <dkhan@redhat.com> Date: Thu, 10 Jul 2025 12:27:51 +0530 Subject: [PATCH 17/40] SREP-1010: Added "osdctl dt" check for vault cli (#772) * fix: add check for vault cli * fix: add check for vault cli --- cmd/dynatrace/vault.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/dynatrace/vault.go b/cmd/dynatrace/vault.go index 043dc0f7d..7759d759c 100644 --- a/cmd/dynatrace/vault.go +++ b/cmd/dynatrace/vault.go @@ -19,6 +19,15 @@ func setupVaultToken(vaultAddr string) error { return fmt.Errorf("error setting environment variable: %v", err) } + versionCheckCmd := exec.Command("vault", "version") + + versionCheckCmd.Stdout = os.Stdout + versionCheckCmd.Stderr = os.Stderr + + if err = versionCheckCmd.Run(); err != nil { + return fmt.Errorf("missing vault cli: %v", err) + } + tokenCheckCmd := exec.Command("vault", "token", "lookup") tokenCheckCmd.Stdout = nil tokenCheckCmd.Stderr = nil From ff59bcf6706c9fa6c7d294aa2701d67b8b713ba1 Mon Sep 17 00:00:00 2001 From: Claudio Busse <cbusse@redhat.com> Date: Thu, 10 Jul 2025 13:09:06 +0200 Subject: [PATCH 18/40] Add SRE-P Orange to owners --- OWNERS | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/OWNERS b/OWNERS index 0fbb92d53..ca5fd0f24 100644 --- a/OWNERS +++ b/OWNERS @@ -8,6 +8,15 @@ reviewers: - devppratik - Tafhim - joshbranham +- bergmannf +- Makdaam +- Nikokolas3270 +- RaphaelBut +- MateSaary +- rolandmkunkel +- petrkotas +- zmird-r +- hectorakemp approvers: - clcollins - dustman9000 @@ -18,6 +27,15 @@ approvers: - devppratik - Tafhim - joshbranham +- bergmannf +- Makdaam +- Nikokolas3270 +- RaphaelBut +- MateSaary +- rolandmkunkel +- petrkotas +- zmird-r +- hectorakemp maintainers: - clcollins - fahlmant From 5d742da29fdaed4ce42efb278c720b59df9e4efe Mon Sep 17 00:00:00 2001 From: Amit Upadhyay <amiupadh@redhat.com> Date: Wed, 30 Apr 2025 14:04:44 +0530 Subject: [PATCH 19/40] Added unit test case for infra_node and controlplane_node --- cmd/cluster/resize/controlplane_node_test.go | 49 ++ cmd/cluster/resize/infra_node.go | 76 ++- cmd/cluster/resize/infra_node_test.go | 460 +++++++++++++++++++ cmd/cluster/resize/mocks.go | 94 ++++ 4 files changed, 658 insertions(+), 21 deletions(-) create mode 100644 cmd/cluster/resize/controlplane_node_test.go create mode 100644 cmd/cluster/resize/mocks.go diff --git a/cmd/cluster/resize/controlplane_node_test.go b/cmd/cluster/resize/controlplane_node_test.go new file mode 100644 index 000000000..49e5650fb --- /dev/null +++ b/cmd/cluster/resize/controlplane_node_test.go @@ -0,0 +1,49 @@ +package resize + +import ( + "errors" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPromptGenerateResizeSL(t *testing.T) { + tests := []struct { + name string + input string + expectedError error + }{ + { + name: "User cancels service log generation", + input: "n\n", + expectedError: nil, + }, + { + name: "Failed to search for clusters", + input: "y\nJIRA-123\njustification text\n", + expectedError: errors.New("failed to search for clusters with provided filters"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pr, pw, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + os.Stdin = pr + go func() { + pw.Write([]byte(tt.input)) + pw.Close() + }() + + err = promptGenerateResizeSL("cluster-123", "new-instance-type") + if tt.expectedError != nil { + assert.Contains(t, err.Error(), tt.expectedError.Error()) + } else { + assert.NoError(t, err) + } + }) + 
} +} diff --git a/cmd/cluster/resize/infra_node.go b/cmd/cluster/resize/infra_node.go index 0de56c8b2..73f9962d0 100644 --- a/cmd/cluster/resize/infra_node.go +++ b/cmd/cluster/resize/infra_node.go @@ -10,8 +10,10 @@ import ( "strings" "time" + awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/smithy-go" + sdk "github.com/openshift-online/ocm-sdk-go" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" machinev1beta1 "github.com/openshift/api/machine/v1beta1" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -581,30 +583,62 @@ func (r *Infra) terminateCloudInstances(ctx context.Context, nodeList *corev1.No switch r.cluster.CloudProvider().ID() { case "aws": - ocmClient, err := utils.CreateConnection() - if err != nil { - return err - } - defer ocmClient.Close() - cfg, err := osdCloud.CreateAWSV2Config(ocmClient, r.cluster) - if err != nil { - return err + var ocmClient interface{} + if mOCM, ok := ctx.Value("ocm").(interface{ ClustersMgmt() interface{} }); ok { + ocmClient = mOCM + } else { + var err error + ocmClient, err = utils.CreateConnection() + if err != nil { + return err + } + defer ocmClient.(*sdk.Connection).Close() } - awsClient := ec2.NewFromConfig(cfg) - _, err = awsClient.TerminateInstances(ctx, &ec2.TerminateInstancesInput{ - InstanceIds: instanceIDs, - }) - if err != nil { - var apiErr smithy.APIError - if errors.As(err, &apiErr) { - code := apiErr.ErrorCode() - message := apiErr.ErrorMessage() - log.Printf("AWS ERROR: %v - %v\n", code, message) - } else { - log.Printf("ERROR: %v\n", err.Error()) + if mBuilder, ok := ctx.Value("aws_builder").(interface { + CreateAWSV2Config(interface{}, *cmv1.Cluster) (awssdk.Config, error) + }); ok { + cfg, err := mBuilder.CreateAWSV2Config(ocmClient, r.cluster) + if err != nil { + return err + } + + cfg.Region = r.cluster.Region().ID() + awsClient := ec2.NewFromConfig(cfg) + _, err = awsClient.TerminateInstances(ctx, &ec2.TerminateInstancesInput{ + InstanceIds: instanceIDs, + }) + if err != nil { + var apiErr smithy.APIError + if errors.As(err, &apiErr) { + code := apiErr.ErrorCode() + message := apiErr.ErrorMessage() + log.Printf("AWS ERROR: %v - %v\n", code, message) + } else { + log.Printf("ERROR: %v\n", err.Error()) + } + return err + } + } else { + cfg, err := osdCloud.CreateAWSV2Config(ocmClient.(*sdk.Connection), r.cluster) + if err != nil { + return err + } + awsClient := ec2.NewFromConfig(cfg) + _, err = awsClient.TerminateInstances(ctx, &ec2.TerminateInstancesInput{ + InstanceIds: instanceIDs, + }) + if err != nil { + var apiErr smithy.APIError + if errors.As(err, &apiErr) { + code := apiErr.ErrorCode() + message := apiErr.ErrorMessage() + log.Printf("AWS ERROR: %v - %v\n", code, message) + } else { + log.Printf("ERROR: %v\n", err.Error()) + } + return err } - return err } case "gcp": diff --git a/cmd/cluster/resize/infra_node_test.go b/cmd/cluster/resize/infra_node_test.go index 75d784378..59a9967cd 100644 --- a/cmd/cluster/resize/infra_node_test.go +++ b/cmd/cluster/resize/infra_node_test.go @@ -1,12 +1,19 @@ package resize import ( + "context" + "fmt" "testing" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" hivev1 "github.com/openshift/hive/apis/hive/v1" hivev1aws "github.com/openshift/hive/apis/hive/v1/aws" hivev1gcp "github.com/openshift/hive/apis/hive/v1/gcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" ) 
// newTestCluster assembles a *cmv1.Cluster while handling the error to help out with inline test-case generation @@ -183,3 +190,456 @@ func TestConvertProviderIDtoInstanceID(t *testing.T) { }) } } + +func TestSkipError(t *testing.T) { + tests := []struct { + name string + result result + msg string + expected bool + }{ + { + name: "no error", + result: result{ + condition: true, + err: nil, + }, + msg: "test message", + expected: true, + }, + { + name: "with error", + result: result{ + condition: false, + err: fmt.Errorf("test error"), + }, + msg: "test message", + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := skipError(test.result, test.msg) + if err != nil { + t.Errorf("expected nil error, got %v", err) + } + if actual != test.expected { + t.Errorf("expected condition %v, got %v", test.expected, actual) + } + }) + } +} + +func TestNodesMatchExpectedCount(t *testing.T) { + tests := []struct { + name string + labelSelector labels.Selector + expectedCount int + mockNodeList *corev1.NodeList + mockListError error + expectedMatch bool + expectedError error + }{ + { + name: "matching count", + labelSelector: labels.NewSelector(), + expectedCount: 2, + mockNodeList: &corev1.NodeList{ + Items: []corev1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "node2"}}, + }, + }, + expectedMatch: true, + }, + { + name: "non-matching count", + labelSelector: labels.NewSelector(), + expectedCount: 2, + mockNodeList: &corev1.NodeList{ + Items: []corev1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "node1"}}, + }, + }, + expectedMatch: false, + }, + { + name: "list error", + labelSelector: labels.NewSelector(), + expectedCount: 2, + mockListError: fmt.Errorf("list error"), + expectedError: fmt.Errorf("list error"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Create mock client + mockClient := &MockClient{} + mockClient.On("List", mock.Anything, mock.Anything, mock.Anything). + Return(test.mockListError). 
+ Run(func(args mock.Arguments) { + if test.mockNodeList != nil { + arg := args.Get(1).(*corev1.NodeList) + *arg = *test.mockNodeList + } + }) + + // Create Infra instance with mock client + r := &Infra{ + client: mockClient, + } + + // Call the function + match, err := r.nodesMatchExpectedCount(context.Background(), test.labelSelector, test.expectedCount) + + // Verify results + if test.expectedError != nil { + if err == nil || err.Error() != test.expectedError.Error() { + t.Errorf("expected error %v, got %v", test.expectedError, err) + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if match != test.expectedMatch { + t.Errorf("expected match %v, got %v", test.expectedMatch, match) + } + } + + // Verify mock was called correctly + mockClient.AssertExpectations(t) + }) + } +} + +func TestGenerateServiceLog(t *testing.T) { + tests := []struct { + name string + mp *hivev1.MachinePool + instanceType string + justification string + clusterId string + ohss string + expectedTemplate string + expectedParams []string + }{ + { + name: "AWS case", + mp: &hivev1.MachinePool{ + Spec: hivev1.MachinePoolSpec{ + Platform: hivev1.MachinePoolPlatform{ + AWS: &hivev1aws.MachinePoolPlatform{ + InstanceType: "r5.xlarge", + }, + }, + }, + }, + instanceType: "r5.2xlarge", + justification: "test justification", + clusterId: "test-cluster", + ohss: "test-ohss", + expectedTemplate: resizedInfraNodeServiceLogTemplate, + expectedParams: []string{ + "INSTANCE_TYPE=r5.2xlarge", + "JUSTIFICATION=test justification", + "JIRA_ID=test-ohss", + }, + }, + { + name: "GCP case", + mp: &hivev1.MachinePool{ + Spec: hivev1.MachinePoolSpec{ + Platform: hivev1.MachinePoolPlatform{ + GCP: &hivev1gcp.MachinePool{ + InstanceType: "custom-4-32768-ext", + }, + }, + }, + }, + instanceType: "custom-8-65536-ext", + justification: "test justification", + clusterId: "test-cluster", + ohss: "test-ohss", + expectedTemplate: GCPresizedInfraNodeServiceLogTemplate, + expectedParams: []string{ + "INSTANCE_TYPE=custom-8-65536-ext", + "JUSTIFICATION=test justification", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := generateServiceLog(test.mp, test.instanceType, test.justification, test.clusterId, test.ohss) + + if actual.Template != test.expectedTemplate { + t.Errorf("expected template %s, got %s", test.expectedTemplate, actual.Template) + } + + if actual.ClusterId != test.clusterId { + t.Errorf("expected cluster ID %s, got %s", test.clusterId, actual.ClusterId) + } + + if len(actual.TemplateParams) != len(test.expectedParams) { + t.Errorf("expected %d params, got %d", len(test.expectedParams), len(actual.TemplateParams)) + } + + for i, param := range actual.TemplateParams { + if param != test.expectedParams[i] { + t.Errorf("expected param %s, got %s", test.expectedParams[i], param) + } + } + }) + } +} + +func TestTerminateCloudInstances(t *testing.T) { + testCases := []struct { + name string + provider string + nodes []string + expectError bool + errorMsg string + }{ + { + name: "AWS_error", + provider: "AWS", + nodes: []string{"i-1234567890abcdef0"}, + expectError: true, + }, + { + name: "AWS_error_no_config", + provider: "aws", + nodes: []string{"i-1234567890abcdef0"}, + expectError: true, + errorMsg: "failed to load backplane-cli config", + }, + { + name: "GCP_not_supported", + provider: "gcp", + nodes: []string{"gce://project/zone/instance-1"}, + expectError: false, + }, + { + name: "unsupported_provider", + provider: "azure", + nodes: 
[]string{"azure://subscription/resource-group/instance-1"}, + expectError: true, + errorMsg: "cloud provider not supported: azure, only AWS is supported", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // mockOCM := &MockOCMConnection{} + // mockAWSBuilder := &MockAWSBuilder{} + + cluster, err := cmv1.NewCluster().CloudProvider( + cmv1.NewCloudProvider().ID(tc.provider), + ).Build() + if err != nil { + t.Fatalf("Failed to build cluster: %v", err) + } + + infra := &Infra{ + cluster: cluster, + // ocm: mockOCM, + // awsBuilder: mockAWSBuilder, + } + + // Create node list + nodeList := &corev1.NodeList{ + Items: make([]corev1.Node, len(tc.nodes)), + } + for i, id := range tc.nodes { + nodeList.Items[i] = corev1.Node{ + Spec: corev1.NodeSpec{ + ProviderID: id, + }, + } + } + + err = infra.terminateCloudInstances(context.TODO(), nodeList) + + if tc.expectError { + assert.Error(t, err) + if tc.errorMsg != "" { + assert.Contains(t, err.Error(), tc.errorMsg) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestGetInfraMachinePool(t *testing.T) { + // Create a test namespace + testNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-namespace", + Labels: map[string]string{ + "api.openshift.com/id": "test-cluster", + }, + }, + } + + // Create a test machine pool with name exactly matching "infra" + testMachinePool := &hivev1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-infra", // Name in metadata + Namespace: "test-namespace", + }, + Spec: hivev1.MachinePoolSpec{ + Name: "infra", // This is what the code checks for + Platform: hivev1.MachinePoolPlatform{ + AWS: &hivev1aws.MachinePoolPlatform{ + InstanceType: "r5.xlarge", + }, + }, + }, + } + + // Create mock client + mockHive := &MockClient{} + + // Set up mock expectations for namespace list - first call + firstCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool { + _, ok := obj.(*corev1.NamespaceList) + return ok + }), mock.Anything) + firstCall.Return(nil).Run(func(args mock.Arguments) { + nsList := args.Get(1).(*corev1.NamespaceList) + nsList.Items = []corev1.Namespace{*testNamespace} + }) + + // Set up mock expectations for machine pool list - second call + secondCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool { + _, ok := obj.(*hivev1.MachinePoolList) + return ok + }), mock.Anything) + secondCall.Return(nil).Run(func(args mock.Arguments) { + mpList := args.Get(1).(*hivev1.MachinePoolList) + mpList.Items = []hivev1.MachinePool{*testMachinePool} + }) + + // Create Infra instance + infra := &Infra{ + clusterId: "test-cluster", + hive: mockHive, + } + + // Call the function + mp, err := infra.getInfraMachinePool(context.Background()) + + // Verify results + assert.NoError(t, err) + assert.NotNil(t, mp) + assert.Equal(t, "infra", mp.Spec.Name) + assert.Equal(t, "r5.xlarge", mp.Spec.Platform.AWS.InstanceType) + + // Verify mock was called correctly + mockHive.AssertExpectations(t) +} + +func TestGetInfraMachinePoolNoNamespace(t *testing.T) { + // Create mock client + mockHive := &MockClient{} + + // Set up mock expectations for namespace list - empty list + firstCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool { + _, ok := obj.(*corev1.NamespaceList) + return ok + }), mock.Anything) + firstCall.Return(nil).Run(func(args mock.Arguments) { + nsList := args.Get(1).(*corev1.NamespaceList) + nsList.Items = []corev1.Namespace{} // Empty list + }) + + 
// Create Infra instance + infra := &Infra{ + clusterId: "test-cluster", + hive: mockHive, + } + + // Call the function + mp, err := infra.getInfraMachinePool(context.Background()) + + // Verify results + assert.Error(t, err) + assert.Contains(t, err.Error(), "expected 1 namespace, found 0 namespaces with tag: api.openshift.com/id=test-cluster") + assert.Nil(t, mp) + + // Verify mock was called correctly + mockHive.AssertExpectations(t) +} + +func TestGetInfraMachinePoolNoInfraPool(t *testing.T) { + // Create a test namespace + testNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-namespace", + Labels: map[string]string{ + "api.openshift.com/id": "test-cluster", + }, + }, + } + + // Create a test machine pool (worker, not infra) + testMachinePool := &hivev1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-worker", + Namespace: "test-namespace", + }, + Spec: hivev1.MachinePoolSpec{ + Name: "worker", // Not "infra" + Platform: hivev1.MachinePoolPlatform{ + AWS: &hivev1aws.MachinePoolPlatform{ + InstanceType: "r5.xlarge", + }, + }, + }, + } + + // Create mock client + mockHive := &MockClient{} + + // Set up mock expectations for namespace list - first call + firstCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool { + _, ok := obj.(*corev1.NamespaceList) + return ok + }), mock.Anything) + firstCall.Return(nil).Run(func(args mock.Arguments) { + nsList := args.Get(1).(*corev1.NamespaceList) + nsList.Items = []corev1.Namespace{*testNamespace} + }) + + // Set up mock expectations for machine pool list - second call + secondCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool { + _, ok := obj.(*hivev1.MachinePoolList) + return ok + }), mock.Anything) + secondCall.Return(nil).Run(func(args mock.Arguments) { + mpList := args.Get(1).(*hivev1.MachinePoolList) + mpList.Items = []hivev1.MachinePool{*testMachinePool} + }) + + // Create Infra instance + infra := &Infra{ + clusterId: "test-cluster", + hive: mockHive, + } + + // Call the function + mp, err := infra.getInfraMachinePool(context.Background()) + + // Verify results + assert.Error(t, err) + assert.Contains(t, err.Error(), "did not find the infra machinepool in namespace: test-namespace") + assert.Nil(t, mp) + + // Verify mock was called correctly + mockHive.AssertExpectations(t) +} diff --git a/cmd/cluster/resize/mocks.go b/cmd/cluster/resize/mocks.go new file mode 100644 index 000000000..3fb2065ae --- /dev/null +++ b/cmd/cluster/resize/mocks.go @@ -0,0 +1,94 @@ +package resize + +import ( + "context" + + "github.com/stretchr/testify/mock" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MockClient is a mock implementation of the client.Client interface +type MockClient struct { + mock.Mock +} + +// List implements the client.Client interface +func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + args := m.Called(ctx, list, opts) + return args.Error(0) +} + +// Get implements the client.Client interface +func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + args := m.Called(ctx, key, obj, opts) + return args.Error(0) +} + +// Create implements the client.Client interface +func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + args := 
m.Called(ctx, obj, opts) + return args.Error(0) +} + +// Delete implements the client.Client interface +func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +// Update implements the client.Client interface +func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +// Patch implements the client.Client interface +func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +// DeleteAllOf implements the client.Client interface +func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +// GroupVersionKindFor implements the client.Client interface +func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + args := m.Called(obj) + return args.Get(0).(schema.GroupVersionKind), args.Error(1) +} + +// IsObjectNamespaced implements the client.Client interface +func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + args := m.Called(obj) + return args.Bool(0), args.Error(1) +} + +// RESTMapper implements the client.Client interface +func (m *MockClient) RESTMapper() meta.RESTMapper { + args := m.Called() + return args.Get(0).(meta.RESTMapper) +} + +// Scheme implements the client.Client interface +func (m *MockClient) Scheme() *runtime.Scheme { + args := m.Called() + return args.Get(0).(*runtime.Scheme) +} + +// Status implements the client.Client interface +func (m *MockClient) Status() client.StatusWriter { + args := m.Called() + return args.Get(0).(client.StatusWriter) +} + +// SubResource implements the client.Client interface +func (m *MockClient) SubResource(subResource string) client.SubResourceClient { + args := m.Called(subResource) + return args.Get(0).(client.SubResourceClient) +} From 2a5b8c99b56a9a41e536317e3a9d8a652812cecf Mon Sep 17 00:00:00 2001 From: Amit Upadhyay <amiupadh@redhat.com> Date: Tue, 6 May 2025 12:21:14 +0530 Subject: [PATCH 20/40] fixed lint issues --- cmd/cluster/resize/infra_node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cluster/resize/infra_node.go b/cmd/cluster/resize/infra_node.go index 73f9962d0..7f8f3ec93 100644 --- a/cmd/cluster/resize/infra_node.go +++ b/cmd/cluster/resize/infra_node.go @@ -602,7 +602,7 @@ func (r *Infra) terminateCloudInstances(ctx context.Context, nodeList *corev1.No if err != nil { return err } - + cfg.Region = r.cluster.Region().ID() awsClient := ec2.NewFromConfig(cfg) _, err = awsClient.TerminateInstances(ctx, &ec2.TerminateInstancesInput{ From 284d84bd3892039af455a40f11c4590d16b0fc37 Mon Sep 17 00:00:00 2001 From: Amit Upadhyay <amiupadh@redhat.com> Date: Wed, 2 Jul 2025 12:27:24 +0530 Subject: [PATCH 21/40] removed unwanted test case --- cmd/cluster/resize/infra_node_test.go | 81 --------------------------- 1 file changed, 81 deletions(-) diff --git a/cmd/cluster/resize/infra_node_test.go b/cmd/cluster/resize/infra_node_test.go index 59a9967cd..27c80834d 100644 --- a/cmd/cluster/resize/infra_node_test.go +++ b/cmd/cluster/resize/infra_node_test.go @@ -314,87 +314,6 @@ func TestNodesMatchExpectedCount(t *testing.T) { } } -func TestGenerateServiceLog(t *testing.T) { - tests := 
[]struct { - name string - mp *hivev1.MachinePool - instanceType string - justification string - clusterId string - ohss string - expectedTemplate string - expectedParams []string - }{ - { - name: "AWS case", - mp: &hivev1.MachinePool{ - Spec: hivev1.MachinePoolSpec{ - Platform: hivev1.MachinePoolPlatform{ - AWS: &hivev1aws.MachinePoolPlatform{ - InstanceType: "r5.xlarge", - }, - }, - }, - }, - instanceType: "r5.2xlarge", - justification: "test justification", - clusterId: "test-cluster", - ohss: "test-ohss", - expectedTemplate: resizedInfraNodeServiceLogTemplate, - expectedParams: []string{ - "INSTANCE_TYPE=r5.2xlarge", - "JUSTIFICATION=test justification", - "JIRA_ID=test-ohss", - }, - }, - { - name: "GCP case", - mp: &hivev1.MachinePool{ - Spec: hivev1.MachinePoolSpec{ - Platform: hivev1.MachinePoolPlatform{ - GCP: &hivev1gcp.MachinePool{ - InstanceType: "custom-4-32768-ext", - }, - }, - }, - }, - instanceType: "custom-8-65536-ext", - justification: "test justification", - clusterId: "test-cluster", - ohss: "test-ohss", - expectedTemplate: GCPresizedInfraNodeServiceLogTemplate, - expectedParams: []string{ - "INSTANCE_TYPE=custom-8-65536-ext", - "JUSTIFICATION=test justification", - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - actual := generateServiceLog(test.mp, test.instanceType, test.justification, test.clusterId, test.ohss) - - if actual.Template != test.expectedTemplate { - t.Errorf("expected template %s, got %s", test.expectedTemplate, actual.Template) - } - - if actual.ClusterId != test.clusterId { - t.Errorf("expected cluster ID %s, got %s", test.clusterId, actual.ClusterId) - } - - if len(actual.TemplateParams) != len(test.expectedParams) { - t.Errorf("expected %d params, got %d", len(test.expectedParams), len(actual.TemplateParams)) - } - - for i, param := range actual.TemplateParams { - if param != test.expectedParams[i] { - t.Errorf("expected param %s, got %s", test.expectedParams[i], param) - } - } - }) - } -} - func TestTerminateCloudInstances(t *testing.T) { testCases := []struct { name string From d10240b156cd6b619802762d212060b1cb9d2c80 Mon Sep 17 00:00:00 2001 From: Amit Upadhyay <amiupadh@redhat.com> Date: Wed, 2 Jul 2025 12:31:29 +0530 Subject: [PATCH 22/40] removed unwanted test case --- cmd/cluster/resize/infra_node_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/cluster/resize/infra_node_test.go b/cmd/cluster/resize/infra_node_test.go index 27c80834d..0161a5092 100644 --- a/cmd/cluster/resize/infra_node_test.go +++ b/cmd/cluster/resize/infra_node_test.go @@ -562,3 +562,5 @@ func TestGetInfraMachinePoolNoInfraPool(t *testing.T) { // Verify mock was called correctly mockHive.AssertExpectations(t) } + + From 7e98c37e4b9db825b55d9d57ec4c1b11ef12d87c Mon Sep 17 00:00:00 2001 From: Amit Upadhyay <amiupadh@redhat.com> Date: Wed, 2 Jul 2025 14:08:14 +0530 Subject: [PATCH 23/40] removed unwanted test case --- cmd/cluster/resize/controlplane_node_test.go | 49 ------------ cmd/cluster/resize/infra_node_test.go | 80 -------------------- 2 files changed, 129 deletions(-) delete mode 100644 cmd/cluster/resize/controlplane_node_test.go diff --git a/cmd/cluster/resize/controlplane_node_test.go b/cmd/cluster/resize/controlplane_node_test.go deleted file mode 100644 index 49e5650fb..000000000 --- a/cmd/cluster/resize/controlplane_node_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package resize - -import ( - "errors" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPromptGenerateResizeSL(t *testing.T) { - 
tests := []struct { - name string - input string - expectedError error - }{ - { - name: "User cancels service log generation", - input: "n\n", - expectedError: nil, - }, - { - name: "Failed to search for clusters", - input: "y\nJIRA-123\njustification text\n", - expectedError: errors.New("failed to search for clusters with provided filters"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pr, pw, err := os.Pipe() - if err != nil { - t.Fatal(err) - } - os.Stdin = pr - go func() { - pw.Write([]byte(tt.input)) - pw.Close() - }() - - err = promptGenerateResizeSL("cluster-123", "new-instance-type") - if tt.expectedError != nil { - assert.Contains(t, err.Error(), tt.expectedError.Error()) - } else { - assert.NoError(t, err) - } - }) - } -} diff --git a/cmd/cluster/resize/infra_node_test.go b/cmd/cluster/resize/infra_node_test.go index 0161a5092..d19e52438 100644 --- a/cmd/cluster/resize/infra_node_test.go +++ b/cmd/cluster/resize/infra_node_test.go @@ -314,86 +314,6 @@ func TestNodesMatchExpectedCount(t *testing.T) { } } -func TestTerminateCloudInstances(t *testing.T) { - testCases := []struct { - name string - provider string - nodes []string - expectError bool - errorMsg string - }{ - { - name: "AWS_error", - provider: "AWS", - nodes: []string{"i-1234567890abcdef0"}, - expectError: true, - }, - { - name: "AWS_error_no_config", - provider: "aws", - nodes: []string{"i-1234567890abcdef0"}, - expectError: true, - errorMsg: "failed to load backplane-cli config", - }, - { - name: "GCP_not_supported", - provider: "gcp", - nodes: []string{"gce://project/zone/instance-1"}, - expectError: false, - }, - { - name: "unsupported_provider", - provider: "azure", - nodes: []string{"azure://subscription/resource-group/instance-1"}, - expectError: true, - errorMsg: "cloud provider not supported: azure, only AWS is supported", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // mockOCM := &MockOCMConnection{} - // mockAWSBuilder := &MockAWSBuilder{} - - cluster, err := cmv1.NewCluster().CloudProvider( - cmv1.NewCloudProvider().ID(tc.provider), - ).Build() - if err != nil { - t.Fatalf("Failed to build cluster: %v", err) - } - - infra := &Infra{ - cluster: cluster, - // ocm: mockOCM, - // awsBuilder: mockAWSBuilder, - } - - // Create node list - nodeList := &corev1.NodeList{ - Items: make([]corev1.Node, len(tc.nodes)), - } - for i, id := range tc.nodes { - nodeList.Items[i] = corev1.Node{ - Spec: corev1.NodeSpec{ - ProviderID: id, - }, - } - } - - err = infra.terminateCloudInstances(context.TODO(), nodeList) - - if tc.expectError { - assert.Error(t, err) - if tc.errorMsg != "" { - assert.Contains(t, err.Error(), tc.errorMsg) - } - } else { - assert.NoError(t, err) - } - }) - } -} - func TestGetInfraMachinePool(t *testing.T) { // Create a test namespace testNamespace := &corev1.Namespace{ From ae52a9a937f8795f0d97e79906a60952c080012c Mon Sep 17 00:00:00 2001 From: Amit Upadhyay <amiupadh@redhat.com> Date: Thu, 3 Jul 2025 14:51:44 +0530 Subject: [PATCH 24/40] fixed go fmt --- cmd/cluster/resize/infra_node_test.go | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/cmd/cluster/resize/infra_node_test.go b/cmd/cluster/resize/infra_node_test.go index d19e52438..b183e1077 100644 --- a/cmd/cluster/resize/infra_node_test.go +++ b/cmd/cluster/resize/infra_node_test.go @@ -315,7 +315,6 @@ func TestNodesMatchExpectedCount(t *testing.T) { } func TestGetInfraMachinePool(t *testing.T) { - // Create a test namespace testNamespace 
:= &corev1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-namespace",
@@ -325,14 +324,13 @@ func TestGetInfraMachinePool(t *testing.T) {
 		},
 	}
 
-	// Create a test machine pool with name exactly matching "infra"
 	testMachinePool := &hivev1.MachinePool{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      "test-cluster-infra", // Name in metadata
+			Name:      "test-cluster-infra",
 			Namespace: "test-namespace",
 		},
 		Spec: hivev1.MachinePoolSpec{
-			Name: "infra", // This is what the code checks for
+			Name: "infra",
 			Platform: hivev1.MachinePoolPlatform{
 				AWS: &hivev1aws.MachinePoolPlatform{
 					InstanceType: "r5.xlarge",
@@ -341,10 +339,7 @@ func TestGetInfraMachinePool(t *testing.T) {
 		},
 	}
 
-	// Create mock client
 	mockHive := &MockClient{}
-
-	// Set up mock expectations for namespace list - first call
 	firstCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool {
 		_, ok := obj.(*corev1.NamespaceList)
 		return ok
@@ -354,7 +349,6 @@
 		nsList.Items = []corev1.Namespace{*testNamespace}
 	})
 
-	// Set up mock expectations for machine pool list - second call
 	secondCall := mockHive.On("List", mock.Anything, mock.MatchedBy(func(obj interface{}) bool {
 		_, ok := obj.(*hivev1.MachinePoolList)
 		return ok
@@ -364,22 +358,17 @@
 		mpList.Items = []hivev1.MachinePool{*testMachinePool}
 	})
 
-	// Create Infra instance
 	infra := &Infra{
 		clusterId: "test-cluster",
 		hive:      mockHive,
 	}
 
-	// Call the function
 	mp, err := infra.getInfraMachinePool(context.Background())
 
-	// Verify results
 	assert.NoError(t, err)
 	assert.NotNil(t, mp)
 	assert.Equal(t, "infra", mp.Spec.Name)
 	assert.Equal(t, "r5.xlarge", mp.Spec.Platform.AWS.InstanceType)
-
-	// Verify mock was called correctly
 	mockHive.AssertExpectations(t)
 }
 
@@ -482,5 +471,3 @@ func TestGetInfraMachinePoolNoInfraPool(t *testing.T) {
 	// Verify mock was called correctly
 	mockHive.AssertExpectations(t)
 }
-
-

From d4ecdaeb376ad09211062f302c4d28479458f1a9 Mon Sep 17 00:00:00 2001
From: Fabio Aldana <97414315+faldanarh@users.noreply.github.com>
Date: Mon, 7 Jul 2025 12:41:58 +1000
Subject: [PATCH 25/40] Update HCP Must-Gather help

The example command showed invalid values for the "--gather" flag.
Fixed to use the available options.
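For reference, a corrected invocation uses the documented gather targets
(per the flag help, the available values are sc, sc_acm, mc, and hcp; the
cluster ID and reason below are placeholders):

    osdctl hcp must-gather --cluster-id CLUSTER_ID --gather sc,mc,sc_acm --reason OHSS-1234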
---
 cmd/hcp/mustgather/mustGather.go | 2 +-
 docs/osdctl_hcp_must-gather.md   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmd/hcp/mustgather/mustGather.go b/cmd/hcp/mustgather/mustGather.go
index b841d3e99..f1af3aac2 100644
--- a/cmd/hcp/mustgather/mustGather.go
+++ b/cmd/hcp/mustgather/mustGather.go
@@ -43,7 +43,7 @@ func NewCmdMustGather() *cobra.Command {
 		Use:     "must-gather --cluster-id <cluster-identifier>",
 		Short:   "Create a must-gather for HCP cluster",
 		Long:    "Create a must-gather for an HCP cluster with optional gather targets",
-		Example: "osdctl hcp must-gather --cluster-id CLUSTER_ID --gather sc_mg,mc_mg,sc_acm --reason OHSS-1234",
+		Example: "osdctl hcp must-gather --cluster-id CLUSTER_ID --gather sc,mc,sc_acm --reason OHSS-1234",
 
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return mg.Run()
diff --git a/docs/osdctl_hcp_must-gather.md b/docs/osdctl_hcp_must-gather.md
index 9aadb9c02..6df3e7e76 100644
--- a/docs/osdctl_hcp_must-gather.md
+++ b/docs/osdctl_hcp_must-gather.md
@@ -13,7 +13,7 @@ osdctl hcp must-gather --cluster-id <cluster-identifier> [flags]
 ### Examples
 
 ```
-osdctl hcp must-gather --cluster-id CLUSTER_ID --gather sc_mg,mc_mg,sc_acm --reason OHSS-1234
+osdctl hcp must-gather --cluster-id CLUSTER_ID --gather sc,mc,sc_acm --reason OHSS-1234
 ```
 
 ### Options

From e695169afbb84b8133a0f87303a6dff6e59cdae9 Mon Sep 17 00:00:00 2001
From: RaphaelBut <rbut@redhat.com>
Date: Thu, 10 Jul 2025 12:34:09 +0100
Subject: [PATCH 26/40] Replace unmaintained pkg golang-gomock with
 uber-gomock

---
 go.mod                       |  1 -
 go.sum                       |  1 -
 pkg/utils/jira_test.go       |  2 +-
 pkg/utils/mocks/jira_mock.go | 14 ++++++++++----
 4 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/go.mod b/go.mod
index 9ec1342cd..322832d5d 100644
--- a/go.mod
+++ b/go.mod
@@ -28,7 +28,6 @@ require (
 	github.com/deckarep/golang-set v1.8.0
 	github.com/fatih/color v1.18.0
 	github.com/golang-jwt/jwt/v5 v5.2.2
-	github.com/golang/mock v1.6.0
 	github.com/google/go-github/v63 v63.0.0
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/hcl/v2 v2.23.0
diff --git a/go.sum b/go.sum
index 8ee9fff4b..4d06a3b32 100644
--- a/go.sum
+++ b/go.sum
@@ -830,7 +830,6 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
 golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
diff --git a/pkg/utils/jira_test.go b/pkg/utils/jira_test.go
index 402767426..e446b3d85 100644
--- a/pkg/utils/jira_test.go
+++ b/pkg/utils/jira_test.go
@@ -5,9 +5,9 @@ import (
 	"testing"
 
 	"github.com/andygrunwald/go-jira"
-	"github.com/golang/mock/gomock"
 	mocks "github.com/openshift/osdctl/pkg/utils/mocks"
 	"github.com/stretchr/testify/assert"
+	"go.uber.org/mock/gomock"
 )
 
 func TestGetJiraIssuesForClusterWithClient(t *testing.T) {
diff --git a/pkg/utils/mocks/jira_mock.go b/pkg/utils/mocks/jira_mock.go
index 30ec91381..b38ad3bf5 100644
--- a/pkg/utils/mocks/jira_mock.go
+++ b/pkg/utils/mocks/jira_mock.go
@@
-1,5 +1,10 @@ // Code generated by MockGen. DO NOT EDIT. // Source: jira.go +// +// Generated by this command: +// +// mockgen -source=jira.go -destination=./mocks/jira_mock.go -package=utils +// // Package utils is a generated GoMock package. package utils @@ -8,13 +13,14 @@ import ( reflect "reflect" jira "github.com/andygrunwald/go-jira" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockJiraClientInterface is a mock of JiraClientInterface interface. type MockJiraClientInterface struct { ctrl *gomock.Controller recorder *MockJiraClientInterfaceMockRecorder + isgomock struct{} } // MockJiraClientInterfaceMockRecorder is the mock recorder for MockJiraClientInterface. @@ -58,7 +64,7 @@ func (m *MockJiraClientInterface) CreateIssue(issue *jira.Issue) (*jira.Issue, e } // CreateIssue indicates an expected call of CreateIssue. -func (mr *MockJiraClientInterfaceMockRecorder) CreateIssue(issue interface{}) *gomock.Call { +func (mr *MockJiraClientInterfaceMockRecorder) CreateIssue(issue any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIssue", reflect.TypeOf((*MockJiraClientInterface)(nil).CreateIssue), issue) } @@ -73,7 +79,7 @@ func (m *MockJiraClientInterface) CreateVersion(version *jira.Version) (*jira.Ve } // CreateVersion indicates an expected call of CreateVersion. -func (mr *MockJiraClientInterfaceMockRecorder) CreateVersion(version interface{}) *gomock.Call { +func (mr *MockJiraClientInterfaceMockRecorder) CreateVersion(version any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVersion", reflect.TypeOf((*MockJiraClientInterface)(nil).CreateVersion), version) } @@ -102,7 +108,7 @@ func (m *MockJiraClientInterface) SearchIssues(jql string) ([]jira.Issue, error) } // SearchIssues indicates an expected call of SearchIssues. 
-func (mr *MockJiraClientInterfaceMockRecorder) SearchIssues(jql interface{}) *gomock.Call { +func (mr *MockJiraClientInterfaceMockRecorder) SearchIssues(jql any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchIssues", reflect.TypeOf((*MockJiraClientInterface)(nil).SearchIssues), jql) } From 7ee1fb4e55aa44c36437216312c2d7ed35f2afbc Mon Sep 17 00:00:00 2001 From: Mate Saary <msaary@redhat.com> Date: Thu, 10 Jul 2025 02:48:05 +0100 Subject: [PATCH 27/40] Fix dt logs console URL --- cmd/dynatrace/logsCmd.go | 44 +++++++++++++++------------------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/cmd/dynatrace/logsCmd.go b/cmd/dynatrace/logsCmd.go index 647c9b13c..75258da92 100644 --- a/cmd/dynatrace/logsCmd.go +++ b/cmd/dynatrace/logsCmd.go @@ -102,41 +102,29 @@ func NewCmdLogs() *cobra.Command { func GetLinkToWebConsole(dtURL string, since int, finalQuery string) (string, error) { SearchQuery := map[string]interface{}{ - "version": "0", - "data": map[string]interface{}{ - "tableConfig": map[string]interface{}{ - "visibleColumns": []string{"timestamp", "status", "content"}, - "columnAttributes": map[string]interface{}{ - "columnWidths": map[string]interface{}{}, - "lineWraps": map[string]interface{}{ - "timestamp": true, - "status": true, - "content": true, - }, - "tableLineWrap": true, - }, - "columnOrder": []string{"timestamp", "status", "content"}, - }, - "queryConfig": map[string]interface{}{ - "query": finalQuery, - "timeframe": map[string]interface{}{"from": fmt.Sprintf("now()-%vh", since), "to": "now()"}, - "filter": map[string]interface{}{ - "datatype": "logs", - "filters": map[string]interface{}{}, - "sort": map[string]interface{}{ - "field": "timestamp", - "direction": sortOrder, - }, - }, - "showDqlEditor": true, + "version": 1, + "dt.query": finalQuery, + "dt.timeframe": map[string]interface{}{ + "from": fmt.Sprintf("now()-%vh", since), + "to": "now()", + }, + "showDqlEditor": true, + "tableConfig": map[string]interface{}{ + "visibleColumns": []string{"timestamp", "status", "content"}, + "columnOrder": []string{"timestamp", "status", "content"}, + "columnAttributes": map[string]interface{}{ + "columnWidths": map[string]interface{}{}, + "lineWraps": map[string]interface{}{}, + "tableLineWrap": true, }, }, } + mStr, err := json.Marshal(SearchQuery) if err != nil { return "", fmt.Errorf("failed to create JSON for sharable URL: %v", err) } - return fmt.Sprintf("%sui/apps/dynatrace.logs/?gtf=-%dh&gf=all&sortDirection=desc&advancedQueryMode=true&isDefaultQuery=false&visualizationType=table#%s\n\n", dtURL, since, url.PathEscape(string(mStr))), nil + return fmt.Sprintf("%sui/apps/dynatrace.logs/#%s", dtURL, url.PathEscape(string(mStr))), nil } func main(clusterID string) error { From 03f03d2b58b4d880fd74ef5b20d56ca5d6fd1a2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Jun 2025 18:47:13 +0000 Subject: [PATCH 28/40] Bump github.com/openshift/backplane-cli from 0.1.46 to 0.1.47 Bumps [github.com/openshift/backplane-cli](https://github.com/openshift/backplane-cli) from 0.1.46 to 0.1.47. 
- [Release notes](https://github.com/openshift/backplane-cli/releases) - [Changelog](https://github.com/openshift/backplane-cli/blob/main/docs/release.md) - [Commits](https://github.com/openshift/backplane-cli/compare/v0.1.46...v0.1.47) --- updated-dependencies: - dependency-name: github.com/openshift/backplane-cli dependency-version: 0.1.47 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] <support@github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 322832d5d..2be923824 100644 --- a/go.mod +++ b/go.mod @@ -40,7 +40,7 @@ require ( github.com/openshift-online/ocm-sdk-go v0.1.465 github.com/openshift/api v0.0.0-20250207102212-9e59a77ed2e0 github.com/openshift/aws-account-operator/api v0.0.0-20250205151445-6455c35fc4ae - github.com/openshift/backplane-cli v0.1.46 + github.com/openshift/backplane-cli v0.1.47 github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4 github.com/openshift/gcp-project-operator v0.0.0-20241024143818-ec4eabd35aba github.com/openshift/hive/apis v0.0.0-20250206153200-5a34ea42e678 diff --git a/go.sum b/go.sum index 4d06a3b32..25158ef89 100644 --- a/go.sum +++ b/go.sum @@ -535,8 +535,8 @@ github.com/openshift/aws-account-operator/api v0.0.0-20250205151445-6455c35fc4ae github.com/openshift/aws-account-operator/api v0.0.0-20250205151445-6455c35fc4ae/go.mod h1:1PdbQqTDrejSl9zsScM1x59f0oHNTsAgoJqTZqTkH/U= github.com/openshift/backplane-api v0.0.0-20250514095514-2aa57551ec70 h1:rUcdH93mEXHMviKxy1no2vFuQm0TiqA2vuCKnczeQ1k= github.com/openshift/backplane-api v0.0.0-20250514095514-2aa57551ec70/go.mod h1:RuJZpJy45AJnkp7A0ZPTZhLOVkCGDLI6cGknKvp65LE= -github.com/openshift/backplane-cli v0.1.46 h1:REX5aO8uRAfAA35/XbTft8cxqE/gNmovz5fli74WTDM= -github.com/openshift/backplane-cli v0.1.46/go.mod h1:DKTrs4gWVq+VMmGGHnxejfTfhz4r4Q4tpXivLBr2JJw= +github.com/openshift/backplane-cli v0.1.47 h1:coLmH62CFM5YUyWWWVYa6lU17W0RvBlXWpIal9bH7Pk= +github.com/openshift/backplane-cli v0.1.47/go.mod h1:DKTrs4gWVq+VMmGGHnxejfTfhz4r4Q4tpXivLBr2JJw= github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4 h1:nrD3npDGt5bvwNXZKTzzEuZTI/4Uo5PbrkpAjfxhxtE= github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4/go.mod h1:Lzu29TMne5LsgPnyw2n9jrPiD5t6uyG5aE6KFy8cz6w= github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ= From 27ab9999fb8161745176e38eb08bcc35e0ce3654 Mon Sep 17 00:00:00 2001 From: devppratik <ppanda@redhat.com> Date: Tue, 17 Jun 2025 11:29:22 +0530 Subject: [PATCH 29/40] OSD-30374: update long description based on SOP Update docs Fix lint Update docs --- cmd/jira/cmd.go | 2 +- cmd/jira/handover.go | 25 +++++++++++++++++ docs/README.md | 26 +++++++++++++++-- ...sdctl_jira_create-handover-announcement.md | 28 ++++++++++++++++++- 4 files changed, 77 insertions(+), 4 deletions(-) diff --git a/cmd/jira/cmd.go b/cmd/jira/cmd.go index 7610f9fc7..dd31b65b0 100644 --- a/cmd/jira/cmd.go +++ b/cmd/jira/cmd.go @@ -22,7 +22,7 @@ func init() { createHandoverAnnouncmentCmd.Flags().String("products", "", "Comma-separated list of products (e.g. 
'Product A,Product B')")
 	createHandoverAnnouncmentCmd.Flags().String("customer", "", "Customer name")
 	createHandoverAnnouncmentCmd.Flags().String("cluster", "", "Cluster ID")
-	createHandoverAnnouncmentCmd.Flags().String("version", "", "Affects version")
+	createHandoverAnnouncmentCmd.Flags().String("version", "", "Affected OpenShift version (e.g. 4.16 or 4.15.32)")
 
 	flags := []string{"summary", "description", "products", "customer", "cluster", "version"}
 	for _, flag := range flags {
diff --git a/cmd/jira/handover.go b/cmd/jira/handover.go
index cc0b5005d..5bc3c1df4 100644
--- a/cmd/jira/handover.go
+++ b/cmd/jira/handover.go
@@ -14,9 +14,34 @@ import (
 
 const handoverAnnoucementsProjectID = 12351820
 
+const longDescription = `
+Create a new Handover announcement for SREPHOA Project. To fill the fields, use the following instructions:
+
+1. Cluster ID
+   - If a specific cluster is affected, enter the ID (internal or external).
+   - If not applicable: enter None, N/A, or All for fleet-wide impact.
+
+2. Customer Name
+   - Use the exact name from the output of the command you ran. Do not modify or abbreviate. Copy-paste exactly.
+   - If not applicable: enter None or N/A.
+   Note: To find the customer name, you can get it from ocm describe cluster | grep -i organization or run the following command: ocm get $(ocm get $(ocm get cluster $CLUSTER_ID | jq -r .subscription.href) | jq -r '.creator.href') | jq -r '.organization.name'
+
+3. Version
+Use the OpenShift version number in the format:
+   - 4.16 if it affects an entire Y-stream version
+   - 4.16.5 if it affects a specific version
+
+4. Product Type
+Select the appropriate product type:
+   - Choose Multiple if it affects the fleet
+   - Otherwise, select the specific product involved
+
+5. Description - Add a brief description of the announcement.`
+
 var createHandoverAnnouncmentCmd = &cobra.Command{
 	Use:   "create-handover-announcement",
 	Short: "Create a new Handover announcement for SREPHOA Project",
+	Long:  longDescription,
 	Run: func(cmd *cobra.Command, args []string) {
 		CreateHandoverAnnouncment()
 	},
diff --git a/docs/README.md b/docs/README.md
index c1f19e91c..2993addaa 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -2750,7 +2750,29 @@ osdctl jira [flags]
 
 ### osdctl jira create-handover-announcement
 
-Create a new Handover announcement for SREPHOA Project
+
+Create a new Handover announcement for SREPHOA Project. To fill the fields, use the following instructions:
+
+1. Cluster ID
+   - If a specific cluster is affected, enter the ID (internal or external).
+   - If not applicable: enter None, N/A, or All for fleet-wide impact.
+
+2. Customer Name
+   - Use the exact name from the output of the command you ran. Do not modify or abbreviate. Copy-paste exactly.
+   - If not applicable: enter None or N/A.
+   Note: To find the customer name, you can get it from ocm describe cluster | grep -i organization or run the following command: ocm get $(ocm get $(ocm get cluster $CLUSTER_ID | jq -r .subscription.href) | jq -r '.creator.href') | jq -r '.organization.name'
+
+3. Version
+Use the OpenShift version number in the format:
+   - 4.16 if it affects an entire Y-stream version
+   - 4.16.5 if it affects a specific version
+
+4. Product Type
+Select the appropriate product type:
+   - Choose Multiple if it affects the fleet
+   - Otherwise, select the specific product involved
+
+5. Description - Add a brief description of the announcement.
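+
+As an example, a fully flag-driven invocation might look like the following
+(all values here are illustrative):
+
+```
+osdctl jira create-handover-announcement \
+  --summary "Handover announcement title" \
+  --description "Brief description of the announcement" \
+  --products "Product A,Product B" \
+  --customer "Example Customer" \
+  --cluster None \
+  --version 4.16
+```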
```
 osdctl jira create-handover-announcement [flags]
 ```
 
@@ -2774,7 +2796,7 @@ osdctl jira create-handover-announcement [flags]
       --skip-aws-proxy-check aws_proxy   Don't use the configured aws_proxy value
   -S, --skip-version-check               skip checking to see if this is the most recent release
       --summary string                   Enter Summary/Title for the Announcment
-      --version string                   Affects version
+      --version string                   Affected OpenShift version (e.g. 4.16 or 4.15.32)
 ```
 
 ### osdctl jira quick-task
diff --git a/docs/osdctl_jira_create-handover-announcement.md b/docs/osdctl_jira_create-handover-announcement.md
index 7729d603f..71815089d 100644
--- a/docs/osdctl_jira_create-handover-announcement.md
+++ b/docs/osdctl_jira_create-handover-announcement.md
@@ -2,6 +2,32 @@
 
 Create a new Handover announcement for SREPHOA Project
 
+### Synopsis
+
+
+Create a new Handover announcement for SREPHOA Project. To fill the fields, use the following instructions:
+
+1. Cluster ID
+   - If a specific cluster is affected, enter the ID (internal or external).
+   - If not applicable: enter None, N/A, or All for fleet-wide impact.
+
+2. Customer Name
+   - Use the exact name from the output of the command you ran. Do not modify or abbreviate. Copy-paste exactly.
+   - If not applicable: enter None or N/A.
+   Note: To find the customer name, you can get it from ocm describe cluster | grep -i organization or run the following command: ocm get $(ocm get $(ocm get cluster $CLUSTER_ID | jq -r .subscription.href) | jq -r '.creator.href') | jq -r '.organization.name'
+
+3. Version
+Use the OpenShift version number in the format:
+   - 4.16 if it affects an entire Y-stream version
+   - 4.16.5 if it affects a specific version
+
+4. Product Type
+Select the appropriate product type:
+   - Choose Multiple if it affects the fleet
+   - Otherwise, select the specific product involved
+
+5. Description - Add a brief description of the announcement.
+
 ```
 osdctl jira create-handover-announcement [flags]
 ```
@@ -15,7 +41,7 @@ osdctl jira create-handover-announcement [flags]
   -h, --help                 help for create-handover-announcement
       --products string      Comma-separated list of products (e.g.
'Product A,Product B')
       --summary string       Enter Summary/Title for the Announcment
-      --version string       Affects version
+      --version string       Affected OpenShift version (e.g. 4.16 or 4.15.32)
 ```
 
 ### Options inherited from parent commands

From ee4eab7ab57ce18796260e54cdae679b15c081a7 Mon Sep 17 00:00:00 2001
From: Leszek Jakubowski <2552733+Makdaam@users.noreply.github.com>
Date: Tue, 22 Jul 2025 13:20:58 +0000
Subject: [PATCH 30/40] SREP-1062 Adding network info section to cluster
 context

---
 cmd/cluster/context.go | 72 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/cmd/cluster/context.go b/cmd/cluster/context.go
index e1fb3ff06..61022bd4f 100644
--- a/cmd/cluster/context.go
+++ b/cmd/cluster/context.go
@@ -4,6 +4,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"math"
+	"net"
 	"os"
 	"os/exec"
 	"sort"
@@ -106,6 +108,16 @@ type contextData struct {
 	UserBanned     bool
 	BanCode        string
 	BanDescription string
+
+	// Network data
+	NetworkType                string
+	NetworkMachineCIDR         string
+	NetworkServiceCIDR         string
+	NetworkPodCIDR             string
+	NetworkHostPrefix          int
+	NetworkMaxNodesFromPodCIDR int
+	NetworkMaxPodsPerNode      int
+	NetworkMaxServices         int
 }
 
 // newCmdContext implements the context command to show the current context of a cluster
@@ -232,6 +244,8 @@ func (o *contextOptions) printLongOutput(data *contextData, w io.Writer) {
 	fmt.Fprintln(w, strings.TrimSpace(data.Description))
 	fmt.Println()
+	printNetworkInfo(data, w)
+	fmt.Println()
 	utils.PrintHandoverAnnouncements(data.HandoverAnnouncements)
 	fmt.Println()
 	utils.PrintLimitedSupportReasons(data.LimitedSupportReasons)
@@ -378,6 +392,45 @@ func (o *contextOptions) generateContextData() (*contextData, []error) {
 	data.ClusterVersion = o.cluster.Version().RawID()
 	data.OCMEnv = utils.GetCurrentOCMEnv(ocmClient)
 
+	// network info fetch and calculations
+	var clusterNetwork = o.cluster.Network()
+	var ok bool
+	var podNetwork *net.IPNet
+	var serviceNetwork *net.IPNet
+
+	data.NetworkType = clusterNetwork.Type()
+	data.NetworkMachineCIDR, ok = clusterNetwork.GetMachineCIDR()
+	if !ok {
+		errors = append(errors, fmt.Errorf("missing Machine CIDR in OCM Cluster"))
+		return nil, errors
+	}
+	data.NetworkServiceCIDR = clusterNetwork.ServiceCIDR()
+	data.NetworkPodCIDR = clusterNetwork.PodCIDR()
+	data.NetworkHostPrefix = clusterNetwork.HostPrefix()
+
+	// max possible nodes from host prefix
+
+	_, podNetwork, err = net.ParseCIDR(data.NetworkPodCIDR)
+	if err != nil {
+		errors = append(errors, err)
+		return nil, errors
+	}
+	var b, max = podNetwork.Mask.Size()
+	data.NetworkMaxNodesFromPodCIDR = int(math.Pow(float64(2), float64(data.NetworkHostPrefix-b)))
+
+	// max pods per node
+	data.NetworkMaxPodsPerNode = int(math.Pow(float64(2), float64(max-data.NetworkHostPrefix)))
+
+	// max services
+
+	_, serviceNetwork, err = net.ParseCIDR(data.NetworkServiceCIDR)
+	if err != nil {
+		errors = append(errors, err)
+		return nil, errors
+	}
+	b, max = serviceNetwork.Mask.Size()
+	data.NetworkMaxServices = int(math.Pow(float64(2), float64(max-b))) - 2 // minus 2: API and DNS service
+
 	GetLimitedSupport := func() {
 		defer wg.Done()
 		defer utils.StartDelayTracker(o.verbose, "Limited Support reasons").End()
@@ -769,6 +822,25 @@ func skippableEvent(eventName string) bool {
 	return false
 }
 
+func printNetworkInfo(data *contextData, w io.Writer) {
+	var name = "Network Info"
+	fmt.Fprintln(w, delimiter+name)
+
+	table := printer.NewTablePrinter(w, 20, 1, 3, ' ')
+	table.AddRow([]string{"Network Type", data.NetworkType})
+	table.AddRow([]string{"MachineCIDR",
data.NetworkMachineCIDR}) + table.AddRow([]string{"ServiceCIDR", data.NetworkServiceCIDR}) + table.AddRow([]string{"Max Services", strconv.Itoa(data.NetworkMaxServices)}) + table.AddRow([]string{"PodCIDR", data.NetworkPodCIDR}) + table.AddRow([]string{"Host Prefix", strconv.Itoa(data.NetworkHostPrefix)}) + table.AddRow([]string{"Max Nodes (based on PodCIDR)", strconv.Itoa(data.NetworkMaxNodesFromPodCIDR)}) + table.AddRow([]string{"Max pods per node", strconv.Itoa(data.NetworkMaxPodsPerNode)}) + + if err := table.Flush(); err != nil { + fmt.Fprintf(w, "Error printing %s: %v\n", name, err) + } +} + func printDynatraceResources(data *contextData, w io.Writer) { var name string = "Dynatrace Details" fmt.Fprintln(w, delimiter+name) From 8fd43fd607e4d2fc9802cd7a81dacbdceb8073be Mon Sep 17 00:00:00 2001 From: Mate Saary <msaary@redhat.com> Date: Wed, 30 Jul 2025 00:11:20 +0100 Subject: [PATCH 31/40] SREP-962: Add absolute timestamps to fetching DT logs --- cmd/cluster/context.go | 24 ++++++++++--------- cmd/dynatrace/dtquery.go | 14 ++++++++++++ cmd/dynatrace/dtquery_test.go | 27 ++++++++++++++++++++++ cmd/dynatrace/logsCmd.go | 43 +++++++++++++++++++++++++++++------ docs/README.md | 2 ++ docs/osdctl_dynatrace_logs.md | 5 ++++ go.mod | 2 +- go.sum | 3 ++- 8 files changed, 100 insertions(+), 20 deletions(-) diff --git a/cmd/cluster/context.go b/cmd/cluster/context.go index 61022bd4f..b85dff9c4 100644 --- a/cmd/cluster/context.go +++ b/cmd/cluster/context.go @@ -515,18 +515,20 @@ func (o *contextOptions) generateContextData() (*contextData, []error) { data.DyntraceEnvURL = "Failed to fetch Dynatrace URL" } return - } else { - query, err := dynatrace.GetQuery(hcpCluster) - if err != nil { - errors = append(errors, fmt.Errorf("failed to build query for Dynatrace %v", err)) - } - queryTxt := query.Build() - data.DyntraceEnvURL = hcpCluster.DynatraceURL - data.DyntraceLogsURL, err = dynatrace.GetLinkToWebConsole(hcpCluster.DynatraceURL, 10, queryTxt) - if err != nil { - errors = append(errors, fmt.Errorf("failed to get url: %v", err)) - } } + query, err := dynatrace.GetQuery(hcpCluster, time.Time{}, time.Time{}, 1) // passing nil from/to values to use --since behaviour + if err != nil { + errors = append(errors, fmt.Errorf("failed to build query for Dynatrace %v", err)) + data.DyntraceEnvURL = fmt.Sprintf("Failed to build Dynatrace query: %v", err) + return + } + queryTxt := query.Build() + data.DyntraceEnvURL = hcpCluster.DynatraceURL + data.DyntraceLogsURL, err = dynatrace.GetLinkToWebConsole(hcpCluster.DynatraceURL, "now()-10h", "now()", queryTxt) + if err != nil { + errors = append(errors, fmt.Errorf("failed to get url: %v", err)) + } + } GetPagerDutyAlerts := func() { diff --git a/cmd/dynatrace/dtquery.go b/cmd/dynatrace/dtquery.go index 32cab25b2..9ff228ce6 100644 --- a/cmd/dynatrace/dtquery.go +++ b/cmd/dynatrace/dtquery.go @@ -3,8 +3,11 @@ package dynatrace import ( "fmt" "strings" + "time" ) +const timeFormat = "2006-01-02T15:04:05Z" + type DTQuery struct { fragments []string finalQuery string @@ -18,6 +21,17 @@ func (q *DTQuery) InitLogs(hours int) *DTQuery { return q } +func (q *DTQuery) InitLogsWithTimeRange(from time.Time, to time.Time) *DTQuery { + q.fragments = []string{} + + fromStr := from.Format(timeFormat) + toStr := to.Format(timeFormat) + + q.fragments = append(q.fragments, fmt.Sprintf("fetch logs, from:\"%s\", to:\"%s\" \n| filter matchesValue(event.type, \"LOG\") and ", fromStr, toStr)) + + return q +} + func (q *DTQuery) InitEvents(hours int) *DTQuery { q.fragments = []string{} 
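A minimal usage sketch of the new initializer (the timestamps are illustrative
and mirror the unit test added below):

	from := time.Date(2025, 6, 12, 5, 0, 0, 0, time.UTC)
	to := time.Date(2025, 6, 17, 15, 0, 0, 0, time.UTC)
	q := new(DTQuery).InitLogsWithTimeRange(from, to)
	// q.fragments[0] now holds:
	//   fetch logs, from:"2025-06-12T05:00:00Z", to:"2025-06-17T15:00:00Z"
	//   | filter matchesValue(event.type, "LOG") and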
diff --git a/cmd/dynatrace/dtquery_test.go b/cmd/dynatrace/dtquery_test.go index dff422c1a..aabf0dda4 100644 --- a/cmd/dynatrace/dtquery_test.go +++ b/cmd/dynatrace/dtquery_test.go @@ -2,6 +2,7 @@ package dynatrace import ( "testing" + "time" ) func TestDTQuery_InitLogs(t *testing.T) { @@ -192,3 +193,29 @@ func TestDTQuery_Deployments(t *testing.T) { }) } } + +func TestDTQuery_InitLogsWithTimeRange(t *testing.T) { + tests := []struct { + name string + from time.Time + to time.Time + expected string + }{ + { + name: "Standard time range", + from: time.Date(2025, 6, 12, 5, 0, 0, 0, time.UTC), + to: time.Date(2025, 6, 17, 15, 0, 0, 0, time.UTC), + expected: `fetch logs, from:"2025-06-12T05:00:00Z", to:"2025-06-17T15:00:00Z" +| filter matchesValue(event.type, "LOG") and `, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + q := new(DTQuery).InitLogsWithTimeRange(tt.from, tt.to) + if q.fragments[0] != tt.expected { + t.Errorf("expected: %s\ngot: %s", tt.expected, q.fragments[0]) + } + }) + } +} diff --git a/cmd/dynatrace/logsCmd.go b/cmd/dynatrace/logsCmd.go index 75258da92..10510864d 100644 --- a/cmd/dynatrace/logsCmd.go +++ b/cmd/dynatrace/logsCmd.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/url" + "time" k8s "github.com/openshift/osdctl/pkg/k8s" "github.com/spf13/cobra" @@ -14,6 +15,8 @@ var ( dryRun bool tail int since int + fromVar time.Time + toVar time.Time contains string sortOrder string clusterID string @@ -52,6 +55,9 @@ const ( # Only return logs newer than 2 hours old (an integer in hours) $ osdctl dt logs alertmanager-main-0 -n openshift-monitoring --since 2 + # Get logs for a specific time range using --from and --to flags + $ osdctl dt logs alertmanager-main-0 -n openshift-monitoring --from "2025-06-15 04:00" --to "2025-06-17 13:00" + # Restrict return of logs to those that contain a specific phrase $ osdctl dt logs alertmanager-main-0 -n openshift-monitoring --contains <phrase> ` @@ -89,6 +95,11 @@ func NewCmdLogs() *cobra.Command { logsCmd.Flags().BoolVar(&dryRun, "dry-run", false, "Only builds the query without fetching any logs from the tenant") logsCmd.Flags().IntVar(&tail, "tail", 1000, "Last 'n' logs to fetch (defaults to 100)") logsCmd.Flags().IntVar(&since, "since", 1, "Number of hours (integer) since which to search (defaults to 1 hour)") + logsCmd.Flags().TimeVar(&fromVar, "from", time.Time{}, []string{time.RFC3339, "2006-01-02 15:04"}, "Datetime from which to filter logs, in the format \"YYYY-MM-DD HH:MM\"") + logsCmd.Flags().TimeVar(&toVar, "to", time.Time{}, []string{time.RFC3339, "2006-01-02 15:04"}, "Datetime until which to filter logs to, in the format \"YYYY-MM-DD HH:MM\"") + logsCmd.MarkFlagsRequiredTogether("from", "to") + logsCmd.MarkFlagsMutuallyExclusive("since", "from") + logsCmd.MarkFlagsMutuallyExclusive("since", "to") logsCmd.Flags().StringVar(&contains, "contains", "", "Include logs which contain a phrase") logsCmd.Flags().StringVar(&sortOrder, "sort", "asc", "Sort the results by timestamp in either ascending or descending order. Accepted values are 'asc' and 'desc'. 
Defaults to 'asc'") logsCmd.Flags().StringSliceVar(&nodeList, "node", []string{}, "Node name(s) (comma-separated)") @@ -100,13 +111,13 @@ func NewCmdLogs() *cobra.Command { return logsCmd } -func GetLinkToWebConsole(dtURL string, since int, finalQuery string) (string, error) { +func GetLinkToWebConsole(dtURL string, from string, to string, finalQuery string) (string, error) { SearchQuery := map[string]interface{}{ "version": 1, "dt.query": finalQuery, "dt.timeframe": map[string]interface{}{ - "from": fmt.Sprintf("now()-%vh", since), - "to": "now()", + "from": from, + "to": to, }, "showDqlEditor": true, "tableConfig": map[string]interface{}{ @@ -132,6 +143,11 @@ func main(clusterID string) error { if since <= 0 { return fmt.Errorf("invalid time duration") } + + if !fromVar.IsZero() && !toVar.IsZero() && toVar.Before(fromVar) { + return fmt.Errorf("--to cannot be set to a datetime before --from") + } + hcpCluster, err := FetchClusterDetails(clusterID) if err != nil { return fmt.Errorf("failed to acquire cluster details %v", err) @@ -141,7 +157,7 @@ func main(clusterID string) error { return fmt.Errorf("invalid sort order, expecting 'asc' or 'desc'") } - query, err := GetQuery(hcpCluster) + query, err := GetQuery(hcpCluster, fromVar, toVar, since) if err != nil { return fmt.Errorf("failed to build query for Dynatrace %v", err) } @@ -149,7 +165,15 @@ func main(clusterID string) error { fmt.Println(query.Build()) if console { - url, err := GetLinkToWebConsole(hcpCluster.DynatraceURL, since, query.finalQuery) + var url string + var err error + + if !fromVar.IsZero() && !toVar.IsZero() { // Absolute timestamp condition + url, err = GetLinkToWebConsole(hcpCluster.DynatraceURL, fromVar.Format(time.RFC3339), toVar.Format(time.RFC3339), query.finalQuery) + } else { // otherwise relative (since "mode") + url, err = GetLinkToWebConsole(hcpCluster.DynatraceURL, fmt.Sprintf("now()-%dh", since), "now()", query.finalQuery) + } + if err != nil { return fmt.Errorf("failed to get url: %v", err) } @@ -179,9 +203,14 @@ func main(clusterID string) error { return nil } -func GetQuery(hcpCluster HCPCluster) (query DTQuery, error error) { +func GetQuery(hcpCluster HCPCluster, fromVar time.Time, toVar time.Time, since int) (query DTQuery, error error) { q := DTQuery{} - q.InitLogs(since).Cluster(hcpCluster.managementClusterName) + + if !fromVar.IsZero() && !toVar.IsZero() { + q.InitLogsWithTimeRange(fromVar, toVar).Cluster(hcpCluster.managementClusterName) + } else { + q.InitLogs(since).Cluster(hcpCluster.managementClusterName) + } if hcpCluster.hcpNamespace != "" { namespaceList = append(namespaceList, hcpCluster.hcpNamespace) diff --git a/docs/README.md b/docs/README.md index 2993addaa..319fa9fec 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2336,6 +2336,7 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] --contains string Include logs which contain a phrase --context string The name of the kubeconfig context to use --dry-run Only builds the query without fetching any logs from the tenant + --from time Datetime from which to filter logs, in the format "YYYY-MM-DD HH:MM" (default 0001-01-01T00:00:00Z) -h, --help help for logs --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. 
@@ -2350,6 +2351,7 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] --sort string Sort the results by timestamp in either ascending or descending order. Accepted values are 'asc' and 'desc'. Defaults to 'asc' (default "asc") --status strings Status(Info/Warn/Error) (comma-separated) --tail int Last 'n' logs to fetch (defaults to 100) (default 1000) + --to time Datetime until which to filter logs to, in the format "YYYY-MM-DD HH:MM" (default 0001-01-01T00:00:00Z) ``` ### osdctl dynatrace url diff --git a/docs/osdctl_dynatrace_logs.md b/docs/osdctl_dynatrace_logs.md index 9b4e9d93c..dea33c215 100644 --- a/docs/osdctl_dynatrace_logs.md +++ b/docs/osdctl_dynatrace_logs.md @@ -37,6 +37,9 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] # Only return logs newer than 2 hours old (an integer in hours) $ osdctl dt logs alertmanager-main-0 -n openshift-monitoring --since 2 + # Get logs for a specific time range using --from and --to flags + $ osdctl dt logs alertmanager-main-0 -n openshift-monitoring --from "2025-06-15 04:00" --to "2025-06-17 13:00" + # Restrict return of logs to those that contain a specific phrase $ osdctl dt logs alertmanager-main-0 -n openshift-monitoring --contains <phrase> @@ -50,6 +53,7 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] --container strings Container name(s) (comma-separated) --contains string Include logs which contain a phrase --dry-run Only builds the query without fetching any logs from the tenant + --from time Datetime from which to filter logs, in the format "YYYY-MM-DD HH:MM" (default 0001-01-01T00:00:00Z) -h, --help help for logs -n, --namespace strings Namespace(s) (comma-separated) --node strings Node name(s) (comma-separated) @@ -57,6 +61,7 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] --sort string Sort the results by timestamp in either ascending or descending order. Accepted values are 'asc' and 'desc'. 
Defaults to 'asc' (default "asc") --status strings Status(Info/Warn/Error) (comma-separated) --tail int Last 'n' logs to fetch (defaults to 100) (default 1000) + --to time Datetime until which to filter logs to, in the format "YYYY-MM-DD HH:MM" (default 0001-01-01T00:00:00Z) ``` ### Options inherited from parent commands diff --git a/go.mod b/go.mod index 2be923824..d7b3a5f29 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.12.0 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 + github.com/spf13/pflag v1.0.7 github.com/spf13/viper v1.20.1 github.com/stretchr/testify v1.10.0 github.com/zclconf/go-cty v1.13.0 diff --git a/go.sum b/go.sum index 25158ef89..fc76cf2ba 100644 --- a/go.sum +++ b/go.sum @@ -603,8 +603,9 @@ github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cA github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= From 35a46ae3e1c9ba097d0c7792062335d1de17b2cc Mon Sep 17 00:00:00 2001 From: Josh Branham <jbranham@redhat.com> Date: Thu, 31 Jul 2025 15:37:51 -0600 Subject: [PATCH 32/40] Standardize --cluster-id flag to support -C shorthand across all commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updated all commands under cmd/ to consistently support the -C shorthand for the --cluster-id flag: - Added -C shorthand to commands using StringVar (no shorthand previously) - Standardized existing commands using -c (lowercase) to -C (uppercase) - Updated documentation to reflect the changes This ensures a consistent user experience across all osdctl commands that accept cluster ID parameters. 
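As a sketch, the per-command change is a one-line swap from StringVar (or a
lowercase "c" shorthand) to StringVarP with the uppercase "C" shorthand, for
example:

	// before
	cmd.Flags().StringVar(&opts.clusterID, "cluster-id", "", "Provide the internal ID of the cluster")
	// after
	cmd.Flags().StringVarP(&opts.clusterID, "cluster-id", "C", "", "Provide the internal ID of the cluster")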
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com> --- cmd/alerts/list_alerts.go | 2 +- cmd/alerts/silence/add_silence.go | 2 +- cmd/alerts/silence/clear_silence.go | 2 +- cmd/alerts/silence/list_silence.go | 2 +- cmd/cluster/access/access.go | 2 +- cmd/cluster/access/cleanup.go | 2 +- cmd/cluster/checkbanneduser.go | 2 +- cmd/cluster/context.go | 2 +- cmd/cluster/detachstuckvolume.go | 2 +- cmd/cluster/etcd_health.go | 2 +- cmd/cluster/etcd_replace.go | 2 +- cmd/cluster/hypershift_info.go | 2 +- cmd/cluster/loggingcheck.go | 2 +- cmd/cluster/org_id.go | 2 +- cmd/cluster/resize/controlplane_node.go | 2 +- cmd/cluster/ssh/key.go | 2 +- cmd/cluster/support/delete.go | 2 +- cmd/cluster/support/post.go | 2 +- cmd/cluster/support/status.go | 2 +- cmd/cluster/validatepullsecret.go | 2 +- cmd/dynatrace/dashCmd.go | 2 +- cmd/dynatrace/hcpGatherLogsCmd.go | 2 +- cmd/dynatrace/logsCmd.go | 2 +- cmd/dynatrace/urlCmd.go | 2 +- cmd/env/env.go | 2 +- cmd/hcp/mustgather/mustGather.go | 2 +- docs/README.md | 52 ++++++++++----------- docs/osdctl_alert_list.md | 2 +- docs/osdctl_alert_silence_add.md | 2 +- docs/osdctl_alert_silence_expire.md | 2 +- docs/osdctl_alert_silence_list.md | 2 +- docs/osdctl_cluster_break-glass.md | 2 +- docs/osdctl_cluster_break-glass_cleanup.md | 2 +- docs/osdctl_cluster_check-banned-user.md | 2 +- docs/osdctl_cluster_context.md | 2 +- docs/osdctl_cluster_detach-stuck-volume.md | 2 +- docs/osdctl_cluster_etcd-health-check.md | 2 +- docs/osdctl_cluster_etcd-member-replace.md | 2 +- docs/osdctl_cluster_hypershift-info.md | 2 +- docs/osdctl_cluster_logging-check.md | 2 +- docs/osdctl_cluster_orgId.md | 2 +- docs/osdctl_cluster_resize_control-plane.md | 2 +- docs/osdctl_cluster_ssh_key.md | 2 +- docs/osdctl_cluster_support_delete.md | 2 +- docs/osdctl_cluster_support_post.md | 2 +- docs/osdctl_cluster_support_status.md | 2 +- docs/osdctl_cluster_validate-pull-secret.md | 2 +- docs/osdctl_dynatrace_dashboard.md | 2 +- docs/osdctl_dynatrace_gather-logs.md | 2 +- docs/osdctl_dynatrace_logs.md | 2 +- docs/osdctl_dynatrace_url.md | 2 +- docs/osdctl_env.md | 2 +- docs/osdctl_hcp_must-gather.md | 2 +- 53 files changed, 78 insertions(+), 78 deletions(-) diff --git a/cmd/alerts/list_alerts.go b/cmd/alerts/list_alerts.go index f78ed647f..4ca0f930c 100644 --- a/cmd/alerts/list_alerts.go +++ b/cmd/alerts/list_alerts.go @@ -30,7 +30,7 @@ func NewCmdListAlerts() *cobra.Command { ListAlerts(alertCmd) }, } - newCmd.Flags().StringVar(&alertCmd.clusterID, "cluster-id", "", "Provide the internal ID of the cluster") + newCmd.Flags().StringVarP(&alertCmd.clusterID, "cluster-id", "C", "", "Provide the internal ID of the cluster") _ = newCmd.MarkFlagRequired("cluster-id") newCmd.Flags().StringVarP(&alertCmd.alertLevel, "level", "l", "all", "Alert level [warning, critical, firing, pending, all]") diff --git a/cmd/alerts/silence/add_silence.go b/cmd/alerts/silence/add_silence.go index fe549bf22..33aab7deb 100644 --- a/cmd/alerts/silence/add_silence.go +++ b/cmd/alerts/silence/add_silence.go @@ -37,7 +37,7 @@ func NewCmdAddSilence() *cobra.Command { }, } - cmd.Flags().StringVar(&addSilenceCmd.clusterID, "cluster-id", "", "Provide the internal ID of the cluster") + cmd.Flags().StringVarP(&addSilenceCmd.clusterID, "cluster-id", "C", "", "Provide the internal ID of the cluster") cmd.Flags().StringSliceVar(&addSilenceCmd.alertID, "alertname", []string{}, "alertname (comma-separated)") cmd.Flags().StringVarP(&addSilenceCmd.comment, "comment", "c", "Adding silence 
using the osdctl alert command", "add comment about silence") cmd.Flags().StringVarP(&addSilenceCmd.duration, "duration", "d", "15d", "Adding duration for silence as 15 days") //default duration set to 15 days diff --git a/cmd/alerts/silence/clear_silence.go b/cmd/alerts/silence/clear_silence.go index 8967255bb..0ab185542 100644 --- a/cmd/alerts/silence/clear_silence.go +++ b/cmd/alerts/silence/clear_silence.go @@ -33,7 +33,7 @@ func NewCmdClearSilence() *cobra.Command { }, } - cmd.Flags().StringVar(&silenceCmd.clusterID, "cluster-id", "", "Provide the internal ID of the cluster") + cmd.Flags().StringVarP(&silenceCmd.clusterID, "cluster-id", "C", "", "Provide the internal ID of the cluster") cmd.Flags().StringSliceVar(&silenceCmd.silenceIDs, "silence-id", []string{}, "silence id (comma-separated)") cmd.Flags().BoolVarP(&silenceCmd.all, "all", "a", false, "clear all silences") cmd.Flags().StringVar(&silenceCmd.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") diff --git a/cmd/alerts/silence/list_silence.go b/cmd/alerts/silence/list_silence.go index 358f000a8..893eeefae 100644 --- a/cmd/alerts/silence/list_silence.go +++ b/cmd/alerts/silence/list_silence.go @@ -27,7 +27,7 @@ func NewCmdListSilence() *cobra.Command { ListSilence(listSilenceCmd) }, } - cmd.Flags().StringVar(&listSilenceCmd.clusterID, "cluster-id", "", "Provide the internal ID of the cluster") + cmd.Flags().StringVarP(&listSilenceCmd.clusterID, "cluster-id", "C", "", "Provide the internal ID of the cluster") cmd.Flags().StringVar(&listSilenceCmd.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") _ = cmd.MarkFlagRequired("cluster-id") _ = cmd.MarkFlagRequired("reason") diff --git a/cmd/cluster/access/access.go b/cmd/cluster/access/access.go index d04622b8f..9fccf7dcc 100644 --- a/cmd/cluster/access/access.go +++ b/cmd/cluster/access/access.go @@ -65,7 +65,7 @@ func NewCmdAccess(streams genericclioptions.IOStreams, client *k8s.LazyClient) * } accessCmd.AddCommand(newCmdCleanup(client, streams)) accessCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") - accessCmd.Flags().StringVar(&ops.clusterID, "cluster-id", "", "Provide the internal ID of the cluster") + accessCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Provide the internal ID of the cluster") _ = accessCmd.MarkFlagRequired("reason") _ = accessCmd.MarkFlagRequired("cluster-id") diff --git a/cmd/cluster/access/cleanup.go b/cmd/cluster/access/cleanup.go index 5300c655f..6249f3e58 100644 --- a/cmd/cluster/access/cleanup.go +++ b/cmd/cluster/access/cleanup.go @@ -33,7 +33,7 @@ func newCmdCleanup(client *k8s.LazyClient, streams genericclioptions.IOStreams) cmdutil.CheckErr(ops.Run(cmd)) }, } - cleanupCmd.Flags().StringVar(&ops.clusterID, "cluster-id", "", "[Mandatory] Provide the Internal ID of the cluster") + cleanupCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "[Mandatory] Provide the Internal ID of the cluster") cleanupCmd.Flags().StringVar(&ops.reason, "reason", "", "[Mandatory for PrivateLink clusters] The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)") _ = cleanupCmd.MarkFlagRequired("cluster-id") diff --git a/cmd/cluster/checkbanneduser.go b/cmd/cluster/checkbanneduser.go index 35355490c..faccf6661 100644 --- a/cmd/cluster/checkbanneduser.go +++ 
b/cmd/cluster/checkbanneduser.go @@ -22,7 +22,7 @@ func newCmdCheckBannedUser() *cobra.Command { }, } - cmd.Flags().StringVarP(&clusterID, "cluster-id", "c", "", "Provide internal ID of the cluster") + cmd.Flags().StringVarP(&clusterID, "cluster-id", "C", "", "Provide internal ID of the cluster") _ = cmd.MarkFlagRequired("cluster-id") return cmd diff --git a/cmd/cluster/context.go b/cmd/cluster/context.go index b85dff9c4..f3c5d9546 100644 --- a/cmd/cluster/context.go +++ b/cmd/cluster/context.go @@ -139,7 +139,7 @@ func newCmdContext() *cobra.Command { }, } - contextCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "Provide internal ID of the cluster") + contextCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Provide internal ID of the cluster") _ = contextCmd.MarkFlagRequired("cluster-id") contextCmd.Flags().StringVarP(&ops.output, "output", "o", "long", "Valid formats are ['long', 'short', 'json']. Output is set to 'long' by default") diff --git a/cmd/cluster/detachstuckvolume.go b/cmd/cluster/detachstuckvolume.go index b2854b43b..a75cd02e1 100644 --- a/cmd/cluster/detachstuckvolume.go +++ b/cmd/cluster/detachstuckvolume.go @@ -43,7 +43,7 @@ func newCmdDetachStuckVolume() *cobra.Command { }, } - detachstuckvolumeCmd.Flags().StringVar(&ops.clusterID, "cluster-id", "", "Provide internal ID of the cluster") + detachstuckvolumeCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Provide internal ID of the cluster") detachstuckvolumeCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)") _ = detachstuckvolumeCmd.MarkFlagRequired("cluster-id") _ = detachstuckvolumeCmd.MarkFlagRequired("reason") diff --git a/cmd/cluster/etcd_health.go b/cmd/cluster/etcd_health.go index fedbb208f..7debab655 100644 --- a/cmd/cluster/etcd_health.go +++ b/cmd/cluster/etcd_health.go @@ -68,7 +68,7 @@ func newCmdEtcdHealthCheck() *cobra.Command { }, } - cmd.Flags().StringVar(&opts.clusterID, "cluster-id", "", "Provide the internal Cluster ID or name to perform health check on") + cmd.Flags().StringVarP(&opts.clusterID, "cluster-id", "C", "", "Provide the internal Cluster ID or name to perform health check on") cmd.Flags().StringVar(&opts.reason, "reason", "", "Specify a reason for privilege escalation") err := cmd.MarkFlagRequired("cluster-id") diff --git a/cmd/cluster/etcd_replace.go b/cmd/cluster/etcd_replace.go index aece6a7a1..f77beb6e3 100644 --- a/cmd/cluster/etcd_replace.go +++ b/cmd/cluster/etcd_replace.go @@ -50,7 +50,7 @@ func newCmdEtcdMemberReplacement() *cobra.Command { cmdutil.CheckErr(opts.EtcdReplaceMember()) }, } - replaceCmd.Flags().StringVar(&opts.clusterID, "cluster-id", "", "Provide internal Cluster ID") + replaceCmd.Flags().StringVarP(&opts.clusterID, "cluster-id", "C", "", "Provide internal Cluster ID") replaceCmd.Flags().StringVar(&opts.nodeId, "node", "", "Node ID (required)") replaceCmd.Flags().StringVar(&opts.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)") _ = replaceCmd.MarkFlagRequired("cluster-id") diff --git a/cmd/cluster/hypershift_info.go b/cmd/cluster/hypershift_info.go index fdbfe532e..f6bbb91c5 100644 --- a/cmd/cluster/hypershift_info.go +++ b/cmd/cluster/hypershift_info.go @@ -47,7 +47,7 @@ It attempts to render the relationships as graphviz if that output format is cho cmdutil.CheckErr(ops.run()) }, } - infoCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "Provide 
internal ID of the cluster") + infoCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Provide internal ID of the cluster") infoCmd.Flags().StringVarP(&ops.awsProfile, "profile", "p", "", "AWS Profile") infoCmd.Flags().StringVarP(&ops.awsRegion, "region", "r", "", "AWS Region") infoCmd.Flags().StringVarP(&ops.privatelinkAccountId, "privatelinkaccount", "l", "", "Privatelink account ID") diff --git a/cmd/cluster/loggingcheck.go b/cmd/cluster/loggingcheck.go index 8d67cbe05..5950beb29 100644 --- a/cmd/cluster/loggingcheck.go +++ b/cmd/cluster/loggingcheck.go @@ -37,7 +37,7 @@ func newCmdLoggingCheck(streams genericclioptions.IOStreams, globalOpts *globalf }, } loggingCheckCmd.Flags().BoolVarP(&ops.verbose, "verbose", "", false, "Verbose output") - loggingCheckCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "The internal ID of the cluster to check (required)") + loggingCheckCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The internal ID of the cluster to check (required)") cmdutil.CheckErr(loggingCheckCmd.MarkFlagRequired("cluster-id")) return loggingCheckCmd diff --git a/cmd/cluster/org_id.go b/cmd/cluster/org_id.go index ac596ef73..3a844fdbd 100644 --- a/cmd/cluster/org_id.go +++ b/cmd/cluster/org_id.go @@ -31,7 +31,7 @@ func newCmdOrgId() *cobra.Command { }, } - orgIdCmd.Flags().StringVarP(&o.clusterID, "cluster-id", "c", "", "The internal ID of the cluster to check (required)") + orgIdCmd.Flags().StringVarP(&o.clusterID, "cluster-id", "C", "", "The internal ID of the cluster to check (required)") return orgIdCmd } diff --git a/cmd/cluster/resize/controlplane_node.go b/cmd/cluster/resize/controlplane_node.go index 0c6bea56f..c161b2634 100644 --- a/cmd/cluster/resize/controlplane_node.go +++ b/cmd/cluster/resize/controlplane_node.go @@ -73,7 +73,7 @@ func newCmdResizeControlPlane() *cobra.Command { return ops.run(context.Background()) }, } - resizeControlPlaneNodeCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "The internal ID of the cluster to perform actions on") + resizeControlPlaneNodeCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The internal ID of the cluster to perform actions on") resizeControlPlaneNodeCmd.Flags().StringVar(&ops.newMachineType, "machine-type", "", "The target AWS machine type to resize to (e.g. m5.2xlarge)") resizeControlPlaneNodeCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)") _ = resizeControlPlaneNodeCmd.MarkFlagRequired("cluster-id") diff --git a/cmd/cluster/ssh/key.go b/cmd/cluster/ssh/key.go index dce7657ec..e9ef8a9b0 100644 --- a/cmd/cluster/ssh/key.go +++ b/cmd/cluster/ssh/key.go @@ -79,7 +79,7 @@ Despite the logs from backplane, the ssh key is the only output channelled throu }, } - cmd.Flags().StringVar(&opts.clusterID, "cluster-id", "", "Cluster identifier (internal ID, UUID, name, etc) to retrieve the SSH key for. If not specified, the current cluster will be used.") + cmd.Flags().StringVarP(&opts.clusterID, "cluster-id", "C", "", "Cluster identifier (internal ID, UUID, name, etc) to retrieve the SSH key for. If not specified, the current cluster will be used.") cmd.Flags().BoolVarP(&opts.skipConfirmation, "yes", "y", false, "Skip any confirmation prompts and print the key automatically. Useful for redirects and scripting.") cmd.Flags().StringVar(&opts.elevationReason, "reason", "", "Provide a reason for accessing the clusters SSH key, used for backplane. 
Eg: 'OHSS-XXXX', or '#ITN-2024-XXXXX") diff --git a/cmd/cluster/support/delete.go b/cmd/cluster/support/delete.go index ac70c1c18..5e6db2a31 100644 --- a/cmd/cluster/support/delete.go +++ b/cmd/cluster/support/delete.go @@ -45,7 +45,7 @@ func newCmddelete(streams genericclioptions.IOStreams, globalOpts *globalflags.G } // Defined required flags - deleteCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "Internal cluster ID (required)") + deleteCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Internal cluster ID (required)") deleteCmd.Flags().BoolVar(&ops.removeAll, "all", false, "Remove all limited support reasons") deleteCmd.Flags().StringVarP(&ops.limitedSupportReasonID, "limited-support-reason-id", "i", "", "Limited support reason ID") deleteCmd.Flags().BoolVarP(&ops.isDryRun, "dry-run", "d", false, "Dry-run - print the limited support reason about to be sent but don't send it.") diff --git a/cmd/cluster/support/post.go b/cmd/cluster/support/post.go index e9f37cbc4..11b486b62 100644 --- a/cmd/cluster/support/post.go +++ b/cmd/cluster/support/post.go @@ -84,7 +84,7 @@ The cluster has a second failing ingress controller, which is not supported and } // Define required flags - postCmd.Flags().StringVarP(&p.ClusterID, "cluster-id", "c", "", "Internal Cluster ID (required)") + postCmd.Flags().StringVarP(&p.ClusterID, "cluster-id", "C", "", "Internal Cluster ID (required)") postCmd.Flags().StringVarP(&p.Template, "template", "t", "", "Message template file or URL") postCmd.Flags().StringArrayVarP(&p.TemplateParams, "param", "p", p.TemplateParams, "Specify a key-value pair (eg. -p FOO=BAR) to set/override a parameter value in the template.") postCmd.Flags().Var(&p.Misconfiguration, MisconfigurationFlag, "The type of misconfiguration responsible for the cluster being placed into limited support. 
Valid values are `cloud` or `cluster`.") diff --git a/cmd/cluster/support/status.go b/cmd/cluster/support/status.go index 856fd2671..1e7381133 100644 --- a/cmd/cluster/support/status.go +++ b/cmd/cluster/support/status.go @@ -34,7 +34,7 @@ func newCmdstatus(streams genericclioptions.IOStreams, globalOpts *globalflags.G }, } - statusCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "Cluster ID for which to get support status") + statusCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "Cluster ID for which to get support status") statusCmd.Flags().BoolVarP(&ops.verbose, "verbose", "", false, "Verbose output") _ = statusCmd.MarkFlagRequired("cluster-id") diff --git a/cmd/cluster/validatepullsecret.go b/cmd/cluster/validatepullsecret.go index bf0ef57f8..5ecb7ace3 100644 --- a/cmd/cluster/validatepullsecret.go +++ b/cmd/cluster/validatepullsecret.go @@ -40,7 +40,7 @@ This command will automatically login to the cluster to check the current pull-s } // Add cluster-id flag - validatePullSecretCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "c", "", "The internal ID of the cluster to check (required)") + validatePullSecretCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The internal ID of the cluster to check (required)") if err := validatePullSecretCmd.MarkFlagRequired("cluster-id"); err != nil { fmt.Printf("Error marking cluster-id flag as required: %v\n", err) } diff --git a/cmd/dynatrace/dashCmd.go b/cmd/dynatrace/dashCmd.go index c85e9f041..7c72289be 100644 --- a/cmd/dynatrace/dashCmd.go +++ b/cmd/dynatrace/dashCmd.go @@ -46,7 +46,7 @@ func newCmdDashboard() *cobra.Command { } urlCmd.Flags().StringVar(&dashboardName, "dash", "Central ROSA HCP Dashboard", "Name of the dashboard you wish to find") - urlCmd.Flags().StringVar(&clusterId, "cluster-id", "", "Provide the id of the cluster") + urlCmd.Flags().StringVarP(&clusterId, "cluster-id", "C", "", "Provide the id of the cluster") _ = urlCmd.MarkFlagRequired("cluster-id") return urlCmd diff --git a/cmd/dynatrace/hcpGatherLogsCmd.go b/cmd/dynatrace/hcpGatherLogsCmd.go index cab9623cb..2c2118387 100644 --- a/cmd/dynatrace/hcpGatherLogsCmd.go +++ b/cmd/dynatrace/hcpGatherLogsCmd.go @@ -55,7 +55,7 @@ func NewCmdHCPMustGather() *cobra.Command { hcpMgCmd.Flags().IntVar(&g.Tail, "tail", 0, "Last 'n' logs and events to fetch. By default it will pull everything") hcpMgCmd.Flags().StringVar(&g.SortOrder, "sort", "asc", "Sort the results by timestamp in either ascending or descending order. 
Accepted values are 'asc' and 'desc'") hcpMgCmd.Flags().StringVar(&g.DestDir, "dest-dir", "", "Destination directory for the logs dump, defaults to the local directory.") - hcpMgCmd.Flags().StringVar(&g.ClusterID, "cluster-id", "", "Internal ID of the HCP cluster to gather logs from (required)") + hcpMgCmd.Flags().StringVarP(&g.ClusterID, "cluster-id", "C", "", "Internal ID of the HCP cluster to gather logs from (required)") _ = hcpMgCmd.MarkFlagRequired("cluster-id") diff --git a/cmd/dynatrace/logsCmd.go b/cmd/dynatrace/logsCmd.go index 10510864d..ec2cc562c 100644 --- a/cmd/dynatrace/logsCmd.go +++ b/cmd/dynatrace/logsCmd.go @@ -91,7 +91,7 @@ func NewCmdLogs() *cobra.Command { }, } - logsCmd.Flags().StringVar(&clusterID, "cluster-id", "", "Name or Internal ID of the cluster (defaults to current cluster context)") + logsCmd.Flags().StringVarP(&clusterID, "cluster-id", "C", "", "Name or Internal ID of the cluster (defaults to current cluster context)") logsCmd.Flags().BoolVar(&dryRun, "dry-run", false, "Only builds the query without fetching any logs from the tenant") logsCmd.Flags().IntVar(&tail, "tail", 1000, "Last 'n' logs to fetch (defaults to 100)") logsCmd.Flags().IntVar(&since, "since", 1, "Number of hours (integer) since which to search (defaults to 1 hour)") diff --git a/cmd/dynatrace/urlCmd.go b/cmd/dynatrace/urlCmd.go index c28ff1604..2c303e437 100644 --- a/cmd/dynatrace/urlCmd.go +++ b/cmd/dynatrace/urlCmd.go @@ -24,7 +24,7 @@ func newCmdURL() *cobra.Command { }, } - urlCmd.Flags().StringVar(&clusterID, "cluster-id", "", "ID of the cluster") + urlCmd.Flags().StringVarP(&clusterID, "cluster-id", "C", "", "ID of the cluster") _ = urlCmd.MarkFlagRequired("cluster-id") return urlCmd diff --git a/cmd/env/env.go b/cmd/env/env.go index 12356810a..c3b8fec36 100644 --- a/cmd/env/env.go +++ b/cmd/env/env.go @@ -106,7 +106,7 @@ func NewCmdEnv() *cobra.Command { envCmd.Flags().BoolVarP(&options.ResetEnv, "reset", "r", false, "Reset environment") envCmd.Flags().BoolVarP(&options.ExportKubeConfig, "export-kubeconfig", "k", false, "Output export kubeconfig statement, to use environment outside of the env directory") - envCmd.Flags().StringVarP(&options.ClusterId, "cluster-id", "c", "", "Cluster ID") + envCmd.Flags().StringVarP(&options.ClusterId, "cluster-id", "C", "", "Cluster ID") envCmd.Flags().StringVarP(&options.LoginScript, "login-script", "l", "", "OCM login script to execute in a loop in ocb every 30 seconds") envCmd.Flags().StringVarP(&options.LoginScript, "username", "u", "", "Username for individual cluster login") diff --git a/cmd/hcp/mustgather/mustGather.go b/cmd/hcp/mustgather/mustGather.go index f1af3aac2..43847f893 100644 --- a/cmd/hcp/mustgather/mustGather.go +++ b/cmd/hcp/mustgather/mustGather.go @@ -51,7 +51,7 @@ func NewCmdMustGather() *cobra.Command { } defaultAcmImage := "quay.io/stolostron/must-gather:2.11.4-SNAPSHOT-2024-12-02-15-19-44" - mustGatherCommand.Flags().StringVar(&mg.clusterId, "cluster-id", "", "Internal ID of the cluster to gather data from") + mustGatherCommand.Flags().StringVarP(&mg.clusterId, "cluster-id", "C", "", "Internal ID of the cluster to gather data from") mustGatherCommand.Flags().StringVar(&mg.reason, "reason", "", "The reason for this command, which requires elevation (e.g., OHSS ticket or PD incident).") mustGatherCommand.Flags().StringVar(&mg.gatherTargets, "gather", "hcp", "Comma-separated list of gather targets (available: sc, sc_acm, mc, hcp).") mustGatherCommand.Flags().StringVar(&mg.acmMustGatherImage, "acm_image", defaultAcmImage, 
"Overrides the acm must-gather image being used for acm mc, sc as well as hcp must-gathers.") diff --git a/docs/README.md b/docs/README.md index 319fa9fec..e13c73b60 100644 --- a/docs/README.md +++ b/docs/README.md @@ -927,7 +927,7 @@ osdctl alert list --cluster-id <cluster-id> --level [warning, critical, firing, ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for list --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -980,7 +980,7 @@ osdctl alert silence add --cluster-id <cluster-identifier> [--all --duration --c -a, --all Adding silences for all alert --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster -c, --comment string add comment about silence (default "Adding silence using the osdctl alert command") --context string The name of the kubeconfig context to use -d, --duration string Adding duration for silence as 15 days (default "15d") @@ -1009,7 +1009,7 @@ osdctl alert silence expire [--cluster-id <cluster-identifier>] [--all | --silen -a, --all clear all silences --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for expire --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1036,7 +1036,7 @@ osdctl alert silence list --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for list --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1195,7 +1195,7 @@ osdctl cluster break-glass --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for break-glass --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure @@ -1224,7 +1224,7 @@ osdctl cluster break-glass cleanup --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string [Mandatory] Provide the Internal ID of the cluster + -C, --cluster-id string [Mandatory] Provide the Internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for cleanup --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1250,7 +1250,7 @@ osdctl cluster check-banned-user --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for check-banned-user --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1275,7 +1275,7 @@ osdctl cluster context --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster --context string The name of the kubeconfig context to use -d, --days int Command will display X days of Error SLs sent to the cluster. Days is set to 30 by default (default 30) --full Run full suite of checks. @@ -1346,7 +1346,7 @@ osdctl cluster detach-stuck-volume --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for detach-stuck-volume --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1372,7 +1372,7 @@ osdctl cluster etcd-health-check --cluster-id <cluster-id> --reason <reason for ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the internal Cluster ID or name to perform health check on + -C, --cluster-id string Provide the internal Cluster ID or name to perform health check on --context string The name of the kubeconfig context to use -h, --help help for etcd-health-check --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1398,7 +1398,7 @@ osdctl cluster etcd-member-replace --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
--cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide internal Cluster ID + -C, --cluster-id string Provide internal Cluster ID --context string The name of the kubeconfig context to use -h, --help help for etcd-member-replace --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1477,7 +1477,7 @@ osdctl cluster hypershift-info [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for hypershift-info --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1506,7 +1506,7 @@ osdctl cluster logging-check --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string The internal ID of the cluster to check (required) + -C, --cluster-id string The internal ID of the cluster to check (required) --context string The name of the kubeconfig context to use -h, --help help for logging-check --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1532,7 +1532,7 @@ osdctl cluster orgId --cluster-id <cluster-identifier [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string The internal ID of the cluster to check (required) + -C, --cluster-id string The internal ID of the cluster to check (required) --context string The name of the kubeconfig context to use -h, --help help for orgId --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1610,7 +1610,7 @@ osdctl cluster resize control-plane [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string The internal ID of the cluster to perform actions on + -C, --cluster-id string The internal ID of the cluster to perform actions on --context string The name of the kubeconfig context to use -h, --help help for control-plane --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1824,7 +1824,7 @@ osdctl cluster ssh key --reason $reason [--cluster-id $CLUSTER_ID] [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Cluster identifier (internal ID, UUID, name, etc) to retrieve the SSH key for. If not specified, the current cluster will be used. + -C, --cluster-id string Cluster identifier (internal ID, UUID, name, etc) to retrieve the SSH key for. 
If not specified, the current cluster will be used. --context string The name of the kubeconfig context to use -h, --help help for key --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -1876,7 +1876,7 @@ osdctl cluster support delete --cluster-id <cluster-identifier> [flags] --all Remove all limited support reasons --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Internal cluster ID (required) + -C, --cluster-id string Internal cluster ID (required) --context string The name of the kubeconfig context to use -d, --dry-run Dry-run - print the limited support reason about to be sent but don't send it. -h, --help help for delete @@ -1905,7 +1905,7 @@ osdctl cluster support post --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Internal Cluster ID (required) + -C, --cluster-id string Internal Cluster ID (required) --context string The name of the kubeconfig context to use --evidence string (optional) The reasoning that led to the decision to place the cluster in limited support. Can also be a link to a Jira case. Used for internal service log only. -h, --help help for post @@ -1936,7 +1936,7 @@ osdctl cluster support status --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Cluster ID for which to get support status + -C, --cluster-id string Cluster ID for which to get support status --context string The name of the kubeconfig context to use -h, --help help for status --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -2021,7 +2021,7 @@ osdctl cluster validate-pull-secret --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string The internal ID of the cluster to check (required) + -C, --cluster-id string The internal ID of the cluster to check (required) --context string The name of the kubeconfig context to use -h, --help help for validate-pull-secret --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -2266,7 +2266,7 @@ osdctl dynatrace dashboard --cluster-id CLUSTER_ID [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. 
--cluster string The name of the kubeconfig cluster to use - --cluster-id string Provide the id of the cluster + -C, --cluster-id string Provide the id of the cluster --context string The name of the kubeconfig context to use --dash string Name of the dashboard you wish to find (default "Central ROSA HCP Dashboard") -h, --help help for dashboard @@ -2296,7 +2296,7 @@ osdctl dynatrace gather-logs --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Internal ID of the HCP cluster to gather logs from (required) + -C, --cluster-id string Internal ID of the HCP cluster to gather logs from (required) --context string The name of the kubeconfig context to use --dest-dir string Destination directory for the logs dump, defaults to the local directory. -h, --help help for gather-logs @@ -2330,7 +2330,7 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Name or Internal ID of the cluster (defaults to current cluster context) + -C, --cluster-id string Name or Internal ID of the cluster (defaults to current cluster context) --console Print the url to the dynatrace web console instead of outputting the logs --container strings Container name(s) (comma-separated) --contains string Include logs which contain a phrase @@ -2367,7 +2367,7 @@ osdctl dynatrace url --cluster-id <cluster-identifier> [flags] ``` --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string ID of the cluster + -C, --cluster-id string ID of the cluster --context string The name of the kubeconfig context to use -h, --help help for url --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure @@ -2415,7 +2415,7 @@ osdctl env [flags] [env-alias] -a, --api string OpenShift API URL for individual cluster login --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - -c, --cluster-id string Cluster ID + -C, --cluster-id string Cluster ID --context string The name of the kubeconfig context to use -d, --delete Delete environment -k, --export-kubeconfig Output export kubeconfig statement, to use environment outside of the env directory @@ -2470,7 +2470,7 @@ osdctl hcp must-gather --cluster-id <cluster-identifier> [flags] --acm_image string Overrides the acm must-gather image being used for acm mc, sc as well as hcp must-gathers. (default "quay.io/stolostron/must-gather:2.11.4-SNAPSHOT-2024-12-02-15-19-44") --as string Username to impersonate for the operation. User could be a regular user or a service account in a namespace. --cluster string The name of the kubeconfig cluster to use - --cluster-id string Internal ID of the cluster to gather data from + -C, --cluster-id string Internal ID of the cluster to gather data from --context string The name of the kubeconfig context to use --gather string Comma-separated list of gather targets (available: sc, sc_acm, mc, hcp). 
(default "hcp") -h, --help help for must-gather diff --git a/docs/osdctl_alert_list.md b/docs/osdctl_alert_list.md index bfd1b96a6..63ef670f2 100644 --- a/docs/osdctl_alert_list.md +++ b/docs/osdctl_alert_list.md @@ -13,7 +13,7 @@ osdctl alert list --cluster-id <cluster-id> --level [warning, critical, firing, ### Options ``` - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster -h, --help help for list -l, --level string Alert level [warning, critical, firing, pending, all] (default "all") --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) diff --git a/docs/osdctl_alert_silence_add.md b/docs/osdctl_alert_silence_add.md index 43f2ad5d0..3d1452d1e 100644 --- a/docs/osdctl_alert_silence_add.md +++ b/docs/osdctl_alert_silence_add.md @@ -15,7 +15,7 @@ osdctl alert silence add --cluster-id <cluster-identifier> [--all --duration --c ``` --alertname strings alertname (comma-separated) -a, --all Adding silences for all alert - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster -c, --comment string add comment about silence (default "Adding silence using the osdctl alert command") -d, --duration string Adding duration for silence as 15 days (default "15d") -h, --help help for add diff --git a/docs/osdctl_alert_silence_expire.md b/docs/osdctl_alert_silence_expire.md index 8c1780387..ae480464e 100644 --- a/docs/osdctl_alert_silence_expire.md +++ b/docs/osdctl_alert_silence_expire.md @@ -14,7 +14,7 @@ osdctl alert silence expire [--cluster-id <cluster-identifier>] [--all | --silen ``` -a, --all clear all silences - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster -h, --help help for expire --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) --silence-id strings silence id (comma-separated) diff --git a/docs/osdctl_alert_silence_list.md b/docs/osdctl_alert_silence_list.md index b2f05e760..8e375224f 100644 --- a/docs/osdctl_alert_silence_list.md +++ b/docs/osdctl_alert_silence_list.md @@ -13,7 +13,7 @@ osdctl alert silence list --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster -h, --help help for list --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) ``` diff --git a/docs/osdctl_cluster_break-glass.md b/docs/osdctl_cluster_break-glass.md index 5ae8a5aa8..9ce2fb7c9 100644 --- a/docs/osdctl_cluster_break-glass.md +++ b/docs/osdctl_cluster_break-glass.md @@ -13,7 +13,7 @@ osdctl cluster break-glass --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string Provide the internal ID of the cluster + -C, --cluster-id string Provide the internal ID of the cluster -h, --help help for break-glass --reason string The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) ``` diff --git a/docs/osdctl_cluster_break-glass_cleanup.md b/docs/osdctl_cluster_break-glass_cleanup.md index 137066423..a5d8ad333 100644 --- a/docs/osdctl_cluster_break-glass_cleanup.md +++ b/docs/osdctl_cluster_break-glass_cleanup.md @@ -16,7 +16,7 @@ osdctl cluster break-glass cleanup --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string 
[Mandatory] Provide the Internal ID of the cluster + -C, --cluster-id string [Mandatory] Provide the Internal ID of the cluster -h, --help help for cleanup --reason string [Mandatory for PrivateLink clusters] The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket) ``` diff --git a/docs/osdctl_cluster_check-banned-user.md b/docs/osdctl_cluster_check-banned-user.md index a41df1602..038b58629 100644 --- a/docs/osdctl_cluster_check-banned-user.md +++ b/docs/osdctl_cluster_check-banned-user.md @@ -9,7 +9,7 @@ osdctl cluster check-banned-user --cluster-id <cluster-identifier> [flags] ### Options ``` - -c, --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster -h, --help help for check-banned-user ``` diff --git a/docs/osdctl_cluster_context.md b/docs/osdctl_cluster_context.md index 51ea1e48d..8b5f0866a 100644 --- a/docs/osdctl_cluster_context.md +++ b/docs/osdctl_cluster_context.md @@ -9,7 +9,7 @@ osdctl cluster context --cluster-id <cluster-identifier> [flags] ### Options ``` - -c, --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster -d, --days int Command will display X days of Error SLs sent to the cluster. Days is set to 30 by default (default 30) --full Run full suite of checks. -h, --help help for context diff --git a/docs/osdctl_cluster_detach-stuck-volume.md b/docs/osdctl_cluster_detach-stuck-volume.md index 30100e2ec..8d89357c0 100644 --- a/docs/osdctl_cluster_detach-stuck-volume.md +++ b/docs/osdctl_cluster_detach-stuck-volume.md @@ -9,7 +9,7 @@ osdctl cluster detach-stuck-volume --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster -h, --help help for detach-stuck-volume --reason string The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket) ``` diff --git a/docs/osdctl_cluster_etcd-health-check.md b/docs/osdctl_cluster_etcd-health-check.md index 6ca964464..15051b9a8 100644 --- a/docs/osdctl_cluster_etcd-health-check.md +++ b/docs/osdctl_cluster_etcd-health-check.md @@ -13,7 +13,7 @@ osdctl cluster etcd-health-check --cluster-id <cluster-id> --reason <reason for ### Options ``` - --cluster-id string Provide the internal Cluster ID or name to perform health check on + -C, --cluster-id string Provide the internal Cluster ID or name to perform health check on -h, --help help for etcd-health-check --reason string Specify a reason for privilege escalation ``` diff --git a/docs/osdctl_cluster_etcd-member-replace.md b/docs/osdctl_cluster_etcd-member-replace.md index 297110ef5..81f49ac95 100644 --- a/docs/osdctl_cluster_etcd-member-replace.md +++ b/docs/osdctl_cluster_etcd-member-replace.md @@ -13,7 +13,7 @@ osdctl cluster etcd-member-replace --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string Provide internal Cluster ID + -C, --cluster-id string Provide internal Cluster ID -h, --help help for etcd-member-replace --node string Node ID (required) --reason string The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket) diff --git a/docs/osdctl_cluster_hypershift-info.md b/docs/osdctl_cluster_hypershift-info.md index 707dd5d8b..02ab43ca5 100644 --- a/docs/osdctl_cluster_hypershift-info.md +++ b/docs/osdctl_cluster_hypershift-info.md @@ -14,7 +14,7 @@ osdctl cluster hypershift-info [flags] ### Options ``` - -c, 
--cluster-id string Provide internal ID of the cluster + -C, --cluster-id string Provide internal ID of the cluster -h, --help help for hypershift-info -o, --output string output format ['table', 'graphviz'] (default "graphviz") -l, --privatelinkaccount string Privatelink account ID diff --git a/docs/osdctl_cluster_logging-check.md b/docs/osdctl_cluster_logging-check.md index f86d4f637..12035dceb 100644 --- a/docs/osdctl_cluster_logging-check.md +++ b/docs/osdctl_cluster_logging-check.md @@ -9,7 +9,7 @@ osdctl cluster logging-check --cluster-id <cluster-identifier> [flags] ### Options ``` - -c, --cluster-id string The internal ID of the cluster to check (required) + -C, --cluster-id string The internal ID of the cluster to check (required) -h, --help help for logging-check --verbose Verbose output ``` diff --git a/docs/osdctl_cluster_orgId.md b/docs/osdctl_cluster_orgId.md index 0cc687451..ba7ae7ff4 100644 --- a/docs/osdctl_cluster_orgId.md +++ b/docs/osdctl_cluster_orgId.md @@ -9,7 +9,7 @@ osdctl cluster orgId --cluster-id <cluster-identifier [flags] ### Options ``` - -c, --cluster-id string The internal ID of the cluster to check (required) + -C, --cluster-id string The internal ID of the cluster to check (required) -h, --help help for orgId ``` diff --git a/docs/osdctl_cluster_resize_control-plane.md b/docs/osdctl_cluster_resize_control-plane.md index f6a48e9ce..152d6ac89 100644 --- a/docs/osdctl_cluster_resize_control-plane.md +++ b/docs/osdctl_cluster_resize_control-plane.md @@ -25,7 +25,7 @@ osdctl cluster resize control-plane [flags] ### Options ``` - -c, --cluster-id string The internal ID of the cluster to perform actions on + -C, --cluster-id string The internal ID of the cluster to perform actions on -h, --help help for control-plane --machine-type string The target AWS machine type to resize to (e.g. m5.2xlarge) --reason string The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket) diff --git a/docs/osdctl_cluster_ssh_key.md b/docs/osdctl_cluster_ssh_key.md index 9df921c4c..fd26aef45 100644 --- a/docs/osdctl_cluster_ssh_key.md +++ b/docs/osdctl_cluster_ssh_key.md @@ -44,7 +44,7 @@ Despite the logs from backplane, the ssh key is the only output channelled throu ### Options ``` - --cluster-id string Cluster identifier (internal ID, UUID, name, etc) to retrieve the SSH key for. If not specified, the current cluster will be used. + -C, --cluster-id string Cluster identifier (internal ID, UUID, name, etc) to retrieve the SSH key for. If not specified, the current cluster will be used. -h, --help help for key --reason string Provide a reason for accessing the clusters SSH key, used for backplane. Eg: 'OHSS-XXXX', or '#ITN-2024-XXXXX -y, --yes Skip any confirmation prompts and print the key automatically. Useful for redirects and scripting. diff --git a/docs/osdctl_cluster_support_delete.md b/docs/osdctl_cluster_support_delete.md index 8e713c775..8fd7e4565 100644 --- a/docs/osdctl_cluster_support_delete.md +++ b/docs/osdctl_cluster_support_delete.md @@ -10,7 +10,7 @@ osdctl cluster support delete --cluster-id <cluster-identifier> [flags] ``` --all Remove all limited support reasons - -c, --cluster-id string Internal cluster ID (required) + -C, --cluster-id string Internal cluster ID (required) -d, --dry-run Dry-run - print the limited support reason about to be sent but don't send it. 
-h, --help help for delete -i, --limited-support-reason-id string Limited support reason ID diff --git a/docs/osdctl_cluster_support_post.md b/docs/osdctl_cluster_support_post.md index c87b8fa8e..0f4ab55ca 100644 --- a/docs/osdctl_cluster_support_post.md +++ b/docs/osdctl_cluster_support_post.md @@ -27,7 +27,7 @@ The cluster has a second failing ingress controller, which is not supported and ### Options ``` - -c, --cluster-id string Internal Cluster ID (required) + -C, --cluster-id string Internal Cluster ID (required) --evidence string (optional) The reasoning that led to the decision to place the cluster in limited support. Can also be a link to a Jira case. Used for internal service log only. -h, --help help for post --misconfiguration cloud The type of misconfiguration responsible for the cluster being placed into limited support. Valid values are cloud or `cluster`. diff --git a/docs/osdctl_cluster_support_status.md b/docs/osdctl_cluster_support_status.md index 83db3060f..2f9d2b4b2 100644 --- a/docs/osdctl_cluster_support_status.md +++ b/docs/osdctl_cluster_support_status.md @@ -9,7 +9,7 @@ osdctl cluster support status --cluster-id <cluster-identifier> [flags] ### Options ``` - -c, --cluster-id string Cluster ID for which to get support status + -C, --cluster-id string Cluster ID for which to get support status -h, --help help for status --verbose Verbose output ``` diff --git a/docs/osdctl_cluster_validate-pull-secret.md b/docs/osdctl_cluster_validate-pull-secret.md index 54e6e19c6..0f48fb5a3 100644 --- a/docs/osdctl_cluster_validate-pull-secret.md +++ b/docs/osdctl_cluster_validate-pull-secret.md @@ -16,7 +16,7 @@ osdctl cluster validate-pull-secret --cluster-id <cluster-identifier> [flags] ### Options ``` - -c, --cluster-id string The internal ID of the cluster to check (required) + -C, --cluster-id string The internal ID of the cluster to check (required) -h, --help help for validate-pull-secret --reason string The reason for this command to be run (usually an OHSS or PD ticket), mandatory when using elevate ``` diff --git a/docs/osdctl_dynatrace_dashboard.md b/docs/osdctl_dynatrace_dashboard.md index 5f0581096..7e7e16b57 100644 --- a/docs/osdctl_dynatrace_dashboard.md +++ b/docs/osdctl_dynatrace_dashboard.md @@ -9,7 +9,7 @@ osdctl dynatrace dashboard --cluster-id CLUSTER_ID [flags] ### Options ``` - --cluster-id string Provide the id of the cluster + -C, --cluster-id string Provide the id of the cluster --dash string Name of the dashboard you wish to find (default "Central ROSA HCP Dashboard") -h, --help help for dashboard ``` diff --git a/docs/osdctl_dynatrace_gather-logs.md b/docs/osdctl_dynatrace_gather-logs.md index a7978a3cc..ceece1aeb 100644 --- a/docs/osdctl_dynatrace_gather-logs.md +++ b/docs/osdctl_dynatrace_gather-logs.md @@ -25,7 +25,7 @@ osdctl dynatrace gather-logs --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string Internal ID of the HCP cluster to gather logs from (required) + -C, --cluster-id string Internal ID of the HCP cluster to gather logs from (required) --dest-dir string Destination directory for the logs dump, defaults to the local directory. 
-h, --help help for gather-logs --since int Number of hours (integer) since which to pull logs and events (default 10) diff --git a/docs/osdctl_dynatrace_logs.md b/docs/osdctl_dynatrace_logs.md index dea33c215..30dd34880 100644 --- a/docs/osdctl_dynatrace_logs.md +++ b/docs/osdctl_dynatrace_logs.md @@ -48,7 +48,7 @@ osdctl dynatrace logs --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string Name or Internal ID of the cluster (defaults to current cluster context) + -C, --cluster-id string Name or Internal ID of the cluster (defaults to current cluster context) --console Print the url to the dynatrace web console instead of outputting the logs --container strings Container name(s) (comma-separated) --contains string Include logs which contain a phrase diff --git a/docs/osdctl_dynatrace_url.md b/docs/osdctl_dynatrace_url.md index aeca2534d..ebaed52ce 100644 --- a/docs/osdctl_dynatrace_url.md +++ b/docs/osdctl_dynatrace_url.md @@ -9,7 +9,7 @@ osdctl dynatrace url --cluster-id <cluster-identifier> [flags] ### Options ``` - --cluster-id string ID of the cluster + -C, --cluster-id string ID of the cluster -h, --help help for url ``` diff --git a/docs/osdctl_env.md b/docs/osdctl_env.md index 1bf94cd29..0538c987f 100644 --- a/docs/osdctl_env.md +++ b/docs/osdctl_env.md @@ -36,7 +36,7 @@ osdctl env [flags] [env-alias] ``` -a, --api string OpenShift API URL for individual cluster login - -c, --cluster-id string Cluster ID + -C, --cluster-id string Cluster ID -d, --delete Delete environment -k, --export-kubeconfig Output export kubeconfig statement, to use environment outside of the env directory -h, --help help for env diff --git a/docs/osdctl_hcp_must-gather.md b/docs/osdctl_hcp_must-gather.md index 6df3e7e76..cbeca588c 100644 --- a/docs/osdctl_hcp_must-gather.md +++ b/docs/osdctl_hcp_must-gather.md @@ -20,7 +20,7 @@ osdctl hcp must-gather --cluster-id CLUSTER_ID --gather sc,mc,sc_acm --reason OH ``` --acm_image string Overrides the acm must-gather image being used for acm mc, sc as well as hcp must-gathers. (default "quay.io/stolostron/must-gather:2.11.4-SNAPSHOT-2024-12-02-15-19-44") - --cluster-id string Internal ID of the cluster to gather data from + -C, --cluster-id string Internal ID of the cluster to gather data from --gather string Comma-separated list of gather targets (available: sc, sc_acm, mc, hcp). (default "hcp") -h, --help help for must-gather --reason string The reason for this command, which requires elevation (e.g., OHSS ticket or PD incident). From d2f38abd5822d86172404d311f1417ac412f5cda Mon Sep 17 00:00:00 2001 From: Josh Branham <jbranham@redhat.com> Date: Thu, 31 Jul 2025 16:06:57 -0600 Subject: [PATCH 33/40] Fix last month time period calculation to handle year boundaries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The LM (Last Month) case in getTimePeriod was using t.Month()-1 which doesn't handle year boundaries correctly. Changed to use AddDate(0, -1, 0) to properly calculate the previous month, fixing the failing test TestGetTimePeriod/Last_Month_(LM). 
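For illustration, a minimal standalone sketch (not part of the patch) of the year-boundary behavior described above, using only Go's standard library. It also notes a documented `time.AddDate` normalization caveat for end-of-month dates:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// January is the case the old code mishandled: t.Month()-1 yields
	// time.Month(0), producing an invalid date string.
	t := time.Date(2025, time.January, 15, 0, 0, 0, 0, time.UTC)
	fmt.Printf("%d-%02d-%02d\n", t.Year(), t.Month()-1, 1) // 2025-00-01 (invalid)

	// AddDate normalizes across the year boundary, so one month before
	// January 2025 is December 2024, as the fix expects.
	prev := t.AddDate(0, -1, 0)
	fmt.Printf("%d-%02d-%02d\n", prev.Year(), prev.Month(), 1) // 2024-12-01

	// Caveat (documented time.AddDate behavior): results are normalized,
	// so March 31 minus one month is "February 31", which normalizes to
	// March 3 in a non-leap year. This only matters when the command runs
	// on a day-of-month that does not exist in the previous month.
	edge := time.Date(2025, time.March, 31, 0, 0, 0, 0, time.UTC).AddDate(0, -1, 0)
	fmt.Println(edge.Format("2006-01-02")) // 2025-03-03
}
```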
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com> --- cmd/cost/get.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/cost/get.go b/cmd/cost/get.go index 36e44e7e0..83d15ac27 100644 --- a/cmd/cost/get.go +++ b/cmd/cost/get.go @@ -340,7 +340,8 @@ func getTimePeriod(timePtr *string) (string, string) { switch *timePtr { case "LM": //Last Month - start = fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month()-1, 01) + prevMonth := t.AddDate(0, -1, 0) + start = fmt.Sprintf("%d-%02d-%02d", prevMonth.Year(), prevMonth.Month(), 01) end = fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month(), 01) case "MTD": start = fmt.Sprintf("%d-%02d-%02d", t.Year(), t.Month(), 01) From 7da4b9c6f22b9291901eee516a2f83e5fbb0e2e4 Mon Sep 17 00:00:00 2001 From: Josh Branham <jbranham@redhat.com> Date: Wed, 6 Aug 2025 08:21:16 -0600 Subject: [PATCH 34/40] Use boilerplate v8 in CI --- .ci-operator.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci-operator.yaml b/.ci-operator.yaml index 518e16dc7..efff57779 100644 --- a/.ci-operator.yaml +++ b/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: boilerplate namespace: openshift - tag: image-v7.2.0 + tag: image-v8.0.0 From 9b70b71db539df8a48d82e7bcfeb16f2286f78b6 Mon Sep 17 00:00:00 2001 From: Josh Branham <jbranham@redhat.com> Date: Thu, 7 Aug 2025 09:17:12 -0600 Subject: [PATCH 35/40] SREP-862: Add support for `osdctl network verify-egress --pod-mode` (#784) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bump osd-network-verifier and dependencies * Add pod mode support to osdctl network verify-egress Implement --pod-mode flag to run egress verification using Kubernetes Jobs instead of cloud instances, providing more accurate results by testing from within the actual cluster environment. 
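For context, the general shape of pod-based egress checking — launching an in-cluster Job whose container curls a required endpoint — can be sketched with client-go roughly as below. This is an illustrative sketch only, not the osd-network-verifier implementation; the namespace, image, and target URL are placeholder assumptions:

```go
package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// A one-shot Job whose pod curls a single endpoint from inside the
	// cluster network; success/failure of the container reflects egress.
	backoff := int32(0)
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "egress-check-"},
		Spec: batchv1.JobSpec{
			BackoffLimit: &backoff,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:    "probe",
						Image:   "quay.io/curl/curl:latest", // placeholder image
						Command: []string{"curl", "-sSI", "--max-time", "5",
							"https://api.openshift.com"}, // placeholder URL
					}},
				},
			},
		},
	}

	created, err := clientset.BatchV1().Jobs("openshift-network-diagnostics").
		Create(context.TODO(), job, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("launched probe job:", created.Name)
}
```

Using a Job rather than a bare Pod gives completion and backoff semantics for free, which fits one-shot verification; the actual verifier wraps this pattern behind its KubeVerifier, as the diff below shows.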
Key features: - New --pod-mode flag enables Kubernetes-based verification - Automatic region detection from OCM for AWS clusters - Manual region override with --region flag - Configurable namespace with --namespace flag (default: openshift-network-diagnostics) - Custom kubeconfig support with --kubeconfig flag - Automatic probe switching to curl (required for pod mode) - Comprehensive input validation with clear error messages - Mutual exclusivity with cloud-specific flags Benefits: - No cloud credentials required - Tests actual cluster network environment - More accurate results than external instance testing - Secure execution with restrictive pod security contexts Usage examples: osdctl network verify-egress --cluster-id my-cluster --pod-mode osdctl network verify-egress --pod-mode --platform aws-classic --region us-east-1 Includes comprehensive unit test coverage with 21 new tests covering: - Input validation logic - Region detection (OCM vs manual) - Probe validation and switching - AWS config generation - Error handling scenarios 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com> * Use backplane as preference for getting k8s credentials * Docs and fmt * Consolidate and cleanup tests * Pull in actual osd-network-verifier release, update region flag --------- Co-authored-by: Claude <noreply@anthropic.com> --- cmd/network/verification.go | 249 +++++++++++++--- cmd/network/verification_pod_mode_test.go | 333 ++++++++++++++++++++++ docs/README.md | 14 +- docs/osdctl_network_verify-egress.md | 20 +- go.mod | 92 +++--- go.sum | 196 ++++++------- pkg/k8s/client.go | 14 +- 7 files changed, 727 insertions(+), 191 deletions(-) create mode 100644 cmd/network/verification_pod_mode_test.go diff --git a/cmd/network/verification.go b/cmd/network/verification.go index d523c8b8f..1804a6401 100644 --- a/cmd/network/verification.go +++ b/cmd/network/verification.go @@ -23,10 +23,14 @@ import ( onv "github.com/openshift/osd-network-verifier/pkg/verifier" onvAwsClient "github.com/openshift/osd-network-verifier/pkg/verifier/aws" onvGcpClient "github.com/openshift/osd-network-verifier/pkg/verifier/gcp" + onvKubeClient "github.com/openshift/osd-network-verifier/pkg/verifier/kube" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -41,7 +45,7 @@ const ( blockedEgressTemplateUrl = "https://raw.githubusercontent.com/openshift/managed-notifications/master/osd/required_network_egresses_are_blocked.json" caBundleConfigMapKey = "ca-bundle.crt" networkVerifierDepPath = "github.com/openshift/osd-network-verifier" - LimitedSupportTemplate = "https://raw.githubusercontent.com/openshift/managed-notifications/master/osd/limited_support/egressFailureLimitedSupport.json" + limitedSupportTemplate = "https://raw.githubusercontent.com/openshift/managed-notifications/master/osd/limited_support/egressFailureLimitedSupport.json" ) var networkVerifierDefaultTags = map[string]string{ @@ -92,6 +96,12 @@ type EgressVerification struct { GcpProjectID string // VpcName is the VPC where the verifier will run VpcName string + // PodMode enables Kubernetes pod-based verification instead of cloud instances + PodMode bool + // KubeConfig is the path to the kubeconfig file for pod mode + KubeConfig string + // 
Namespace is the Kubernetes namespace to run verification pods in + Namespace string } func NewCmdValidateEgress() *cobra.Command { @@ -108,7 +118,13 @@ func NewCmdValidateEgress() *cobra.Command { verify whether a ROSA cluster's VPC allows for all required external URLs are reachable. The exact cause can vary and typically requires a customer to remediate the issue themselves. - The osd-network-verifier launches a probe, an instance in a given subnet, and checks egress to external required URL's. Since October 2022, the probe is an instance without a public IP address. For this reason, the probe's requests will fail for subnets that don't have a NAT gateway. The osdctl network verify-egress command will always fail and give a false negative for public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. + The osd-network-verifier supports two modes: + 1. Traditional mode: launches a probe instance in a given subnet and checks egress to external required URLs. + Since October 2022, the probe is an instance without a public IP address. For this reason, the probe's requests + will fail for subnets that don't have a NAT gateway. This mode will always fail and give a false negative for + public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. + 2. Pod mode (--pod-mode): runs verification as Kubernetes Jobs within the target cluster. This mode requires + cluster admin access but provides more accurate results as it tests from within the actual cluster environment. Docs: https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.html#osd-aws-privatelink-firewall-prerequisites_prerequisites`, Example: ` @@ -128,6 +144,12 @@ func NewCmdValidateEgress() *cobra.Command { # Override automatic selection of the list of endpoints to check osdctl network verify-egress --cluster-id my-rosa-cluster --platform hostedcluster + # Run in pod mode using Kubernetes jobs (requires cluster access) + osdctl network verify-egress --cluster-id my-rosa-cluster --pod-mode + + # Run in pod mode with custom namespace and kubeconfig + osdctl network verify-egress --pod-mode --region us-east-1 --namespace my-namespace --kubeconfig ~/.kube/config + # (Not recommended) Run against a specific VPC, without specifying cluster-id <export environment variables like AWS_ACCESS_KEY_ID or use aws configure> osdctl network verify-egress --subnet-id subnet-abcdefg123 --security-group sg-abcdefgh123 --region us-east-1`, @@ -144,19 +166,28 @@ func NewCmdValidateEgress() *cobra.Command { validateEgressCmd.Flags().StringVar(&e.SecurityGroupId, "security-group", "", "(optional) security group ID override for osd-network-verifier, required if not specifying --cluster-id") validateEgressCmd.Flags().StringVar(&e.CaCert, "cacert", "", "(optional) path to a file containing the additional CA trust bundle. 
Typically set so that the verifier can use a configured cluster-wide proxy.") validateEgressCmd.Flags().BoolVar(&e.NoTls, "no-tls", false, "(optional) if provided, ignore all ssl certificate validations on client-side.") - validateEgressCmd.Flags().StringVar(&e.Region, "region", "", "(optional) AWS region") + validateEgressCmd.Flags().StringVar(&e.Region, "region", "", "(optional) AWS region, required for --pod-mode if not passing a --cluster-id") validateEgressCmd.Flags().BoolVar(&e.Debug, "debug", false, "(optional) if provided, enable additional debug-level logging") validateEgressCmd.Flags().BoolVarP(&e.AllSubnets, "all-subnets", "A", false, "(optional) an option for AWS Privatelink clusters to run osd-network-verifier against all subnets listed by ocm.") validateEgressCmd.Flags().StringVar(&e.platformName, "platform", "", "(optional) override for cloud platform/product. E.g., 'aws-classic' (OSD/ROSA Classic), 'aws-hcp' (ROSA HCP), or 'aws-hcp-zeroegress'") - validateEgressCmd.Flags().DurationVar(&e.EgressTimeout, "egress-timeout", 5*time.Second, "(optional) timeout for individual egress verification requests") + validateEgressCmd.Flags().DurationVar(&e.EgressTimeout, "egress-timeout", onv.DefaultTimeout, "(optional) timeout for individual egress verification requests") validateEgressCmd.Flags().BoolVar(&e.Version, "version", false, "When present, prints out the version of osd-network-verifier being used") validateEgressCmd.Flags().StringVar(&e.Probe, "probe", "curl", "(optional) select the probe to be used for egress testing. Either 'curl' (default) or 'legacy'") validateEgressCmd.Flags().StringVar(&e.CpuArchName, "cpu-arch", "x86", "(optional) compute instance CPU architecture. E.g., 'x86' or 'arm'") validateEgressCmd.Flags().StringVar(&e.GcpProjectID, "gcp-project-id", "", "(optional) the GCP project ID to run verification for") validateEgressCmd.Flags().StringVar(&e.VpcName, "vpc", "", "(optional) VPC name for cases where it can't be fetched from OCM") - - // If a cluster-id is specified, don't allow the foot-gun of overriding region - validateEgressCmd.MarkFlagsMutuallyExclusive("cluster-id", "region") + validateEgressCmd.Flags().BoolVar(&e.PodMode, "pod-mode", false, "(optional) run verification using Kubernetes pods instead of cloud instances") + validateEgressCmd.Flags().StringVar(&e.KubeConfig, "kubeconfig", "", "(optional) path to kubeconfig file for pod mode (uses default kubeconfig if not specified)") + validateEgressCmd.Flags().StringVar(&e.Namespace, "namespace", "openshift-network-diagnostics", "(optional) Kubernetes namespace to run verification pods in") + + // Pod mode is incompatible with cloud-specific configuration flags + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "cacert") + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "subnet-id") + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "security-group") + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "all-subnets") + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "cpu-arch") + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "gcp-project-id") + validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "vpc") return validateEgressCmd } @@ -198,48 +229,28 @@ func (e *EgressVerification) Run(ctx context.Context) { log.Fatalf("error getting platform: %s", err) } + // Setup verifier and inputs based on mode var inputs []*onv.ValidateEgressInput var verifier networkVerifier - switch platform { - case cloud.AWSHCP, cloud.AWSHCPZeroEgress, cloud.AWSClassic: - cfg, 
err := e.setupForAws(ctx) - if err != nil { - log.Fatal(err) - } - - verifier, err = onvAwsClient.NewAwsVerifierFromConfig(*cfg, e.log) - if err != nil { - log.Fatalf("failed to assemble osd-network-verifier client: %s", err) - } - - inputs, err = e.generateAWSValidateEgressInput(ctx, platform) - if err != nil { - log.Fatal(err) - } - case cloud.GCPClassic: - credentials, err := e.setupForGcp(ctx) - if err != nil { - log.Fatal(err) - } - - verifier, err = onvGcpClient.NewGcpVerifier(credentials, e.Debug) - if err != nil { - log.Fatalf("failed to assemble osd-network-verifier client: %s", err) - } + if e.PodMode { + e.log.Info(ctx, "Preparing to run pod-based network verification in namespace %s.", e.Namespace) + verifier, inputs, err = e.setupPodModeVerification(ctx, platform) + } else { + verifier, inputs, err = e.setupCloudProviderVerification(ctx, platform) + e.log.Info(ctx, "Preparing to check %+v subnet(s) with network verifier.", len(inputs)) + } - inputs, err = e.generateGcpValidateEgressInput(ctx, platform) - if err != nil { - log.Fatal(err) - } - default: - log.Fatalf("unsupported platform: %s", platform) + if err != nil { + log.Fatal(err) } - e.log.Info(ctx, "Preparing to check %+v subnet(s) with network verifier.", len(inputs)) var failures int for i := range inputs { - e.log.Info(ctx, "running network verifier for subnet %+v, security group %+v", inputs[i].SubnetID, inputs[i].AWS.SecurityGroupIDs) + if !e.PodMode { + e.log.Info(ctx, "running network verifier for subnet %+v, security group %+v", inputs[i].SubnetID, inputs[i].AWS.SecurityGroupIDs) + } + out := onv.ValidateEgress(verifier, *inputs[i]) out.Summary(e.Debug) // Prompt putting the cluster into LS if egresses crucial for monitoring (PagerDuty/DMS) are blocked. @@ -250,7 +261,7 @@ func (e *EgressVerification) Run(ctx context.Context) { blockedUrl := strings.Join(postCmd.TemplateParams, ",") if (strings.Contains(blockedUrl, "deadmanssnitch") || strings.Contains(blockedUrl, "pagerduty")) && e.cluster.State() == "ready" { fmt.Println("PagerDuty and/or DMS outgoing traffic is blocked, resulting in a loss of observability. 
As a result, Red Hat can no longer guarantee SLAs and the cluster should be put in limited support") - pCmd := lsupport.Post{Template: LimitedSupportTemplate} + pCmd := lsupport.Post{Template: limitedSupportTemplate} if err := pCmd.Run(e.ClusterId); err != nil { fmt.Printf("failed to post limited support reason: %v", err) } @@ -537,6 +548,22 @@ func (e *EgressVerification) validateInput() error { "--subnet-id foo --subnet-id bar") } + // Pod mode validation + if e.PodMode { + // Require cluster-id or explicit platform for platform determination + if e.ClusterId == "" && e.platformName == "" { + return fmt.Errorf("pod mode requires either --cluster-id or --platform to determine platform type") + } + + // For AWS platforms without cluster-id, require region + if e.ClusterId == "" && e.Region == "" { + // Check if we're dealing with an AWS platform + if strings.HasPrefix(strings.ToLower(e.platformName), "aws") { + return fmt.Errorf("pod mode for AWS platforms requires --region when --cluster-id is not specified") + } + } + } + return nil } @@ -547,3 +574,141 @@ func printVersion() { } log.Println(fmt.Sprintf("Using osd-network-verifier version %v", version)) } + +// setupForPodMode creates a Kubernetes client and KubeVerifier for pod-based verification +func (e *EgressVerification) setupForPodMode(ctx context.Context) (*onvKubeClient.KubeVerifier, error) { + var restConfig *rest.Config + var err error + + // Prefer backplane credentials when cluster ID is available + if e.ClusterId != "" { + restConfig, err = k8s.NewRestConfig(e.ClusterId) + if err != nil { + return nil, fmt.Errorf("failed to get REST config from backplane for cluster %s: %w", e.ClusterId, err) + } + e.log.Info(ctx, "Pod mode using backplane credentials for cluster: %s", e.ClusterId) + } else if e.KubeConfig != "" { + // Fallback to user-provided kubeconfig + restConfig, err = clientcmd.BuildConfigFromFlags("", e.KubeConfig) + if err != nil { + return nil, fmt.Errorf("failed to build kubeconfig from %s: %w", e.KubeConfig, err) + } + e.log.Info(ctx, "Pod mode using provided kubeconfig: %s", e.KubeConfig) + } else { + // Fallback to default kubeconfig from environment or home directory + kubeconfig := clientcmd.RecommendedHomeFile + restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, fmt.Errorf("failed to build default kubeconfig: %w", err) + } + e.log.Info(ctx, "Pod mode using default kubeconfig") + } + + // Create Kubernetes clientset + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("failed to create Kubernetes client: %w", err) + } + + // Create KubeVerifier + kubeVerifier, err := onvKubeClient.NewKubeVerifier(clientset, e.Debug) + if err != nil { + return nil, fmt.Errorf("failed to create KubeVerifier: %w", err) + } + + // Set namespace if specified + if e.Namespace != "" { + kubeVerifier.KubeClient.SetNamespace(e.Namespace) + } + + e.log.Info(ctx, "Pod mode initialized with namespace: %s", e.Namespace) + return kubeVerifier, nil +} + +// setupPodModeVerification sets up pod-based verification and returns verifier and inputs +func (e *EgressVerification) setupPodModeVerification(ctx context.Context, platform cloud.Platform) (networkVerifier, []*onv.ValidateEgressInput, error) { + // Force curl probe for pod mode + if strings.ToLower(e.Probe) != "curl" { + e.log.Info(ctx, "Pod mode only supports curl probe, switching from %s to curl", e.Probe) + e.Probe = "curl" + } + + verifier, err := e.setupForPodMode(ctx) + if err != nil { + 
return nil, nil, err + } + + input, err := e.defaultValidateEgressInput(ctx, platform) + if err != nil { + return nil, nil, err + } + + // For AWS-based platforms in pod mode, ensure region is set for proper egress list generation + if platform == cloud.AWSClassic || platform == cloud.AWSHCP || platform == cloud.AWSHCPZeroEgress { + var region string + + // Try to detect region from OCM cluster info first + if e.cluster != nil && e.cluster.Region() != nil && e.cluster.Region().ID() != "" { + region = e.cluster.Region().ID() + e.log.Info(ctx, "Detected AWS region from OCM: %s", region) + } else if e.Region != "" { + // Use manually specified region + region = e.Region + e.log.Info(ctx, "Using manually specified AWS region: %s", region) + } else { + // No region available - require user to specify it + return nil, nil, fmt.Errorf("pod mode for AWS platforms requires region information. Please specify --region or provide --cluster-id for automatic detection") + } + + // Set AWS config in the input for region-specific egress list generation + input.AWS = onv.AwsEgressConfig{ + Region: region, + } + } + + // For pod mode, we only need one input since we're not dealing with multiple subnets + inputs := []*onv.ValidateEgressInput{input} + return verifier, inputs, nil +} + +// setupCloudProviderVerification sets up cloud provider-based verification and returns verifier and inputs +func (e *EgressVerification) setupCloudProviderVerification(ctx context.Context, platform cloud.Platform) (networkVerifier, []*onv.ValidateEgressInput, error) { + switch platform { + case cloud.AWSHCP, cloud.AWSHCPZeroEgress, cloud.AWSClassic: + cfg, err := e.setupForAws(ctx) + if err != nil { + return nil, nil, err + } + + verifier, err := onvAwsClient.NewAwsVerifierFromConfig(*cfg, e.log) + if err != nil { + return nil, nil, fmt.Errorf("failed to assemble osd-network-verifier client: %s", err) + } + + inputs, err := e.generateAWSValidateEgressInput(ctx, platform) + if err != nil { + return nil, nil, err + } + return verifier, inputs, nil + + case cloud.GCPClassic: + credentials, err := e.setupForGcp(ctx) + if err != nil { + return nil, nil, err + } + + verifier, err := onvGcpClient.NewGcpVerifier(credentials, e.Debug) + if err != nil { + return nil, nil, fmt.Errorf("failed to assemble osd-network-verifier client: %s", err) + } + + inputs, err := e.generateGcpValidateEgressInput(ctx, platform) + if err != nil { + return nil, nil, err + } + return verifier, inputs, nil + + default: + return nil, nil, fmt.Errorf("unsupported platform: %s", platform) + } +} diff --git a/cmd/network/verification_pod_mode_test.go b/cmd/network/verification_pod_mode_test.go new file mode 100644 index 000000000..d6604c15c --- /dev/null +++ b/cmd/network/verification_pod_mode_test.go @@ -0,0 +1,333 @@ +package network + +import ( + "context" + "testing" + "time" + + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift/osd-network-verifier/pkg/data/cloud" + "github.com/openshift/osd-network-verifier/pkg/data/cpu" + "github.com/openshift/osd-network-verifier/pkg/probes/curl" + onv "github.com/openshift/osd-network-verifier/pkg/verifier" + "github.com/stretchr/testify/assert" +) + +func TestEgressVerification_PodModeRegionDetection(t *testing.T) { + tests := []struct { + name string + ev *EgressVerification + platform cloud.Platform + setupCluster func() *cmv1.Cluster + wantError bool + wantRegion string + }{ + { + name: "aws_classic_with_manual_region", + platform: cloud.AWSClassic, + ev: &EgressVerification{ + 
PodMode: true, + Region: "us-west-2", + log: newTestLogger(t), + }, + wantError: false, + wantRegion: "us-west-2", + }, + { + name: "aws_classic_with_ocm_region", + platform: cloud.AWSClassic, + ev: &EgressVerification{ + PodMode: true, + log: newTestLogger(t), + }, + setupCluster: func() *cmv1.Cluster { + return newTestCluster(t, cmv1.NewCluster(). + Region(cmv1.NewCloudRegion().ID("eu-west-1")). + CloudProvider(cmv1.NewCloudProvider().ID("aws"))) + }, + wantError: false, + wantRegion: "eu-west-1", + }, + { + name: "aws_classic_ocm_overrides_manual", + platform: cloud.AWSClassic, + ev: &EgressVerification{ + PodMode: true, + Region: "us-west-1", // This should be overridden by OCM + log: newTestLogger(t), + }, + setupCluster: func() *cmv1.Cluster { + return newTestCluster(t, cmv1.NewCluster(). + Region(cmv1.NewCloudRegion().ID("ap-south-1")). + CloudProvider(cmv1.NewCloudProvider().ID("aws"))) + }, + wantError: false, + wantRegion: "ap-south-1", // OCM takes precedence + }, + { + name: "aws_hcp_without_region", + platform: cloud.AWSHCP, + ev: &EgressVerification{ + PodMode: true, + log: newTestLogger(t), + }, + wantError: true, + }, + { + name: "aws_hcp_zero_egress_with_region", + platform: cloud.AWSHCPZeroEgress, + ev: &EgressVerification{ + PodMode: true, + Region: "ca-central-1", + log: newTestLogger(t), + }, + wantError: false, + wantRegion: "ca-central-1", + }, + { + name: "gcp_classic_no_region_needed", + platform: cloud.GCPClassic, + ev: &EgressVerification{ + PodMode: true, + log: newTestLogger(t), + }, + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setupCluster != nil { + tt.ev.cluster = tt.setupCluster() + } + + // Test the region detection logic directly + if tt.platform == cloud.AWSClassic || tt.platform == cloud.AWSHCP || tt.platform == cloud.AWSHCPZeroEgress { + var region string + + // Simulate the region detection logic from setupPodModeVerification + if tt.ev.cluster != nil && tt.ev.cluster.Region() != nil && tt.ev.cluster.Region().ID() != "" { + region = tt.ev.cluster.Region().ID() + } else if tt.ev.Region != "" { + region = tt.ev.Region + } + + if tt.wantError { + assert.Empty(t, region, "Expected no region to be detected for error case") + } else { + assert.Equal(t, tt.wantRegion, region, "Region detection should match expected") + } + } + }) + } +} + +func TestEgressVerification_PodModeInputValidation(t *testing.T) { + tests := []struct { + name string + ev *EgressVerification + platform cloud.Platform + wantInput *onv.ValidateEgressInput + }{ + { + name: "aws_classic_pod_mode_input", + platform: cloud.AWSClassic, + ev: &EgressVerification{ + PodMode: true, + Region: "us-west-2", + Probe: "curl", + cpuArch: cpu.ArchX86, + EgressTimeout: 10 * time.Second, + NoTls: false, + log: newTestLogger(t), + }, + wantInput: &onv.ValidateEgressInput{ + CPUArchitecture: cpu.ArchX86, + PlatformType: cloud.AWSClassic, + Timeout: 10 * time.Second, + Tags: networkVerifierDefaultTags, + AWS: onv.AwsEgressConfig{Region: "us-west-2"}, + }, + }, + { + name: "gcp_classic_pod_mode_input", + platform: cloud.GCPClassic, + ev: &EgressVerification{ + PodMode: true, + Probe: "curl", + cpuArch: cpu.ArchX86, + EgressTimeout: 5 * time.Second, + NoTls: true, + log: newTestLogger(t), + }, + wantInput: &onv.ValidateEgressInput{ + CPUArchitecture: cpu.ArchX86, + PlatformType: cloud.GCPClassic, + Timeout: 5 * time.Second, + Tags: networkVerifierDefaultTags, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx 
:= context.Background() + input, err := tt.ev.defaultValidateEgressInput(ctx, tt.platform) + assert.NoError(t, err) + + // Verify basic fields + assert.Equal(t, tt.wantInput.CPUArchitecture, input.CPUArchitecture) + assert.Equal(t, tt.wantInput.PlatformType, input.PlatformType) + assert.Equal(t, tt.wantInput.Timeout, input.Timeout) + assert.Equal(t, tt.wantInput.Tags, input.Tags) + + // Verify probe type + assert.IsType(t, curl.Probe{}, input.Probe) + + // Verify proxy configuration + assert.Equal(t, tt.ev.NoTls, input.Proxy.NoTls) + + // For AWS platforms, simulate setting the region like setupPodModeVerification does + if tt.platform == cloud.AWSClassic || tt.platform == cloud.AWSHCP || tt.platform == cloud.AWSHCPZeroEgress { + input.AWS = onv.AwsEgressConfig{Region: tt.ev.Region} + assert.Equal(t, tt.wantInput.AWS.Region, input.AWS.Region) + } + }) + } +} + +func TestEgressVerification_PodModeAwsConfigSetting(t *testing.T) { + tests := []struct { + name string + platform cloud.Platform + region string + shouldSetAWS bool + }{ + { + name: "aws_classic_sets_aws_config", + platform: cloud.AWSClassic, + region: "us-east-1", + shouldSetAWS: true, + }, + { + name: "aws_hcp_sets_aws_config", + platform: cloud.AWSHCP, + region: "eu-west-1", + shouldSetAWS: true, + }, + { + name: "aws_hcp_zero_egress_sets_aws_config", + platform: cloud.AWSHCPZeroEgress, + region: "ap-southeast-1", + shouldSetAWS: true, + }, + { + name: "gcp_classic_no_aws_config", + platform: cloud.GCPClassic, + region: "", + shouldSetAWS: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ev := &EgressVerification{ + PodMode: true, + Region: tt.region, + Probe: "curl", + cpuArch: cpu.ArchX86, + EgressTimeout: 5 * time.Second, + log: newTestLogger(t), + } + + ctx := context.Background() + input, err := ev.defaultValidateEgressInput(ctx, tt.platform) + assert.NoError(t, err) + + if tt.shouldSetAWS { + // Simulate the AWS config setting from setupPodModeVerification + input.AWS = onv.AwsEgressConfig{Region: tt.region} + assert.Equal(t, tt.region, input.AWS.Region) + assert.NotEmpty(t, input.AWS.Region) + } else { + // For non-AWS platforms, AWS config should be empty + assert.Empty(t, input.AWS.Region) + } + }) + } +} + +func TestEgressVerification_ValidateInput_PodMode(t *testing.T) { + tests := []struct { + name string + ev *EgressVerification + wantError bool + errorMsg string + }{ + { + name: "pod_mode_with_cluster_id", + ev: &EgressVerification{ + PodMode: true, + ClusterId: "test-cluster", + }, + wantError: false, + }, + { + name: "pod_mode_with_platform", + ev: &EgressVerification{ + PodMode: true, + platformName: "aws-classic", + Region: "us-east-1", // Need region for AWS platform + }, + wantError: false, + }, + { + name: "pod_mode_without_cluster_or_platform", + ev: &EgressVerification{ + PodMode: true, + }, + wantError: true, + errorMsg: "pod mode requires either --cluster-id or --platform to determine platform type", + }, + { + name: "pod_mode_aws_without_cluster_or_region", + ev: &EgressVerification{ + PodMode: true, + platformName: "aws-classic", + }, + wantError: true, + errorMsg: "pod mode for AWS platforms requires --region when --cluster-id is not specified", + }, + { + name: "pod_mode_aws_with_region", + ev: &EgressVerification{ + PodMode: true, + platformName: "aws-classic", + Region: "us-east-1", + }, + wantError: false, + }, + { + name: "normal_mode_validation_still_works", + ev: &EgressVerification{ + PodMode: false, + SubnetIds: []string{"subnet-123"}, + }, + wantError: 
false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.ev.validateInput() + if tt.wantError { + assert.Error(t, err) + if tt.errorMsg != "" { + assert.Contains(t, err.Error(), tt.errorMsg) + } + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/docs/README.md b/docs/README.md index e13c73b60..bcd963ec1 100644 --- a/docs/README.md +++ b/docs/README.md @@ -3075,7 +3075,13 @@ Verify an AWS OSD/ROSA cluster can reach all required external URLs necessary fo verify whether a ROSA cluster's VPC allows for all required external URLs are reachable. The exact cause can vary and typically requires a customer to remediate the issue themselves. - The osd-network-verifier launches a probe, an instance in a given subnet, and checks egress to external required URL's. Since October 2022, the probe is an instance without a public IP address. For this reason, the probe's requests will fail for subnets that don't have a NAT gateway. The osdctl network verify-egress command will always fail and give a false negative for public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. + The osd-network-verifier supports two modes: + 1. Traditional mode: launches a probe instance in a given subnet and checks egress to external required URLs. + Since October 2022, the probe is an instance without a public IP address. For this reason, the probe's requests + will fail for subnets that don't have a NAT gateway. This mode will always fail and give a false negative for + public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. + 2. Pod mode (--pod-mode): runs verification as Kubernetes Jobs within the target cluster. This mode requires + cluster admin access but provides more accurate results as it tests from within the actual cluster environment. Docs: https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.html#osd-aws-privatelink-firewall-prerequisites_prerequisites @@ -3098,12 +3104,14 @@ osdctl network verify-egress [flags] --gcp-project-id string (optional) the GCP project ID to run verification for -h, --help help for verify-egress --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to the kubeconfig file to use for CLI requests. + --kubeconfig string (optional) path to kubeconfig file for pod mode (uses default kubeconfig if not specified) + --namespace string (optional) Kubernetes namespace to run verification pods in (default "openshift-network-diagnostics") --no-tls (optional) if provided, ignore all ssl certificate validations on client-side. -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] --platform string (optional) override for cloud platform/product. E.g., 'aws-classic' (OSD/ROSA Classic), 'aws-hcp' (ROSA HCP), or 'aws-hcp-zeroegress' + --pod-mode (optional) run verification using Kubernetes pods instead of cloud instances --probe string (optional) select the probe to be used for egress testing. Either 'curl' (default) or 'legacy' (default "curl") - --region string (optional) AWS region + --region string (optional) AWS region, required for --pod-mode if not passing a --cluster-id --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests. (default "0") --security-group string (optional) security group ID override for osd-network-verifier, required if not specifying --cluster-id -s, --server string The address and port of the Kubernetes API server diff --git a/docs/osdctl_network_verify-egress.md b/docs/osdctl_network_verify-egress.md index 637eb4c79..3fb4d44d6 100644 --- a/docs/osdctl_network_verify-egress.md +++ b/docs/osdctl_network_verify-egress.md @@ -12,7 +12,13 @@ Verify an AWS OSD/ROSA cluster can reach all required external URLs necessary fo verify whether a ROSA cluster's VPC allows for all required external URLs are reachable. The exact cause can vary and typically requires a customer to remediate the issue themselves. - The osd-network-verifier launches a probe, an instance in a given subnet, and checks egress to external required URL's. Since October 2022, the probe is an instance without a public IP address. For this reason, the probe's requests will fail for subnets that don't have a NAT gateway. The osdctl network verify-egress command will always fail and give a false negative for public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. + The osd-network-verifier supports two modes: + 1. Traditional mode: launches a probe instance in a given subnet and checks egress to external required URLs. + Since October 2022, the probe is an instance without a public IP address. For this reason, the probe's requests + will fail for subnets that don't have a NAT gateway. This mode will always fail and give a false negative for + public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. + 2. Pod mode (--pod-mode): runs verification as Kubernetes Jobs within the target cluster. This mode requires + cluster admin access but provides more accurate results as it tests from within the actual cluster environment. Docs: https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.html#osd-aws-privatelink-firewall-prerequisites_prerequisites @@ -40,6 +46,12 @@ osdctl network verify-egress [flags] # Override automatic selection of the list of endpoints to check osdctl network verify-egress --cluster-id my-rosa-cluster --platform hostedcluster + # Run in pod mode using Kubernetes jobs (requires cluster access) + osdctl network verify-egress --cluster-id my-rosa-cluster --pod-mode + + # Run in pod mode with custom namespace and kubeconfig + osdctl network verify-egress --pod-mode --region us-east-1 --namespace my-namespace --kubeconfig ~/.kube/config + # (Not recommended) Run against a specific VPC, without specifying cluster-id <export environment variables like AWS_ACCESS_KEY_ID or use aws configure> osdctl network verify-egress --subnet-id subnet-abcdefg123 --security-group sg-abcdefgh123 --region us-east-1 @@ -56,10 +68,13 @@ osdctl network verify-egress [flags] --egress-timeout duration (optional) timeout for individual egress verification requests (default 5s) --gcp-project-id string (optional) the GCP project ID to run verification for -h, --help help for verify-egress + --kubeconfig string (optional) path to kubeconfig file for pod mode (uses default kubeconfig if not specified) + --namespace string (optional) Kubernetes namespace to run verification pods in (default "openshift-network-diagnostics") --no-tls (optional) if provided, ignore all ssl certificate validations on client-side. 
--platform string (optional) override for cloud platform/product. E.g., 'aws-classic' (OSD/ROSA Classic), 'aws-hcp' (ROSA HCP), or 'aws-hcp-zeroegress' + --pod-mode (optional) run verification using Kubernetes pods instead of cloud instances --probe string (optional) select the probe to be used for egress testing. Either 'curl' (default) or 'legacy' (default "curl") - --region string (optional) AWS region + --region string (optional) AWS region, required for --pod-mode if not passing a --cluster-id --security-group string (optional) security group ID override for osd-network-verifier, required if not specifying --cluster-id --subnet-id stringArray (optional) private subnet ID override, required if not specifying --cluster-id and can be specified multiple times to run against multiple subnets --version When present, prints out the version of osd-network-verifier being used @@ -73,7 +88,6 @@ osdctl network verify-egress [flags] --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure - --kubeconfig string Path to the kubeconfig file to use for CLI requests. -o, --output string Valid formats are ['', 'json', 'yaml', 'env'] --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") -s, --server string The address and port of the Kubernetes API server diff --git a/go.mod b/go.mod index d7b3a5f29..281d78049 100644 --- a/go.mod +++ b/go.mod @@ -3,16 +3,16 @@ module github.com/openshift/osdctl go 1.23.6 require ( - cloud.google.com/go/compute v1.33.0 + cloud.google.com/go/compute v1.37.0 github.com/Dynatrace/dynatrace-operator v0.14.2 github.com/PagerDuty/go-pagerduty v1.8.0 github.com/andygrunwald/go-jira v1.16.0 - github.com/aws/aws-sdk-go-v2 v1.36.3 - github.com/aws/aws-sdk-go-v2/config v1.29.14 - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 + github.com/aws/aws-sdk-go-v2 v1.36.5 + github.com/aws/aws-sdk-go-v2/config v1.29.17 + github.com/aws/aws-sdk-go-v2/credentials v1.17.70 github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.47.4 github.com/aws/aws-sdk-go-v2/service/costexplorer v1.46.7 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.202.4 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.230.0 github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.28.17 github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.43.12 github.com/aws/aws-sdk-go-v2/service/iam v1.39.1 @@ -21,8 +21,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/route53 v1.48.7 github.com/aws/aws-sdk-go-v2/service/s3 v1.76.0 github.com/aws/aws-sdk-go-v2/service/servicequotas v1.25.18 - github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 - github.com/aws/smithy-go v1.22.2 + github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 + github.com/aws/smithy-go v1.22.4 github.com/brianvoe/gofakeit/v6 v6.24.0 github.com/coreos/go-semver v0.3.1 github.com/deckarep/golang-set v1.8.0 @@ -37,7 +37,7 @@ require ( github.com/onsi/gomega v1.37.0 github.com/openshift-online/ocm-cli v1.0.5 github.com/openshift-online/ocm-common v0.0.22 - github.com/openshift-online/ocm-sdk-go v0.1.465 + github.com/openshift-online/ocm-sdk-go v0.1.469 github.com/openshift/api v0.0.0-20250207102212-9e59a77ed2e0 github.com/openshift/aws-account-operator/api v0.0.0-20250205151445-6455c35fc4ae 
github.com/openshift/backplane-cli v0.1.47 @@ -45,7 +45,7 @@ require ( github.com/openshift/gcp-project-operator v0.0.0-20241024143818-ec4eabd35aba github.com/openshift/hive/apis v0.0.0-20250206153200-5a34ea42e678 github.com/openshift/hypershift/api v0.0.0-20250208145556-2753dcc8cfb7 - github.com/openshift/osd-network-verifier v1.2.3 + github.com/openshift/osd-network-verifier v1.3.0 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/shopspring/decimal v1.4.0 github.com/sirupsen/logrus v1.9.3 @@ -56,18 +56,18 @@ require ( github.com/stretchr/testify v1.10.0 github.com/zclconf/go-cty v1.13.0 gitlab.com/gitlab-org/api/client-go v0.128.0 - go.uber.org/mock v0.5.0 - golang.org/x/oauth2 v0.26.0 - golang.org/x/sync v0.12.0 - golang.org/x/term v0.31.0 - google.golang.org/api v0.220.0 - google.golang.org/genproto v0.0.0-20250207221924-e9438ea467c6 + go.uber.org/mock v0.5.2 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync v0.15.0 + golang.org/x/term v0.32.0 + google.golang.org/api v0.240.0 + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.1 - k8s.io/apimachinery v0.32.1 + k8s.io/api v0.32.6 + k8s.io/apimachinery v0.32.6 k8s.io/cli-runtime v0.32.1 - k8s.io/client-go v0.32.1 + k8s.io/client-go v0.32.6 k8s.io/kubectl v0.32.1 k8s.io/utils v0.0.0-20241210054802-24370beab758 open-cluster-management.io/api v0.15.0 @@ -78,9 +78,9 @@ require ( require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cloud.google.com/go/auth v0.14.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/auth v0.16.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/AlecAivazis/survey/v2 v2.3.7 // indirect @@ -98,18 +98,18 @@ require ( github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect github.com/aws/aws-sdk-go-v2/service/ssm v1.59.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect + 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -168,8 +168,8 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect @@ -216,6 +216,8 @@ require ( github.com/nxadm/tail v1.4.11 // indirect github.com/oasdiff/yaml v0.0.0-20241214135536-5f7845c759c8 // indirect github.com/oasdiff/yaml3 v0.0.0-20241214160948-977117996672 // indirect + github.com/openshift-online/ocm-api-model/clientapi v0.0.0-20250619114224-37dc3401307a // indirect + github.com/openshift-online/ocm-api-model/model v0.0.0-20250619114224-37dc3401307a // indirect github.com/openshift/backplane-api v0.0.0-20250514095514-2aa57551ec70 // indirect github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect @@ -252,26 +254,26 @@ require ( github.com/zalando/go-keyring v0.2.6 // indirect gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/arch v0.14.0 // indirect - golang.org/x/crypto v0.36.0 // indirect + golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 // indirect - golang.org/x/mod v0.24.0 // indirect - golang.org/x/net v0.37.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.10.0 // indirect - golang.org/x/tools v0.31.0 // indirect + golang.org/x/mod v0.25.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.26.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.33.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect - google.golang.org/grpc v1.70.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/AlecAivazis/survey.v1 v1.8.8 // indirect gopkg.in/evanphx/json-patch.v4 
v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index fc76cf2ba..3fed44af5 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,16 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.118.1 h1:b8RATMcrK9A4BH0rj8yQupPXp+aP+cJ0l6H7V9osV1E= -cloud.google.com/go v0.118.1/go.mod h1:CFO4UPEPi8oV21xoezZCrd3d81K4fFkDTEJu4R8K+9M= -cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0= -cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM= -cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= -cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= -cloud.google.com/go/compute v1.33.0 h1:abGcwWokP7/bBpvRjUKlgchrZXYgRpwcKZIlNUHWf6Y= -cloud.google.com/go/compute v1.33.0/go.mod h1:Z8NErRhrWA3RmVWczlAPJjZcRTlqZB1pcpD0MaIc1ug= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute v1.37.0 h1:XxtZlXYkZXub3LNaLu90TTemcFqIU1yZ4E4q9VlR39A= +cloud.google.com/go/compute v1.37.0/go.mod h1:AsK4VqrSyXBo4SMbRtfAO1VfaMjUEjEwv1UB/AwVp5Q= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= @@ -65,20 +65,20 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2 v1.36.5 h1:0OF9RiEMEdDdZEMqF9MRjevyxAQcf6gY+E7vwBILFj0= +github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod 
h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= -github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= -github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= +github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 h1:SsytQyTMHMDPspp+spo7XwXTP44aJZZAC7fBV2C5+5s= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 h1:i2vNHQiXUvKhs3quBR6aqlgJaiaexz/aNvdCktW/kAM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY= @@ -87,20 +87,20 @@ github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.47.4 h1:4hiC8jzPP89L+MTljvKs1 github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.47.4/go.mod h1:Kj+z0vXRl21DsnPR+lA5DjVWCaRTvAmwQ/shTGHeY84= github.com/aws/aws-sdk-go-v2/service/costexplorer v1.46.7 h1:LNTQAeENxc1l59SM6swUJd9zhRqK0lKUqqGdLClhffs= github.com/aws/aws-sdk-go-v2/service/costexplorer v1.46.7/go.mod h1:ObURpiozI8I9OLuqf5lNmc3VD5QOJ1rJcCK65StG4tU= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.202.4 h1:gdFRXlTMgV0+yrhQLAJKb+vX2K32Vw3n2TntDd+8AEM= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.202.4/go.mod h1:nSbxgPGhyI9j/cMVSHUEEtNQzEYeNOkbHnHNeTuQqt0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.230.0 h1:N0laDZWoAoKIRkwlc7p5Iu8l2JGEUtZLgG3Ai67n5K0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.230.0/go.mod h1:35jGWx7ECvCwTsApqicFYzZ7JFEnBc6oHUuOQ3xIS54= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.28.17 h1:5iAJcuuAgVMpVzItTGc+E7Tj8zXDL6sjAZQLZGq+8rA= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.28.17/go.mod h1:AR5tv65CXh3Yak2Dq+AGKn78FxtteGX4HgcQSp7Xk7s= 
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.43.12 h1:PLoBTtHl376mmxe5NSMUx1UD8yiM+BgIi9yJ1SgibHk= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.43.12/go.mod h1:h7JSZfD6QGeaAWpTk0+e1hQw2Venf5gh7UlUTEAiZL8= github.com/aws/aws-sdk-go-v2/service/iam v1.39.1 h1:N4OauekXigX0GgsJ+FUm7OO5HkrJR0ByZJ2YS5PIy3U= github.com/aws/aws-sdk-go-v2/service/iam v1.39.1/go.mod h1:8rUmP3N5TJXWWEzdQ+2Tc1IELc97pxBt5Zbt4QLq7KI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.6 h1:cCBJaT7EeEojpJ4s7wTDbhZlHVJOgNHN7iw6qVurGaw= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.6/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q= github.com/aws/aws-sdk-go-v2/service/organizations v1.37.8 h1:VsGPLkO6PuyRFlNs0XPWt8qM1bItGR45Id+8PhxtohQ= @@ -115,14 +115,14 @@ github.com/aws/aws-sdk-go-v2/service/servicequotas v1.25.18 h1:CG0TMFjcvZBmUlCF/ github.com/aws/aws-sdk-go-v2/service/servicequotas v1.25.18/go.mod h1:STMQPHWC5Lwpy89f1GeG9GfVXLOHmDmYsoAtOKbura4= github.com/aws/aws-sdk-go-v2/service/ssm v1.59.0 h1:KWArCwA/WkuHWKfygkNz0B6YS6OvdgoJUaJHX0Qby1s= github.com/aws/aws-sdk-go-v2/service/ssm v1.59.0/go.mod h1:PUWUl5MDiYNQkUHN9Pyd9kgtA/YhbxnSnHP+yQqzrM8= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= +github.com/aws/aws-sdk-go-v2/service/ssooidc 
v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= +github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= +github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -334,10 +334,10 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= -github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= +github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= @@ -523,12 +523,16 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/openshift-online/ocm-api-model/clientapi v0.0.0-20250619114224-37dc3401307a h1:6yb+WG4oqakYlBxMZptAF2ijLq+IQ1sG9e7yWbHyFgU= +github.com/openshift-online/ocm-api-model/clientapi v0.0.0-20250619114224-37dc3401307a/go.mod h1:fZwy5HY2URG9nrExvQeXrDU/08TGqZ16f8oymVEN5lo= +github.com/openshift-online/ocm-api-model/model v0.0.0-20250619114224-37dc3401307a h1:rGtR5YLBV/MxoDMoJdIqdgYbb8ljNC6ycMf1dog5LuM= +github.com/openshift-online/ocm-api-model/model v0.0.0-20250619114224-37dc3401307a/go.mod h1:B/fZxd88BTKig/rCCc189cENnFlEzxQslHHzoSgvo1I= github.com/openshift-online/ocm-cli v1.0.5 h1:YgtU7vkprnDlI8LovjjeA48x1FXxlKZt/91Tb+rgWHU= github.com/openshift-online/ocm-cli v1.0.5/go.mod h1:u3mIMD71A2hRJEgd9ZT9MfmPjMAvvXxje+StqHpfaEc= github.com/openshift-online/ocm-common v0.0.22 h1:VMVbGVYOQcyiew76hiBEKUtICHfhREjXTARezj9jQxI= github.com/openshift-online/ocm-common v0.0.22/go.mod h1:VEkuZp9aqbXtetZ5ycND6QpvhykvTuBF3oPsVM1X3vI= -github.com/openshift-online/ocm-sdk-go v0.1.465 h1:RZr92sdcAKyLVcL19/RYOn6KVtspDUH1wc3UuO4LgiE= -github.com/openshift-online/ocm-sdk-go 
v0.1.465/go.mod h1:EOkylgH0bafd+SlU9YvMrIIxHJw0Hk1EnC7W1VZeW8I= +github.com/openshift-online/ocm-sdk-go v0.1.469 h1:PdtKbT007q9OjFHMznutIxPXaembpM/LL8tP7oMtc50= +github.com/openshift-online/ocm-sdk-go v0.1.469/go.mod h1:RjLocq1aHUZ4h7LxkAqZVCIrPgWOiaJN1qOtDHY6MA4= github.com/openshift/api v0.0.0-20250207102212-9e59a77ed2e0 h1:5n8BKML7fkmR4tz81WI0jc722rbta4t7pzT21lcd/Ec= github.com/openshift/api v0.0.0-20250207102212-9e59a77ed2e0/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= github.com/openshift/aws-account-operator/api v0.0.0-20250205151445-6455c35fc4ae h1:yXsnxp1RC3l2VX26ipQbXZl4s3Vky8eWcJdozD+RtMo= @@ -547,8 +551,8 @@ github.com/openshift/hive/apis v0.0.0-20250206153200-5a34ea42e678 h1:oPNt/o9il5M github.com/openshift/hive/apis v0.0.0-20250206153200-5a34ea42e678/go.mod h1:vfgOsNigipl8aM05Jy9WG+eR9wfeZQNEN4pTD9wsrtM= github.com/openshift/hypershift/api v0.0.0-20250208145556-2753dcc8cfb7 h1:535PgO4fBROL+cLcWJFH8aNyGPts9UgR42CV+N/pxGc= github.com/openshift/hypershift/api v0.0.0-20250208145556-2753dcc8cfb7/go.mod h1:fQFj8aH3buOKqmhMQ5igRVOT7iQdduxRE9H1LM/BiY0= -github.com/openshift/osd-network-verifier v1.2.3 h1:MW+u2LR8M5AWzQ2t/RsS7JWycBAleUgT480AMO4VMu4= -github.com/openshift/osd-network-verifier v1.2.3/go.mod h1:X3dVNkC91NYTf2kTXUS/PeRTNvfS97WbvIqPIDP083M= +github.com/openshift/osd-network-verifier v1.3.0 h1:dDBpsFLMkQ33rrF1BGMNU7A4qbqgoa8pZUSx0j4QPdM= +github.com/openshift/osd-network-verifier v1.3.0/go.mod h1:oXfoEaGxqgiAvP5y1JJXKq/48n+3+1182wckkjZ5GjY= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= @@ -689,24 +693,24 @@ gitlab.com/gitlab-org/api/client-go v0.128.0 h1:Wvy1UIuluKemubao2k8EOqrl3gbgJ1PV gitlab.com/gitlab-org/api/client-go v0.128.0/go.mod h1:bYC6fPORKSmtuPRyD9Z2rtbAjE7UeNatu2VWHRf4/LE= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= 
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= -go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -718,8 +722,8 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 h1:qNgPs5exUA+G0C96DrPwNrvLSj7GT/9D+3WMWUcUg34= golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= @@ -731,8 +735,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -754,11 +758,11 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= -golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -766,8 +770,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -803,13 +807,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -817,10 +821,10 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= -golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -835,33 +839,33 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns= 
-google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY= +google.golang.org/api v0.240.0 h1:PxG3AA2UIqT1ofIzWV2COM3j3JagKTKSwy7L6RHNXNU= +google.golang.org/api v0.240.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20250207221924-e9438ea467c6 h1:SSk8oMbcHFbMwftDvX4PHbkqss3RkEZUF+k1h9d/sns= -google.golang.org/genproto v0.0.0-20250207221924-e9438ea467c6/go.mod h1:wkQ2Aj/xvshAUDtO/JHvu9y+AaN9cqs28QuSVSHtZSY= -google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 h1:L9JNMl/plZH9wmzQUHleO/ZZDSN+9Gh41wPczNy+5Fk= -google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 h1:vPV0tzlsK6EzEDHNNH5sa7Hs9bd7iXR7B1tSiPepkV0= +google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:pKLAc5OolXC3ViWGI62vvC0n10CpwAtRcTNCFwTKBEw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= -google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -874,8 +878,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/AlecAivazis/survey.v1 v1.8.8 h1:5UtTowJZTz1j7NxVzDGKTz6Lm9IWm8DDF6b7a2wq9VY= gopkg.in/AlecAivazis/survey.v1 v1.8.8/go.mod h1:CaHjv79TCgAvXMSFJSVgonHXYWxnhzI3eoHtnX5UgUo= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -911,17 +915,17 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/api v0.32.6 h1:UiBAMRzTP24Tz9UT1uhhmAv1auGTT9PT/npywSk9JrU= +k8s.io/api v0.32.6/go.mod h1:+iFCyQN34v2rsL53iQEN9lYE03mFdgPvgSXvATIDteg= k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.6 h1:odtEUjg7OT3132sBFsFn4Arj4Gd+BplYekmLQP8L3ak= +k8s.io/apimachinery v0.32.6/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/client-go v0.32.6 h1:Q+O+Sd9LKKFnsGZNVX2q1RDILYRpQZX+ea2RoIgjKlM= +k8s.io/client-go v0.32.6/go.mod h1:yqL9XJ2cTXy3WdJwdeyob3O6xiLwWrh9DP7SeszniW0= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= diff --git a/pkg/k8s/client.go b/pkg/k8s/client.go index 2b9f8f06d..ee9640c29 100644 --- a/pkg/k8s/client.go +++ b/pkg/k8s/client.go @@ -139,6 +139,16 @@ func (s *LazyClient) getClient() client.Client { } func New(clusterID string, options client.Options) (client.Client, error) { + cfg, err := NewRestConfig(clusterID) + if err != nil { + return nil, err + } + setRuntimeLoggerDiscard() + return client.New(cfg, options) +} + +// NewRestConfig returns a *rest.Config for the given cluster ID using backplane configuration +func NewRestConfig(clusterID string) (*rest.Config, error) { bp, err := bpconfig.GetBackplaneConfiguration() if err != nil { return nil, fmt.Errorf("failed to load backplane-cli config: %v", err) @@ -148,8 +158,8 @@ func 
New(clusterID string, options client.Options) (client.Client, error) {
 	if err != nil {
 		return nil, err
 	}
-	setRuntimeLoggerDiscard()
-	return client.New(cfg, options)
+
+	return cfg, nil
 }
 
 func NewAsBackplaneClusterAdmin(clusterID string, options client.Options, elevationReasons ...string) (client.Client, error) {

From c54fce5934b1a2f96a88c5d51f9565a8e1d6005e Mon Sep 17 00:00:00 2001
From: Emika Hammond <emhammon@redhat.com>
Date: Mon, 11 Aug 2025 18:12:28 -0400
Subject: [PATCH 36/40] SREP-1442: add dead link checking to osdctl servicelog post (#787)

* add basic link verification and tests

* fix skip-link-check flag

* move linkValidator to pkg

* add warning for non fatal HTTP response codes

* update documentation with --skip-link-check flag

* update pkg name to snake case

* add newTestServer helper for link_validator tests
---
 cmd/servicelog/post.go                    |  17 ++
 docs/README.md                            |   1 +
 docs/osdctl_servicelog_post.md            |   1 +
 pkg/link_validator/link_validator.go      |  80 +++++++
 pkg/link_validator/link_validator_test.go | 256 ++++++++++++++++++++++
 5 files changed, 355 insertions(+)
 create mode 100644 pkg/link_validator/link_validator.go
 create mode 100644 pkg/link_validator/link_validator_test.go

diff --git a/cmd/servicelog/post.go b/cmd/servicelog/post.go
index 9b6f76799..65b757c49 100644
--- a/cmd/servicelog/post.go
+++ b/cmd/servicelog/post.go
@@ -22,6 +22,7 @@ import (
 	v1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
 	"github.com/openshift/osdctl/internal/servicelog"
 	"github.com/openshift/osdctl/internal/utils"
+	"github.com/openshift/osdctl/pkg/link_validator"
 	"github.com/openshift/osdctl/pkg/printer"
 
 	ocmutils "github.com/openshift/osdctl/pkg/utils"
@@ -44,6 +45,7 @@ type PostCmdOptions struct {
 	clustersFile  string
 	InternalOnly  bool
 	ClusterId     string
+	skipLinkCheck bool
 
 	// Messaged clusters
 	successfulClusters map[string]string
@@ -94,6 +96,7 @@ func newPostCmd() *cobra.Command {
 	postCmd.Flags().StringArrayVarP(&opts.filterFiles, "query-file", "f", []string{}, "File containing search queries to apply. All lines in the file will be concatenated into a single query. If this flag is called multiple times, every file's search query will be combined with logical AND.")
 	postCmd.Flags().StringVarP(&opts.clustersFile, "clusters-file", "c", "", `Read a list of clusters to post the servicelog to. the format of the file is: {"clusters":["$CLUSTERID"]}`)
 	postCmd.Flags().BoolVarP(&opts.InternalOnly, "internal", "i", false, "Internal only service log. Use MESSAGE for template parameter (eg. -p MESSAGE='My super secret message').")
+	postCmd.Flags().BoolVar(&opts.skipLinkCheck, "skip-link-check", false, "Skip validating whether links in the Service Log are valid")
 	return postCmd
 }
@@ -245,6 +248,20 @@ func (o *PostCmdOptions) Run() error {
 		return fmt.Errorf("cannot read generated template: %w", err)
 	}
 
+	// Validate links in the service log unless skipped via '--skip-link-check'
+	if !o.skipLinkCheck {
+		lv := link_validator.NewLinkValidator()
+		messageText := o.Message.Summary + " " + o.Message.Description
+		warnings, err := lv.ValidateLinks(messageText)
+		if err != nil {
+			log.Error("aborting due to dead link; use '--skip-link-check' to override\n", err)
+			return nil
+		}
+		for _, warning := range warnings {
+			log.Warnf("link warning: %s (%v)", warning.URL, warning.Warning)
+		}
+	}
+
 	// If this is a dry-run, don't proceed further.
if o.isDryRun { return nil diff --git a/docs/README.md b/docs/README.md index bcd963ec1..8f2d83b7b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -3578,6 +3578,7 @@ osdctl servicelog post --cluster-id <cluster-identifier> [flags] --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") -s, --server string The address and port of the Kubernetes API server --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + --skip-link-check Skip validating if links in Service Log are valid -S, --skip-version-check skip checking to see if this is the most recent release -t, --template string Message template file or URL -y, --yes Skips all prompts. diff --git a/docs/osdctl_servicelog_post.md b/docs/osdctl_servicelog_post.md index a5b0fcecf..80ed058c1 100644 --- a/docs/osdctl_servicelog_post.md +++ b/docs/osdctl_servicelog_post.md @@ -46,6 +46,7 @@ osdctl servicelog post --cluster-id <cluster-identifier> [flags] -p, --param stringArray Specify a key-value pair (eg. -p FOO=BAR) to set/override a parameter value in the template. -q, --query stringArray Specify a search query (eg. -q "name like foo") for a bulk-post to matching clusters. -f, --query-file stringArray File containing search queries to apply. All lines in the file will be concatenated into a single query. If this flag is called multiple times, every file's search query will be combined with logical AND. + --skip-link-check Skip validating if links in Service Log are valid -t, --template string Message template file or URL -y, --yes Skips all prompts. ``` diff --git a/pkg/link_validator/link_validator.go b/pkg/link_validator/link_validator.go new file mode 100644 index 000000000..0d4d541c3 --- /dev/null +++ b/pkg/link_validator/link_validator.go @@ -0,0 +1,80 @@ +package link_validator + +import ( + "fmt" + "net/http" + "regexp" + "strings" + "time" +) + +const ( + Timeout = time.Second * 5 +) + +// LinkValidator handles validation of URLs in service log messages +type LinkValidator struct { + timeout time.Duration + httpClient *http.Client +} + +// ValidationResult holds a URL validation warning for non-fatal HTTP errors +type ValidationResult struct { + URL string + Warning error +} + +// NewLinkValidator creates a new LinkValidator with default settings +func NewLinkValidator() *LinkValidator { + return &LinkValidator{ + timeout: Timeout, + httpClient: &http.Client{Timeout: Timeout}, + } +} + +func extractURLs(text string) []string { + urlRegex := regexp.MustCompile(`https?://[^\s]+`) + matches := urlRegex.FindAllString(text, -1) + + var cleanURLs []string + for _, match := range matches { + // Remove common trailing punctuation + cleanURL := strings.TrimRight(match, ".,;:!?)]}") + cleanURLs = append(cleanURLs, cleanURL) + } + + return cleanURLs +} + +func (lv *LinkValidator) checkURL(url string) (int, error) { + resp, err := lv.httpClient.Head(url) + // Check for network errors + if err != nil { + return 0, err + } + defer resp.Body.Close() + return resp.StatusCode, nil +} + +// ValidateLinks performs link validation through HTTP status codes +func (lv *LinkValidator) ValidateLinks(message string) ([]ValidationResult, error) { + urls := extractURLs(message) + var warnings []ValidationResult + + for _, url := range urls { + statusCode, err := lv.checkURL(url) + if err != nil { + return nil, fmt.Errorf("network error for link validation %v", err) 
+ } + if statusCode == 404 || statusCode == 410 { + return nil, fmt.Errorf("dead link: %s (HTTP %d)", url, statusCode) + } + if statusCode >= 400 { + warnings = append(warnings, ValidationResult{ + URL: url, + Warning: fmt.Errorf("HTTP %d", statusCode), + }) + } + } + return warnings, nil +} diff --git a/pkg/link_validator/link_validator_test.go b/pkg/link_validator/link_validator_test.go new file mode 100644 index 000000000..27f396b76 --- /dev/null +++ b/pkg/link_validator/link_validator_test.go @@ -0,0 +1,256 @@ +package link_validator + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +// newTestServer creates a test server that returns the specified HTTP status code +func newTestServer(status int) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(status) + })) +} + +func TestLinkValidator_extractURLs(t *testing.T) { + + testCases := []struct { + name string + input string + expected []string + }{ + { + name: "single HTTP URL", + input: "Please visit http://example.com for more info", + expected: []string{"http://example.com"}, + }, + { + name: "single HTTPS URL", + input: "Check out https://docs.openshift.com/rosa/install for details", + expected: []string{"https://docs.openshift.com/rosa/install"}, + }, + { + name: "multiple URLs", + input: "Visit https://example.com and http://test.com for more info", + expected: []string{"https://example.com", "http://test.com"}, + }, + { + name: "URL with trailing punctuation", + input: "See https://example.com.", + expected: []string{"https://example.com"}, + }, + { + name: "no URLs", + input: "This is just text without any URLs", + expected: []string{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := extractURLs(tc.input) + if len(result) != len(tc.expected) { + t.Errorf("Expected %d URLs, got %d: %v", len(tc.expected), len(result), result) + return + } + + for i, url := range result { + if url != tc.expected[i] { + t.Errorf("Expected URL %s, got %s", tc.expected[i], url) + } + } + }) + } +} + +func TestLinkValidator_checkURL(t *testing.T) { + // Create test servers using the helper function + server := newTestServer(http.StatusOK) + defer server.Close() + + server404 := newTestServer(http.StatusNotFound) + defer server404.Close() + + server500 := newTestServer(http.StatusInternalServerError) + defer server500.Close() + + lv := NewLinkValidator() + + testCases := []struct { + name string + url string + expectedStatus int + expectError bool + }{ + { + name: "valid URL", + url: server.URL, + expectedStatus: 200, + expectError: false, + }, + { + name: "404 URL", + url: server404.URL, + expectedStatus: 404, + expectError: false, + }, + { + name: "500 URL", + url: server500.URL, + expectedStatus: 500, + expectError: false, + }, + { + name: "invalid URL", + url: "not-a-valid-url", + expectedStatus: 0, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + statusCode, err := lv.checkURL(tc.url) + if tc.expectError && err == nil { + t.Error("Expected error but got none") + } + if !tc.expectError && err != nil { + t.Errorf("Expected no error but got: %v", err) + } + if !tc.expectError && statusCode != tc.expectedStatus { + t.Errorf("Expected status code %d, got %d", tc.expectedStatus, statusCode) + } + }) + } +} + +func TestLinkValidator_ValidateLinks(t *testing.T) { + // Create test servers using the helper function + serverOK := newTestServer(http.StatusOK) + defer 
serverOK.Close() + + server404 := newTestServer(http.StatusNotFound) + defer server404.Close() + + server410 := newTestServer(http.StatusGone) + defer server410.Close() + + server500 := newTestServer(http.StatusInternalServerError) + defer server500.Close() + + lv := NewLinkValidator() + + testCases := []struct { + name string + message string + expectError bool + expectedWarnings int + }{ + { + name: "message with valid URL", + message: "Please check " + serverOK.URL + " for more information", + expectError: false, + expectedWarnings: 0, + }, + { + name: "message with no URLs", + message: "This is just a plain text message", + expectError: false, + expectedWarnings: 0, + }, + { + name: "message with 404 URL (dead link error)", + message: "Check " + server404.URL + " for details", + expectError: true, + expectedWarnings: 0, + }, + { + name: "message with 410 URL (gone link error)", + message: "Visit " + server410.URL + " for info", + expectError: true, + expectedWarnings: 0, + }, + { + name: "message with 500 URL (warning)", + message: "See " + server500.URL + " for more", + expectError: false, + expectedWarnings: 1, + }, + { + name: "message with mixed URLs", + message: "Good link: " + serverOK.URL + " and warning link: " + server500.URL, + expectError: false, + expectedWarnings: 1, + }, + { + name: "message with network error URL", + message: "Check http://this-domain-should-not-exist-12345.com", + expectError: true, + expectedWarnings: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + warnings, err := lv.ValidateLinks(tc.message) + if tc.expectError && err == nil { + t.Error("Expected error but got none") + } + if !tc.expectError && err != nil { + t.Errorf("Expected no error but got: %v", err) + } + if !tc.expectError && len(warnings) != tc.expectedWarnings { + t.Errorf("Expected %d warnings, got %d: %v", tc.expectedWarnings, len(warnings), warnings) + } + + // Verify warning structure if warnings are expected + if !tc.expectError && tc.expectedWarnings > 0 { + for _, warning := range warnings { + if warning.URL == "" { + t.Error("Warning should have a URL") + } + if warning.Warning == nil { + t.Error("Warning should have an error message") + } + } + } + }) + } +} + +func TestValidationResult_Structure(t *testing.T) { + // Create a test server that returns 403 Forbidden + server403 := newTestServer(http.StatusForbidden) + defer server403.Close() + + lv := NewLinkValidator() + + // Test a scenario that should produce warnings + message := "Check this link: " + server403.URL + warnings, err := lv.ValidateLinks(message) + + if err != nil { + t.Fatalf("Expected no error for 403 status, got: %v", err) + } + + if len(warnings) != 1 { + t.Fatalf("Expected 1 warning, got %d", len(warnings)) + } + + warning := warnings[0] + + // Test the ValidationResult structure + if warning.URL != server403.URL { + t.Errorf("Expected URL %s, got %s", server403.URL, warning.URL) + } + + if warning.Warning == nil { + t.Error("Expected warning to have an error message") + } + + expectedErrorMsg := "HTTP 403" + if warning.Warning.Error() != expectedErrorMsg { + t.Errorf("Expected warning message '%s', got '%s'", expectedErrorMsg, warning.Warning.Error()) + } +} From 2b588db5a4216de98caeca131c30bd03a0558a3f Mon Sep 17 00:00:00 2001 From: Rohit Bhilare <rbhilare@redhat.com> Date: Wed, 6 Aug 2025 15:51:11 +0530 Subject: [PATCH 37/40] Hosted cluster dump and a dynatrace logs export --- cmd/dynatrace/hcpGatherLogsCmd.go | 6 +++--- cmd/hcp/mustgather/mustGather.go | 7 ++++--- 2 
files changed, 7 insertions(+), 6 deletions(-) diff --git a/cmd/dynatrace/hcpGatherLogsCmd.go b/cmd/dynatrace/hcpGatherLogsCmd.go index 2c2118387..aeded581a 100644 --- a/cmd/dynatrace/hcpGatherLogsCmd.go +++ b/cmd/dynatrace/hcpGatherLogsCmd.go @@ -44,7 +44,7 @@ func NewCmdHCPMustGather() *cobra.Command { DisableAutoGenTag: true, Run: func(cmd *cobra.Command, args []string) { - err := g.GatherLogs(g.ClusterID) + err := g.GatherLogs(g.ClusterID, "") if err != nil { cmdutil.CheckErr(err) } @@ -62,7 +62,7 @@ func NewCmdHCPMustGather() *cobra.Command { return hcpMgCmd } -func (g *GatherLogsOpts) GatherLogs(clusterID string) (error error) { +func (g *GatherLogsOpts) GatherLogs(clusterID string, elevationReasons ...string) (error error) { accessToken, err := getStorageAccessToken() if err != nil { return fmt.Errorf("failed to acquire access token %v", err) @@ -73,7 +73,7 @@ func (g *GatherLogsOpts) GatherLogs(clusterID string) (error error) { return err } - _, _, clientset, err := common.GetKubeConfigAndClient(hcpCluster.managementClusterID, "", "") + _, _, clientset, err := common.GetKubeConfigAndClient(hcpCluster.managementClusterID, elevationReasons...) if err != nil { return fmt.Errorf("failed to retrieve Kubernetes configuration and client for cluster with ID %s: %w", hcpCluster.managementClusterID, err) } diff --git a/cmd/hcp/mustgather/mustGather.go b/cmd/hcp/mustgather/mustGather.go index 43847f893..329c3a380 100644 --- a/cmd/hcp/mustgather/mustGather.go +++ b/cmd/hcp/mustgather/mustGather.go @@ -181,13 +181,13 @@ func (mg *mustGather) Run() error { // 2. ACM must-gather which includes running the hypershift binary for a dump clusterHyperShift, err := ocmClient.ClustersMgmt().V1().Clusters().Cluster(mg.clusterId).Hypershift().Get().Send() if err != nil { - fmt.Printf("failed to get OCM cluster hypershift info for %s: %v\n", mg.clusterId, err) + fmt.Printf("collected HCP dynatrace logs but failed to get OCM cluster hypershift info for %s: %v\n", mg.clusterId, err) return } hcpNamespace, ok := clusterHyperShift.Body().GetHCPNamespace() if !ok { - fmt.Println("failed to get HCP namespace") + fmt.Println("collected HCP dynatrace logs but failed to get HCP namespace") return } @@ -198,8 +198,9 @@ func (mg *mustGather) Run() error { acmHyperShiftImage := "quay.io/rokejungrh/must-gather:v2.13.0-33-linux" gatherScript := fmt.Sprintf("/usr/bin/gather hosted-cluster-namespace=%s hosted-cluster-name=%s", hcNamespace, hcName) if err := createMustGather(mcRestCfg, mcK8sCli, []string{"--dest-dir=" + destDir, "--image=" + acmHyperShiftImage, gatherScript}); err != nil { - fmt.Printf("failed to gather %s: %v\n", gatherTarget, err) + fmt.Printf("collected HCP dynatrace logs but failed to gather %s: %v\n", gatherTarget, err.Error()) } + default: fmt.Printf("unknown gather type: %s\n", gatherTarget) } From 873110e54e4df2f4db9929040374736d96513358 Mon Sep 17 00:00:00 2001 From: Dakota Long <dalong@redhat.com> Date: Mon, 18 Aug 2025 11:34:54 -0400 Subject: [PATCH 38/40] SREP-861 - support service accounts and no service log flag (#788) * https://issues.redhat.com/browse/SREP-861 Add support running network-verifier in pod with a service account. Add a flag to disable sending a service log. 
* Refine Cursor generated code * Fix if/else syntax * Update docs with make generate-docs * Add unit tests for using a kubeconfig and service account * change write to 0600 * Update PR based on feedback: Extract REST config logic from setupForPodMode and update priority to respect explicit user choices (--kubeconfig > --cluster-id > ServiceAccount > default). Add serviceAccountTokenPath constant and comprehensive tests. Change --no-service-log flag to --skip-service-log * fix nosec issue and update docs * change SL flag to skipServiceLog --------- Co-authored-by: Dakota Long <dalong@dalong-thinkpadp16vgen1.rmtusnc.csb> --- cmd/network/verification.go | 104 ++++++++++++++++------ cmd/network/verification_pod_mode_test.go | 95 ++++++++++++++++++++ cmd/network/verification_test.go | 83 +++++++++++++++++ docs/README.md | 7 ++ docs/osdctl_network_verify-egress.md | 13 +++ 5 files changed, 273 insertions(+), 29 deletions(-) diff --git a/cmd/network/verification.go b/cmd/network/verification.go index 1804a6401..157bdff55 100644 --- a/cmd/network/verification.go +++ b/cmd/network/verification.go @@ -46,6 +46,7 @@ const ( caBundleConfigMapKey = "ca-bundle.crt" networkVerifierDepPath = "github.com/openshift/osd-network-verifier" limitedSupportTemplate = "https://raw.githubusercontent.com/openshift/managed-notifications/master/osd/limited_support/egressFailureLimitedSupport.json" + serviceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" // #nosec G101 -- This is a standard Kubernetes ServiceAccount token path, not a credential ) var networkVerifierDefaultTags = map[string]string{ @@ -102,6 +103,8 @@ type EgressVerification struct { KubeConfig string // Namespace is the Kubernetes namespace to run verification pods in Namespace string + // SkipServiceLog disables automatic service log prompting on verification failures + SkipServiceLog bool } func NewCmdValidateEgress() *cobra.Command { @@ -125,6 +128,12 @@ func NewCmdValidateEgress() *cobra.Command { public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. 2. Pod mode (--pod-mode): runs verification as Kubernetes Jobs within the target cluster. This mode requires cluster admin access but provides more accurate results as it tests from within the actual cluster environment. + + Pod mode uses the following Kubernetes client configuration priority: + 1. In-cluster configuration (when ServiceAccount token exists) + 2. Backplane credentials (when --cluster-id is provided) + 3. User-provided kubeconfig (when --kubeconfig is specified) + 4. 
Default kubeconfig (from ~/.kube/config) Docs: https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.html#osd-aws-privatelink-firewall-prerequisites_prerequisites`, Example: ` @@ -147,9 +156,15 @@ func NewCmdValidateEgress() *cobra.Command { # Run in pod mode using Kubernetes jobs (requires cluster access) osdctl network verify-egress --cluster-id my-rosa-cluster --pod-mode + # Run in pod mode using ServiceAccount (when running inside a Kubernetes Pod) + osdctl network verify-egress --pod-mode --region us-east-1 --namespace my-namespace + # Run in pod mode with custom namespace and kubeconfig osdctl network verify-egress --pod-mode --region us-east-1 --namespace my-namespace --kubeconfig ~/.kube/config + # Run network verification without sending service logs on failure + osdctl network verify-egress --cluster-id my-rosa-cluster --skip-service-log + # (Not recommended) Run against a specific VPC, without specifying cluster-id <export environment variables like AWS_ACCESS_KEY_ID or use aws configure> osdctl network verify-egress --subnet-id subnet-abcdefg123 --security-group sg-abcdefgh123 --region us-east-1`, @@ -179,6 +194,7 @@ func NewCmdValidateEgress() *cobra.Command { validateEgressCmd.Flags().BoolVar(&e.PodMode, "pod-mode", false, "(optional) run verification using Kubernetes pods instead of cloud instances") validateEgressCmd.Flags().StringVar(&e.KubeConfig, "kubeconfig", "", "(optional) path to kubeconfig file for pod mode (uses default kubeconfig if not specified)") validateEgressCmd.Flags().StringVar(&e.Namespace, "namespace", "openshift-network-diagnostics", "(optional) Kubernetes namespace to run verification pods in") + validateEgressCmd.Flags().BoolVar(&e.SkipServiceLog, "skip-service-log", false, "(optional) disable automatic service log sending when verification fails") // Pod mode is incompatible with cloud-specific configuration flags validateEgressCmd.MarkFlagsMutuallyExclusive("pod-mode", "cacert") @@ -257,17 +273,23 @@ func (e *EgressVerification) Run(ctx context.Context) { // Prompt sending a service log instead for other blocked egresses. if !out.IsSuccessful() && len(out.GetEgressURLFailures()) > 0 { failures++ - postCmd := generateServiceLog(out, e.ClusterId) - blockedUrl := strings.Join(postCmd.TemplateParams, ",") - if (strings.Contains(blockedUrl, "deadmanssnitch") || strings.Contains(blockedUrl, "pagerduty")) && e.cluster.State() == "ready" { - fmt.Println("PagerDuty and/or DMS outgoing traffic is blocked, resulting in a loss of observability. As a result, Red Hat can no longer guarantee SLAs and the cluster should be put in limited support") - pCmd := lsupport.Post{Template: limitedSupportTemplate} - if err := pCmd.Run(e.ClusterId); err != nil { - fmt.Printf("failed to post limited support reason: %v", err) + + // Only send service logs if not disabled by flag + if !e.SkipServiceLog { + postCmd := generateServiceLog(out, e.ClusterId) + blockedUrl := strings.Join(postCmd.TemplateParams, ",") + if (strings.Contains(blockedUrl, "deadmanssnitch") || strings.Contains(blockedUrl, "pagerduty")) && e.cluster.State() == "ready" { + fmt.Println("PagerDuty and/or DMS outgoing traffic is blocked, resulting in a loss of observability. 
As a result, Red Hat can no longer guarantee SLAs and the cluster should be put in limited support") + pCmd := lsupport.Post{Template: limitedSupportTemplate} + if err := pCmd.Run(e.ClusterId); err != nil { + fmt.Printf("failed to post limited support reason: %v", err) + } + } else if err := postCmd.Run(); err != nil { + fmt.Println("Failed to generate service log. Please manually send a service log to the customer for the blocked egresses with:") + fmt.Printf("osdctl servicelog post %v -t %v -p %v\n", e.ClusterId, blockedEgressTemplateUrl, strings.Join(postCmd.TemplateParams, " -p ")) } - } else if err := postCmd.Run(); err != nil { - fmt.Println("Failed to generate service log. Please manually send a service log to the customer for the blocked egresses with:") - fmt.Printf("osdctl servicelog post %v -t %v -p %v\n", e.ClusterId, blockedEgressTemplateUrl, strings.Join(postCmd.TemplateParams, " -p ")) + } else { + fmt.Println("Service log sending disabled by --skip-service-log flag. Network verification failed but no service log will be sent.") } } if failures > 0 { @@ -575,33 +597,57 @@ func printVersion() { log.Println(fmt.Sprintf("Using osd-network-verifier version %v", version)) } -// setupForPodMode creates a Kubernetes client and KubeVerifier for pod-based verification -func (e *EgressVerification) setupForPodMode(ctx context.Context) (*onvKubeClient.KubeVerifier, error) { +// getRestConfig retrieves a Kubernetes REST config using the following priority order: +// 1. User-provided kubeconfig (when --kubeconfig is specified) +// 2. Backplane credentials (when --cluster-id is provided) +// 3. In-cluster configuration (when ServiceAccount token exists and no explicit config provided) +// 4. Default kubeconfig (from ~/.kube/config) +func (e *EgressVerification) getRestConfig(ctx context.Context) (*rest.Config, error) { var restConfig *rest.Config - var err error - // Prefer backplane credentials when cluster ID is available - if e.ClusterId != "" { - restConfig, err = k8s.NewRestConfig(e.ClusterId) - if err != nil { - return nil, fmt.Errorf("failed to get REST config from backplane for cluster %s: %w", e.ClusterId, err) - } - e.log.Info(ctx, "Pod mode using backplane credentials for cluster: %s", e.ClusterId) - } else if e.KubeConfig != "" { - // Fallback to user-provided kubeconfig - restConfig, err = clientcmd.BuildConfigFromFlags("", e.KubeConfig) + // Priority 1: Use explicitly provided kubeconfig + if e.KubeConfig != "" { + restConfig, err := clientcmd.BuildConfigFromFlags("", e.KubeConfig) if err != nil { return nil, fmt.Errorf("failed to build kubeconfig from %s: %w", e.KubeConfig, err) } e.log.Info(ctx, "Pod mode using provided kubeconfig: %s", e.KubeConfig) - } else { - // Fallback to default kubeconfig from environment or home directory - kubeconfig := clientcmd.RecommendedHomeFile - restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + return restConfig, nil + } else if e.ClusterId != "" { + // Priority 2: Use backplane credentials when cluster ID is available + restConfig, err := k8s.NewRestConfig(e.ClusterId) if err != nil { - return nil, fmt.Errorf("failed to build default kubeconfig: %w", err) + return nil, fmt.Errorf("failed to get REST config from backplane for cluster %s: %w", e.ClusterId, err) } - e.log.Info(ctx, "Pod mode using default kubeconfig") + e.log.Info(ctx, "Pod mode using backplane credentials for cluster: %s", e.ClusterId) + return restConfig, nil + } else if _, err := os.Stat(serviceAccountTokenPath); err == nil { + // Priority 3: Try 
in-cluster configuration when no explicit config provided + var err error + restConfig, err = rest.InClusterConfig() + if err == nil { + e.log.Info(ctx, "Pod mode using in-cluster configuration with ServiceAccount") + return restConfig, nil + } else { + e.log.Info(ctx, "ServiceAccount token found but in-cluster config failed, falling back to default kubeconfig") + } + } + + // Priority 4: Fallback to default kubeconfig from environment or home directory + kubeconfig := clientcmd.RecommendedHomeFile + restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, fmt.Errorf("failed to build default kubeconfig: %w", err) + } + e.log.Info(ctx, "Pod mode using default kubeconfig") + return restConfig, nil +} + +// setupForPodMode creates a Kubernetes client and KubeVerifier for pod-based verification +func (e *EgressVerification) setupForPodMode(ctx context.Context) (*onvKubeClient.KubeVerifier, error) { + restConfig, err := e.getRestConfig(ctx) + if err != nil { + return nil, err } // Create Kubernetes clientset diff --git a/cmd/network/verification_pod_mode_test.go b/cmd/network/verification_pod_mode_test.go index d6604c15c..545d3837e 100644 --- a/cmd/network/verification_pod_mode_test.go +++ b/cmd/network/verification_pod_mode_test.go @@ -2,10 +2,13 @@ package network import ( "context" + "os" + "path/filepath" "testing" "time" cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + "github.com/openshift-online/ocm-sdk-go/logging" "github.com/openshift/osd-network-verifier/pkg/data/cloud" "github.com/openshift/osd-network-verifier/pkg/data/cpu" "github.com/openshift/osd-network-verifier/pkg/probes/curl" @@ -331,3 +334,95 @@ func TestEgressVerification_ValidateInput_PodMode(t *testing.T) { }) } } + +// Validates that setupForPodMode uses the provided kubeconfig path without relying on ServiceAccount or Backplane +func TestSetupForPodMode_UsesProvidedKubeconfig(t *testing.T) { + // Minimal kubeconfig that client-go can parse without contacting a real server + kubeconfigContent := []byte(`apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1 + name: test +contexts: +- context: + cluster: test + user: test + name: test +current-context: test +kind: Config +preferences: {} +users: +- name: test + user: + token: dummy +`) + + tmpDir := t.TempDir() + kcPath := filepath.Join(tmpDir, "config") + if err := os.WriteFile(kcPath, kubeconfigContent, 0600); err != nil { + t.Fatalf("failed to write temp kubeconfig: %v", err) + } + + logger, err := logging.NewGoLoggerBuilder().Debug(true).Build() + if err != nil { + t.Fatalf("failed to build logger: %v", err) + } + + e := &EgressVerification{ + PodMode: true, + KubeConfig: kcPath, + Namespace: "test-ns", + log: logger, + } + + kv, err := e.setupForPodMode(context.Background()) + if err != nil { + t.Fatalf("setupForPodMode returned error: %v", err) + } + if kv == nil { + t.Fatalf("expected non-nil KubeVerifier") + } +} + +// Attempts to validate the ServiceAccount branch of setupForPodMode. +// Skips if the test process lacks permissions to write the token path. 
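+// Note that rest.InClusterConfig additionally requires the KUBERNETES_SERVICE_HOST
+// and KUBERNETES_SERVICE_PORT environment variables, which this test sets below.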
+func TestSetupForPodMode_UsesServiceAccountWhenTokenPresent(t *testing.T) { + const saRoot = "/var/run/secrets/kubernetes.io/serviceaccount" + // Try to create the service account directory if it doesn't exist + if err := os.MkdirAll(saRoot, 0755); err != nil { + t.Skipf("skipping: cannot create serviceaccount path (%v)", err) + } + + // Write minimal token and CA files + tokenPath := filepath.Join(saRoot, "token") + caPath := filepath.Join(saRoot, "ca.crt") + if err := os.WriteFile(tokenPath, []byte("dummy-token"), 0600); err != nil { + t.Skipf("skipping: cannot write token file (%v)", err) + } + if err := os.WriteFile(caPath, []byte("dummy-ca"), 0600); err != nil { + t.Skipf("skipping: cannot write ca.crt file (%v)", err) + } + + // Set required in-cluster env vars + t.Setenv("KUBERNETES_SERVICE_HOST", "127.0.0.1") + t.Setenv("KUBERNETES_SERVICE_PORT", "8443") + + logger, err := logging.NewGoLoggerBuilder().Debug(true).Build() + if err != nil { + t.Fatalf("failed to build logger: %v", err) + } + + e := &EgressVerification{ + PodMode: true, + Namespace: "sa-test-ns", + log: logger, + } + + kv, err := e.setupForPodMode(context.Background()) + if err != nil { + t.Fatalf("setupForPodMode returned error using ServiceAccount: %v", err) + } + if kv == nil { + t.Fatalf("expected non-nil KubeVerifier using ServiceAccount") + } +} diff --git a/cmd/network/verification_test.go b/cmd/network/verification_test.go index d164e87f5..e5045ea37 100644 --- a/cmd/network/verification_test.go +++ b/cmd/network/verification_test.go @@ -669,6 +669,89 @@ func TestGetCaBundleFromSyncSet(t *testing.T) { } } +func TestEgressVerification_GetRestConfig(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + ev *EgressVerification + expectedLog string + expectError bool + expectedResult string // Description of expected result type + }{ + { + name: "priority_1_explicit_kubeconfig", + ev: &EgressVerification{ + KubeConfig: "/path/to/test.kubeconfig", + ClusterId: "should-be-ignored", + log: newTestLogger(t), + }, + expectedLog: "Pod mode using provided kubeconfig: /path/to/test.kubeconfig", + expectError: false, + expectedResult: "explicit kubeconfig should be used", + }, + { + name: "priority_2_backplane_credentials", + ev: &EgressVerification{ + ClusterId: "test-cluster-id", + KubeConfig: "", // No explicit kubeconfig + log: newTestLogger(t), + }, + expectedLog: "Pod mode using backplane credentials for cluster: test-cluster-id", + expectError: false, + expectedResult: "backplane credentials should be used", + }, + { + name: "priority_4_default_kubeconfig_fallback", + ev: &EgressVerification{ + ClusterId: "", // No cluster ID + KubeConfig: "", // No explicit kubeconfig + log: newTestLogger(t), + }, + expectedLog: "Pod mode using default kubeconfig", + expectError: false, + expectedResult: "default kubeconfig should be used (may succeed if valid kubeconfig exists)", + }, + { + name: "invalid_explicit_kubeconfig_should_error", + ev: &EgressVerification{ + KubeConfig: "/nonexistent/invalid.kubeconfig", + log: newTestLogger(t), + }, + expectError: true, + expectedResult: "should return error for invalid kubeconfig", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Note: These tests focus on the priority logic and error handling. + // Actual REST config creation will fail in test environment, but we can + // verify the correct code paths are taken and appropriate errors are returned. 
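+			// For example, the backplane branch needs a reachable backplane
+			// configuration, which is not available when unit tests run.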
+ + _, err := tt.ev.getRestConfig(ctx) + + if tt.expectError { + assert.Error(t, err, tt.expectedResult) + } else { + // In test environment, the result depends on whether valid kubeconfig files exist + // The important thing is that no panic occurs and the function handles + // the priority order correctly. We verify the function completes successfully. + + // For default kubeconfig case, it might succeed if ~/.kube/config exists and is valid + // For other cases, we typically expect errors due to missing files/services + if tt.name == "priority_4_default_kubeconfig_fallback" { + // Default kubeconfig might actually work in test environment + // Just verify no panic occurred + t.Logf("Default kubeconfig test completed. Error: %v", err) + } else { + // For explicit kubeconfig and backplane cases, we expect errors + assert.NotNil(t, err, "Expected error in test environment due to missing real kubeconfig/backplane") + } + } + }) + } +} func Test_egressVerification_setupForAwsVerification(t *testing.T) { tests := []struct { name string diff --git a/docs/README.md b/docs/README.md index 8f2d83b7b..bdd731dcd 100644 --- a/docs/README.md +++ b/docs/README.md @@ -3082,6 +3082,12 @@ Verify an AWS OSD/ROSA cluster can reach all required external URLs necessary fo public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. 2. Pod mode (--pod-mode): runs verification as Kubernetes Jobs within the target cluster. This mode requires cluster admin access but provides more accurate results as it tests from within the actual cluster environment. + + Pod mode uses the following Kubernetes client configuration priority: + 1. In-cluster configuration (when ServiceAccount token exists) + 2. Backplane credentials (when --cluster-id is provided) + 3. User-provided kubeconfig (when --kubeconfig is specified) + 4. Default kubeconfig (from ~/.kube/config) Docs: https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.html#osd-aws-privatelink-firewall-prerequisites_prerequisites @@ -3116,6 +3122,7 @@ osdctl network verify-egress [flags] --security-group string (optional) security group ID override for osd-network-verifier, required if not specifying --cluster-id -s, --server string The address and port of the Kubernetes API server --skip-aws-proxy-check aws_proxy Don't use the configured aws_proxy value + --skip-service-log (optional) disable automatic service log sending when verification fails -S, --skip-version-check skip checking to see if this is the most recent release --subnet-id stringArray (optional) private subnet ID override, required if not specifying --cluster-id and can be specified multiple times to run against multiple subnets --version When present, prints out the version of osd-network-verifier being used diff --git a/docs/osdctl_network_verify-egress.md b/docs/osdctl_network_verify-egress.md index 3fb4d44d6..23dd92254 100644 --- a/docs/osdctl_network_verify-egress.md +++ b/docs/osdctl_network_verify-egress.md @@ -19,6 +19,12 @@ Verify an AWS OSD/ROSA cluster can reach all required external URLs necessary fo public subnets (in non-privatelink clusters), since they have an internet gateway and no NAT gateway. 2. Pod mode (--pod-mode): runs verification as Kubernetes Jobs within the target cluster. This mode requires cluster admin access but provides more accurate results as it tests from within the actual cluster environment. + + Pod mode uses the following Kubernetes client configuration priority: + 1. 
In-cluster configuration (when ServiceAccount token exists) + 2. Backplane credentials (when --cluster-id is provided) + 3. User-provided kubeconfig (when --kubeconfig is specified) + 4. Default kubeconfig (from ~/.kube/config) Docs: https://docs.openshift.com/rosa/rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.html#osd-aws-privatelink-firewall-prerequisites_prerequisites @@ -49,9 +55,15 @@ osdctl network verify-egress [flags] # Run in pod mode using Kubernetes jobs (requires cluster access) osdctl network verify-egress --cluster-id my-rosa-cluster --pod-mode + # Run in pod mode using ServiceAccount (when running inside a Kubernetes Pod) + osdctl network verify-egress --pod-mode --region us-east-1 --namespace my-namespace + # Run in pod mode with custom namespace and kubeconfig osdctl network verify-egress --pod-mode --region us-east-1 --namespace my-namespace --kubeconfig ~/.kube/config + # Run network verification without sending service logs on failure + osdctl network verify-egress --cluster-id my-rosa-cluster --skip-service-log + # (Not recommended) Run against a specific VPC, without specifying cluster-id <export environment variables like AWS_ACCESS_KEY_ID or use aws configure> osdctl network verify-egress --subnet-id subnet-abcdefg123 --security-group sg-abcdefgh123 --region us-east-1 @@ -76,6 +88,7 @@ osdctl network verify-egress [flags] --probe string (optional) select the probe to be used for egress testing. Either 'curl' (default) or 'legacy' (default "curl") --region string (optional) AWS region, required for --pod-mode if not passing a --cluster-id --security-group string (optional) security group ID override for osd-network-verifier, required if not specifying --cluster-id + --skip-service-log (optional) disable automatic service log sending when verification fails --subnet-id stringArray (optional) private subnet ID override, required if not specifying --cluster-id and can be specified multiple times to run against multiple subnets --version When present, prints out the version of osd-network-verifier being used --vpc string (optional) VPC name for cases where it can't be fetched from OCM From 188312ae20d8d1d6d050f148d5c3e9ae73159249 Mon Sep 17 00:00:00 2001 From: Evan Lin <evlin@redhat.com> Date: Tue, 19 Aug 2025 13:04:31 +0100 Subject: [PATCH 39/40] SREP-217: Improving filtering on osdctl-cloudtrail command (#760) * SREP-217: Changed cluster-id to id-cluster * ADD: Username Filter for OSDCTL Write Events Still Missing ErrorChecking and Showing errors if no matching username * UPDATE: Added Print Username Not recognized if nothing found * ADD: Events Filter * ADD: Resource Name and Type to PrintEvents * ADD: Resource Type Filters * Update: Resource Type and Name Both Working * REFACTOR: Refactored AWS.go Write-events.go Refactored aws.go to use Case-Switching to prevent duplications of Code Used Slices for Filters * REFACTOR: Created Filter Function Moved Filtering into its own separate function * ADD: Exclusion Filters More said in documentation * ADD: Filter by list and Print out Errors * ADD: Filters with ARN * Test * ADD: Day and Weeks to --since flag FIX: permission_denied.go now uses GetEvents() CLEANUP: Comment and Code Cleanup ADD: Filters for write-event.go REFACTOR: Filters.go CLEANUP: Code Cleanup, Variable Renaming ADD: Sample code in flags REFACTOR & FIX: Code cleanup in cloudtrail & filter.go refactored NEW CODE: Remodel filter function to use lambda for readability REFACTOR: Refactored Files To Provide Linear Imports FIX: 
Issues with go.mod FEAT: Added ValidateFilters before any time requiring operation are required REFACTOR: Added filter package REMOVE: util.go FEAT: Added functionality to inlcude resource-name & type This is mainly because permission denied also requires PrintEvent FIX: testdata go files had issues with imports REFACTOR: removed event, aws and filter packages to reduce complexity. FEAT: Added EndTime for --until flag FEAT: Added --print-format to allow users to specify which fields to print REFACTOR: Added time.go for --until --after --since flag DONE: Final Cleanup for SREP-217 & Documentation --------- Co-authored-by: openshift-merge-bot[bot] <148852131+openshift-merge-bot[bot]@users.noreply.github.com> --- cmd/cloudtrail/{pkg/aws => }/aws.go | 49 +--- cmd/cloudtrail/cmd.go | 4 +- cmd/cloudtrail/event.go | 188 ++++++++++++ cmd/cloudtrail/filter.go | 275 ++++++++++++++++++ cmd/cloudtrail/permission-denied.go | 23 +- cmd/cloudtrail/pkg/util.go | 112 ------- cmd/cloudtrail/pkg/util_test.go | 247 ---------------- .../{pkg/aws => testdata}/aws_test.go | 8 +- .../{ => testdata}/cloudtrail_test.go | 29 +- .../{pkg/aws => }/testdata/valid_event.json | 0 cmd/cloudtrail/time.go | 120 ++++++++ cmd/cloudtrail/write-events.go | 178 +++++------- docs/README.md | 20 +- docs/osdctl_cloudtrail.md | 2 +- docs/osdctl_cloudtrail_write-events.md | 45 ++- 15 files changed, 753 insertions(+), 547 deletions(-) rename cmd/cloudtrail/{pkg/aws => }/aws.go (60%) create mode 100644 cmd/cloudtrail/event.go create mode 100644 cmd/cloudtrail/filter.go delete mode 100644 cmd/cloudtrail/pkg/util.go delete mode 100644 cmd/cloudtrail/pkg/util_test.go rename cmd/cloudtrail/{pkg/aws => testdata}/aws_test.go (92%) rename cmd/cloudtrail/{ => testdata}/cloudtrail_test.go (89%) rename cmd/cloudtrail/{pkg/aws => }/testdata/valid_event.json (100%) create mode 100644 cmd/cloudtrail/time.go diff --git a/cmd/cloudtrail/pkg/aws/aws.go b/cmd/cloudtrail/aws.go similarity index 60% rename from cmd/cloudtrail/pkg/aws/aws.go rename to cmd/cloudtrail/aws.go index ff9210eb6..1f03b9ca0 100644 --- a/cmd/cloudtrail/pkg/aws/aws.go +++ b/cmd/cloudtrail/aws.go @@ -1,4 +1,4 @@ -package pkg +package cloudtrail import ( "context" @@ -6,14 +6,11 @@ import ( "fmt" "time" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go-v2/service/cloudtrail" - "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" "github.com/aws/aws-sdk-go-v2/service/sts" ) -// RawEventDetails struct represents the structure of an AWS raw event +// RawEventDetails represents the structure of relevant fields extracted from a CloudTrail event JSON. type RawEventDetails struct { EventVersion string `json:"eventVersion"` UserIdentity struct { @@ -31,11 +28,12 @@ type RawEventDetails struct { ErrorCode string `json:"errorCode"` } +// QueryOptions defines the start time for querying CloudTrail events. type QueryOptions struct { StartTime time.Time } -// Extracts Raw cloudtrailEvent Details +// ExtractUserDetails parses a CloudTrail event JSON string and extracts user identity details. 
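+// It returns an error for nil or empty input, or when the event JSON cannot be
+// unmarshalled into the expected structure.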
 func ExtractUserDetails(cloudTrailEvent *string) (*RawEventDetails, error) {
 	if cloudTrailEvent == nil || *cloudTrailEvent == "" {
 		return &RawEventDetails{}, fmt.Errorf("cannot parse a nil input")
@@ -59,7 +57,8 @@ func ExtractUserDetails(cloudTrailEvent *string) (*RawEventDetails, error) {
 	return &res, nil
 }
 
-// whoami retrieves caller identity information
+// Whoami retrieves the AWS account ARN and account ID for the current caller
+// using the provided STS client.
 func Whoami(stsClient sts.Client) (accountArn string, accountId string, err error) {
 	ctx := context.TODO()
 	callerIdentityOutput, err := stsClient.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
@@ -74,39 +73,3 @@ func Whoami(stsClient sts.Client) (accountArn string, accountId string, err erro
 
 	return userArn.String(), userArn.AccountID, nil
 }
-
-// getWriteEvents retrieves cloudtrail events since the specified time
-// using the provided cloudtrail client and starttime from since flag.
-func GetEvents(cloudtailClient *cloudtrail.Client, startTime time.Time, writeOnly bool) ([]types.Event, error) {
-
-	alllookupEvents := []types.Event{}
-	input := cloudtrail.LookupEventsInput{
-		StartTime: &startTime,
-		EndTime:   aws.Time(time.Now()),
-	}
-
-	if writeOnly {
-		input.LookupAttributes = []types.LookupAttribute{
-			{AttributeKey: "ReadOnly",
-				AttributeValue: aws.String("false")},
-		}
-	}
-
-	paginator := cloudtrail.NewLookupEventsPaginator(cloudtailClient, &input, func(c *cloudtrail.LookupEventsPaginatorOptions) {})
-	for paginator.HasMorePages() {
-
-		lookupOutput, err := paginator.NextPage(context.TODO())
-		if err != nil {
-			return nil, fmt.Errorf("[WARNING] paginator error: \n%w", err)
-		}
-		alllookupEvents = append(alllookupEvents, lookupOutput.Events...)
-
-		input.NextToken = lookupOutput.NextToken
-		if lookupOutput.NextToken == nil {
-			break
-		}
-
-	}
-
-	return alllookupEvents, nil
-}
diff --git a/cmd/cloudtrail/cmd.go b/cmd/cloudtrail/cmd.go
index d00918982..6cf52089f 100644
--- a/cmd/cloudtrail/cmd.go
+++ b/cmd/cloudtrail/cmd.go
@@ -9,8 +9,8 @@ func NewCloudtrailCmd() *cobra.Command {
 	cloudtrailCmd := &cobra.Command{
 		Use:   "cloudtrail",
 		Short: "AWS CloudTrail related utilities",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return cmd.Help()
+		Run: func(cmd *cobra.Command, args []string) {
+			cmd.Help()
 		},
 	}
 
diff --git a/cmd/cloudtrail/event.go b/cmd/cloudtrail/event.go
new file mode 100644
index 000000000..5e873145a
--- /dev/null
+++ b/cmd/cloudtrail/event.go
@@ -0,0 +1,188 @@
+package cloudtrail
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
+	"github.com/aws/aws-sdk-go-v2/service/cloudtrail/types"
+)
+
+// GetEvents retrieves CloudTrail events using the provided client and time range.
+// It paginates through all available results and returns every event found.
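+// When writeOnly is true, results are limited to write (management) events via
+// the ReadOnly=false lookup attribute. Illustrative use:
+//
+//	events, err := GetEvents(client, time.Now().Add(-time.Hour), time.Now(), true)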
+func GetEvents(cloudtrailClient *cloudtrail.Client, startTime time.Time, endTime time.Time, writeOnly bool) ([]types.Event, error) {
+	alllookupEvents := []types.Event{}
+	input := cloudtrail.LookupEventsInput{
+		StartTime: &startTime,
+		EndTime:   &endTime,
+	}
+
+	if writeOnly {
+		input.LookupAttributes = []types.LookupAttribute{
+			{AttributeKey: "ReadOnly",
+				AttributeValue: aws.String("false")},
+		}
+	}
+
+	paginator := cloudtrail.NewLookupEventsPaginator(cloudtrailClient, &input, func(c *cloudtrail.LookupEventsPaginatorOptions) {})
+	for paginator.HasMorePages() {
+		lookupOutput, err := paginator.NextPage(context.TODO())
+		if err != nil {
+			return nil, fmt.Errorf("[WARNING] paginator error: \n%w", err)
+		}
+		alllookupEvents = append(alllookupEvents, lookupOutput.Events...)
+
+		input.NextToken = lookupOutput.NextToken
+		if lookupOutput.NextToken == nil {
+			break
+		}
+	}
+
+	return alllookupEvents, nil
+}
+
+// PrintEvents prints the filtered CloudTrail events in a human-readable format.
+// It can also print each event's AWS console URL or its raw JSON format,
+// as well as any associated resource names and types.
+func PrintEvents(filterEvents []types.Event, printUrl bool, printRaw bool) {
+	var eventStringBuilder = strings.Builder{}
+
+	for i := len(filterEvents) - 1; i >= 0; i-- {
+		if printRaw {
+			if filterEvents[i].CloudTrailEvent != nil {
+				fmt.Printf("%v \n", *filterEvents[i].CloudTrailEvent)
+				return
+			}
+		}
+		rawEventDetails, err := ExtractUserDetails(filterEvents[i].CloudTrailEvent)
+		if err != nil {
+			fmt.Printf("[Error] Error extracting event details: %v", err)
+		}
+		sessionIssuer := rawEventDetails.UserIdentity.SessionContext.SessionIssuer.UserName
+		if filterEvents[i].EventName != nil {
+			eventStringBuilder.WriteString(fmt.Sprintf("\n%v", *filterEvents[i].EventName))
+		}
+		if filterEvents[i].EventTime != nil {
+			eventStringBuilder.WriteString(fmt.Sprintf(" | %v", filterEvents[i].EventTime.String()))
+		}
+		if filterEvents[i].Username != nil {
+			eventStringBuilder.WriteString(fmt.Sprintf(" | Username: %v", *filterEvents[i].Username))
+		}
+		if sessionIssuer != "" {
+			eventStringBuilder.WriteString(fmt.Sprintf(" | ARN: %v", sessionIssuer))
+		}
+
+		for _, resource := range filterEvents[i].Resources {
+			if resource.ResourceName != nil {
+				eventStringBuilder.WriteString(fmt.Sprintf(" | Resource Name: %v", *resource.ResourceName))
+			}
+			if resource.ResourceType != nil {
+				eventStringBuilder.WriteString(fmt.Sprintf(" | Resource Type: %v", *resource.ResourceType))
+			}
+		}
+
+		if printUrl && filterEvents[i].CloudTrailEvent != nil {
+			if err == nil {
+				eventStringBuilder.WriteString(fmt.Sprintf("\n%v |", generateLink(*rawEventDetails)))
+			} else {
+				fmt.Println("EventLink: <not available>")
+			}
+		}
+
+	}
+	fmt.Println(eventStringBuilder.String())
+}
+
+// PrintFormat prints, for each event, only the fields selected in table
+// (event, time, username, arn, resource-name, resource-type). It can also
+// append each event's AWS console URL when printUrl is set.
+func PrintFormat(filterEvents []types.Event, printUrl bool, printRaw bool, table []string) {
+	var eventStringBuilder = strings.Builder{}
+	tableFilter := map[string]struct{}{}
+
+	for _, field := range table {
+		tableFilter[field] = struct{}{}
+	}
+
+	for i := len(filterEvents) - 1; i >= 0; i-- {
+
+		rawEventDetails, err := ExtractUserDetails(filterEvents[i].CloudTrailEvent)
+		if err != nil {
+			fmt.Printf("[Error] Error extracting event details: %v", err)
+		}
+		sessionIssuer := rawEventDetails.UserIdentity.SessionContext.SessionIssuer.UserName
+		eventStringBuilder.WriteString("\n")
+		if _, ok := tableFilter["event"]; ok && filterEvents[i].EventName != nil {
+			eventStringBuilder.WriteString(fmt.Sprintf("%v | ", *filterEvents[i].EventName))
+		}
+		if _, ok := tableFilter["time"]; ok && filterEvents[i].EventTime != nil {
+			eventStringBuilder.WriteString(fmt.Sprintf("%v | ", filterEvents[i].EventTime.String()))
+		}
+		if _, ok := tableFilter["username"]; ok && filterEvents[i].Username != nil {
+			eventStringBuilder.WriteString(fmt.Sprintf("Username: %v | ", *filterEvents[i].Username))
+		}
+		if _, ok := tableFilter["arn"]; ok && sessionIssuer != "" {
+			eventStringBuilder.WriteString(fmt.Sprintf("ARN: %v | ", sessionIssuer))
+		}
+
+		for _, resource := range filterEvents[i].Resources {
+			if _, ok := tableFilter["resource-name"]; ok && resource.ResourceName != nil {
+				eventStringBuilder.WriteString(fmt.Sprintf("Resource Name: %v | ", *resource.ResourceName))
+			}
+			if _, ok := tableFilter["resource-type"]; ok && resource.ResourceType != nil {
+				eventStringBuilder.WriteString(fmt.Sprintf("Resource Type: %v | ", *resource.ResourceType))
+			}
+		}
+
+		if printUrl && filterEvents[i].CloudTrailEvent != nil {
+			if err == nil {
+				eventStringBuilder.WriteString(fmt.Sprintf("%v", generateLink(*rawEventDetails)))
+			} else {
+				fmt.Println("EventLink: <not available>")
+			}
+		}
+
+	}
+	fmt.Println(eventStringBuilder.String())
+}
+
+// generateLink builds the AWS console hyperlink for a CloudTrail event
+// based on the provided RawEventDetails.
+func generateLink(raw RawEventDetails) string {
+	return fmt.Sprintf("https://%s.console.aws.amazon.com/cloudtrailv2/home?region=%s#/events/%s",
+		raw.EventRegion, raw.EventRegion, raw.EventId)
+}
+
+// ValidateFormat checks each column name in the given list and returns an
+// error if it is not one of the allowed print-format fields.
+func ValidateFormat(table []string) error {
+	allowedKeys := map[string]struct{}{
+		"username":      {},
+		"event":         {},
+		"resource-name": {},
+		"resource-type": {},
+		"arn":           {},
+		"time":          {},
+	}
+
+	for _, column := range table {
+		if _, ok := allowedKeys[strings.ToLower(column)]; !ok {
+			return fmt.Errorf("invalid table column: %s (allowed: username, event, resource-name, resource-type, arn, time)", column)
+		}
+	}
+
+	return nil
+}
diff --git a/cmd/cloudtrail/filter.go b/cmd/cloudtrail/filter.go
new file mode 100644
index 000000000..24781a0ba
--- /dev/null
+++ b/cmd/cloudtrail/filter.go
@@ -0,0 +1,275 @@
+package cloudtrail
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"slices"
+
+	"github.com/aws/aws-sdk-go-v2/service/cloudtrail/types"
+)
+
+// Filter is a function type that takes a CloudTrail event and returns a boolean indicating
+// whether the event passes the filter, and an error if the filter evaluation fails.
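+// An illustrative Filter that keeps only events named "AssumeRole":
+//
+//	var assumeRoleOnly Filter = func(e types.Event) (bool, error) {
+//		return e.EventName != nil && *e.EventName == "AssumeRole", nil
+//	}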
+type Filter func(types.Event) (bool, error)
+
+// WriteEventFilters defines the structure for filters used in write-events.go
+type WriteEventFilters struct {
+	Include []string
+	Exclude []string
+}
+
+// ApplyFilters runs every provided Filter against each event and returns only
+// the events that pass all filters. The first filter error aborts processing.
+func ApplyFilters(records []types.Event, filters ...Filter) ([]types.Event, error) {
+	if len(filters) == 0 {
+		return records, nil
+	}
+
+	filteredRecords := make([]types.Event, 0, len(records))
+	for _, r := range records {
+		keep := true
+		for _, f := range filters {
+			filtered, err := f(r)
+			if err != nil {
+				return nil, err
+			}
+			if !filtered {
+				keep = false
+				break
+			}
+		}
+
+		if keep {
+			filteredRecords = append(filteredRecords, r)
+		}
+	}
+
+	return filteredRecords, nil
+}
+
+// IsIgnoredEvent checks an event against mergedRegex, a merged ignore pattern.
+// It returns false (filter the event out) when the event's username or
+// session-issuer ARN matches the pattern, or when neither field is present;
+// otherwise it returns true to keep the event.
+func IsIgnoredEvent(event types.Event, mergedRegex string) (bool, error) {
+	if mergedRegex == "" {
+		return true, nil
+	}
+	raw, err := ExtractUserDetails(event.CloudTrailEvent)
+	if err != nil {
+		return true, fmt.Errorf("[ERROR] failed to extract raw CloudTrail event details: %w", err)
+	}
+	userArn := raw.UserIdentity.SessionContext.SessionIssuer.Arn
+	regexObj := regexp.MustCompile(mergedRegex)
+
+	if event.Username != nil {
+		if regexObj.MatchString(*event.Username) {
+			return false, nil
+		}
+	}
+	if userArn != "" {
+		if regexObj.MatchString(userArn) {
+			return false, nil
+		}
+	}
+	if userArn == "" && event.Username == nil {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// Filters applies inclusion and exclusion filters to the given CloudTrail
+// events, applying inclusion filters first and exclusion filters second.
+func Filters(f WriteEventFilters, alllookupEvents []types.Event) []types.Event {
+	filtered := alllookupEvents
+
+	if len(f.Include) > 0 {
+		filtered = inclusionFilter(filtered, f.Include)
+	}
+	if len(f.Exclude) > 0 {
+		filtered = exclusionFilter(filtered, f.Exclude)
+	}
+	return filtered
+}
+
+// inclusionFilter filters events by inclusion criteria.
+// Only events that match all specified filter keys and at least one value per key are included.
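+// For example, the filters "username=alice" and "event=AssumeRole" (illustrative
+// values) keep only AssumeRole events performed by the username alice.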
+func inclusionFilter(rawData []types.Event, inclusionFilters []string) []types.Event { + keyValuePair := parseFilters(inclusionFilters) + + filterFunc := map[string]func(data types.Event, values []string) bool{ + "username": func(data types.Event, values []string) bool { + if data.Username != nil { + if slices.Contains(values, *data.Username) { + return true + } + } + return false + }, + "event": func(data types.Event, values []string) bool { + if data.EventName != nil { + if slices.Contains(values, *data.EventName) { + return true + } + } + return false + }, + "resource-name": func(data types.Event, values []string) bool { + for _, resource := range data.Resources { + if resource.ResourceName != nil { + if slices.Contains(values, *resource.ResourceName) { + return true + } + } + } + return false + }, + "resource-type": func(data types.Event, values []string) bool { + for _, resource := range data.Resources { + if resource.ResourceType != nil { + if slices.Contains(values, *resource.ResourceType) { + return true + } + } + } + return false + }, + "arn": func(data types.Event, values []string) bool { + rawEventDetails, err := ExtractUserDetails(data.CloudTrailEvent) + if err != nil { + fmt.Printf("[Error] Failed to extract event details: %v\n", err) + return false + } + val := rawEventDetails.UserIdentity.SessionContext.SessionIssuer.UserName + return slices.Contains(values, val) + }, + } + + var result []types.Event + for _, data := range rawData { + found := true + for key, values := range keyValuePair { + if fn, ok := filterFunc[key]; ok { + if !fn(data, values) { + found = false + break + } + } + } + if found { + result = append(result, data) + } + } + return result +} + +// exclusionFilter filters events by exclusion criteria. +// All events that match any exclusion filter are removed. 
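+// For example, the filter "resource-type=AWS::S3::Bucket" (an illustrative value)
+// removes every event that references an S3 bucket resource.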
+func exclusionFilter(rawData []types.Event, exclusionFilters []string) []types.Event { + keyValuePair := parseFilters(exclusionFilters) + + filterFunc := map[string]func(data types.Event, values []string) bool{ + "username": func(data types.Event, values []string) bool { + if data.Username != nil { + if slices.Contains(values, *data.Username) { + return true + } + } + return false + }, + "event": func(data types.Event, values []string) bool { + if data.EventName != nil { + if slices.Contains(values, *data.EventName) { + return true + } + } + return false + }, + "resource-name": func(data types.Event, values []string) bool { + for _, resource := range data.Resources { + if resource.ResourceName != nil { + if slices.Contains(values, *resource.ResourceName) { + return true + } + } + } + return false + }, + "resource-type": func(data types.Event, values []string) bool { + for _, resource := range data.Resources { + if resource.ResourceType != nil { + if slices.Contains(values, *resource.ResourceType) { + return true + } + } + } + return false + }, + "arn": func(data types.Event, values []string) bool { + rawEventDetails, err := ExtractUserDetails(data.CloudTrailEvent) + if err != nil { + fmt.Printf("[Error] Failed to extract event details: %v\n", err) + return false + } + val := rawEventDetails.UserIdentity.SessionContext.SessionIssuer.UserName + return slices.Contains(values, val) + }, + } + + var result []types.Event + for _, data := range rawData { + found := false + for key, values := range keyValuePair { + if fn, ok := filterFunc[key]; ok { + if fn(data, values) { + found = true + break + } + } + } + if !found { + result = append(result, data) + } + } + return result +} + +// parseFilters parses a slice of filter strings in the format "key=value" into a map. +func parseFilters(filters []string) map[string][]string { + keyValuePair := make(map[string][]string) + for _, filter := range filters { + kv := strings.SplitN(filter, "=", 2) + key := kv[0] + value := kv[1] + keyValuePair[key] = append(keyValuePair[key], value) + } + return keyValuePair +} + +// ValidateFilters checks that all filters are in the correct "key=value" format +// Returns an error immediately if a filter is invalid. 
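+// For example, "username=alice" passes, while "username" (missing '=') and
+// "cluster=foo" (unsupported key) both return an error.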
+func ValidateFilters(filters []string) error { + var allowedFilterKeys = map[string]struct{}{ + "username": {}, + "event": {}, + "resource-name": {}, + "resource-type": {}, + "arn": {}, + } + + for _, filter := range filters { + kv := strings.SplitN(filter, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("invalid filter format: %s (expected key=value)", filter) + } + key := kv[0] + if _, ok := allowedFilterKeys[key]; !ok { + return fmt.Errorf("invalid filter key: %s (allowed: username, event, resource-name, resource-type, arn)", key) + } + } + return nil +} diff --git a/cmd/cloudtrail/permission-denied.go b/cmd/cloudtrail/permission-denied.go index b78958f04..289967305 100644 --- a/cmd/cloudtrail/permission-denied.go +++ b/cmd/cloudtrail/permission-denied.go @@ -5,13 +5,12 @@ import ( "fmt" "regexp" "strings" + "time" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/cloudtrail" "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" "github.com/aws/aws-sdk-go-v2/service/sts" - ctUtil "github.com/openshift/osdctl/cmd/cloudtrail/pkg" - ctAws "github.com/openshift/osdctl/cmd/cloudtrail/pkg/aws" "github.com/openshift/osdctl/pkg/osdCloud" "github.com/openshift/osdctl/pkg/utils" "github.com/spf13/cobra" @@ -38,7 +37,7 @@ func newCmdPermissionDenied() *cobra.Command { permissionDeniedCmd.Flags().StringVarP(&opts.StartTime, "since", "", "5m", "Specifies that only events that occur within the specified time are returned.Defaults to 5m. Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".") permissionDeniedCmd.Flags().BoolVarP(&opts.PrintUrl, "url", "u", false, "Generates Url link to cloud console cloudtrail event") permissionDeniedCmd.Flags().BoolVarP(&opts.PrintRaw, "raw-event", "r", false, "Prints the cloudtrail events to the console in raw json format") - _ = permissionDeniedCmd.MarkFlagRequired("cluster-id") + permissionDeniedCmd.MarkFlagRequired("cluster-id") return permissionDeniedCmd } @@ -49,7 +48,7 @@ func isforbiddenEvent(event types.Event) (bool, error) { if err != nil { return false, fmt.Errorf("failed to compile regex: %w", err) } - raw, err := ctAws.ExtractUserDetails(event.CloudTrailEvent) + raw, err := ExtractUserDetails(event.CloudTrailEvent) if err != nil { return false, fmt.Errorf("[ERROR] failed to extract raw CloudTrail event details: %w", err) } @@ -87,24 +86,24 @@ func (p *permissionDeniedEventsOptions) run() error { return err } - startTime, err := ctUtil.ParseDurationToUTC(p.StartTime) + startTime, err := ParseDurationBefore(p.StartTime, time.Now().UTC()) if err != nil { return err } - arn, accountId, err := ctAws.Whoami(*sts.NewFromConfig(cfg)) + arn, accountId, err := Whoami(*sts.NewFromConfig(cfg)) if err != nil { return err } fmt.Printf("[INFO] Checking Permission Denied History since %v for AWS Account %v as %v \n", startTime, accountId, arn) cloudTrailclient := cloudtrail.NewFromConfig(cfg) fmt.Printf("[INFO] Fetching %v Event History...", cfg.Region) - lookupOutput, err := ctAws.GetEvents(cloudTrailclient, startTime, false) + lookupOutput, err := GetEvents(cloudTrailclient, startTime, time.Now().UTC(), false) if err != nil { return err } - filteredEvents, err := ctUtil.ApplyFilters(lookupOutput, + filteredEvents, err := ApplyFilters(lookupOutput, func(event types.Event) (bool, error) { return isforbiddenEvent(event) }, @@ -113,7 +112,7 @@ func (p *permissionDeniedEventsOptions) run() error { return err } - ctUtil.PrintEvents(filteredEvents, p.PrintUrl, p.PrintRaw) + PrintEvents(filteredEvents, p.PrintUrl, 
p.PrintRaw) if DefaultRegion != cfg.Region { defaultConfig, err := config.LoadDefaultConfig( @@ -129,11 +128,11 @@ func (p *permissionDeniedEventsOptions) run() error { HTTPClient: cfg.HTTPClient, }) fmt.Printf("[INFO] Fetching Cloudtrail Global Permission Denied Event History from %v Region...", defaultConfig.Region) - lookupOutput, err := ctAws.GetEvents(defaultCloudtrailClient, startTime, false) + lookupOutput, err := GetEvents(defaultCloudtrailClient, startTime, time.Now().UTC(), false) if err != nil { return err } - filteredEvents, err := ctUtil.ApplyFilters(lookupOutput, + filteredEvents, err := ApplyFilters(lookupOutput, func(event types.Event) (bool, error) { return isforbiddenEvent(event) }, @@ -141,7 +140,7 @@ func (p *permissionDeniedEventsOptions) run() error { if err != nil { return err } - ctUtil.PrintEvents(filteredEvents, p.PrintUrl, p.PrintRaw) + PrintEvents(filteredEvents, p.PrintUrl, p.PrintRaw) } return err diff --git a/cmd/cloudtrail/pkg/util.go b/cmd/cloudtrail/pkg/util.go deleted file mode 100644 index da1f463be..000000000 --- a/cmd/cloudtrail/pkg/util.go +++ /dev/null @@ -1,112 +0,0 @@ -package pkg - -import ( - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" - pkg "github.com/openshift/osdctl/cmd/cloudtrail/pkg/aws" -) - -type Filter func(types.Event) (bool, error) - -func ApplyFilters(records []types.Event, filters ...Filter) ([]types.Event, error) { - if len(filters) == 0 { - return records, nil - } - - filteredRecords := make([]types.Event, 0, len(records)) - for _, r := range records { - keep := true - for _, f := range filters { - filtered, err := f(r) - if err != nil { - return nil, err - } - if !filtered { - keep = false - break - } - } - - if keep { - filteredRecords = append(filteredRecords, r) - } - } - - return filteredRecords, nil -} - -// PrintEvents prints the details of each event in the provided slice of events. -// It takes a slice of types.Event -func PrintEvents(filterEvents []types.Event, printUrl bool, printRaw bool) { - var eventStringBuilder = strings.Builder{} - - for i := len(filterEvents) - 1; i >= 0; i-- { - if printRaw { - if filterEvents[i].CloudTrailEvent != nil { - fmt.Printf("%v \n", *filterEvents[i].CloudTrailEvent) - return - } - } - rawEventDetails, err := pkg.ExtractUserDetails(filterEvents[i].CloudTrailEvent) - if err != nil { - fmt.Printf("[Error] Error extracting event details: %v", err) - } - sessionIssuer := rawEventDetails.UserIdentity.SessionContext.SessionIssuer.UserName - if filterEvents[i].EventName != nil { - eventStringBuilder.WriteString(fmt.Sprintf("\n%v", *filterEvents[i].EventName)) - } - if filterEvents[i].EventTime != nil { - eventStringBuilder.WriteString(fmt.Sprintf(" | %v", filterEvents[i].EventTime.String())) - } - if filterEvents[i].Username != nil { - eventStringBuilder.WriteString(fmt.Sprintf(" | Username: %v", *filterEvents[i].Username)) - } - if sessionIssuer != "" { - eventStringBuilder.WriteString(fmt.Sprintf(" | ARN: %v", sessionIssuer)) - } - - if printUrl && filterEvents[i].CloudTrailEvent != nil { - if err == nil { - eventStringBuilder.WriteString(fmt.Sprintf("\n%v |", generateLink(*rawEventDetails))) - } else { - fmt.Println("EventLink: <not available>") - } - } - - } - fmt.Println(eventStringBuilder.String()) -} - -// generateLink generates a hyperlink to aws cloudTrail event. 
-func generateLink(raw pkg.RawEventDetails) (url_link string) { - str1 := "https://" - str2 := ".console.aws.amazon.com/cloudtrailv2/home?region=" - str3 := "#/events/" - - eventRegion := raw.EventRegion - eventId := raw.EventId - - var url = str1 + eventRegion + str2 + eventRegion + str3 + eventId - url_link = url - - return url_link -} - -// parseDurationToUTC parses the given startTime string as a duration and subtracts it from the current UTC time. -// It returns the resulting time and any parsing error encountered. -func ParseDurationToUTC(input string) (time.Time, error) { - duration, err := time.ParseDuration(input) - if err != nil { - return time.Time{}, fmt.Errorf("unable to parse time duration: %w", err) - } - - return time.Now().UTC().Add(-duration), nil -} - -// Join all individual patterns into a single string separated by the "|" operator -func MergeRegex(regexlist []string) string { - return strings.Join(regexlist, "|") -} diff --git a/cmd/cloudtrail/pkg/util_test.go b/cmd/cloudtrail/pkg/util_test.go deleted file mode 100644 index d377a73b2..000000000 --- a/cmd/cloudtrail/pkg/util_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package pkg - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" - "github.com/stretchr/testify/assert" -) - -func filterByEventName(eventName string) Filter { - return func(e types.Event) (bool, error) { - if e.EventName != nil { - return *e.EventName == eventName, nil - } - return false, nil - } -} - -func filterByUsername(username string) Filter { - return func(e types.Event) (bool, error) { - if e.Username != nil { - return *e.Username == username, nil - } - return false, nil - } -} - -func TestApplyFilters(t *testing.T) { - baseEvents := []types.Event{ - {EventName: aws.String("EventName1"), Username: aws.String("user1"), EventTime: aws.Time(time.Now())}, - {EventName: aws.String("EventName2"), Username: aws.String("user2"), EventTime: aws.Time(time.Now())}, - {EventName: aws.String("EventName3"), Username: aws.String("user1"), EventTime: aws.Time(time.Now())}, - {EventName: nil, Username: aws.String("user3"), EventTime: aws.Time(time.Now())}, // nil EventName - } - - tests := []struct { - name string - filters []Filter - inputEvents []types.Event - expectedLength int - expectedEventNames []*string - expectError bool - errorMessage string - }{ - { - name: "apply_filter_to_return_specific_events", - filters: []Filter{filterByEventName("EventName2")}, - inputEvents: baseEvents, - expectedLength: 1, - expectedEventNames: []*string{aws.String("EventName2")}, - }, - { - name: "apply_no_filters_to_return_all_events", - filters: nil, - inputEvents: baseEvents, - expectedLength: len(baseEvents), - expectedEventNames: []*string{aws.String("EventName1"), aws.String("EventName2"), aws.String("EventName3"), nil}, - }, - { - name: "apply_multiple_filters", - filters: []Filter{filterByEventName("EventName1"), filterByUsername("user1")}, - inputEvents: baseEvents, - expectedLength: 1, - expectedEventNames: []*string{aws.String("EventName1")}, - }, - { - name: "apply_filter_that_returns_error", - filters: []Filter{ - func(e types.Event) (bool, error) { - return false, fmt.Errorf("filter error") - }, - }, - inputEvents: baseEvents, - expectError: true, - errorMessage: "filter error", - expectedLength: 0, - expectedEventNames: []*string{}, - }, - { - name: "handle_empty_event_list", - filters: nil, - inputEvents: []types.Event{}, - expectedLength: 0, - 
expectedEventNames: []*string{}, - }, - { - name: "event_with_nil_eventname_should_be_skipped_by_filter", - filters: []Filter{filterByEventName("AnyEvent")}, - inputEvents: []types.Event{{EventName: nil, Username: aws.String("user1"), EventTime: aws.Time(time.Now())}}, - expectedLength: 0, - expectedEventNames: []*string{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - events := make([]types.Event, len(tt.inputEvents)) - copy(events, tt.inputEvents) - - filteredEvents, err := ApplyFilters(events, tt.filters...) - - if tt.expectError { - assert.Error(t, err) - assert.EqualError(t, err, tt.errorMessage) - return - } - - assert.NoError(t, err) - assert.Len(t, filteredEvents, tt.expectedLength) - - for i, event := range filteredEvents { - assert.Equal(t, tt.expectedEventNames[i], event.EventName) - } - }) - } -} - -func TestPrintEvents(t *testing.T) { - mockEvents := []types.Event{ - { - EventName: aws.String("LoginEvent"), - Username: aws.String("test-user"), - EventTime: aws.Time(time.Date(2023, 11, 10, 12, 0, 0, 0, time.UTC)), - CloudTrailEvent: aws.String(`{ - "EventVersion": "1.08", - "EventId": "abcd1234", - "UserIdentity": { - "SessionContext": { - "SessionIssuer": { - "UserName": "arn:aws:iam::123456789012:user/test-user" - } - } - } - }`), - }, - } - - tests := []struct { - name string - printRaw bool - printUrl bool - events []types.Event - assertion func(output string) - }{ - { - name: "print_raw_event_only", - printRaw: true, - events: mockEvents, - assertion: func(output string) { - assert.Contains(t, output, `"EventVersion": "1.08"`) - assert.Contains(t, output, `"EventId": "abcd1234"`) - assert.NotContains(t, output, "Username:") - }, - }, - { - name: "print_formatted_output_with_url", - printUrl: true, - events: mockEvents, - assertion: func(output string) { - assert.Contains(t, output, "LoginEvent") - assert.Contains(t, output, "test-user") - assert.Contains(t, output, "arn:aws:iam") - assert.Contains(t, output, "https://") - }, - }, - { - name: "print_formatted_without_url", - events: mockEvents, - assertion: func(output string) { - assert.Contains(t, output, "LoginEvent") - assert.Contains(t, output, "test-user") - assert.NotContains(t, output, "https://") - }, - }, - { - name: "invalid_cloudtrail_json", - events: []types.Event{ - { - EventName: aws.String("InvalidEvent"), - Username: aws.String("broken-user"), - EventTime: aws.Time(time.Now()), - CloudTrailEvent: aws.String(`{invalid json`), - }, - }, - assertion: func(output string) { - assert.Contains(t, output, "[Error] Error extracting event details") - assert.Contains(t, output, "InvalidEvent") - }, - }, - { - name: "unsupported_event_version", - events: []types.Event{ - { - EventName: aws.String("OldEvent"), - Username: aws.String("legacy-user"), - EventTime: aws.Time(time.Now()), - CloudTrailEvent: aws.String(`{ - "EventVersion": "1.01", - "EventId": "xx", - "UserIdentity": { - "SessionContext": { - "SessionIssuer": { - "UserName": "arn:aws:iam::111111111111:user/legacy" - } - } - } - }`), - }, - }, - assertion: func(output string) { - assert.Contains(t, output, "[Error] Error extracting event details") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - output := captureOutput(func() { - PrintEvents(tt.events, tt.printUrl, tt.printRaw) - }) - tt.assertion(output) - }) - } -} - -func captureOutput(f func()) string { - var buf bytes.Buffer - writer := bufio.NewWriter(&buf) - stdout := os.Stdout - r, w, _ := os.Pipe() - os.Stdout = w - - f() - - _ = 
w.Close() - os.Stdout = stdout - _, _ = io.Copy(writer, r) - writer.Flush() - return buf.String() -} diff --git a/cmd/cloudtrail/pkg/aws/aws_test.go b/cmd/cloudtrail/testdata/aws_test.go similarity index 92% rename from cmd/cloudtrail/pkg/aws/aws_test.go rename to cmd/cloudtrail/testdata/aws_test.go index 53ce88f42..b5a66c70a 100644 --- a/cmd/cloudtrail/pkg/aws/aws_test.go +++ b/cmd/cloudtrail/testdata/aws_test.go @@ -1,9 +1,11 @@ -package pkg +package testdata import ( "os" "strings" "testing" + + cloudtrail "github.com/openshift/osdctl/cmd/cloudtrail" ) // Utility to read file content from testdata @@ -21,7 +23,7 @@ func strPtr(s string) *string { } func TestExtractUserDetails(t *testing.T) { - validEventJSON := readFixture(t, "testdata/valid_event.json") + validEventJSON := readFixture(t, "valid_event.json") tests := []struct { testName string @@ -68,7 +70,7 @@ func TestExtractUserDetails(t *testing.T) { for _, testCase := range tests { t.Run(testCase.testName, func(t *testing.T) { - result, err := ExtractUserDetails(testCase.input) + result, err := cloudtrail.ExtractUserDetails(testCase.input) if testCase.expectError { if err == nil { diff --git a/cmd/cloudtrail/cloudtrail_test.go b/cmd/cloudtrail/testdata/cloudtrail_test.go similarity index 89% rename from cmd/cloudtrail/cloudtrail_test.go rename to cmd/cloudtrail/testdata/cloudtrail_test.go index 3364ce0fd..3528a0e85 100644 --- a/cmd/cloudtrail/cloudtrail_test.go +++ b/cmd/cloudtrail/testdata/cloudtrail_test.go @@ -1,10 +1,11 @@ -package cloudtrail +package testdata import ( + "strings" "testing" "github.com/aws/aws-sdk-go-v2/service/cloudtrail/types" - ctUtil "github.com/openshift/osdctl/cmd/cloudtrail/pkg" + cloudtrail "github.com/openshift/osdctl/cmd/cloudtrail" "github.com/stretchr/testify/assert" ) @@ -58,9 +59,9 @@ func TestIgnoreListFilter(t *testing.T) { } ignoreList := []string{".*kube-system-capa-controller.*"} - filtered, err := ctUtil.ApplyFilters(testEvents, + filtered, err := cloudtrail.ApplyFilters(testEvents, func(event types.Event) (bool, error) { - return isIgnoredEvent(event, ctUtil.MergeRegex(ignoreList)) + return cloudtrail.IsIgnoredEvent(event, strings.Join(ignoreList, "|")) }, ) @@ -80,9 +81,9 @@ func TestIgnoreListFilter(t *testing.T) { } ignoreList := []string{} - filtered, err := ctUtil.ApplyFilters(testEvents, + filtered, err := cloudtrail.ApplyFilters(testEvents, func(event types.Event) (bool, error) { - return isIgnoredEvent(event, ctUtil.MergeRegex(ignoreList)) + return cloudtrail.IsIgnoredEvent(event, strings.Join(ignoreList, "|")) }, ) assert.Nil(t, err) @@ -118,9 +119,9 @@ func TestPermissonDeniedFilter(t *testing.T) { {Username: &testUsername2, CloudTrailEvent: &testCloudTrailEvent2}, } - filtered, err := ctUtil.ApplyFilters(TestEvents, + filtered, err := cloudtrail.ApplyFilters(TestEvents, func(event types.Event) (bool, error) { - return isforbiddenEvent(event) + return cloudtrail.IsforbiddenEvent(event) }, ) assert.Nil(t, err) @@ -138,9 +139,9 @@ func TestPermissonDeniedFilter(t *testing.T) { } expected := []types.Event{} - filtered, err := ctUtil.ApplyFilters(edgeCaseEvents, + filtered, err := cloudtrail.ApplyFilters(edgeCaseEvents, func(event types.Event) (bool, error) { - return isforbiddenEvent(event) + return cloudtrail.IsforbiddenEvent(event) }, ) assert.Nil(t, err) @@ -157,9 +158,9 @@ func TestPermissonDeniedFilter(t *testing.T) { } expected := []types.Event{} - filtered, err := ctUtil.ApplyFilters(edgeCaseEvents, + filtered, err := cloudtrail.ApplyFilters(edgeCaseEvents, func(event 
types.Event) (bool, error) {
-			return isforbiddenEvent(event)
+			return cloudtrail.IsforbiddenEvent(event)
 		},
 	)
 	assert.Nil(t, err)
@@ -176,9 +177,9 @@ func TestPermissonDeniedFilter(t *testing.T) {
 		{Username: &edgeCaseUsername, CloudTrailEvent: &edgeCaseCloudtrailEvent},
 	}
 	expected := []types.Event{}
-	filtered, err := ctUtil.ApplyFilters(edgeCaseEvents,
+	filtered, err := cloudtrail.ApplyFilters(edgeCaseEvents,
 		func(event types.Event) (bool, error) {
-			return isforbiddenEvent(event)
+			return cloudtrail.IsforbiddenEvent(event)
 		},
 	)
 	assert.EqualErrorf(t, err, "[ERROR] failed to extract raw CloudTrail event details: cannot parse a nil input", "")
diff --git a/cmd/cloudtrail/pkg/aws/testdata/valid_event.json b/cmd/cloudtrail/testdata/valid_event.json
similarity index 100%
rename from cmd/cloudtrail/pkg/aws/testdata/valid_event.json
rename to cmd/cloudtrail/testdata/valid_event.json
diff --git a/cmd/cloudtrail/time.go b/cmd/cloudtrail/time.go
new file mode 100644
index 000000000..a3eb6fe1d
--- /dev/null
+++ b/cmd/cloudtrail/time.go
@@ -0,0 +1,120 @@
+package cloudtrail
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ParseStartEndTime parses start time, end time, and duration parameters to calculate
+// the actual time range for CloudTrail event queries.
+//
+// Parameters:
+//   - start: Start time in "YYYY-MM-DD,HH:MM:SS" format (--after flag)
+//   - end: End time in "YYYY-MM-DD,HH:MM:SS" format (--until flag)
+//   - duration: Duration string like "2h" or "30m" (--since flag)
+//
+// Time calculation logic:
+//   - If both start and end are provided: Use the exact time range
+//   - If only start is provided: start + duration (forward in time)
+//   - If only end is provided: end - duration (backward in time)
+//   - If neither start nor end is provided: Use time.Now().UTC() - duration (default 1h)
+//
+// Returns:
+//   - startTime: Calculated start time in UTC
+//   - endTime: Calculated end time in UTC
+//   - error: Any parsing or validation error
+func ParseStartEndTime(start, end, duration string) (time.Time, time.Time, error) {
+	var startTime, endTime time.Time
+	var err error
+
+	if start == "" && end == "" {
+		endTime = time.Now().UTC()
+		if startTime, err = ParseDurationBefore(duration, endTime); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Failed to parse --since: %w", err)
+		}
+		return startTime, endTime, nil
+	}
+
+	if start != "" && end != "" {
+		if startTime, err = ParseTimeAndValidate(start); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Time Format Incorrect: %w", err)
+		}
+		if endTime, err = ParseTimeAndValidate(end); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Time Format Incorrect: %w", err)
+		}
+		// Both bounds are user-supplied, so reject an inverted range here.
+		if startTime.After(endTime) {
+			return time.Time{}, time.Time{}, fmt.Errorf("start time %v is after end time %v", startTime, endTime)
+		}
+		return startTime, endTime, nil
+	}
+
+	if start != "" {
+		if startTime, err = ParseTimeAndValidate(start); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Time Format Incorrect: %w", err)
+		}
+		if endTime, err = ParseDurationAfter(duration, startTime); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Failed to parse --since: %w", err)
+		}
+		return startTime, endTime, nil
+	}
+
+	if end != "" {
+		if endTime, err = ParseTimeAndValidate(end); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Time Format Incorrect: %w", err)
+		}
+		if startTime, err = ParseDurationBefore(duration, endTime); err != nil {
+			return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Failed to parse --since: %w", err)
+		}
+		return startTime, endTime, nil
+	}
+
+	return time.Time{}, time.Time{}, fmt.Errorf("[ERROR] Invalid time parameter combination")
+}
+
+// ParseDurationAfter parses the given duration string and adds it to startTime.
+// If startTime is the zero value, the current UTC time is used instead.
+// It returns the resulting time and any parsing error encountered.
+func ParseDurationAfter(input string, startTime time.Time) (time.Time, error) {
+	duration, err := time.ParseDuration(input)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("unable to parse time duration: %w", err)
+	}
+	if startTime.IsZero() {
+		startTime = time.Now().UTC()
+	}
+
+	return startTime.UTC().Add(duration), nil
+}
+
+// ParseDurationBefore parses the given duration string and subtracts it from startTime.
+// If startTime is the zero value, the current UTC time is used instead.
+// It returns the resulting time and any parsing error encountered.
+func ParseDurationBefore(input string, startTime time.Time) (time.Time, error) {
+	duration, err := time.ParseDuration(input)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("unable to parse time duration: %w", err)
+	}
+	if startTime.IsZero() {
+		startTime = time.Now().UTC()
+	}
+
+	return startTime.UTC().Add(-duration), nil
+}
+
+// ParseTimeAndValidate takes the "YYYY-MM-DD,HH:MM:SS" format, splits the date and
+// time parts, and interprets the result as UTC.
+// It returns the parsed time and any parsing error encountered.
+func ParseTimeAndValidate(timeStr string) (time.Time, error) {
+	parts := strings.Split(timeStr, ",")
+	if len(parts) != 2 {
+		return time.Time{}, fmt.Errorf("invalid time format. Expected format: YYYY-MM-DD,HH:MM:SS")
+	}
+
+	formattedTimeStr := parts[0] + " " + parts[1]
+	layout := "2006-01-02 15:04:05"
+	parsedTime, err := time.Parse(layout, formattedTimeStr)
+
+	if err != nil {
+		return time.Time{}, err
+	}
+	return parsedTime.UTC(), nil
+}
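+
+// A quick illustration of the rules above (inputs hypothetical, results in UTC):
+//
+//	ParseStartEndTime("2025-07-15,09:00:00", "2025-07-15,17:00:00", "1h") // exact range; duration ignored
+//	ParseStartEndTime("2025-07-15,15:00:00", "", "2h")                    // 15:00 -> 17:00
+//	ParseStartEndTime("", "2025-07-15,15:00:00", "2h")                    // 13:00 -> 15:00
+//	ParseStartEndTime("", "", "1h")                                       // the last hour, ending now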
diff --git a/cmd/cloudtrail/write-events.go b/cmd/cloudtrail/write-events.go
index 1f69b7fef..59c934fe9 100644
--- a/cmd/cloudtrail/write-events.go
+++ b/cmd/cloudtrail/write-events.go
@@ -3,16 +3,11 @@ import (
 	"context"
 	"fmt"
-	"regexp"
 	"strings"
 
 	"github.com/aws/aws-sdk-go-v2/config"
 	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
-	"github.com/aws/aws-sdk-go-v2/service/cloudtrail/types"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
-	ctUtil "github.com/openshift/osdctl/cmd/cloudtrail/pkg"
-	ctAws "github.com/openshift/osdctl/cmd/cloudtrail/pkg/aws"
-	envConfig "github.com/openshift/osdctl/pkg/envConfig"
 	"github.com/openshift/osdctl/pkg/osdCloud"
 	"github.com/openshift/osdctl/pkg/utils"
 	"github.com/spf13/cobra"
@@ -22,86 +17,89 @@ var DefaultRegion = "us-east-1"
 
 // LookupEventsOptions struct for holding options for event lookup
 type writeEventsOptions struct {
-	ClusterID string
-	StartTime string
-	PrintUrl  bool
-	PrintRaw  bool
-	PrintAll  bool
+	ClusterID   string
+	StartTime   string
+	EndTime     string
+	Duration    string
+	PrintUrl    bool
+	PrintRaw    bool
+	PrintFormat []string
 }
 
-// RawEventDetails struct represents the structure of an AWS raw event
-type RawEventDetails struct {
-	EventVersion string `json:"eventVersion"`
-	UserIdentity struct {
-		AccountId      string `json:"accountId"`
-		SessionContext struct {
-			SessionIssuer struct {
-				Type     string `json:"type"`
-				UserName string `json:"userName"`
-				Arn      string `json:"arn"`
-			} `json:"sessionIssuer"`
-		} `json:"sessionContext"`
-	} `json:"userIdentity"`
-	EventRegion string `json:"awsRegion"`
-	EventId     string `json:"eventID"`
-}
+const (
+	cloudtrailWriteEventsExample = `
+	# Time range with include and exclude filters: include username=john.doe and event=CreateBucket matches,
+	# exclude event=AssumeRole and username=system matches; print custom format
+	$ osdctl cloudtrail write-events -C cluster-id --after 2025-07-15,09:00:00 --until 2025-07-15,17:00:00 \
+		-I username=john.doe -I event=CreateBucket -E event=AssumeRole -E username=system --print-format event,time,username,resource-name
+
+	# Get all events from a specific time onwards for a 2h duration; print url
+	$ osdctl cloudtrail write-events -C cluster-id --after 2025-07-15,15:00:00 --since 2h --url
+
+	# Get all events until the specified time, covering the preceding 2 hours; print raw-event
+	$ osdctl cloudtrail write-events -C cluster-id --until 2025-07-15,15:00:00 --since 2h --raw-event`
+
+	cloudtrailWriteEventsDescription = `
+	Lists AWS CloudTrail write events for a specific OpenShift/ROSA cluster with advanced
+	filtering capabilities to help investigate cluster-related activities.
+
+	The command automatically authenticates with OpenShift Cluster Manager (OCM) and assumes
+	the appropriate AWS role for the target cluster to access CloudTrail logs.
+
+	By default, the command filters out system and service account events using patterns
+	from the osdctl configuration file. `
+)
 
 func newCmdWriteEvents() *cobra.Command {
 	ops := &writeEventsOptions{}
+	fil := &WriteEventFilters{}
 	listEventsCmd := &cobra.Command{
-		Use:   "write-events",
-		Short: "Prints cloudtrail write events to console with optional filtering",
+		Use:     "write-events",
+		Short:   "Prints cloudtrail write events to console with advanced filtering options",
+		Long:    cloudtrailWriteEventsDescription,
+		Example: cloudtrailWriteEventsExample,
+		Args:    cobra.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return ops.run()
+			return ops.run(*fil)
 		},
 	}
 	listEventsCmd.Flags().StringVarP(&ops.ClusterID, "cluster-id", "C", "", "Cluster ID")
-	listEventsCmd.Flags().StringVarP(&ops.StartTime, "since", "", "1h", "Specifies that only events that occur within the specified time are returned.Defaults to 1h.Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".")
+	listEventsCmd.Flags().StringVarP(&ops.StartTime, "after", "", "", "Specifies all events that occur after the specified time. Format \"YYYY-MM-DD,HH:MM:SS\".")
+	listEventsCmd.Flags().StringVarP(&ops.EndTime, "until", "", "", "Specifies all events that occur before the specified time. Format \"YYYY-MM-DD,HH:MM:SS\".")
+	listEventsCmd.Flags().StringVarP(&ops.Duration, "since", "", "1h", "Specifies that only events that occur within the specified time are returned. Defaults to 1h. Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".")
+
 	listEventsCmd.Flags().BoolVarP(&ops.PrintUrl, "url", "u", false, "Generates Url link to cloud console cloudtrail event")
 	listEventsCmd.Flags().BoolVarP(&ops.PrintRaw, "raw-event", "r", false, "Prints the cloudtrail events to the console in raw json format")
-	listEventsCmd.Flags().BoolVarP(&ops.PrintAll, "all", "A", false, "Prints all cloudtrail write events without filtering")
-	_ = listEventsCmd.MarkFlagRequired("cluster-id")
+	listEventsCmd.Flags().StringSliceVarP(&ops.PrintFormat, "print-format", "", nil, "Prints all cloudtrail write events in selected format. Can specify (username, time, event, arn, resource-name, resource-type). i.e --print-format username,time,event")
+
+	listEventsCmd.Flags().StringSliceVarP(&fil.Include, "include", "I", nil, "Filter events by inclusion. (i.e. 
\"-I username=, -I event=, -I resource-name=, -I resource-type=, -I arn=\")") + listEventsCmd.Flags().StringSliceVarP(&fil.Exclude, "exclude", "E", nil, "Filter events by exclusion. (i.e. \"-E username=, -E event=, -E resource-name=, -E resource-type=, -E arn=\")") + listEventsCmd.MarkFlagRequired("cluster-id") return listEventsCmd } -// FilterByIgnorelist filters out events based on the specified ignore list, which contains -// regular expression patterns. It returns true if the event should be kept, and false if it should be filtered out. -func isIgnoredEvent(event types.Event, mergedRegex string) (bool, error) { - if mergedRegex == "" { - return true, nil - } - raw, err := ctAws.ExtractUserDetails(event.CloudTrailEvent) - if err != nil { - return true, fmt.Errorf("[ERROR] failed to extract raw CloudTrail event details: %w", err) - } - userArn := raw.UserIdentity.SessionContext.SessionIssuer.Arn - regexObj := regexp.MustCompile(mergedRegex) +func (o *writeEventsOptions) run(filters WriteEventFilters) error { - if event.Username != nil { - if regexObj.MatchString(*event.Username) { - return false, nil - } + err := utils.IsValidClusterKey(o.ClusterID) + if err != nil { + return err } - if userArn != "" { - - if regexObj.MatchString(userArn) { - return false, nil - } + if err := ValidateFilters(filters.Include); err != nil { + return err } - if userArn == "" && event.Username == nil { - return false, nil + if err := ValidateFilters(filters.Exclude); err != nil { + return err } - return true, nil -} - -func (o *writeEventsOptions) run() error { + if err := ValidateFormat(o.PrintFormat); err != nil { + return err + } - err := utils.IsValidClusterKey(o.ClusterID) + startTime, endTime, err := ParseStartEndTime(o.StartTime, o.EndTime, o.Duration) if err != nil { return err } + connection, err := utils.CreateConnection() if err != nil { return fmt.Errorf("unable to create connection to ocm: %w", err) @@ -116,54 +114,35 @@ func (o *writeEventsOptions) run() error { return fmt.Errorf("[ERROR] this command is only available for AWS clusters") } - Ignore, err := envConfig.LoadCloudTrailConfig() - if err != nil { - return fmt.Errorf("[ERROR] error Loading cloudtrail configuration file: %w", err) - } - if len(Ignore) == 0 { - fmt.Println("\n[WARNING] No filter list detected! If you want intend to apply user filtering for the cloudtrail events, please add cloudtrail_cmd_lists to your osdctl configuration file.") - - } - - mergedRegex := ctUtil.MergeRegex(Ignore) - if o.PrintAll { - mergedRegex = "" - } cfg, err := osdCloud.CreateAWSV2Config(connection, cluster) if err != nil { return err } - DefaultRegion := "us-east-1" - startTime, err := ctUtil.ParseDurationToUTC(o.StartTime) - if err != nil { - return err - } - // FilterAndPrintEvents fetches events and filters them based on a regex string. - // It then prints the filtered events. 
+ DefaultRegion := "us-east-1" - arn, accountId, err := ctAws.Whoami(*sts.NewFromConfig(cfg)) + arn, accountId, err := Whoami(*sts.NewFromConfig(cfg)) if err != nil { return err } - fmt.Printf("[INFO] Checking write event history since %v for AWS Account %v as %v \n", startTime, accountId, arn) + + fmt.Printf("[INFO] Checking write event history since %v until %v for AWS Account %v as %v \n", startTime, endTime, accountId, arn) cloudTrailclient := cloudtrail.NewFromConfig(cfg) fmt.Printf("[INFO] Fetching %v Event History...", cfg.Region) - queriedEvents, err := ctAws.GetEvents(cloudTrailclient, startTime, true) + + queriedEvents, err := GetEvents(cloudTrailclient, startTime, endTime, true) if err != nil { return err } - filteredEvents, err := ctUtil.ApplyFilters(queriedEvents, - func(event types.Event) (bool, error) { - return isIgnoredEvent(event, mergedRegex) - }, - ) - if err != nil { - return err + filteredEvents := Filters(filters, queriedEvents) + + if o.PrintFormat != nil { + PrintFormat(filteredEvents, o.PrintUrl, o.PrintRaw, o.PrintFormat) + } else { + PrintEvents(filteredEvents, o.PrintUrl, o.PrintRaw) } - ctUtil.PrintEvents(filteredEvents, o.PrintUrl, o.PrintRaw) fmt.Println("") if DefaultRegion != cfg.Region { @@ -179,20 +158,15 @@ func (o *writeEventsOptions) run() error { Credentials: cfg.Credentials, HTTPClient: cfg.HTTPClient, }) - fmt.Printf("[INFO] Fetching Cloudtrail Global Event History from %v Region...", defaultConfig.Region) - lookupOutput, err := ctAws.GetEvents(defaultCloudtrailClient, startTime, true) - if err != nil { - return err - } - filteredEvents, err := ctUtil.ApplyFilters(lookupOutput, - func(event types.Event) (bool, error) { - return isIgnoredEvent(event, mergedRegex) - }, - ) + fmt.Printf("[INFO] Fetching Cloudtrail Global Event History from %v Region... \n", defaultConfig.Region) + + queriedEvents, err := GetEvents(defaultCloudtrailClient, startTime, endTime, true) if err != nil { return err } - ctUtil.PrintEvents(filteredEvents, o.PrintUrl, o.PrintRaw) + filteredEvents = Filters(filters, queriedEvents) + + PrintEvents(filteredEvents, o.PrintUrl, o.PrintRaw) } return err diff --git a/docs/README.md b/docs/README.md index bdd731dcd..b6b950f66 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,7 +38,7 @@ - `org <org-id> [--all --duration --comment | --alertname --duration --comment]` - Add new silence for alert for org - `cloudtrail` - AWS CloudTrail related utilities - `permission-denied-events` - Prints cloudtrail permission-denied events to console. - - `write-events` - Prints cloudtrail write events to console with optional filtering + - `write-events` - Prints cloudtrail write events to console with advanced filtering options - `cluster` - Provides information for a specified cluster - `break-glass --cluster-id <cluster-identifier>` - Emergency access to a cluster - `cleanup --cluster-id <cluster-identifier>` - Drop emergency access to a cluster @@ -1131,7 +1131,15 @@ osdctl cloudtrail permission-denied-events [flags] ### osdctl cloudtrail write-events -Prints cloudtrail write events to console with optional filtering + + Lists AWS CloudTrail write events for a specific OpenShift/ROSA cluster with advanced + filtering capabilities to help investigate cluster-related activities. + + The command automatically authenticates with OpenShift Cluster Manager (OCM) and assumes + the appropriate AWS role for the target cluster to access CloudTrail logs. 
+
+  By default, the command filters out system and service account events using patterns
+  from the osdctl configuration file.
 
 ```
 osdctl cloudtrail write-events [flags]
 ```
 
@@ -1140,21 +1148,25 @@ osdctl cloudtrail write-events [flags]
 #### Flags
 
 ```
-  -A, --all                             Prints all cloudtrail write events without filtering
+      --after string                    Specifies all events that occur after the specified time. Format "YYYY-MM-DD,HH:MM:SS".
       --as string                       Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
       --cluster string                  The name of the kubeconfig cluster to use
   -C, --cluster-id string               Cluster ID
       --context string                  The name of the kubeconfig context to use
+  -E, --exclude strings                 Filter events by exclusion. (i.e. "-E username=, -E event=, -E resource-name=, -E resource-type=, -E arn=")
   -h, --help                            help for write-events
+  -I, --include strings                 Filter events by inclusion. (i.e. "-I username=, -I event=, -I resource-name=, -I resource-type=, -I arn=")
       --insecure-skip-tls-verify        If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
       --kubeconfig string               Path to the kubeconfig file to use for CLI requests.
   -o, --output string                   Valid formats are ['', 'json', 'yaml', 'env']
+      --print-format strings            Prints all cloudtrail write events in selected format. Can specify (username, time, event, arn, resource-name, resource-type). i.e --print-format username,time,event
   -r, --raw-event                       Prints the cloudtrail events to the console in raw json format
       --request-timeout string          The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
   -s, --server string                   The address and port of the Kubernetes API server
-      --since string                    Specifies that only events that occur within the specified time are returned.Defaults to 1h.Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". (default "1h")
+      --since string                    Specifies that only events that occur within the specified time are returned. Defaults to 1h. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". (default "1h")
       --skip-aws-proxy-check aws_proxy   Don't use the configured aws_proxy value
   -S, --skip-version-check              skip checking to see if this is the most recent release
+      --until string                    Specifies all events that occur before the specified time. Format "YYYY-MM-DD,HH:MM:SS".
   -u, --url                             Generates Url link to cloud console cloudtrail event
 ```
 
diff --git a/docs/osdctl_cloudtrail.md b/docs/osdctl_cloudtrail.md
index 8a33a393e..754263018 100644
--- a/docs/osdctl_cloudtrail.md
+++ b/docs/osdctl_cloudtrail.md
@@ -31,5 +31,5 @@ osdctl cloudtrail [flags]
 
 * [osdctl](osdctl.md)	 - OSD CLI
 * [osdctl cloudtrail permission-denied-events](osdctl_cloudtrail_permission-denied-events.md)	 - Prints cloudtrail permission-denied events to console.
-* [osdctl cloudtrail write-events](osdctl_cloudtrail_write-events.md)	 - Prints cloudtrail write events to console with optional filtering
+* [osdctl cloudtrail write-events](osdctl_cloudtrail_write-events.md)	 - Prints cloudtrail write events to console with advanced filtering options
 
diff --git a/docs/osdctl_cloudtrail_write-events.md b/docs/osdctl_cloudtrail_write-events.md
index ce533d520..f691e788d 100644
--- a/docs/osdctl_cloudtrail_write-events.md
+++ b/docs/osdctl_cloudtrail_write-events.md
@@ -1,20 +1,51 @@
 ## osdctl cloudtrail write-events
 
-Prints cloudtrail write events to console with optional filtering
+Prints cloudtrail write events to console with advanced filtering options
+
+### Synopsis
+
+
+ Lists AWS CloudTrail write events for a specific OpenShift/ROSA cluster with advanced
+ filtering capabilities to help investigate cluster-related activities.
+
+ The command automatically authenticates with OpenShift Cluster Manager (OCM) and assumes
+ the appropriate AWS role for the target cluster to access CloudTrail logs.
+
+ By default, the command filters out system and service account events using patterns
+ from the osdctl configuration file.
 
 ```
 osdctl cloudtrail write-events [flags]
 ```
 
+### Examples
+
+```
+
+	# Time range with include and exclude filters: include username=john.doe and event=CreateBucket matches,
+	# exclude event=AssumeRole and username=system matches; print custom format
+	$ osdctl cloudtrail write-events -C cluster-id --after 2025-07-15,09:00:00 --until 2025-07-15,17:00:00 \
+		-I username=john.doe -I event=CreateBucket -E event=AssumeRole -E username=system --print-format event,time,username,resource-name
+
+	# Get all events from a specific time onwards for a 2h duration; print url
+	$ osdctl cloudtrail write-events -C cluster-id --after 2025-07-15,15:00:00 --since 2h --url
+
+	# Get all events until the specified time, covering the preceding 2 hours; print raw-event
+	$ osdctl cloudtrail write-events -C cluster-id --until 2025-07-15,15:00:00 --since 2h --raw-event
+```
+
 ### Options
 
 ```
-  -A, --all                 Prints all cloudtrail write events without filtering
-  -C, --cluster-id string   Cluster ID
-  -h, --help                help for write-events
-  -r, --raw-event           Prints the cloudtrail events to the console in raw json format
-      --since string        Specifies that only events that occur within the specified time are returned.Defaults to 1h.Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". (default "1h")
-  -u, --url                 Generates Url link to cloud console cloudtrail event
+      --after string            Specifies all events that occur after the specified time. Format "YYYY-MM-DD,HH:MM:SS".
+  -C, --cluster-id string       Cluster ID
+  -E, --exclude strings         Filter events by exclusion. (i.e. "-E username=, -E event=, -E resource-name=, -E resource-type=, -E arn=")
+  -h, --help                    help for write-events
+  -I, --include strings         Filter events by inclusion. (i.e. "-I username=, -I event=, -I resource-name=, -I resource-type=, -I arn=")
+      --print-format strings    Prints all cloudtrail write events in selected format. Can specify (username, time, event, arn, resource-name, resource-type). i.e --print-format username,time,event
+  -r, --raw-event               Prints the cloudtrail events to the console in raw json format
+      --since string            Specifies that only events that occur within the specified time are returned. Defaults to 1h. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". (default "1h")
+      --until string            Specifies all events that occur before the specified time. Format "YYYY-MM-DD,HH:MM:SS".
+  -u, --url                     Generates Url link to cloud console cloudtrail event
 ```
 
 ### Options inherited from parent commands

From 63ba5a1195ce8ce6d856f1913e6aa11ca9e713b2 Mon Sep 17 00:00:00 2001
From: Matt Clark <maclark@redhat.com>
Date: Thu, 28 Aug 2025 19:28:29 -0700
Subject: [PATCH 40/40] OSD-26415: Skip pull-secret auths validations during
 dry-run.

---
 cmd/cluster/transferowner.go | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go
index 9b7920896..58b074f30 100644
--- a/cmd/cluster/transferowner.go
+++ b/cmd/cluster/transferowner.go
@@ -941,10 +941,14 @@ func (o *transferOwnerOptions) run() error {
 			}
 		}
 	}
-
-	err = verifyClusterPullSecret(targetClientSet, string(pullSecret), auths)
-	if err != nil {
-		return fmt.Errorf("error verifying cluster pull secret: %w", err)
+	// During a dry-run the pull secret has not yet been updated, so verifying it can produce misleading errors
+	if o.dryrun {
+		fmt.Println("This is a dry-run; skipping verification of the cluster's pull secret.")
+	} else {
+		err = verifyClusterPullSecret(targetClientSet, string(pullSecret), auths)
+		if err != nil {
+			return fmt.Errorf("error verifying cluster pull secret: %w", err)
+		}
 	}
 
 	if o.doPullSecretOnly {