diff --git a/Gopkg.lock b/Gopkg.lock index 53c079c..764c2d5 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -95,6 +95,16 @@ revision = "a105b96453fe85139acc07b68de48f2cbdd71249" version = "v0.2.0" +[[projects]] + digest = "1:ef796be125d656ea1556ea57c6b1911092c9c763085644ef9035c63ac3f2b573" + name = "github.com/coreos/go-semver" + packages = [ + ".", + "semver", + ] + pruneopts = "NT" + revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6" + [[projects]] digest = "1:8449bb0c1da0fd7ec4706afb5ae46ad0ad658fd87f17a7eb067c5a307ffe3818" name = "github.com/cyphar/filepath-securejoin" @@ -529,6 +539,22 @@ pruneopts = "NT" revision = "84c2b942258aea2462e675e03aeb8eb4cb5f3c29" +[[projects]] + digest = "1:e23f911287c9b01dd85ad7d21ec1913cb562ff46c06f5b399eb19bd26ae82a2e" + name = "github.com/operator-framework/operator-lifecycle-manager" + packages = [ + "pkg/api/apis/operators", + "pkg/api/apis/operators/v1", + "pkg/api/apis/operators/v1alpha1", + "pkg/api/client", + "pkg/api/client/clientset/versioned", + "pkg/api/client/clientset/versioned/scheme", + "pkg/api/client/clientset/versioned/typed/operators/v1", + "pkg/api/client/clientset/versioned/typed/operators/v1alpha1", + ] + pruneopts = "NT" + revision = "aeb24aeb363b25c8f9e8267f27579c1483dacc42" + [[projects]] digest = "1:674610d54812d3c36ab7861fc826176bf581a9426cc09abec0107414c17f89cd" name = "github.com/operator-framework/operator-sdk" @@ -1507,6 +1533,7 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "github.com/coreos/go-semver", "github.com/golang/protobuf/proto", "github.com/openshift/api/apps/v1", "github.com/openshift/api/build/v1", @@ -1515,6 +1542,9 @@ "github.com/openshift/api/route/v1", "github.com/openshift/client-go/image/clientset/versioned/fake", "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1", + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1", + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client", + 
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1", "github.com/operator-framework/operator-sdk/pkg/k8sutil", "github.com/operator-framework/operator-sdk/pkg/leader", "github.com/operator-framework/operator-sdk/pkg/ready", @@ -1535,8 +1565,11 @@ "k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/types", "k8s.io/apimachinery/pkg/util/intstr", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "k8s.io/client-go/tools/clientcmd", "k8s.io/code-generator/cmd/client-gen", "k8s.io/code-generator/cmd/conversion-gen", "k8s.io/code-generator/cmd/deepcopy-gen", diff --git a/Gopkg.toml b/Gopkg.toml index 48b1a0f..8bb9947 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -10,6 +10,7 @@ required = [ "k8s.io/gengo/args", "sigs.k8s.io/controller-tools/pkg/crd/generator", "github.com/golang/protobuf/proto", + "github.com/coreos/go-semver", ] [[override]] @@ -107,3 +108,12 @@ required = [ "pkg/apis", "pkg/apis/devconsole/v1alpha1", ] + + [[constraint]] + name = "github.com/operator-framework/operator-lifecycle-manager" + revision = "aeb24aeb363b25c8f9e8267f27579c1483dacc42" + + [[constraint]] + name = "github.com/coreos/go-semver" + revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6" + diff --git a/test/operatorsource/basic_test.go b/test/operatorsource/basic_test.go index 05985bb..8d4ae24 100644 --- a/test/operatorsource/basic_test.go +++ b/test/operatorsource/basic_test.go @@ -1,146 +1,101 @@ package operatorsource import ( - "bytes" "fmt" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" "os" - "os/exec" - "strings" "testing" - - "github.com/stretchr/testify/require" + "time" ) -const ShellToUse = "bash" - -func Shellout(command string) (string, string, error) { - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd := exec.Command(ShellToUse, "-c", command) - cmd.Stdout = 
&stdout - cmd.Stderr = &stderr - err := cmd.Run() - return stdout.String(), stderr.String(), err -} - -func Test_OperatorSource_oc_commands(t *testing.T) { - - defer CleanUp(t) +var ( + Client = NewTestClient() + namespace = "openshift-operators" + subName = "devconsole" + label = "name=devconsole-operator" + subscription, suberr = Client.GetSubscription(subName, namespace) +) - t.Run("login", func(t *testing.T) { Login(t) }) - t.Run("subscription", func(t *testing.T) { Subscription(t) }) - t.Run("install plan", func(t *testing.T) { InstallPlan(t) }) - t.Run("operator pod", func(t *testing.T) { OperatorPod(t) }) -} +func Test_OperatorSource(t *testing.T) { + pods, err := Client.GetPodByLabel(label, namespace) + if err != nil { + t.Fatal(err) + } + defer CleanUp(t, &pods.Items[0]) + retryInterval := time.Second * 10 + timeout := time.Second * 120 -func Login(t *testing.T) { - // Start - Login to oc - out, _, err := Shellout("oc login -u " + os.Getenv("OC_LOGIN_USERNAME") + " -p " + os.Getenv("OC_LOGIN_PASSWORD")) + err = Client.WaitForOperatorDeployment(t, pods.Items[0].Name, namespace, retryInterval, timeout) if err != nil { - t.Fatalf("error: %v\n", err) + t.Fatal(err) } else { - require.True(t, strings.Contains(out, "Login successful."), "Expecting successful login") + t.Run("subscription", func(t *testing.T) { Subscription(t) }) + t.Run("install plan", func(t *testing.T) { InstallPlan(t) }) + t.Run("operator pod", func(t *testing.T) { OperatorPod(t) }) } } func Subscription(t *testing.T) { // 1) Verify that the subscription was created - out, errout, err := Shellout("oc get sub devconsole -n openshift-operators") - if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Fatalf("error: %v\n", err) - } else { - require.True(t, strings.Contains(out, "devconsole"), "Expecting the subscription name to be found") - require.True(t, strings.Contains(out, "installed-custom-openshift-operators"), "Expecting the subscription namespace to be 
found") + if suberr != nil { + t.Fatal(suberr) } + require.Equal(t, subName, subscription.Name) + require.Equal(t, "installed-custom-openshift-operators", subscription.Spec.CatalogSource) } func InstallPlan(t *testing.T) { // 2) Find the name of the install plan - out, errout, err := Shellout("oc get sub devconsole -n openshift-operators -o jsonpath='{.status.installplan.name}'") - var installPlan string + installPlanName := subscription.Status.Install.Name + installPlan, err := Client.GetInstallPlan(installPlanName, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Fatalf("error: %v\n", err) - } else { - installPlan = out + t.Fatal(err) } - - // 3) Verify the install plan - out, errout, err = Shellout(fmt.Sprintf("oc get installplan %s -n openshift-operators", installPlan)) - if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Fatalf("error: %v\n", err) - } else { - require.True(t, strings.Contains(out, installPlan), "Expecting the Install Plan name to be found") - require.True(t, strings.Contains(out, "devconsole-operator.v0.1.0"), "Expecting the Operator release to be found") - require.True(t, strings.Contains(out, "Automatic"), "Expecting the approval method to be found") - require.True(t, strings.Contains(out, "true"), "Expecting the approved state to be found") + require.Equal(t, "devconsole-operator.v0.1.0", installPlan.Spec.ClusterServiceVersionNames[0]) + require.Equal(t, "Automatic", string(installPlan.Spec.Approval)) + if !installPlan.Spec.Approved { + require.FailNow(t, "Install plan approved is false") } } func OperatorPod(t *testing.T) { - // Verify that the operator's pod is running - out, errout, err := Shellout("oc get pods -l name=devconsole-operator -n openshift-operators -o jsonpath='{.items[*].status.phase}'") + // 3) Check operator pod status, fail status != Running + pods, err := Client.GetPodByLabel(label, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - 
t.Logf("stderr: %s\n", errout) - t.Fatalf("error: %v\n", err) - } else { - require.True(t, strings.Contains(out, "Running"), "Expecting the state of the Operator pod to be running") + t.Fatal(err) } + pod := pods.Items[0] + require.Equal(t, pod.Status.Phase, corev1.PodRunning) } -func CleanUp(t *testing.T) { +func CleanUp(t *testing.T, pod *corev1.Pod) { // Clean up resources - operatorSourceName := os.Getenv("OPSRC_NAME") operatorVersion := os.Getenv("DEVCONSOLE_OPERATOR_VERSION") - out, errout, err := Shellout(fmt.Sprintf("oc delete opsrc %s -n openshift-marketplace", operatorSourceName)) + err := Client.Delete("installplan", subscription.Status.Install.Name, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Logf("error: %v\n", err) - } else { - t.Logf(out) + t.Logf("Error: %v\n", err) } - out, errout, err = Shellout("oc delete sub devconsole -n openshift-operators") + err = Client.Delete("catsrc", subscription.Spec.CatalogSource, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Logf("error: %v\n", err) - } else { - t.Logf(out) + t.Logf("Error: %v\n", err) } - out, errout, err = Shellout("oc delete catsrc installed-custom-openshift-operators -n openshift-operators") + err = Client.Delete("sub", subName, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Logf("error: %v\n", err) - } else { - t.Logf(out) + t.Logf("Error: %v\n", err) } - out, errout, err = Shellout("oc delete csc installed-custom-openshift-operators -n openshift-marketplace") + csv := fmt.Sprintf("devconsole-operator.v%s", operatorVersion) + err = Client.Delete("csv", csv, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Logf("error: %v\n", err) - } else { - t.Logf(out) + t.Logf("Error: %v\n", err) } - out, errout, err = Shellout(fmt.Sprintf("oc delete csv devconsole-operator.v%s -n openshift-operators", operatorVersion)) + 
err = Client.Delete("pod", pod.Name, namespace) if err != nil { - t.Logf("stdout: %s\n", out) - t.Logf("stderr: %s\n", errout) - t.Logf("error: %v\n", err) - } else { - t.Logf(out) + t.Logf("Error: %v\n", err) } } diff --git a/test/operatorsource/helpers.go b/test/operatorsource/helpers.go new file mode 100644 index 0000000..886f2f4 --- /dev/null +++ b/test/operatorsource/helpers.go @@ -0,0 +1,127 @@ +package operatorsource + +import ( + "errors" + "fmt" + apis_v1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" + client "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client" + v1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/typed/operators/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "log" + "os" + "testing" + "time" +) + +//ClientSetK8sCoreAPI returns new Clientset for the given config, use to interact with K8s resources like pods +func ClientSetK8sCoreAPI(kubeconfig string) *kubernetes.Clientset { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + log.Fatal(err) + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + log.Fatal(err) + } + return clientset +} + +//ClientSet Creates clientset for given config, use to interact with custom resources +func ClientSet(kubeconfig string) v1alpha1.OperatorsV1alpha1Interface { + client, err := client.NewClient(kubeconfig) + if err != nil { + log.Fatal(err) + } + return client.OperatorsV1alpha1() +} + +// TestClient wraps all the clientsets required while testing +type TestClient struct { + K8sClient *kubernetes.Clientset + OperatorClient v1alpha1.OperatorsV1alpha1Interface +} + +//NewTestClient initialises the TestClient +func NewTestClient() *TestClient { + kubeconfig := os.Getenv("KUBECONFIG") + + 
return &TestClient{ + K8sClient: ClientSetK8sCoreAPI(kubeconfig), + OperatorClient: ClientSet(kubeconfig), + } +} + +// GetPodByLabel is a function that takes label and namespace and returns the pod and error +func (tc *TestClient) GetPodByLabel(label string, namespace string) (*corev1.PodList, error) { + + pods, err := tc.K8sClient.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: label}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, nil + } + return pods, nil +} + +//GetSubscription returns subscription struct +func (tc *TestClient) GetSubscription(subName, namespace string) (*apis_v1alpha1.Subscription, error) { + subscription, err := tc.OperatorClient.Subscriptions(namespace).Get(subName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return subscription, nil +} + +//GetInstallPlan returns install plan struct +func (tc *TestClient) GetInstallPlan(installPlanName, namespace string) (*apis_v1alpha1.InstallPlan, error) { + + installPlan, err := tc.OperatorClient.InstallPlans(namespace).Get(installPlanName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return installPlan, nil +} + +//Delete takes kind, name and namespace of the resource and deletes it. Returns an error if one occurs. 
+func (tc *TestClient) Delete(resource, name, namespace string) error { + switch resource { + case "subscription", "sub": + return tc.OperatorClient.Subscriptions(namespace).Delete(name, &metav1.DeleteOptions{}) + case "installplan": + return tc.OperatorClient.InstallPlans(namespace).Delete(name, &metav1.DeleteOptions{}) + case "catalogsource", "catsrc", "csc": + return tc.OperatorClient.CatalogSources(namespace).Delete(name, &metav1.DeleteOptions{}) + case "clusterserviceversion", "csv": + return tc.OperatorClient.ClusterServiceVersions(namespace).Delete(name, &metav1.DeleteOptions{}) + case "pod": + return tc.K8sClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{}) + default: + option := fmt.Sprintf("Invalid resource: %s", resource) + return errors.New(option) + + } +} + +//WaitForOperatorDeployment takes a pod name and waits until the pod is in the Running state +func (tc *TestClient) WaitForOperatorDeployment(t *testing.T, name, namespace string, retryInterval, timeout time.Duration) error { + return wait.Poll(retryInterval, timeout, func() (bool, error) { + pod, err := tc.K8sClient.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if pod == nil { + return false, errors.New("pod returned empty") + } + if pod.Status.Phase == corev1.PodRunning { + return true, nil + } + t.Logf("Waiting for pod %s to get running, Current Status: %s\n", pod.Name, pod.Status.Phase) + return false, nil + }) +}