Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

extended tests for jenkins openshift V3 plugin #6329

Merged
merged 1 commit into from Jan 9, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions test/extended/extended_test.go
Expand Up @@ -6,6 +6,7 @@ import (
_ "github.com/openshift/origin/test/extended/builds"
_ "github.com/openshift/origin/test/extended/cli"
_ "github.com/openshift/origin/test/extended/images"
_ "github.com/openshift/origin/test/extended/jenkins"
_ "github.com/openshift/origin/test/extended/job"
_ "github.com/openshift/origin/test/extended/router"
_ "github.com/openshift/origin/test/extended/security"
Expand Down
88 changes: 88 additions & 0 deletions test/extended/fixtures/testjob-plugin.xml
@@ -0,0 +1,88 @@
<?xml version='1.0' encoding='UTF-8'?>
<!--
  Jenkins free-style job definition used by the extended tests to exercise the
  OpenShift pipeline plugin. Every PROJECT_NAME placeholder is substituted with
  the test's namespace before the config is POSTed to Jenkins.

  The builders run in order: scale the "frontend" deployment to 0, verify 0
  replicas, run the "frontend" build (following its log), verify the
  deployment, scale back to 1 replica and verify, verify the "frontend"
  service, tag origin-nodejs-sample:latest as :prod, then verify the
  "frontend-prod" deployment reaches 1 replica.
-->
<project>
<actions/>
<description></description>
<keepDependencies>false</keepDependencies>
<scm class="hudson.scm.NullSCM"/>
<canRoam>true</canRoam>
<disabled>false</disabled>
<blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
<blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
<triggers/>
<concurrentBuild>false</concurrentBuild>
<builders>

<com.openshift.jenkins.plugins.pipeline.OpenShiftScaler>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<depCfg>frontend</depCfg>
<namespace>PROJECT_NAME</namespace>
<replicaCount>0</replicaCount>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftScaler>

<com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<depCfg>frontend</depCfg>
<namespace>PROJECT_NAME</namespace>
<replicaCount>0</replicaCount>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>

<com.openshift.jenkins.plugins.pipeline.OpenShiftBuilder>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<bldCfg>frontend</bldCfg>
<namespace>PROJECT_NAME</namespace>
<authToken></authToken>
<followLog>true</followLog>
</com.openshift.jenkins.plugins.pipeline.OpenShiftBuilder>

<com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<depCfg>frontend</depCfg>
<namespace>PROJECT_NAME</namespace>
<replicaCount>1</replicaCount>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>

<com.openshift.jenkins.plugins.pipeline.OpenShiftScaler>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<depCfg>frontend</depCfg>
<namespace>PROJECT_NAME</namespace>
<replicaCount>1</replicaCount>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftScaler>
<com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<depCfg>frontend</depCfg>
<namespace>PROJECT_NAME</namespace>
<replicaCount>1</replicaCount>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>

<com.openshift.jenkins.plugins.pipeline.OpenShiftServiceVerifier>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<svcName>frontend</svcName>
<namespace>PROJECT_NAME</namespace>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftServiceVerifier>

<com.openshift.jenkins.plugins.pipeline.OpenShiftImageTagger>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<namespace>PROJECT_NAME</namespace>
<testTag>origin-nodejs-sample:latest</testTag>
<prodTag>origin-nodejs-sample:prod</prodTag>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftImageTagger>

<com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>
<apiURL>https://openshift.default.svc.cluster.local</apiURL>
<depCfg>frontend-prod</depCfg>
<namespace>PROJECT_NAME</namespace>
<replicaCount>1</replicaCount>
<authToken></authToken>
</com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier>

</builders>
<publishers/>
<buildWrappers/>
</project>
160 changes: 160 additions & 0 deletions test/extended/jenkins/jenkins_plugin.go
@@ -0,0 +1,160 @@
package jenkins

import (
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"

"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"

"k8s.io/kubernetes/pkg/util/wait"

exutil "github.com/openshift/origin/test/extended/util"
)

// immediateInteractionWithJenkins issues a single HTTP request against the
// test Jenkins server at uri using basic auth (admin/password) and asserts,
// via gomega, that the response carries the expected status code. A non-nil
// body is sent as application/xml (used for posting job configs).
func immediateInteractionWithJenkins(uri, method string, body io.Reader, status int) {
	req, err := http.NewRequest(method, uri, body)
	o.Expect(err).NotTo(o.HaveOccurred())

	if body != nil {
		req.Header.Set("Content-Type", "application/xml")
		// jenkins will return 417 if we have an expect hdr
		req.Header.Del("Expect")
	}
	req.SetBasicAuth("admin", "password")

	// bound the request so a wedged jenkins cannot hang the test run forever
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	o.Expect(err).NotTo(o.HaveOccurred())
	defer resp.Body.Close()

	o.Expect(resp.StatusCode).To(o.BeEquivalentTo(status))
}

// waitForJenkinsActivity polls uri (basic auth admin/password) once per second
// for up to 3 minutes, returning nil as soon as a response arrives with the
// expected status code and, when verificationString is non-empty, a body that
// contains that string. On timeout it returns an error that includes the last
// console output retrieved, to aid debugging.
func waitForJenkinsActivity(uri, verificationString string, status int) error {
	consoleLogs := ""

	// share one client across poll iterations and bound each request so a
	// single hung connection cannot consume the entire poll budget
	client := &http.Client{Timeout: 10 * time.Second}
	err := wait.Poll(1*time.Second, 3*time.Minute, func() (bool, error) {
		req, err := http.NewRequest("GET", uri, nil)
		if err != nil {
			return false, err
		}
		req.SetBasicAuth("admin", "password")
		resp, err := client.Do(req)
		// the http req failing here (which we see occasionally in the ci.jenkins runs) could stem
		// from simply hitting our test jenkins server too soon ... so rather than returning false,err
		// and aborting the poll, we return false, nil to try again
		if err != nil || resp == nil {
			return false, nil
		}
		defer resp.Body.Close()
		if resp.StatusCode != status {
			return false, nil
		}
		if len(verificationString) == 0 {
			return true, nil
		}
		contents, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return false, err
		}
		consoleLogs = string(contents)
		return strings.Contains(consoleLogs, verificationString), nil
	})

	if err != nil {
		return fmt.Errorf("got error %v waiting on uri %s with verificationString %s and last set of console logs %s", err, uri, verificationString, consoleLogs)
	}
	return nil
}

// jenkinsJobBytes loads the named job-config fixture, replaces every
// occurrence of PROJECT_NAME with the supplied namespace, and returns the
// substituted file contents, ready to be POSTed to Jenkins.
func jenkinsJobBytes(filename, namespace string) []byte {
	src := exutil.FixturePath("fixtures", filename)
	dst := exutil.ArtifactPath(filename)
	err := exutil.VarSubOnFile(src, dst, "PROJECT_NAME", namespace)
	o.Expect(err).NotTo(o.HaveOccurred())
	contents, err := ioutil.ReadFile(dst)
	o.Expect(err).NotTo(o.HaveOccurred())
	return contents
}

var _ = g.Describe("jenkins: plugin: run job leveraging openshift pipeline plugin", func() {
defer g.GinkgoRecover()
var oc = exutil.NewCLI("jenkins-plugin", exutil.KubeConfigPath())
var hostPort string

g.BeforeEach(func() {

g.By("set up policy for jenkins jobs")
err := oc.Run("policy").Args("add-role-to-user", "edit", "system:serviceaccount:"+oc.Namespace()+":default").Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("kick off the build for the jenkins ephermeral and application templates")
jenkinsEphemeralPath := exutil.FixturePath("..", "..", "examples", "jenkins", "jenkins-ephemeral-template.json")
err = oc.Run("new-app").Args(jenkinsEphemeralPath).Execute()
o.Expect(err).NotTo(o.HaveOccurred())
jenkinsApplicationPath := exutil.FixturePath("..", "..", "examples", "jenkins", "application-template.json")
err = oc.Run("new-app").Args(jenkinsApplicationPath).Execute()
o.Expect(err).NotTo(o.HaveOccurred())

g.By("waiting for jenkins deployment")
err = exutil.WaitForADeployment(oc.KubeREST().ReplicationControllers(oc.Namespace()), "jenkins", exutil.CheckDeploymentCompletedFn, exutil.CheckDeploymentFailedFn)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("get ip and port for jenkins service")
serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
o.Expect(err).NotTo(o.HaveOccurred())
hostPort = fmt.Sprintf("%s:%s", serviceIP, port)

g.By("wait for jenkins to come up")
err = waitForJenkinsActivity(fmt.Sprintf("http://%s", hostPort), "", 200)
o.Expect(err).NotTo(o.HaveOccurred())

})

g.Context("jenkins-plugin test context ", func() {

g.It("jenkins-plugin test case execution", func() {

g.By("create jenkins job config xml file, convert to bytes for http post")
data := jenkinsJobBytes("testjob-plugin.xml", oc.Namespace())

g.By("make http request to create job")
immediateInteractionWithJenkins(fmt.Sprintf("http://%s/createItem?name=test-plugin-job", hostPort), "POST", bytes.NewBuffer(data), 200)

g.By("make http request to kick off build")
immediateInteractionWithJenkins(fmt.Sprintf("http://%s/job/test-plugin-job/build?delay=0sec", hostPort), "POST", nil, 201)

// the build and deployment is by far the most time consuming portion of the test jenkins job;
// we leverage some of the openshift utilities for waiting for the deployment before we poll
// jenkins for the successful job completion
g.By("waiting for frontend, frontend-prod deployments as signs that the build has finished")
err := exutil.WaitForADeployment(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", exutil.CheckDeploymentCompletedFn, exutil.CheckDeploymentFailedFn)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

how long does this wait before giving up? looks like it might wait forever which would be bad for fast-failing.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yeah, perhaps it depends on the overall ginkgo timeout, or the watch has a TO, but otherwise, I agree it seems like it would wait forever ... WaitForAnImageStream seems similar; it is leveraged in some other test cases;

According to git blame, @stevekuznetsov last manipulated these methods .. any insight on when those were expected (or not expected) to timeout @stevekuznetsov ?

Otherwise, @bparees I could leverage WaitForABuild in the new test case ... it is not quite as far along the build flow, but still gives us a head start for subsequent polling

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the watch timeout won't matter since it'll just re-list and then start a new watch if the watch times out.

the right thing to do is probably do make WaitForADeployment take a duration, or at least make it implicitly include one like WaitForABuild does.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

want to make sure I reconcile your "re-list" comment correctly - you mean the logic in WaitForADeployment with this code:
val, ok := <-w.ResultChan()
if !ok {
// reget and re-watch
break
where the watch timing out means w.ResultChan() exits, and the starting of the new watch is in WaitForADeployment, not some magic withing w.ResultChan(), correct?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

right, just saying that when w.ResultChan fails, the existing WaitForADeployment logic is going to go back to the top of the loop, do a fresh list of the resources, and then start a new watch on the resources. so it's not going to return based on the watch timing out.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That doesn't strike me as sound reasoning... These are tests, everything
should be expected to potentially fail...

Ben Parees | OpenShift
On Dec 15, 2015 9:49 PM, "Steve Kuznetsov" notifications@github.com wrote:

In test/extended/jenkins/jenkins_plugin.go
#6329 (comment):

  •   g.It("jenkins-plugin test case execution", func() {
    
  •       g.By("create jenkins job config xml file, convert to bytes for http post")
    
  •       data := jenkinsJobBytes("testjob-plugin.xml", oc.Namespace())
    
  •       g.By("make http request to create job")
    
  •       immediateInteractionWithJenkins(fmt.Sprintf("http://%s/createItem?name=test-plugin-job", hostPort), "POST", bytes.NewBuffer(data), 200)
    
  •       g.By("make http request to kick off build")
    
  •       immediateInteractionWithJenkins(fmt.Sprintf("http://%s/job/test-plugin-job/build?delay=0sec", hostPort), "POST", nil, 201)
    
  •       // the build and deployment is by far the most time consuming portion of the test jenkins job;
    
  •       // we leverage some of the openshift utilities for waiting for the deployment before we poll
    
  •       // jenkins for the sucessful job completion
    
  •       g.By("waiting for frontend deployment as sign that the build has finished")
    
  •       err := exutil.WaitForADeployment(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend", exutil.CheckDeploymentCompletedFn, exutil.CheckDeploymentFailedFn)
    

I believe the WaitFor methods were never used for something that was
expected to fail so the timeouts on them were those imposed by Ginkgo on
the rest itself.


Reply to this email directly or view it on GitHub
https://github.com/openshift/origin/pull/6329/files#r47731264.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Quite a lot of our caching unit tests do exactly the same thing, where the native timeout given by go test is used as the threshold. The tests mostly predate me so I assumed we used the same approach here.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. Did this question get resolved (whether it's going to wait forever)?
  2. Shouldn't this be checking the frontend-prod deployment, not the frontend deployment?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

answered (1) for myself, forgot you reworked waitforadeployment.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

for 2) both frontend and frontend-prod are deployed ... I've added a check for both

o.Expect(err).NotTo(o.HaveOccurred())
err = exutil.WaitForADeployment(oc.KubeREST().ReplicationControllers(oc.Namespace()), "frontend-prod", exutil.CheckDeploymentCompletedFn, exutil.CheckDeploymentFailedFn)
o.Expect(err).NotTo(o.HaveOccurred())

g.By("get build console logs and see if succeeded")
err = waitForJenkinsActivity(fmt.Sprintf("http://%s/job/test-plugin-job/1/console", hostPort), "SUCCESS", 200)
o.Expect(err).NotTo(o.HaveOccurred())

})

})

})
5 changes: 4 additions & 1 deletion test/extended/util/framework.go
Expand Up @@ -189,7 +189,9 @@ var CheckImageStreamTagNotFoundFn = func(i *imageapi.ImageStream) bool {
func WaitForADeployment(client kclient.ReplicationControllerInterface,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think i would have done this differently.... either:

  1. just check the start time and calculate and end time, and then if the end time has passed at the top of the outer loop
  2. get rid of the the Watch entirely and just wrap the list() in a wait.Poll check (which is basically what WaitForABuild does).

with the existing logic it's pretty hard to follow how long it's really going to end up waiting, based on expiration of watches. also 3 minutes between polls seems like a lot.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

went with option 1) ... will push in a sec

name string,
isOK, isFailed func(*kapi.ReplicationController) bool) error {
for {
startTime := time.Now()
endTime := startTime.Add(15 * time.Minute)
for time.Now().Before(endTime) {
requirement, err := labels.NewRequirement(deployapi.DeploymentConfigAnnotation, labels.EqualsOperator, sets.NewString(name))
if err != nil {
return fmt.Errorf("unexpected error generating label selector: %v", err)
Expand Down Expand Up @@ -233,6 +235,7 @@ func WaitForADeployment(client kclient.ReplicationControllerInterface,
}
}
}
return fmt.Errorf("the deploy did not finish within 3 minutes")
}

// CheckDeploymentCompletedFn returns true if the deployment completed
Expand Down