Improve the failure mode of timeout_test. #3396

Merged · 2 commits · Oct 21, 2020
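The change in a nutshell: each timeout test previously derived its context with context.WithCancel, which carries no deadline, so a wedged wait helper hung until `go test` itself timed out and panicked, losing the failure dump. The tests now use context.WithTimeout with a buffer beyond the timeout under test, and the wait helpers return early once that context expires. A minimal standalone sketch of the before/after (the timeout constant is a stand-in for the suite's own):

package main

import (
	"context"
	"fmt"
	"time"
)

const timeout = 30 * time.Second // stand-in for the suite's timeout constant

func main() {
	// Before: cancellation only happens via an explicit cancel(), and nothing
	// calls it when a wait helper wedges, so the test hung until the
	// `go test -timeout` deadline killed the whole binary.
	before, cancelBefore := context.WithCancel(context.Background())
	defer cancelBefore()
	_, hasDeadline := before.Deadline()
	fmt.Println(hasDeadline) // false: no deadline at all

	// After: the context expires a buffer beyond the timeout under test, so a
	// stuck poll returns ctx.Err() and the test fails with its logs intact.
	after, cancelAfter := context.WithTimeout(context.Background(), timeout+2*time.Minute)
	defer cancelAfter()
	d, ok := after.Deadline()
	fmt.Println(ok, time.Until(d).Round(time.Second)) // true, ~2m30s
}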
13 changes: 7 additions & 6 deletions test/init_test.go

@@ -72,14 +72,15 @@ func setup(ctx context.Context, t *testing.T, fn ...func(context.Context, *testi
 	return c, namespace
 }
 
-func header(logf logging.FormatLogger, text string) {
+func header(t *testing.T, text string) {
+	t.Helper()
 	left := "### "
 	right := " ###"
 	txt := left + text + right
 	bar := strings.Repeat("#", len(txt))
-	logf(bar)
-	logf(txt)
-	logf(bar)
+	t.Logf(bar)
+	t.Logf(txt)
+	t.Logf(bar)
 }
 
 func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string) {
@@ -88,14 +89,14 @@ func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string)
 		return
 	}
 	if t.Failed() {
-		header(t.Logf, fmt.Sprintf("Dumping objects from %s", namespace))
+		header(t, fmt.Sprintf("Dumping objects from %s", namespace))
		bs, err := getCRDYaml(ctx, cs, namespace)
 		if err != nil {
 			t.Error(err)
 		} else {
 			t.Log(string(bs))
 		}
-		header(t.Logf, fmt.Sprintf("Dumping logs from Pods in the %s", namespace))
+		header(t, fmt.Sprintf("Dumping logs from Pods in the %s", namespace))
 		taskruns, err := cs.TaskRunClient.List(ctx, metav1.ListOptions{})
 		if err != nil {
 			t.Errorf("Error getting TaskRun list %s", err)
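A side note on the header change above: taking *testing.T and calling t.Helper() (rather than accepting t.Logf as a logging.FormatLogger) makes the testing package attribute header's log lines to the caller. A small self-contained illustration, essentially the PR's own helper in miniature:

package demo

import (
	"strings"
	"testing"
)

// header mirrors the new signature from this PR; t.Helper() means the
// t.Logf output below is reported against the caller's file:line,
// not this helper's.
func header(t *testing.T, text string) {
	t.Helper()
	txt := "### " + text + " ###"
	bar := strings.Repeat("#", len(txt))
	t.Logf(bar)
	t.Logf(txt)
	t.Logf(bar)
}

func TestHeader(t *testing.T) {
	header(t, "Dumping objects") // log lines carry this line number
}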
32 changes: 15 additions & 17 deletions test/timeout_test.go

@@ -38,14 +38,14 @@ import (
 // verify that pipelinerun timeout works and leads to the correct TaskRun statuses
 // and pod deletions.
 func TestPipelineRunTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	// cancel the context after we have waited a suitable buffer beyond the given deadline.
+	ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Task in namespace %s", namespace)
 	task := &v1beta1.Task{
@@ -171,14 +171,13 @@ func TestPipelineRunTimeout(t *testing.T) {
 
 // TestStepTimeout is an integration test that will verify a Step can be timed out.
 func TestStepTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Task with Step step-no-timeout, Step step-timeout, and Step step-canceled in namespace %s", namespace)
 
@@ -243,14 +242,14 @@ func TestStepTimeout(t *testing.T) {
 
 // TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out.
 func TestTaskRunTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	timeout := 30 * time.Second
+	ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Task and TaskRun in namespace %s", namespace)
 	task := &v1beta1.Task{
@@ -272,7 +271,7 @@ func TestTaskRunTimeout(t *testing.T) {
 			TaskRef: &v1beta1.TaskRef{Name: "giraffe"},
 			// Do not reduce this timeout. Taskrun e2e test is also verifying
 			// if reconcile is triggered from timeout handler and not by pod informers
-			Timeout: &metav1.Duration{Duration: 30 * time.Second},
+			Timeout: &metav1.Duration{Duration: timeout},
 		},
 	}
 	if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
@@ -300,14 +299,13 @@ func TestTaskRunTimeout(t *testing.T) {
 }
 
 func TestPipelineTaskTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Tasks in namespace %s", namespace)
 	task1 := &v1beta1.Task{
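Also worth spelling out why the deferred tearDown calls now take context.Background() instead of the test's ctx: by the time a deferred teardown runs after a timeout failure, ctx is already past its deadline, so any client call made with it would fail before dumping anything. A self-contained sketch (doCleanup is a hypothetical stand-in for tearDown):

package main

import (
	"context"
	"fmt"
	"time"
)

// doCleanup is a hypothetical stand-in for tearDown: like a real client
// call, it gives up immediately if its context is already done.
func doCleanup(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return nil // cleanup work would happen here
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	time.Sleep(20 * time.Millisecond) // the "test body" outlives the deadline

	fmt.Println(doCleanup(ctx))                  // context deadline exceeded
	fmt.Println(doCleanup(context.Background())) // <nil>: a fresh context still works
}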
21 changes: 9 additions & 12 deletions test/v1alpha1/timeout_test.go

@@ -37,14 +37,13 @@ import (
 // verify that pipelinerun timeout works and leads to the correct TaskRun statuses
 // and pod deletions.
 func TestPipelineRunTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Task in namespace %s", namespace)
 	task := tb.Task("banana", tb.TaskSpec(
@@ -140,14 +139,13 @@ func TestPipelineRunTimeout(t *testing.T) {
 
 // TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out.
 func TestTaskRunTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Task and TaskRun in namespace %s", namespace)
 	if _, err := c.TaskClient.Create(ctx, tb.Task("giraffe",
@@ -168,14 +166,13 @@ func TestTaskRunTimeout(t *testing.T) {
 }
 
 func TestPipelineTaskTimeout(t *testing.T) {
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
 	defer cancel()
 	c, namespace := setup(ctx, t)
 	t.Parallel()
 
-	knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
-	defer tearDown(ctx, t, c, namespace)
+	knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
+	defer tearDown(context.Background(), t, c, namespace)
 
 	t.Logf("Creating Tasks in namespace %s", namespace)
 	task1 := tb.Task("success", tb.TaskSpec(
23 changes: 18 additions & 5 deletions test/v1alpha1/wait.go

@@ -65,6 +65,17 @@ const (
 // ConditionAccessorFn is a condition function used by polling functions
 type ConditionAccessorFn func(ca apis.ConditionAccessor) (bool, error)
 
+func pollImmediateWithContext(ctx context.Context, fn func() (bool, error)) error {
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		select {
+		case <-ctx.Done():
+			return true, ctx.Err()
+		default:
+		}
+		return fn()
+	})
+}
+
 // WaitForTaskRunState polls the status of the TaskRun called name from client every
 // interval until inState returns `true` indicating it is done, returns an
 // error or timeout. desc will be used to name the metric that is emitted to
@@ -74,7 +85,7 @@ func WaitForTaskRunState(ctx context.Context, c *clients, name string, inState C
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.TaskRunClient.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -92,7 +103,7 @@ func WaitForDeploymentState(ctx context.Context, c *clients, name string, namesp
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -110,7 +121,7 @@ func WaitForPodState(ctx context.Context, c *clients, name string, namespace str
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -128,7 +139,9 @@ func WaitForPipelineRunState(ctx context.Context, c *clients, name string, pollt
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, polltimeout, func() (bool, error) {
+	ctx, cancel := context.WithTimeout(ctx, polltimeout)
+	defer cancel()
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.PipelineRunClient.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -146,7 +159,7 @@ func WaitForServiceExternalIPState(ctx context.Context, c *clients, namespace, n
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
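The helper added above is the heart of the fix: wait.PollImmediate keeps its usual interval/timeout behavior, but the wrapped condition returns (true, ctx.Err()) as soon as the caller's context is done, which stops the poll and surfaces the context error. A hedged, self-contained version (the interval and timeout constants stand in for the package-level ones the diff assumes):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

const (
	interval = 100 * time.Millisecond // stand-ins for the package-level
	timeout  = 5 * time.Second        // constants the diff relies on
)

// pollImmediateWithContext mirrors the helper added in this PR: the wrapped
// condition reports done with ctx.Err() once the caller's context is done,
// so the poll stops early instead of running out the full timeout.
func pollImmediateWithContext(ctx context.Context, fn func() (bool, error)) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		select {
		case <-ctx.Done():
			return true, ctx.Err()
		default:
		}
		return fn()
	})
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	err := pollImmediateWithContext(ctx, func() (bool, error) {
		return false, nil // a condition that never becomes true
	})
	fmt.Println(err) // context deadline exceeded, after ~1s rather than 5s
}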
23 changes: 18 additions & 5 deletions test/wait.go

@@ -65,6 +65,17 @@ const (
 // ConditionAccessorFn is a condition function used by polling functions
 type ConditionAccessorFn func(ca apis.ConditionAccessor) (bool, error)
 
+func pollImmediateWithContext(ctx context.Context, fn func() (bool, error)) error {
+	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+		select {
+		case <-ctx.Done():
+			return true, ctx.Err()
+		default:
+		}
+		return fn()
+	})
+}
+
 // WaitForTaskRunState polls the status of the TaskRun called name from client every
 // interval until inState returns `true` indicating it is done, returns an
 // error or timeout. desc will be used to name the metric that is emitted to
@@ -74,7 +85,7 @@ func WaitForTaskRunState(ctx context.Context, c *clients, name string, inState C
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.TaskRunClient.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -92,7 +103,7 @@ func WaitForDeploymentState(ctx context.Context, c *clients, name string, namesp
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -110,7 +121,7 @@ func WaitForPodState(ctx context.Context, c *clients, name string, namespace str
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -128,7 +139,9 @@ func WaitForPipelineRunState(ctx context.Context, c *clients, name string, pollt
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, polltimeout, func() (bool, error) {
+	ctx, cancel := context.WithTimeout(ctx, polltimeout)
+	defer cancel()
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.PipelineRunClient.Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
@@ -146,7 +159,7 @@ func WaitForServiceExternalIPState(ctx context.Context, c *clients, namespace, n
 	_, span := trace.StartSpan(context.Background(), metricName)
 	defer span.End()
 
-	return wait.PollImmediate(interval, timeout, func() (bool, error) {
+	return pollImmediateWithContext(ctx, func() (bool, error) {
 		r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			return true, err
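One subtlety in the WaitForPipelineRunState change: deriving ctx, cancel := context.WithTimeout(ctx, polltimeout) can only tighten the deadline, never extend it, so the effective limit is the earlier of the caller's deadline and polltimeout. A quick demonstration of that context property:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	parent, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// A child WithTimeout can only tighten the deadline, never extend it:
	// the effective deadline is the earlier of the parent's and the child's.
	child, cancelChild := context.WithTimeout(parent, time.Hour)
	defer cancelChild()

	d, _ := child.Deadline()
	fmt.Println(time.Until(d).Round(time.Second)) // ~1s, not 1h
}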