Skip to content

Commit 6283b40

Browse files
committed
Added WaitForRunningPodCount back in to helpers and made it the main point of assertion for testing the accurate strategy
I hastily assumed that a combination of WaitForScaledJobCount and WaitForAllPodRunningInNamespace would satisfy what is needed for the test. However, that approach comes with a major flaw: unless you delete the jobs, those pods count towards the condition in WaitForAllPodRunningInNamespace. Thus, a much simpler way to test the strategy is to wait for the correct number of pods to be running. A running pod in the given namespace implies a corresponding Job exists, so we test the two strategy cases with pod count. This also passes local tests. Issue #3661 Signed-off-by: jansworld <navon.josh@gmail.com>
1 parent 5dbb7b6 commit 6283b40

2 files changed

Lines changed: 45 additions & 19 deletions

File tree

tests/helper/helper.go

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,35 @@ func WaitForAllPodRunningInNamespace(t *testing.T, kc *kubernetes.Clientset, nam
490490
return false
491491
}
492492

493+
func WaitForRunningPodCount(t *testing.T, kc *kubernetes.Clientset, scaledJobName, namespace string, target, iterations, interval int) bool {
494+
for i := 0; i < iterations; i++ {
495+
pods, err := kc.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
496+
LabelSelector: fmt.Sprintf("scaledjob.keda.sh/name=%s", scaledJobName),
497+
})
498+
if err != nil {
499+
t.Logf("cannot list pods - %s", err)
500+
}
501+
502+
runningPodCount := 0
503+
for _, pod := range pods.Items {
504+
if pod.Status.Phase == corev1.PodRunning {
505+
runningPodCount++
506+
}
507+
}
508+
509+
t.Logf("Waiting for running pods. Namespace - %s, Current - %d, Target - %d",
510+
namespace, runningPodCount, target)
511+
if runningPodCount == target {
512+
return true
513+
} else if runningPodCount > target {
514+
return false
515+
}
516+
517+
time.Sleep(time.Duration(interval) * time.Second)
518+
}
519+
return false
520+
}
521+
493522
// Waits until the Horizontal Pod Autoscaler for the scaledObject reports that it has metrics available
494523
// to calculate, or until the number of iterations are done, whichever happens first.
495524
func WaitForHPAMetricsToPopulate(t *testing.T, kc *kubernetes.Clientset, name, namespace string, iterations, intervalSeconds int) bool {

tests/internals/scaling_strategies/accurate_scaling_strategy/accurate_scaling_strategy_test.go

Lines changed: 16 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ spec:
6262
image: docker.io/library/busybox
6363
command:
6464
- sleep
65-
- "60"
65+
- "30"
6666
imagePullPolicy: IfNotPresent
6767
envFrom:
6868
- secretRef:
@@ -130,41 +130,38 @@ func getTemplateData() (templateData, []Template) {
130130
}
131131

132132
// testAccurateScaling exercises the "accurate" ScaledJob scaling strategy by
// asserting on the number of Running pods (a running pod implies a live Job):
// first the base case (scale == maxScale when no jobs are pending), then the
// cap condition (maxScale + runningJobs capped at maxReplicaCount).
func testAccurateScaling(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
	iterationCount := 60

	// Base case (number of scale = maxScale since pendingJobs = 0). Enqueue 4 messages.
	enqueueMessages(ctx, t, client, 4)
	assert.True(t, WaitForRunningPodCount(t, kc, scaledJobName, testNamespace, 4, iterationCount, 1),
		"running pod count should be %d after %d iterations", 4, iterationCount)

	// Drain the queue to simulate the messages being consumed.
	_, err := client.ClearMessages(ctx, nil)
	assert.NoErrorf(t, err, "cannot clear queue - %s", err)

	// Let the in-flight jobs run to completion before the next scenario.
	WaitForAllJobsSuccess(t, kc, testNamespace, 90, 1)

	// Cap condition (maxScale + runningJobs > maxReplicaCount). Enqueue 4 messages.
	enqueueMessages(ctx, t, client, 4)
	assert.True(t, WaitForRunningPodCount(t, kc, scaledJobName, testNamespace, 4, iterationCount, 1),
		"running pod count should be %d after %d iterations", 4, iterationCount)

	// Drain the queue to simulate the messages being consumed.
	_, err = client.ClearMessages(ctx, nil)
	assert.NoErrorf(t, err, "cannot clear queue - %s", err)

	// With 4 jobs still running, 8 more messages should be capped at 10 pods.
	enqueueMessages(ctx, t, client, 8)
	assert.True(t, WaitForRunningPodCount(t, kc, scaledJobName, testNamespace, 10, iterationCount, 1),
		"running pod count should be %d after %d iterations", 10, iterationCount)

	_, err = client.ClearMessages(ctx, nil)
	assert.NoErrorf(t, err, "cannot clear queue - %s", err)

	// Final cleanup: wait for every remaining job to finish successfully.
	WaitForAllJobsSuccess(t, kc, testNamespace, 120, 1)
}
170167

0 commit comments

Comments
 (0)