@@ -9,11 +9,14 @@ import (
 
 	"github.com/github/deployment-tracker/internal/metadata"
 	"github.com/github/deployment-tracker/pkg/deploymentrecord"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	k8smetadata "k8s.io/client-go/metadata"
+	"k8s.io/client-go/tools/cache"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
 )
 
@@ -30,14 +33,14 @@ func (m *mockRecordPoster) PostOne(_ context.Context, record *deploymentrecord.D
 	return m.err
 }
 
-// Helper that allows tests to read captured records safely
+// Helper that allows tests to read captured records safely.
 func (m *mockRecordPoster) getRecords() []*deploymentrecord.DeploymentRecord {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 	return slices.Clone(m.records)
 }
 
-func setup(t *testing.T, namespace string) (*envtest.Environment, context.CancelFunc, *kubernetes.Clientset, *mockRecordPoster) {
+func setup(t *testing.T, namespace string) (*kubernetes.Clientset, *mockRecordPoster) {
 	t.Helper()
 	testEnv := &envtest.Environment{}
 
@@ -52,6 +55,10 @@ func setup(t *testing.T, namespace string) (*envtest.Environment, context.Cancel
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(func() {
+		cancel()
+		_ = testEnv.Stop()
+	})
 
 	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
 	_, err = clientset.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
@@ -90,10 +97,14 @@ func setup(t *testing.T, namespace string) (*envtest.Environment, context.Cancel
 	mockDeploymentrecord := &mockRecordPoster{}
 	ctrl.apiClient = mockDeploymentrecord
 
-	go ctrl.Run(ctx, 1)
-	time.Sleep(1 * time.Second)
+	go func() {
+		_ = ctrl.Run(ctx, 1)
+	}()
+	if !cache.WaitForCacheSync(ctx.Done(), ctrl.podInformer.HasSynced) {
+		t.Fatal("timed out waiting for informer cache to sync")
+	}
 
-	return testEnv, cancel, clientset, mockDeploymentrecord
+	return clientset, mockDeploymentrecord
 }
 
 func makeDeployment(t *testing.T, clientset *kubernetes.Clientset, owners []metav1.OwnerReference, namespace, name string) *appsv1.Deployment {
@@ -207,52 +218,12 @@ func deletePod(t *testing.T, clientset *kubernetes.Clientset, namespace, name st
 	}
 }
 
-// pollForRecords polls until the mock has at least minCount records, then returns them.
-func pollForRecords(t *testing.T, mock *mockRecordPoster, minCount int, timeout time.Duration) []*deploymentrecord.DeploymentRecord {
-	t.Helper()
-	deadline := time.After(timeout)
-	for {
-		records := mock.getRecords()
-		if len(records) >= minCount {
-			return records
-		}
-		select {
-		case <-deadline:
-			t.Fatalf("timed out waiting for at least %d records, got %d", minCount, len(records))
-		case <-time.After(100 * time.Millisecond):
-		}
-	}
-}
-
-// assertNoNewRecords polls for the given duration and fails if the record count deviates from expectedCount.
-func assertNoNewRecords(t *testing.T, mock *mockRecordPoster, expectedCount int, timeout time.Duration) {
-	t.Helper()
-	deadline := time.After(timeout)
-	for {
-		select {
-		case <-deadline:
-			if got := len(mock.getRecords()); got != expectedCount {
-				t.Fatalf("expected %d records, got %d", expectedCount, got)
-			}
-			return
-		case <-time.After(100 * time.Millisecond):
-			if got := len(mock.getRecords()); got != expectedCount {
-				t.Fatalf("expected %d records, got %d", expectedCount, got)
-			}
-		}
-	}
-}
-
 func TestControllerIntegration_KubernetesDeployment(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping integration test in short mode")
 	}
 	namespace := "test-namespace"
-	testEnv, cancel, clientset, mock := setup(t, namespace)
-	defer func(testEnv *envtest.Environment, cancelFunc context.CancelFunc) {
-		_ = testEnv.Stop()
-		cancel()
-	}(testEnv, cancel)
+	clientset, mock := setup(t, namespace)
 
 	// Create deployment, replicaset, and pod; expect 1 record
 	deployment := makeDeployment(t, clientset, []metav1.OwnerReference{}, namespace, "test-deployment")
@@ -269,13 +240,12 @@ func TestControllerIntegration_KubernetesDeployment(t *testing.T) {
 		UID: replicaSet.UID,
 	}}, namespace, "test-deployment-123456-1")
 
-	records := pollForRecords(t, mock, 1, 5*time.Second)
-	if len(records) != 1 {
-		t.Fatalf("expected 1 record, got %d", len(records))
-	}
-	if records[0].Status != deploymentrecord.StatusDeployed {
-		t.Errorf("expected %s, got %s", deploymentrecord.StatusDeployed, records[0].Status)
-	}
+	require.Eventually(t, func() bool {
+		return len(mock.getRecords()) >= 1
+	}, 5*time.Second, 100*time.Millisecond)
+	records := mock.getRecords()
+	require.Len(t, records, 1)
+	assert.Equal(t, deploymentrecord.StatusDeployed, records[0].Status)
 
 	// Create another pod in the replicaset; the dedup cache should prevent a new record as there is only one worker
 	// and no risk of multiple workers processing before the cache is set.
@@ -285,22 +255,25 @@ func TestControllerIntegration_KubernetesDeployment(t *testing.T) {
 		Name: replicaSet.Name,
 		UID: replicaSet.UID,
 	}}, namespace, "test-deployment-123456-2")
-	assertNoNewRecords(t, mock, 1, 5*time.Second)
+	require.Never(t, func() bool {
+		return len(mock.getRecords()) != 1
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// Delete second pod; still expect 1 record
 	deletePod(t, clientset, namespace, "test-deployment-123456-2")
-	assertNoNewRecords(t, mock, 1, 5*time.Second)
+	require.Never(t, func() bool {
+		return len(mock.getRecords()) != 1
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// Delete deployment, replicaset, and first pod; expect 2 records
 	deleteDeployment(t, clientset, namespace, "test-deployment")
 	deleteReplicaSet(t, clientset, namespace, "test-deployment-123456")
 	deletePod(t, clientset, namespace, "test-deployment-123456-1")
 
-	records = pollForRecords(t, mock, 2, 5*time.Second)
-	if len(records) != 2 {
-		t.Fatalf("expected 2 records after deletion, got %d", len(records))
-	}
-	if records[1].Status != deploymentrecord.StatusDecommissioned {
-		t.Errorf("expected second record to be %s, got %s", deploymentrecord.StatusDecommissioned, records[1].Status)
-	}
+	require.Eventually(t, func() bool {
+		return len(mock.getRecords()) >= 2
+	}, 5*time.Second, 100*time.Millisecond)
+	records = mock.getRecords()
+	require.Len(t, records, 2)
+	assert.Equal(t, deploymentrecord.StatusDecommissioned, records[1].Status)
 }