diff --git a/pkg/virt-handler/vm.go b/pkg/virt-handler/vm.go
index 339d2f78fc2d..4b1649106fdf 100644
--- a/pkg/virt-handler/vm.go
+++ b/pkg/virt-handler/vm.go
@@ -2718,6 +2718,8 @@ func (d *VirtualMachineController) vmUpdateHelperMigrationTarget(origVMI *v1.Vir
 		}
 		log.Log.Object(vmi).Infof("Signaled target pod for failed migration to clean up")
 		// nothing left to do here if the migration failed.
+		// Re-enqueue to trigger final cleanup
+		d.Queue.AddAfter(controller.VirtualMachineInstanceKey(vmi), time.Second)
 		return nil
 	} else if migrations.IsMigrating(vmi) {
 		// If the migration has already started,