Adding tests for syncing service endpoint and endpointslice to host #3187

base: main

Changes from all commits:
@@ -2,25 +2,84 @@ package servicesync

```go
import (
	"context"
	"fmt"
	"time"

	"github.com/loft-sh/vcluster/pkg/util/translate"
	"github.com/loft-sh/vcluster/test/framework"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)
```
The previous spec opener

```go
var _ = ginkgo.Describe("map services from host to virtual cluster and vice versa", func() {
	var f *framework.Framework

	ginkgo.JustBeforeEach(func() {
		// use default framework
```

is replaced by an `Ordered` spec that builds the shared fixtures once in a `BeforeAll` and cleans them up in an `AfterAll`:

```go
var _ = ginkgo.Describe("Verify mapping and syncing of services and endpoints", ginkgo.Ordered, func() {
	var (
		f           *framework.Framework
		testService *corev1.Service
		//nolint:staticcheck // SA1019: corev1.Endpoints is deprecated, but still required for compatibility
		testEndpoint     *corev1.Endpoints
		serviceName      = "test-service-sync"
		serviceNamespace = "default"
		endpointName     = "test-service-sync"
	)

	ginkgo.BeforeAll(func() {
		f = framework.DefaultFramework
		testService = &corev1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      serviceName,
				Namespace: serviceNamespace,
			},
			Spec: corev1.ServiceSpec{
				ClusterIP: "None",
				Ports: []corev1.ServicePort{
					{
						Name:       "custom-port",
						Port:       8080,
						Protocol:   corev1.ProtocolTCP,
						TargetPort: intstr.FromInt(5000),
					},
				},
			},
		}
		//nolint:staticcheck // SA1019: corev1.Endpoints is deprecated, but still required for compatibility
		testEndpoint = &corev1.Endpoints{
			ObjectMeta: metav1.ObjectMeta{
				Name:      endpointName,
				Namespace: serviceNamespace,
			},
			//nolint:staticcheck // SA1019: corev1.Endpoints is deprecated, but still required for compatibility
			Subsets: []corev1.EndpointSubset{
				{
					Addresses: []corev1.EndpointAddress{
						{
							IP: "1.1.1.1",
						},
					},
					Ports: []corev1.EndpointPort{
						{
							Port: 5000,
						},
					},
				},
			},
		}
	})

	ginkgo.AfterAll(func() {
		err := f.VClusterClient.CoreV1().Endpoints(serviceNamespace).Delete(f.Context, endpointName, metav1.DeleteOptions{})
		if err != nil && !kerrors.IsNotFound(err) {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
		err = f.VClusterClient.CoreV1().Services(serviceNamespace).Delete(f.Context, serviceName, metav1.DeleteOptions{})
		if err != nil && !kerrors.IsNotFound(err) {
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		}
	})

	ginkgo.It("Test service mapping", func() {
```
@@ -54,6 +113,64 @@ var _ = ginkgo.Describe("map services from host to virtual cluster and vice vers

```go
			checkEndpointsSync(f.Context, f.VClusterClient, "test", "nginx", f.HostClient, f.VClusterNamespace, "nginx")
		})
	})
```
```go
	ginkgo.Context("Verify endpoint sync when endpoint is deployed before service", func() {
		ginkgo.It("Should sync Service, Endpoints, and EndpointSlice from vCluster to host cluster", func() {
			ginkgo.By("Create Service Endpoint in vCluster")
			_, err := f.VClusterClient.CoreV1().Endpoints(serviceNamespace).Create(f.Context, testEndpoint, metav1.CreateOptions{})
			framework.ExpectNoError(err)

			ginkgo.By("Create Service in vCluster")
			_, err = f.VClusterClient.CoreV1().Services(serviceNamespace).Create(f.Context, testService, metav1.CreateOptions{})
			framework.ExpectNoError(err)

			ginkgo.By("Verify Endpoint exists in vCluster")
			_, err = f.VClusterClient.CoreV1().Endpoints(serviceNamespace).Get(f.Context, endpointName, metav1.GetOptions{})
			framework.ExpectNoError(err)

			ginkgo.By("Verify Service exists in vCluster")
			_, err = f.VClusterClient.CoreV1().Services(serviceNamespace).Get(f.Context, serviceName, metav1.GetOptions{})
			framework.ExpectNoError(err)

			ginkgo.By("Verify EndpointSlice exists in vCluster")
			_, err = f.VClusterClient.DiscoveryV1().EndpointSlices(serviceNamespace).List(f.Context, metav1.ListOptions{
				LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", serviceName),
			})
			framework.ExpectNoError(err)

			translatedServiceName := translate.SingleNamespaceHostName(serviceName, serviceNamespace, translate.VClusterName)
```
**Contributor:** I noticed this is only a check for existence. But is this sufficient? I'd also test the content of the objects, e.g. the presence of the input values from above. This also applies to the Endpoints object.

**Contributor (author):** This test wasn't specifically meant to check that, as the other test covers the details, but I have added the resource checks as well.
```go
			ginkgo.By("Verify Service exists in Host Cluster")
			gomega.Eventually(func(g gomega.Gomega) {
				hostService, err := f.HostClient.CoreV1().Services(f.VClusterNamespace).Get(f.Context, translatedServiceName, metav1.GetOptions{})
				g.Expect(err).NotTo(gomega.HaveOccurred())
				g.Expect(hostService.Spec.Ports).To(gomega.HaveLen(1))
				g.Expect(hostService.Spec.Ports[0].Name).To(gomega.Equal("custom-port"))
				g.Expect(hostService.Spec.Ports[0].Port).To(gomega.Equal(int32(8080)))
			}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())

			ginkgo.By("Verify Endpoint exists in Host Cluster")
			gomega.Eventually(func(g gomega.Gomega) {
				hostEndpoint, err := f.HostClient.CoreV1().Endpoints(f.VClusterNamespace).Get(f.Context, translatedServiceName, metav1.GetOptions{})
				g.Expect(err).NotTo(gomega.HaveOccurred())
				g.Expect(hostEndpoint.Subsets).To(gomega.HaveLen(1))
				g.Expect(hostEndpoint.Subsets[0].Addresses).To(gomega.HaveLen(1))
				g.Expect(hostEndpoint.Subsets[0].Addresses[0].IP).To(gomega.Equal("1.1.1.1"))
				g.Expect(hostEndpoint.Subsets[0].Ports).To(gomega.HaveLen(1))
				g.Expect(hostEndpoint.Subsets[0].Ports[0].Port).To(gomega.Equal(int32(5000)))
			}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())
```

**Reviewer (bot):** Bug: `Eventually` panics, unsafe slice access. The test accesses … *(sowmyav27 marked this conversation as resolved.)*
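For reference, a minimal sketch of the guard pattern that avoids such panics, reusing the names from the block above: inside `Eventually(func(g gomega.Gomega) {...})` a failed `g.Expect` aborts the current polling attempt and the function is retried, so asserting non-emptiness before indexing keeps the `[0]` accesses from ever running against an empty slice.

```go
// Sketch: guard every index behind a length assertion inside the polling
// function; a failed g.Expect aborts this attempt and Eventually retries.
gomega.Eventually(func(g gomega.Gomega) {
	hostEndpoint, err := f.HostClient.CoreV1().Endpoints(f.VClusterNamespace).Get(f.Context, translatedServiceName, metav1.GetOptions{})
	g.Expect(err).NotTo(gomega.HaveOccurred())
	g.Expect(hostEndpoint.Subsets).NotTo(gomega.BeEmpty()) // guard before Subsets[0]
	g.Expect(hostEndpoint.Subsets[0].Addresses).NotTo(gomega.BeEmpty())
	g.Expect(hostEndpoint.Subsets[0].Addresses[0].IP).To(gomega.Equal("1.1.1.1"))
}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())
```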
```go
			ginkgo.By("Verify EndpointSlice exists in Host Cluster")
			gomega.Eventually(func(g gomega.Gomega) {
				hostEndpointSlice, err := f.HostClient.DiscoveryV1().EndpointSlices(f.VClusterNamespace).List(f.Context, metav1.ListOptions{
					LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", translatedServiceName),
				})
				g.Expect(err).NotTo(gomega.HaveOccurred())
				g.Expect(hostEndpointSlice.Items).To(gomega.HaveLen(1))
			}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())
		})
	})
})
```

*(sowmyav27 marked this conversation as resolved.)*
```go
func testMapping(ctx context.Context, fromClient kubernetes.Interface, fromNamespace, fromName string, toClient kubernetes.Interface, toNamespace, toName string, checkEndpoints bool) {
```
**Reviewer (bot):** Bug: asynchronous checks cause inconsistent test flakiness. The test verifies `EndpointSlice` existence immediately after creating the service, without polling or waiting. Since `EndpointSlice` objects are created asynchronously by the EndpointSlice controller, this check may fail intermittently on real clusters. The test uses `gomega.Eventually` for the host-cluster verification but not for the vCluster checks, which is inconsistent and a potential source of flakiness.
EndpointSliceexistence immediately after creating the service without polling or waiting. SinceEndpointSliceobjects are created asynchronously by the EndpointSlice controller, this check may fail intermittently in real clusters. The test usesgomega.Eventuallyfor host cluster verification but not for vCluster, creating inconsistent behavior and potential flakiness.