127 changes: 122 additions & 5 deletions test/e2e/servicesync/servicesync.go
@@ -2,25 +2,84 @@ package servicesync

import (
"context"
"fmt"
"time"

"github.com/loft-sh/vcluster/pkg/util/translate"
"github.com/loft-sh/vcluster/test/framework"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
)

var _ = ginkgo.Describe("map services from host to virtual cluster and vice versa", func() {
var f *framework.Framework

ginkgo.JustBeforeEach(func() {
// use default framework
var _ = ginkgo.Describe("Verify mapping and syncing of services and endpoints", ginkgo.Ordered, func() {
var (
f *framework.Framework
testService *corev1.Service
//nolint:staticcheck // SA1019: corev1.Endpoints is deprecated, but still required for compatibility
testEndpoint *corev1.Endpoints
serviceName = "test-service-sync"
serviceNamespace = "default"
endpointName = "test-service-sync"
)
ginkgo.BeforeAll(func() {
f = framework.DefaultFramework
testService = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: serviceNamespace,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
Ports: []corev1.ServicePort{
{
Name: "custom-port",
Port: 8080,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(5000),
},
},
},
}
//nolint:staticcheck // SA1019: corev1.Endpoints is deprecated, but still required for compatibility
testEndpoint = &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: endpointName,
Namespace: serviceNamespace,
},
//nolint:staticcheck // SA1019: corev1.Endpoints is deprecated, but still required for compatibility
Subsets: []corev1.EndpointSubset{
{
Addresses: []corev1.EndpointAddress{
{
IP: "1.1.1.1",
},
},
Ports: []corev1.EndpointPort{
{
Port: 5000,
},
},
},
},
}
})

ginkgo.AfterAll(func() {
err := f.VClusterClient.CoreV1().Endpoints(serviceNamespace).Delete(f.Context, endpointName, metav1.DeleteOptions{})
if err != nil && !kerrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
err = f.VClusterClient.CoreV1().Services(serviceNamespace).Delete(f.Context, serviceName, metav1.DeleteOptions{})
if err != nil && !kerrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
})

ginkgo.It("Test service mapping", func() {
@@ -54,6 +113,64 @@ var _ = ginkgo.Describe("map services from host to virtual cluster and vice vers
checkEndpointsSync(f.Context, f.VClusterClient, "test", "nginx", f.HostClient, f.VClusterNamespace, "nginx")
})
})

ginkgo.Context("Verify endpoint sync when endpoint is deployed before service", func() {
ginkgo.It("Should sync Service, Endpoints, and EndpointSlice from vCluster to host cluster", func() {
ginkgo.By("Create Service Endpoint in vCluster")
_, err := f.VClusterClient.CoreV1().Endpoints(serviceNamespace).Create(f.Context, testEndpoint, metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.By("Create Service in vCluster")
_, err = f.VClusterClient.CoreV1().Services(serviceNamespace).Create(f.Context, testService, metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.By("Verify Endpoint exists in vCluster")
_, err = f.VClusterClient.CoreV1().Endpoints(serviceNamespace).Get(f.Context, endpointName, metav1.GetOptions{})
framework.ExpectNoError(err)

ginkgo.By("Verify Service exists in vCluster")
_, err = f.VClusterClient.CoreV1().Services(serviceNamespace).Get(f.Context, serviceName, metav1.GetOptions{})
framework.ExpectNoError(err)

ginkgo.By("Verify EndpointSlice exists in vCluster")
_, err = f.VClusterClient.DiscoveryV1().EndpointSlices(serviceNamespace).List(f.Context, metav1.ListOptions{
LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", serviceName),
})
framework.ExpectNoError(err)

Bug: Asynchronous Checks Cause Inconsistent Test Flakiness

The test verifies EndpointSlice existence immediately after creating the service without polling or waiting. Since EndpointSlice objects are created asynchronously by the EndpointSlice controller, this check may fail intermittently in real clusters. The test uses gomega.Eventually for host cluster verification but not for vCluster, creating inconsistent behavior and potential flakiness.
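
One way to make this check deterministic, sketched here on the assumption that the EndpointSlice controller inside the vCluster may lag behind the Service creation, is to mirror the host-side pattern and poll with gomega.Eventually until at least one slice for the service appears:

	ginkgo.By("Verify EndpointSlice exists in vCluster")
	gomega.Eventually(func(g gomega.Gomega) {
		// Retry until the EndpointSlice controller has created at least one slice
		// for the service, instead of checking only the immediate List error.
		slices, err := f.VClusterClient.DiscoveryV1().EndpointSlices(serviceNamespace).List(f.Context, metav1.ListOptions{
			LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", serviceName),
		})
		g.Expect(err).NotTo(gomega.HaveOccurred())
		g.Expect(slices.Items).NotTo(gomega.BeEmpty())
	}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())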

translatedServiceName := translate.SingleNamespaceHostName(serviceName, serviceNamespace, translate.VClusterName)

ginkgo.By("Verify Service exists in Host Cluster")
Contributor


I noticed this only checks for existence. Is that sufficient? I would also test the content of the objects, e.g. the presence of the input values from above:

	Ports: []corev1.ServicePort{
		{
			Name:       "custom-port",
			Port:       8080,
			Protocol:   corev1.ProtocolTCP,
			TargetPort: intstr.FromInt(5000),
		},
	},

This also applies to the endpoint object.

Contributor Author


This test wasn't specifically meant to verify that, since the other test already checks the details. But I have added the resource checks as well.

gomega.Eventually(func(g gomega.Gomega) {
hostService, err := f.HostClient.CoreV1().Services(f.VClusterNamespace).Get(f.Context, translatedServiceName, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
g.Expect(hostService.Spec.Ports).To(gomega.HaveLen(1))
g.Expect(hostService.Spec.Ports[0].Name).To(gomega.Equal("custom-port"))
g.Expect(hostService.Spec.Ports[0].Port).To(gomega.Equal(int32(8080)))
}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())

ginkgo.By("Verify Endpoint exists in Host Cluster")
gomega.Eventually(func(g gomega.Gomega) {
hostEndpoint, err := f.HostClient.CoreV1().Endpoints(f.VClusterNamespace).Get(f.Context, translatedServiceName, metav1.GetOptions{})
g.Expect(err).NotTo(gomega.HaveOccurred())
g.Expect(hostEndpoint.Subsets).To(gomega.HaveLen(1))
g.Expect(hostEndpoint.Subsets[0].Addresses).To(gomega.HaveLen(1))
g.Expect(hostEndpoint.Subsets[0].Addresses[0].IP).To(gomega.Equal("1.1.1.1"))
g.Expect(hostEndpoint.Subsets[0].Ports).To(gomega.HaveLen(1))
g.Expect(hostEndpoint.Subsets[0].Ports[0].Port).To(gomega.Equal(int32(5000)))

Bug: Eventually Panics: Unsafe Slice Access

The test accesses hostEndpoint.Subsets[0] immediately after asserting its length, but within a gomega.Eventually block, failed assertions don't prevent subsequent code execution. If Subsets is empty or has unexpected structure during polling, the direct slice access will panic instead of allowing the Eventually block to retry gracefully. The same issue exists for accessing Addresses[0] and Ports[0].


}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())
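
A possible index-free variant, sketched under the assumption that the vendored gomega version ships the ContainElement, SatisfyAll, and HaveField matchers, would let a not-yet-synced Endpoints object fail the assertion and retry rather than touch Subsets[0] directly:

	gomega.Eventually(func(g gomega.Gomega) {
		hostEndpoint, err := f.HostClient.CoreV1().Endpoints(f.VClusterNamespace).Get(f.Context, translatedServiceName, metav1.GetOptions{})
		g.Expect(err).NotTo(gomega.HaveOccurred())
		// Matchers instead of direct indexing: an empty Subsets slice simply fails
		// the assertion and lets Eventually retry, so no out-of-range access can occur.
		g.Expect(hostEndpoint.Subsets).To(gomega.HaveLen(1))
		g.Expect(hostEndpoint.Subsets).To(gomega.ContainElement(gomega.SatisfyAll(
			gomega.HaveField("Addresses", gomega.ContainElement(gomega.HaveField("IP", "1.1.1.1"))),
			gomega.HaveField("Ports", gomega.ContainElement(gomega.HaveField("Port", int32(5000)))),
		)))
	}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())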

ginkgo.By("Verify EndpointSlice exists in Host Cluster")
gomega.Eventually(func(g gomega.Gomega) {
hostEndpointSlice, err := f.HostClient.DiscoveryV1().EndpointSlices(f.VClusterNamespace).List(f.Context, metav1.ListOptions{
LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", translatedServiceName),
})
g.Expect(err).NotTo(gomega.HaveOccurred())
g.Expect(hostEndpointSlice.Items).To(gomega.HaveLen(1))
}).WithPolling(time.Second).WithTimeout(framework.PollTimeout).Should(gomega.Succeed())

})
})
})

func testMapping(ctx context.Context, fromClient kubernetes.Interface, fromNamespace, fromName string, toClient kubernetes.Interface, toNamespace, toName string, checkEndpoints bool) {