@@ -3,6 +3,7 @@ package tests
 import (
 	"context"
 	"fmt"
+	"path/filepath"

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -12,6 +13,9 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/util/homedir"

 	"github.com/openebs/lvm-localpv/pkg/lvm"
 	"github.com/openebs/lvm-localpv/tests/container"
@@ -249,14 +253,12 @@ func VerifyLVMVolume(expect_ready bool, expected_vg string) {
 			// It gets deleted by the csi provisioner only when the owner node of the cr
 			// marks it as Failed. So in case we do a get of the cr while it was being
 			// handled, we expect the state to be either Pending or Failed.
-			fmt.Printf("checking vol object as vol is non nil, vol is %v\n", vol)
 			gomega.Expect(vol.Status.State).To(gomega.Or(gomega.Equal("Pending"), gomega.Equal("Failed")),
 				"While checking if lvmvolume: %s is in Pending or Failed state", pvcObj.Spec.VolumeName)
 		}
 	} else {
 		gomega.Expect(err).To(gomega.BeNil(), "while fetching the lvm volume {%s}", pvcObj.Spec.VolumeName)
 		if expected_vg != "" {
-			fmt.Printf("vol is %v\n", vol)
 			gomega.Expect(vol.Spec.VolGroup).To(gomega.Equal(expected_vg),
 				"while checking volume group of lvm volume", pvcObj.Spec.VolumeName)
 		} else {
@@ -302,14 +304,7 @@ func createAndVerifyPVC(expect_bound bool) {
 		pvcName,
 		OpenEBSNamespace,
 	)
-	ok := false
-	if !expect_bound {
-		ok = IsPVCPendingConsistently(pvcName)
-	} else {
-		ok = IsPVCBoundEventually(pvcName)
-	}
-	gomega.Expect(ok).To(gomega.Equal(true),
-		"while checking the pvc status")
+	verifyPVCStatus(pvcName, expect_bound)

 	pvcObj, err = PVCClient.WithNamespace(OpenEBSNamespace).Get(pvcObj.Name, metav1.GetOptions{})
 	gomega.Expect(err).To(
@@ -320,6 +315,18 @@ func createAndVerifyPVC(expect_bound bool) {
 	)
 }

+// verifyPVCStatus verifies the state of an already created PVC based on expect_bound:
+// the PVC must be consistently Pending when false, and eventually Bound when true.
+func verifyPVCStatus(pvc_name string, expect_bound bool) {
+	ok := false
+	if !expect_bound {
+		ok = IsPVCPendingConsistently(pvc_name)
+	} else {
+		ok = IsPVCBoundEventually(pvc_name)
+	}
+	gomega.Expect(ok).To(gomega.Equal(true),
+		"while checking the pvc status")
+}
+
 func createAndVerifyBlockPVC(expect_bound bool) {
 	var (
 		err error
@@ -819,3 +826,79 @@ func createNodeDaemonSet(ds *appsv1.DaemonSet) {
 		gomega.BeNil(),
 		"creating node plugin daemonset %v", nodeDaemonSet)
 }
+
+// getk8sClient creates a k8s client using the default kubeconfig path.
+func getk8sClient() *kubernetes.Clientset {
+	kubeconfigPath := filepath.Join(homedir.HomeDir(), ".kube", "config")
+	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+	gomega.Expect(err).To(
+		gomega.BeNil(),
+		"could not build config from kubeconfig",
+	)
+	client, err := kubernetes.NewForConfig(config)
+	gomega.Expect(err).To(
+		gomega.BeNil(),
+		"could not create a k8s client",
+	)
+	return client
+}
+
+// listNodes lists all k8s nodes.
+func listNodes(client *kubernetes.Clientset) *corev1.NodeList {
+	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	gomega.Expect(err).To(
+		gomega.BeNil(),
+		"could not list nodes",
+	)
+	return nodes
+}
+
+// cordonk8sNode cordons all k8s nodes.
+func cordonk8sNode() {
+	client := getk8sClient()
+	nodes := listNodes(client)
+	for _, node := range nodes.Items {
+		if !node.Spec.Unschedulable {
+			err := cordonNode(client, &node)
+			gomega.Expect(err).To(
+				gomega.BeNil(),
+				"could not cordon node %s", node.Name,
+			)
+		}
+	}
+}
+
+// uncordonk8sNode uncordons all k8s nodes.
+func uncordonk8sNode() {
+	client := getk8sClient()
+	nodes := listNodes(client)
+	for _, node := range nodes.Items {
+		if node.Spec.Unschedulable {
+			err := uncordonNode(client, &node)
+			gomega.Expect(err).To(
+				gomega.BeNil(),
+				"could not uncordon node %s", node.Name,
+			)
+		}
+	}
+}
+
+// cordonNode marks a specific node as unschedulable.
+func cordonNode(clientset *kubernetes.Clientset, node *corev1.Node) error {
+	updatedNode := node.DeepCopy()
+	updatedNode.Spec.Unschedulable = true
+
+	_, err := clientset.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{})
+	return err
+}
+
+// uncordonNode marks a specific node as schedulable again.
+func uncordonNode(clientset *kubernetes.Clientset, node *corev1.Node) error {
+	updatedNode := node.DeepCopy()
+	updatedNode.Spec.Unschedulable = false
+
+	_, err := clientset.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{})
+	return err
+}
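
As a usage sketch (not part of the diff above): the new cordon helpers pair naturally with the existing createAndVerifyPVC helper to test scheduling behavior when no node is schedulable. The It description and the expectation that the PVC stays Pending are assumptions for illustration, not code from this PR.

// Hypothetical ginkgo block exercising the helpers added in this PR.
ginkgo.It("should keep the pvc Pending while all nodes are cordoned", func() {
	cordonk8sNode()         // mark every node unschedulable
	defer uncordonk8sNode() // restore schedulability even if the test fails
	createAndVerifyPVC(false) // assumption: the PVC stays consistently Pending
})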