From 72ac6d58e4f314f156a34a0607a44c80ec8954b5 Mon Sep 17 00:00:00 2001 From: namkyu1999 Date: Fri, 12 May 2023 01:10:34 +0900 Subject: [PATCH 1/6] feat: add unit tests to k8s package Signed-off-by: namkyu1999 --- litmus-portal/graphql-server/go.mod | 6 +- litmus-portal/graphql-server/go.sum | 22 +- .../graphql-server/graph/resolver.go | 8 +- .../graphql-server/pkg/cluster/service.go | 52 +- .../graphql-server/pkg/cluster/util.go | 24 +- .../graphql-server/pkg/cluster/util_test.go | 36 + .../graphql-server/pkg/k8s/client.go | 46 +- .../graphql-server/pkg/k8s/client_test.go | 123 ++++ .../graphql-server/pkg/k8s/cluster.go | 200 +++--- .../graphql-server/pkg/k8s/cluster_test.go | 632 ++++++++++++++++++ .../pkg/projects/project_handler.go | 6 + .../pkg/rest_handlers/file_handler.go | 6 + .../graphql-server/pkg/self-deployer/start.go | 6 +- .../graphql-server/utils/variables.go | 13 + 14 files changed, 1005 insertions(+), 175 deletions(-) create mode 100644 litmus-portal/graphql-server/pkg/cluster/util_test.go create mode 100644 litmus-portal/graphql-server/pkg/k8s/client_test.go create mode 100644 litmus-portal/graphql-server/pkg/k8s/cluster_test.go diff --git a/litmus-portal/graphql-server/go.mod b/litmus-portal/graphql-server/go.mod index 8c7987253fa..9fbb1107f1e 100644 --- a/litmus-portal/graphql-server/go.mod +++ b/litmus-portal/graphql-server/go.mod @@ -23,14 +23,16 @@ require ( github.com/prometheus/client_golang v1.12.1 github.com/prometheus/common v0.32.1 github.com/sirupsen/logrus v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/tidwall/gjson v1.14.0 github.com/tidwall/sjson v1.2.4 github.com/vektah/gqlparser/v2 v2.1.0 - go.mongodb.org/mongo-driver v1.8.2 - golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd + go.mongodb.org/mongo-driver v1.11.4 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d google.golang.org/grpc v1.44.0 google.golang.org/protobuf v1.27.1 gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.23.3 k8s.io/apimachinery v0.23.3 k8s.io/client-go v12.0.0+incompatible ) diff --git a/litmus-portal/graphql-server/go.sum b/litmus-portal/graphql-server/go.sum index 2851a6590f3..aa06d2afc57 100644 --- a/litmus-portal/graphql-server/go.sum +++ b/litmus-portal/graphql-server/go.sum @@ -738,7 +738,6 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-swagger/go-swagger v0.29.0/go.mod h1:Z4GJzI+bHKKkGB2Ji1rawpi3/ldXX8CkzGIa9HAC5EE= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.mod h1:b65mBPzqzZWxOZGxSWrqs4GInLIn+u99Q9q7p+GKni0= @@ -1381,6 +1380,7 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe 
h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
@@ -1730,6 +1730,8 @@ github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1Sd
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -1738,9 +1740,12 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stripe/stripe-go v70.15.0+incompatible/go.mod h1:A1dQZmO/QypXmsL0T8axYZkSN/uA/T/A64pfKdBAMiY=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1813,10 +1818,12 @@ github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6e
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.0 h1:d70R37I0HrDLsafRrMBXyrD4lmQbCHE873t00Vr0gm0=
github.com/xdg-go/scram v1.1.0/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
+github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs=
+github.com/xdg-go/stringprep v1.0.3/go.mod
h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= @@ -1885,8 +1892,9 @@ go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.8.2 h1:8ssUXufb90ujcIvR6MyE1SchaNj0SFxsakiZgxIyrMk= go.mongodb.org/mongo-driver v1.8.2/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1977,8 +1985,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220128200615-198e4374d7ed/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/litmus-portal/graphql-server/graph/resolver.go b/litmus-portal/graphql-server/graph/resolver.go index 6f9513b2368..21d0315333a 100644 --- a/litmus-portal/graphql-server/graph/resolver.go +++ b/litmus-portal/graphql-server/graph/resolver.go @@ -23,7 +23,9 @@ import ( dbOperationsWorkflowTemplate "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/workflowtemplate" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/gitops" imageRegistry "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/image_registry" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/usage" + log "github.com/sirupsen/logrus" ) // This file will not be regenerated automatically. 
@@ -51,8 +53,12 @@ func NewConfig(mongodbOperator mongodb.MongoOperator) generated.Config { analyticsOperator := dbSchemaAnalytics.NewAnalyticsOperator(mongodbOperator) imageRegistryOperator := dbOperationsImageRegistry.NewImageRegistryOperator(mongodbOperator) + kubeCluster, err := k8s.NewKubeCluster() + if err != nil { + log.Fatalf("Error in getting k8s cluster, err: %v", err) + } // service - clusterService := cluster.NewService(clusterOperator, chaosWorkflowOperator) + clusterService := cluster.NewService(clusterOperator, chaosWorkflowOperator, kubeCluster) chaosHubService := chaoshub.NewService(chaosHubOperator) analyticsService := service.NewService(analyticsOperator, chaosWorkflowOperator, clusterService) usageService := usage.NewService(clusterOperator) diff --git a/litmus-portal/graphql-server/pkg/cluster/service.go b/litmus-portal/graphql-server/pkg/cluster/service.go index c8eee4f3a8e..d29625c456e 100644 --- a/litmus-portal/graphql-server/pkg/cluster/service.go +++ b/litmus-portal/graphql-server/pkg/cluster/service.go @@ -20,6 +20,7 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" "go.mongodb.org/mongo-driver/bson" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) type Service interface { @@ -35,24 +36,28 @@ type Service interface { VerifyCluster(identity model.ClusterIdentity) (*dbSchemaCluster.Cluster, error) GetManifest(token string) ([]byte, int, error) GetCluster(clusterID string) (dbSchemaCluster.Cluster, error) + GetEndpoint(agentType utils.AgentType) (string, error) + GetClusterResource(manifest string, namespace string) (*unstructured.Unstructured, error) } type clusterService struct { clusterOperator *dbSchemaCluster.Operator chaosWorkflowOperator *dbOperationsWorkflow.Operator + kubeCluster *k8s.KubeClients } // NewService returns a new instance of Service -func NewService(clusterOperator *dbSchemaCluster.Operator, chaosWorkflowOperator *dbOperationsWorkflow.Operator) Service { +func NewService(clusterOperator *dbSchemaCluster.Operator, chaosWorkflowOperator *dbOperationsWorkflow.Operator, kubeCluster *k8s.KubeClients) Service { return &clusterService{ clusterOperator: clusterOperator, chaosWorkflowOperator: chaosWorkflowOperator, + kubeCluster: kubeCluster, } } // RegisterCluster creates an entry for a new cluster in DB and generates the url used to apply manifest func (c *clusterService) RegisterCluster(request model.RegisterClusterRequest) (*model.RegisterClusterResponse, error) { - endpoint, err := GetEndpoint(request.ClusterType) + endpoint, err := c.GetEndpoint(utils.AgentType(request.ClusterType)) if err != nil { return nil, err } @@ -312,27 +317,27 @@ func (c *clusterService) GetManifestWithClusterID(clusterID string, accessKey st } var config subscriberConfigurations - config.ServerEndpoint, err = GetEndpoint(reqCluster.ClusterType) + config.ServerEndpoint, err = c.GetEndpoint(utils.AgentType(reqCluster.ClusterType)) if err != nil { return nil, fmt.Errorf("failed to retrieve the server endpoint %v", err) } var scope = utils.Config.ChaosCenterScope - if scope == clusterScope && utils.Config.TlsSecretName != "" { - config.TLSCert, err = k8s.GetTLSCert(utils.Config.TlsSecretName) + if scope == string(utils.AgentScopeCluster) && utils.Config.TlsSecretName != "" { + config.TLSCert, err = c.kubeCluster.GetTLSCert(utils.Config.TlsSecretName) if err != nil { return nil, fmt.Errorf("failed to retrieve the tls cert %v", err) } } - if scope == namespaceScope { + if scope == string(utils.AgentScopeNamespace) { config.TLSCert = 
utils.Config.TlsCertB64 } var respData []byte - if reqCluster.AgentScope == clusterScope { + if reqCluster.AgentScope == string(utils.AgentScopeCluster) { respData, err = manifestParser(reqCluster, "manifests/cluster", &config) - } else if reqCluster.AgentScope == namespaceScope { + } else if reqCluster.AgentScope == string(utils.AgentScopeNamespace) { respData, err = manifestParser(reqCluster, "manifests/namespace", &config) } else { log.Error("env AGENT_SCOPE is empty") @@ -401,20 +406,20 @@ func (c *clusterService) GetManifest(token string) ([]byte, int, error) { } var config subscriberConfigurations - config.ServerEndpoint, err = GetEndpoint(reqCluster.ClusterType) + config.ServerEndpoint, err = c.GetEndpoint(utils.AgentType(reqCluster.ClusterType)) if err != nil { return nil, http.StatusInternalServerError, err } var scope = utils.Config.ChaosCenterScope - if scope == clusterScope && utils.Config.TlsSecretName != "" { - config.TLSCert, err = k8s.GetTLSCert(utils.Config.TlsSecretName) + if scope == string(utils.AgentScopeCluster) && utils.Config.TlsSecretName != "" { + config.TLSCert, err = c.kubeCluster.GetTLSCert(utils.Config.TlsSecretName) if err != nil { return nil, http.StatusInternalServerError, err } } - if scope == namespaceScope { + if scope == string(utils.AgentScopeNamespace) { config.TLSCert = utils.Config.TlsCertB64 } @@ -437,6 +442,29 @@ func (c *clusterService) GetManifest(token string) ([]byte, int, error) { } } +// GetCluster returns cluster details for a given clusterID func (c *clusterService) GetCluster(clusterID string) (dbSchemaCluster.Cluster, error) { return c.clusterOperator.GetCluster(clusterID) } + +// GetEndpoint returns the endpoint for the subscriber +func (c *clusterService) GetEndpoint(agentType utils.AgentType) (string, error) { + // returns endpoint from env, if provided by user + if utils.Config.ChaosCenterUiEndpoint != "" { + return utils.Config.ChaosCenterUiEndpoint + "/ws/query", nil + } + + // generating endpoint based on ChaosCenter Scope & AgentType (Self or External) + agentEndpoint, err := c.kubeCluster.GetServerEndpoint(utils.AgentScope(utils.Config.ChaosCenterScope), agentType) + + if agentEndpoint == "" || err != nil { + return "", fmt.Errorf("failed to retrieve the server endpoint %v", err) + } + + return agentEndpoint, err +} + +// GetClusterResource returns the cluster resource for a given manifest +func (c *clusterService) GetClusterResource(manifest string, namespace string) (*unstructured.Unstructured, error) { + return c.kubeCluster.ClusterResource(manifest, namespace) +} diff --git a/litmus-portal/graphql-server/pkg/cluster/util.go b/litmus-portal/graphql-server/pkg/cluster/util.go index 4090377f2ec..801e1dcf8cf 100644 --- a/litmus-portal/graphql-server/pkg/cluster/util.go +++ b/litmus-portal/graphql-server/pkg/cluster/util.go @@ -10,15 +10,12 @@ import ( "github.com/litmuschaos/litmus/litmus-portal/graphql-server/graph/model" store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store" dbSchemaCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster" - "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" ) const ( // CIVersion specifies the version tag used for ci builds - CIVersion = "ci" - clusterScope string = "cluster" - namespaceScope string = "namespace" + CIVersion = "ci" ) // subscriberConfigurations contains the configurations required for the subscriber @@ -27,23 +24,6 @@ type 
subscriberConfigurations struct {
	TLSCert        string
}

-// GetEndpoint returns the endpoint for the subscriber
-func GetEndpoint(agentType string) (string, error) {
-	// returns endpoint from env, if provided by user
-	if utils.Config.ChaosCenterUiEndpoint != "" {
-		return utils.Config.ChaosCenterUiEndpoint + "/ws/query", nil
-	}
-
-	// generating endpoint based on ChaosCenter Scope & AgentType (Self or External)
-	agentEndpoint, err := k8s.GetServerEndpoint(utils.Config.ChaosCenterScope, agentType)
-
-	if agentEndpoint == "" || err != nil {
-		return "", fmt.Errorf("failed to retrieve the server endpoint %v", err)
-	}
-
-	return agentEndpoint, err
-}
-
// ManifestParser parses manifests yaml and generates dynamic manifest with specified keys
func manifestParser(cluster dbSchemaCluster.Cluster, rootPath string, config *subscriberConfigurations) ([]byte, error) {
	var (
@@ -189,7 +169,7 @@ func manifestParser(cluster dbSchemaCluster.Cluster, rootPath string, config *su

// SendRequestToSubscriber sends events from the graphQL server to the subscribers listening for the requests
func SendRequestToSubscriber(subscriberRequest SubscriberRequests, r store.StateData) {
-	if utils.Config.AgentScope == "cluster" {
+	if utils.Config.AgentScope == string(utils.AgentScopeCluster) {
		/*
			namespace = Obtain from WorkflowManifest or
			from frontend as a separate workflowNamespace field under ChaosWorkFlowRequest model
diff --git a/litmus-portal/graphql-server/pkg/cluster/util_test.go b/litmus-portal/graphql-server/pkg/cluster/util_test.go
new file mode 100644
index 00000000000..d59ec198fdc
--- /dev/null
+++ b/litmus-portal/graphql-server/pkg/cluster/util_test.go
@@ -0,0 +1,36 @@
+package cluster_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/litmuschaos/litmus/litmus-portal/graphql-server/graph/model"
+	"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster"
+	dataStore "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSendRequestToSubscriber(t *testing.T) {
+	// given
+	state := dataStore.NewStore()
+	workflow := cluster.SubscriberRequests{
+		ProjectID: uuid.NewString(),
+		ClusterID: uuid.NewString(),
+	}
+	t.Run("success", func(t *testing.T) {
+		// given
+		action := make(chan *model.ClusterActionResponse, 1)
+		t.Cleanup(func() { delete(state.ConnectedCluster, workflow.ClusterID) })
+		state.ConnectedCluster[workflow.ClusterID] = action
+		// when
+		cluster.SendRequestToSubscriber(workflow, *state)
+		// then
+		select {
+		case result := <-action:
+			assert.Equal(t, workflow.ProjectID, result.ProjectID)
+		case <-time.After(5 * time.Second):
+			t.Errorf("timeout")
+		}
+	})
+}
diff --git a/litmus-portal/graphql-server/pkg/k8s/client.go b/litmus-portal/graphql-server/pkg/k8s/client.go
index 274c63adcb2..067efd45032 100644
--- a/litmus-portal/graphql-server/pkg/k8s/client.go
+++ b/litmus-portal/graphql-server/pkg/k8s/client.go
@@ -9,44 +9,30 @@ import (
	"k8s.io/client-go/tools/clientcmd"
)

-func GetKubeConfig() (*rest.Config, error) {
-	kubeConfig := utils.Config.KubeConfigFilePath
-	// Use in-cluster config if kubeconfig path is not specified
-	if kubeConfig == "" {
-		return rest.InClusterConfig()
-	}
-
-	return clientcmd.BuildConfigFromFlags("", kubeConfig)
-}
-
-func GetGenericK8sClient() (*kubernetes.Clientset, error) {
-	config, err := GetKubeConfig()
+// GetK8sClients returns the k8s clients
+func GetK8sClients(config *rest.Config) (*kubernetes.Clientset, dynamic.Interface,
discovery.DiscoveryInterface, error) { + genericClient, err := kubernetes.NewForConfig(config) if err != nil { - return nil, err + return nil, nil, nil, err } - - return kubernetes.NewForConfig(config) -} - -// This function returns dynamic client and discovery client -func GetDynamicAndDiscoveryClient() (discovery.DiscoveryInterface, dynamic.Interface, error) { - // returns a config object which uses the service account kubernetes gives to pods - config, err := GetKubeConfig() + dynamicClient, err := dynamic.NewForConfig(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - - // NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { - return nil, nil, err + return nil, nil, nil, err } + return genericClient, dynamicClient, discoveryClient, nil +} - // NewForConfig creates a new dynamic client or returns an error. - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, nil, err +// GetKubeConfig returns the *rest.Config +func GetKubeConfig() (*rest.Config, error) { + kubeConfig := utils.Config.KubeConfigFilePath + // Use in-cluster config if KubeConfig path is not specified + if kubeConfig == "" { + return rest.InClusterConfig() } - return discoveryClient, dynamicClient, nil + return clientcmd.BuildConfigFromFlags("", kubeConfig) } diff --git a/litmus-portal/graphql-server/pkg/k8s/client_test.go b/litmus-portal/graphql-server/pkg/k8s/client_test.go new file mode 100644 index 00000000000..58f7e668ea3 --- /dev/null +++ b/litmus-portal/graphql-server/pkg/k8s/client_test.go @@ -0,0 +1,123 @@ +// Package k8s_test contains all the tests for the k8s package +package k8s_test + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/gin-gonic/gin" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/rest" +) + +var ( + tempPath = "/tmp/client/" +) + +// TestMain is the entry point for testing +func TestMain(m *testing.M) { + gin.SetMode(gin.TestMode) + log.SetOutput(ioutil.Discard) + os.Exit(m.Run()) +} + +// TestGetK8sClients tests the GetK8sClients function +func TestGetK8sClients(t *testing.T) { + testcase := struct { + name string + config *rest.Config + }{ + name: "success", + config: &rest.Config{ + Host: "https://localhost:8080", + }, + } + t.Run(testcase.name, func(t *testing.T) { + // when + _, _, _, err := k8s.GetK8sClients(testcase.config) + // then + assert.NoError(t, err) + }) +} + +// TestGetKubeConfig tests the GetKubeConfig function +func TestGetKubeConfig(t *testing.T) { + testcases := []struct { + name string + wantErr bool + }{ + { + name: "failure: invalid KubeConfig path", + wantErr: true, + }, + { + name: "success", + wantErr: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.wantErr { + // given + utils.Config.KubeConfigFilePath = "invalid path" + // when + _, err := k8s.GetKubeConfig() + // then + assert.Error(t, err) + } else { + // given + content := ` +apiVersion: v1 +clusters: +- cluster: + server: https://localhost:8080 + extensions: + - name: client.authentication.k8s.io/exec + extension: + audience: foo + other: bar + name: foo-cluster +contexts: +- context: + cluster: foo-cluster + user: foo-user + namespace: bar + name: foo-context +current-context: foo-context +kind: Config 
+users: +- name: foo-user + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + args: + - arg-1 + - arg-2 + command: foo-command + provideClusterInfo: true +` + err := os.MkdirAll(tempPath, os.ModePerm) + if err != nil { + t.Error(err) + } + tmpFile, err := os.Create(tempPath + "kubeconfig") + if err != nil { + t.Error(err) + } + t.Cleanup(func() { _ = os.Remove(tempPath + "kubeconfig") }) + if err := os.WriteFile(tmpFile.Name(), []byte(content), 0666); err != nil { + t.Error(err) + } + utils.Config.KubeConfigFilePath = tmpFile.Name() + // when + _, err = k8s.GetKubeConfig() + // then + assert.NoError(t, err) + } + }) + } +} diff --git a/litmus-portal/graphql-server/pkg/k8s/cluster.go b/litmus-portal/graphql-server/pkg/k8s/cluster.go index d5faf91476b..071b73d564a 100644 --- a/litmus-portal/graphql-server/pkg/k8s/cluster.go +++ b/litmus-portal/graphql-server/pkg/k8s/cluster.go @@ -10,60 +10,74 @@ import ( "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" log "github.com/sirupsen/logrus" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/api/core/v1" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/serializer/yaml" memory "k8s.io/client-go/discovery/cached" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/restmapper" ) -var ( - decUnstructured = yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) - dr dynamic.ResourceInterface - AgentNamespace = utils.Config.AgentNamespace -) +// KubeClients is a struct for kubernetes cluster +type KubeClients struct { + GenericClient kubernetes.Interface + DynamicClient dynamic.Interface + RESTMapper meta.RESTMapper +} -// This function handles cluster operations -func ClusterResource(manifest string, namespace string) (*unstructured.Unstructured, error) { - // Getting dynamic and discovery client - ctx := context.TODO() - discoveryClient, dynamicClient, err := GetDynamicAndDiscoveryClient() +// NewKubeCluster returns a new KubeClients instance +func NewKubeCluster() (*KubeClients, error) { + config, err := GetKubeConfig() if err != nil { return nil, err } - // Create a mapper using dynamic client - mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(discoveryClient)) + genericClient, dynamicClient, discoveryClient, err := GetK8sClients(config) + if err != nil { + return nil, err + } + + RESTMapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(discoveryClient)) + return &KubeClients{ + GenericClient: genericClient, + DynamicClient: dynamicClient, + RESTMapper: RESTMapper, + }, nil +} - // Decode YAML manifest into unstructured.Unstructured - obj := &unstructured.Unstructured{} +// ClusterResource handles cluster operations +func (k *KubeClients) ClusterResource(manifest string, namespace string) (*unstructured.Unstructured, error) { + var ( + decUnstructured = yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + dr dynamic.ResourceInterface + ctx = context.TODO() + obj = &unstructured.Unstructured{} // Decode YAML manifest into unstructured.Unstructured + ) _, gvk, err := decUnstructured.Decode([]byte(manifest), nil, obj) if err != nil { return nil, err } - // Find GVR - mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + mapping, err := k.RESTMapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { return nil, err } - // 
Obtain REST interface for the GVR if mapping.Scope.Name() == meta.RESTScopeNameNamespace { // namespaced resources should specify the namespace - dr = dynamicClient.Resource(mapping.Resource).Namespace(namespace) + dr = k.DynamicClient.Resource(mapping.Resource).Namespace(namespace) } else { // for cluster-wide resources - dr = dynamicClient.Resource(mapping.Resource) + dr = k.DynamicClient.Resource(mapping.Resource) } response, err := dr.Create(ctx, obj, metaV1.CreateOptions{}) - if k8serrors.IsAlreadyExists(err) { - // This doesnt ever happen even if it does already exist + if k8sErrors.IsAlreadyExists(err) { + // This doesn't ever happen even if it does already exist log.Info("already exists") return nil, nil } @@ -78,54 +92,50 @@ func ClusterResource(manifest string, namespace string) (*unstructured.Unstructu } /* -This function returns the endpoint of the server by which external agents can communicate. +GetServerEndpoint returns the endpoint of the server by which external agents can communicate. The order of generating the endpoint is based on different network type: - Ingress - LoadBalancer > NodePort > ClusterIP */ -func GetServerEndpoint(portalScope, agentType string) (string, error) { +func (k *KubeClients) GetServerEndpoint(portalScope utils.AgentScope, agentType utils.AgentType) (string, error) { var ( - NodePort int32 - Port int32 - InternalIP string - IngressPath string - IPAddress string - Scheme string - FinalUrl string - ServerServiceName = utils.Config.ServerServiceName - NodeName = utils.Config.NodeName - LitmusPortalNS = utils.Config.LitmusPortalNamespace - Ingress = utils.Config.Ingress - IngressName = utils.Config.IngressName + nodePort int32 + port int32 + internalIP string + ingressPath string + ipAddress string + scheme string + finalURL string + serverServiceName = utils.Config.ServerServiceName + nodeName = utils.Config.NodeName + litmusPortalNS = utils.Config.LitmusPortalNamespace + ingress = utils.Config.Ingress + ingressName = utils.Config.IngressName ) ctx := context.TODO() - clientset, err := GetGenericK8sClient() - if err != nil { - return "", err - } - svc, err := clientset.CoreV1().Services(LitmusPortalNS).Get(ctx, ServerServiceName, metaV1.GetOptions{}) + svc, err := k.GenericClient.CoreV1().Services(litmusPortalNS).Get(ctx, serverServiceName, metaV1.GetOptions{}) if err != nil { return "", err } - for _, port := range svc.Spec.Ports { - if port.Name == "graphql-server" { - NodePort = port.NodePort - Port = port.Port + for _, p := range svc.Spec.Ports { + if p.Name == "graphql-server" { + nodePort = p.NodePort + port = p.Port } } // If current agent is self-agent, then servicename FQDN will be used irrespective of service type. - if agentType == "internal" { - FinalUrl = "http://" + ServerServiceName + "." + LitmusPortalNS + ":" + strconv.Itoa(int(Port)) + "/query" - return FinalUrl, nil + if agentType == utils.AgentTypeInternal { + finalURL = "http://" + serverServiceName + "." + litmusPortalNS + ":" + strconv.Itoa(int(port)) + "/query" + return finalURL, nil } // Ingress endpoint will be generated for external agents only. - if Ingress == "true" { - getIng, err := clientset.NetworkingV1().Ingresses(LitmusPortalNS).Get(ctx, IngressName, metaV1.GetOptions{}) + if ingress == "true" { + getIng, err := k.GenericClient.NetworkingV1().Ingresses(litmusPortalNS).Get(ctx, ingressName, metaV1.GetOptions{}) if err != nil { return "", err } @@ -136,22 +146,22 @@ func GetServerEndpoint(portalScope, agentType string) (string, error) { 2. 
IPAddress */ if len(getIng.Spec.Rules) > 0 && getIng.Spec.Rules[0].Host != "" { - IPAddress = getIng.Spec.Rules[0].Host + ipAddress = getIng.Spec.Rules[0].Host } else if len(getIng.Status.LoadBalancer.Ingress) > 0 && getIng.Status.LoadBalancer.Ingress[0].IP != "" { - IPAddress = getIng.Status.LoadBalancer.Ingress[0].IP + ipAddress = getIng.Status.LoadBalancer.Ingress[0].IP } else if len(getIng.Status.LoadBalancer.Ingress) > 0 && getIng.Status.LoadBalancer.Ingress[0].Hostname != "" { - IPAddress = getIng.Status.LoadBalancer.Ingress[0].Hostname + ipAddress = getIng.Status.LoadBalancer.Ingress[0].Hostname } else { return "", errors.New("IP Address or HostName not generated") } - if IPAddress == "" { - return "", errors.New("IP Address or Hostname is not available in the ingress of " + IngressName) + if ipAddress == "" { + return "", errors.New("IP Address or Hostname is not available in the ingress of " + ingressName) } for _, rule := range getIng.Spec.Rules { for _, path := range rule.HTTP.Paths { - if path.Backend.Service.Name == ServerServiceName { + if path.Backend.Service.Name == serverServiceName { f := func(c rune) bool { return c == '/' } @@ -167,96 +177,84 @@ func GetServerEndpoint(portalScope, agentType string) (string, error) { path_arr = append(path_arr, "query") } - IngressPath = strings.Join(path_arr[:], "/") + ingressPath = strings.Join(path_arr[:], "/") } } } if len(getIng.Spec.TLS) > 0 { - Scheme = "https" + scheme = "https" } else { - Scheme = "http" + scheme = "http" } - FinalUrl = Scheme + "://" + wrapIPV6(IPAddress) + "/" + IngressPath + finalURL = scheme + "://" + wrapIPV6(ipAddress) + "/" + ingressPath - } else if Ingress == "false" || Ingress == "" { + } else if ingress == "false" || ingress == "" { exp := strings.ToLower(string(svc.Spec.Type)) switch exp { - case "loadbalancer": + case strings.ToLower(string(v1.ServiceTypeLoadBalancer)): if len(svc.Status.LoadBalancer.Ingress) > 0 { if svc.Status.LoadBalancer.Ingress[0].Hostname != "" { - IPAddress = svc.Status.LoadBalancer.Ingress[0].Hostname + ipAddress = svc.Status.LoadBalancer.Ingress[0].Hostname } else if svc.Status.LoadBalancer.Ingress[0].IP != "" { - IPAddress = svc.Status.LoadBalancer.Ingress[0].IP + ipAddress = svc.Status.LoadBalancer.Ingress[0].IP } else { return "", errors.New("LoadBalancerIP/Hostname not present for loadbalancer service type") } } else { return "", errors.New("LoadBalancerIP/Hostname not present for loadbalancer service type") } - FinalUrl = "http://" + wrapIPV6(IPAddress) + ":" + strconv.Itoa(int(Port)) + "/query" - case "nodeport": - + finalURL = "http://" + wrapIPV6(ipAddress) + ":" + strconv.Itoa(int(port)) + "/query" + case strings.ToLower(string(v1.ServiceTypeNodePort)): // Cannot fetch Node Ip Address when ChaosCenter is installed in Namespaced scope - if portalScope == "namespace" { + if portalScope == utils.AgentScopeNamespace { return "", errors.New("Cannot get NodeIP in namespaced mode") } - nodeIP, err := clientset.CoreV1().Nodes().Get(ctx, NodeName, metaV1.GetOptions{}) + nodeIP, err := k.GenericClient.CoreV1().Nodes().Get(ctx, nodeName, metaV1.GetOptions{}) if err != nil { return "", err } for _, addr := range nodeIP.Status.Addresses { if strings.ToLower(string(addr.Type)) == "externalip" && addr.Address != "" { - IPAddress = addr.Address + ipAddress = addr.Address } else if strings.ToLower(string(addr.Type)) == "internalip" && addr.Address != "" { - InternalIP = addr.Address + internalIP = addr.Address } } // Whichever one of External IP and Internal IP is present, that will 
be selected for Server Endpoint - if IPAddress != "" { - FinalUrl = "http://" + wrapIPV6(IPAddress) + ":" + strconv.Itoa(int(NodePort)) + "/query" - } else if InternalIP != "" { - FinalUrl = "http://" + wrapIPV6(InternalIP) + ":" + strconv.Itoa(int(NodePort)) + "/query" + if ipAddress != "" { + finalURL = "http://" + wrapIPV6(ipAddress) + ":" + strconv.Itoa(int(nodePort)) + "/query" + } else if internalIP != "" { + finalURL = "http://" + wrapIPV6(internalIP) + ":" + strconv.Itoa(int(nodePort)) + "/query" } else { - return "", errors.New("Both ExternalIP and InternalIP aren't present for NodePort service type") + return "", errors.New("both ExternalIP and InternalIP aren't present for NodePort service type") } - case "clusterip": + case strings.ToLower(string(v1.ServiceTypeClusterIP)): log.Info("external agents can't be connected to the server if the service type is set to ClusterIP\n") if svc.Spec.ClusterIP == "" { return "", errors.New("ClusterIP is not present") } - FinalUrl = "http://" + wrapIPV6(svc.Spec.ClusterIP) + ":" + strconv.Itoa(int(Port)) + "/query" + finalURL = "http://" + wrapIPV6(svc.Spec.ClusterIP) + ":" + strconv.Itoa(int(port)) + "/query" default: - return "", errors.New("No service type found") + return "", errors.New("no service type found") } } else { - return "", errors.New("Ingress value is not correct") + return "", errors.New("ingress value is not correct") } - log.Info("server endpoint: ", FinalUrl) + log.Info("server endpoint: ", finalURL) - return FinalUrl, nil + return finalURL, nil } -func wrapIPV6(addr string) string { - if strings.Count(addr, ":") > 0 { - return "[" + addr + "]" - } - return addr -} - -func GetTLSCert(secretName string) (string, error) { - clientset, err := GetGenericK8sClient() - if err != nil { - return "", err - } - - secret, err := clientset.CoreV1().Secrets(utils.Config.LitmusPortalNamespace).Get(context.Background(), secretName, metaV1.GetOptions{}) +// GetTLSCert returns the TLS certificate of the provided secret +func (k *KubeClients) GetTLSCert(secretName string) (string, error) { + secret, err := k.GenericClient.CoreV1().Secrets(utils.Config.LitmusPortalNamespace).Get(context.Background(), secretName, metaV1.GetOptions{}) if err != nil { return "", err } @@ -266,3 +264,11 @@ func GetTLSCert(secretName string) (string, error) { } return "", fmt.Errorf("could not find tls.crt value in provided TLS Secret %v", secretName) } + +// wrapIPV6 wraps ipv6 address in square brackets +func wrapIPV6(addr string) string { + if strings.Count(addr, ":") > 0 { + return "[" + addr + "]" + } + return addr +} diff --git a/litmus-portal/graphql-server/pkg/k8s/cluster_test.go b/litmus-portal/graphql-server/pkg/k8s/cluster_test.go new file mode 100644 index 00000000000..bb693a3bb94 --- /dev/null +++ b/litmus-portal/graphql-server/pkg/k8s/cluster_test.go @@ -0,0 +1,632 @@ +package k8s_test + +import ( + "context" + "os" + "testing" + + "github.com/google/uuid" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + networkingV1 "k8s.io/api/networking/v1" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + fakeDynamic "k8s.io/client-go/dynamic/fake" + fakeClientSet "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/restmapper" +) + +// TestNewKubeCluster tests the NewKubeCluster function +func TestNewKubeCluster(t *testing.T) { + 
// given + testcases := []struct { + name string + wantErr bool + }{ + { + name: "failure: invalid KubeConfig path", + wantErr: true, + }, + { + name: "success", + wantErr: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.wantErr { + // given + utils.Config.KubeConfigFilePath = "invalid path" + // when + _, err := k8s.GetKubeConfig() + // then + assert.Error(t, err) + } else { + // given + content := ` +apiVersion: v1 +clusters: +- cluster: + server: https://localhost:8080 + extensions: + - name: client.authentication.k8s.io/exec + extension: + audience: foo + other: bar + name: foo-cluster +contexts: +- context: + cluster: foo-cluster + user: foo-user + namespace: bar + name: foo-context +current-context: foo-context +kind: Config +users: +- name: foo-user + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + args: + - arg-1 + - arg-2 + command: foo-command + provideClusterInfo: true +` + err := os.MkdirAll(tempPath, os.ModePerm) + if err != nil { + t.Error(err) + } + tmpFile, err := os.Create(tempPath + "kubeconfig") + if err != nil { + t.Error(err) + } + t.Cleanup(func() { _ = os.Remove(tempPath + "kubeconfig") }) + if err := os.WriteFile(tmpFile.Name(), []byte(content), 0666); err != nil { + t.Error(err) + } + utils.Config.KubeConfigFilePath = tmpFile.Name() + // when + _, err = k8s.NewKubeCluster() + // then + assert.NoError(t, err) + } + }) + } +} + +// TestClusterResource tests the ClusterResource function +func TestClusterResource(t *testing.T) { + // given + validManifest := ` +apiVersion: v1 +kind: Service +metadata: + name: litmusportal-server-service + namespace: some-ns +spec: + type: NodePort + ports: + - name: graphql-server + port: 9002 + targetPort: 8080 + - name: graphql-rpc-server + port: 8000 + targetPort: 8000 + selector: + component: litmusportal-server +` + utils.Config.LitmusPortalNamespace = "some-ns" + type args struct { + manifest string + resources []*restmapper.APIGroupResources + } + testcases := []struct { + name string + args args + wantErr bool + }{ + { + name: "success", + args: args{ + manifest: validManifest, + resources: []*restmapper.APIGroupResources{ + { + Group: metaV1.APIGroup{ + Versions: []metaV1.GroupVersionForDiscovery{ + {Version: "v1"}, + }, + PreferredVersion: metaV1.GroupVersionForDiscovery{Version: "v1"}, + }, + VersionedResources: map[string][]metaV1.APIResource{ + "v1": { + {Name: "services", Namespaced: true, Kind: "Service"}, + }, + }, + }, + }, + }, + }, + { + name: "failure: invalid manifest", + args: args{ + manifest: "invalid yaml", + }, + wantErr: true, + }, + { + name: "failure: cannot find GVR", + args: args{ + manifest: validManifest, + resources: []*restmapper.APIGroupResources{ + { + Group: metaV1.APIGroup{ + Versions: []metaV1.GroupVersionForDiscovery{ + {Version: "v1"}, + }, + PreferredVersion: metaV1.GroupVersionForDiscovery{Version: "v1"}, + }, + VersionedResources: map[string][]metaV1.APIResource{ + "v1": {}, + }, + }, + }, + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + genericClient := fakeClientSet.NewSimpleClientset() + dynamicClient := fakeDynamic.NewSimpleDynamicClient(runtime.NewScheme()) + restMapper := restmapper.NewDiscoveryRESTMapper(tc.args.resources) + client := &k8s.KubeClients{ + GenericClient: genericClient, + DynamicClient: dynamicClient, + RESTMapper: restMapper, + } + // when + _, err := client.ClusterResource(tc.args.manifest, utils.Config.LitmusPortalNamespace) + // then + 
if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// getServiceObject returns a service object +func getServiceObject(serviceType v1.ServiceType) *v1.Service { + service := &v1.Service{ + TypeMeta: metaV1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: utils.Config.ServerServiceName, + Namespace: utils.Config.LitmusPortalNamespace, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: "graphql-server", + Protocol: "", + Port: 9002, + TargetPort: intstr.IntOrString{ + Type: 0, + IntVal: 8080, + }, + }, + { + Name: "graphql-rpc-server", + Protocol: "", + Port: 8000, + TargetPort: intstr.IntOrString{ + Type: 0, + IntVal: 8000, + }, + }, + }, + Type: serviceType, + Selector: map[string]string{ + "component": "litmusportal-server", + }, + }, + } + switch serviceType { + case v1.ServiceTypeNodePort: + service.Spec.Ports[0].NodePort = 31001 + case v1.ServiceTypeClusterIP: + service.Spec.ClusterIP = "1.1.1.1" + } + return service +} + +// getServiceTypeLoadBalancerObject returns a service object with load balancer type +func getServiceTypeLoadBalancerObject(ip, hostname string) *v1.Service { + service := getServiceObject(v1.ServiceTypeLoadBalancer) + if ip != "" || hostname != "" { + service.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: ip, Hostname: hostname}} + } + return service +} + +// getNodeObject returns a node object +func getNodeObject(nodeAddressType v1.NodeAddressType) *v1.Node { + return &v1.Node{ + ObjectMeta: metaV1.ObjectMeta{ + Name: uuid.NewString(), + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: nodeAddressType, + Address: "1.2.3.4", + }, + }, + }, + } +} + +// getIngressObject returns an ingress object +func getIngressObject(ruleHostnameExist bool, ip, hostname string) *networkingV1.Ingress { + ruleHostname := "" + if ruleHostnameExist { + ruleHostname = "hostname.com" + } + ingress := &networkingV1.Ingress{ + ObjectMeta: metaV1.ObjectMeta{ + Name: uuid.NewString(), + Namespace: utils.Config.LitmusPortalNamespace, + }, + Spec: networkingV1.IngressSpec{ + Rules: []networkingV1.IngressRule{ + { + Host: ruleHostname, + IngressRuleValue: networkingV1.IngressRuleValue{ + HTTP: &networkingV1.HTTPIngressRuleValue{ + Paths: []networkingV1.HTTPIngressPath{ + { + Path: "/", Backend: networkingV1.IngressBackend{ + Service: &networkingV1.IngressServiceBackend{ + Name: utils.Config.ServerServiceName, + Port: networkingV1.ServiceBackendPort{ + Number: 9002, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + if ip != "" || hostname != "" { + ingress.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: ip, Hostname: hostname}} + } + return ingress +} + +// TestGetServerEndpoint tests the GetServerEndpoint function +func TestGetServerEndpoint(t *testing.T) { + // given + genericClient := fakeClientSet.NewSimpleClientset() + client := &k8s.KubeClients{GenericClient: genericClient} + utils.Config.LitmusPortalNamespace = "some-ns" + utils.Config.ServerServiceName = "litmusportal-server-service" + type args struct { + portalScope utils.AgentScope + agentType utils.AgentType + service *v1.Service + } + testcases := []struct { + name string + args args + given func() + want string + wantErr bool + }{ + { + name: "success: self agent", + args: args{ + portalScope: utils.AgentScopeNamespace, + agentType: utils.AgentTypeInternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() {}, + want: 
"http://litmusportal-server-service.some-ns:9002/query", + wantErr: false, + }, + { + name: "failure: external agent & Ingress is absent & service type is NodePort but portal scope is namespace", + args: args{ + portalScope: utils.AgentScopeNamespace, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() {}, + wantErr: true, + }, + { + name: "failure: external agent & Ingress is absent & service type is NodePort but cannot find node name", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() {}, + wantErr: true, + }, + { + name: "success: external agent & Ingress is absent & service type is NodePort and address type is ExternalIP", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + node := getNodeObject(v1.NodeExternalIP) + _, err := client.GenericClient.CoreV1().Nodes().Create(context.Background(), node, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + utils.Config.NodeName = node.Name + }, + want: "http://1.2.3.4:31001/query", + wantErr: false, + }, + { + name: "success: external agent & Ingress is absent & service type is NodePort and address type is InternalIP", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + node := getNodeObject(v1.NodeInternalIP) + _, err := client.GenericClient.CoreV1().Nodes().Create(context.Background(), node, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + utils.Config.NodeName = node.Name + }, + want: "http://1.2.3.4:31001/query", + wantErr: false, + }, + { + name: "success: external agent & Ingress is absent & service type is ClusterIP", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeClusterIP), + }, + given: func() {}, + want: "http://1.1.1.1:9002/query", + wantErr: false, + }, + { + name: "failure: external agent & Ingress is absent but service type not found", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject("invalid serviceType"), + }, + given: func() {}, + wantErr: true, + }, + { + name: "failure: external agent & invalid ingress value", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + utils.Config.Ingress = "invalid value" + }, + wantErr: true, + }, + { + name: "failure: external agent & Ingress is absent & service type is LoadBalancer but LoadBalancerIP/Hostname not present", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceTypeLoadBalancerObject("", ""), + }, + given: func() { + utils.Config.Ingress = "false" + }, + wantErr: true, + }, + { + name: "success: external agent & Ingress is absent & service type is LoadBalancer and hostname is present", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceTypeLoadBalancerObject("", "hostname.com"), + }, + given: func() {}, + want: "http://hostname.com:9002/query", + wantErr: false, + }, + { + name: "success: external agent & Ingress is absent & service type is LoadBalancer and ip is 
present", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceTypeLoadBalancerObject("1.1.1.1", ""), + }, + given: func() {}, + want: "http://1.1.1.1:9002/query", + wantErr: false, + }, + { + name: "failure: external agent & Ingress is present but cannot get ingress", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + utils.Config.Ingress = "true" + }, + wantErr: true, + }, + { + name: "success: external agent & Ingress is present", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + utils.Config.Ingress = "true" + ingress := getIngressObject(true, "", "") + _, err := client.GenericClient.NetworkingV1().Ingresses(utils.Config.LitmusPortalNamespace).Create(context.Background(), ingress, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + utils.Config.IngressName = ingress.Name + }, + want: "http://hostname.com/query", + wantErr: false, + }, + { + name: "success: external agent & Ingress is present and ingress hostname is present", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + utils.Config.Ingress = "true" + ingress := getIngressObject(false, "", "hostname.com") + _, err := client.GenericClient.NetworkingV1().Ingresses(utils.Config.LitmusPortalNamespace).Create(context.Background(), ingress, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + utils.Config.IngressName = ingress.Name + }, + want: "http://hostname.com/query", + wantErr: false, + }, + { + name: "success: external agent & Ingress is present and ingress ip is present", + args: args{ + portalScope: utils.AgentScopeCluster, + agentType: utils.AgentTypeExternal, + service: getServiceObject(v1.ServiceTypeNodePort), + }, + given: func() { + utils.Config.Ingress = "true" + ingress := getIngressObject(false, "1.2.3.4", "") + _, err := client.GenericClient.NetworkingV1().Ingresses(utils.Config.LitmusPortalNamespace).Create(context.Background(), ingress, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + utils.Config.IngressName = ingress.Name + }, + want: "http://1.2.3.4/query", + wantErr: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + _, err := client.GenericClient.CoreV1().Services(utils.Config.LitmusPortalNamespace).Create(context.Background(), tc.args.service, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + t.Cleanup(func() { + _ = client.GenericClient.CoreV1().Services(utils.Config.LitmusPortalNamespace).Delete(context.Background(), utils.Config.ServerServiceName, metaV1.DeleteOptions{}) + }) + // when + endpoint, err := client.GetServerEndpoint(tc.args.portalScope, tc.args.agentType) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.want, endpoint) + } + }) + } +} + +// TestGetTLSCert tests the GetTLSCert function +func TestGetTLSCert(t *testing.T) { + // given + genericClient := fakeClientSet.NewSimpleClientset() + client := &k8s.KubeClients{GenericClient: genericClient} + utils.Config.LitmusPortalNamespace = uuid.NewString() + + testcases := []struct { + name string + given func() string + wantErr bool + }{ + { + name: "success", + given: func() 
string { + secretName, secretData := "tls.crt", []byte(uuid.NewString()) + secret, err := client.GenericClient.CoreV1().Secrets(utils.Config.LitmusPortalNamespace).Create(context.Background(), &v1.Secret{ + Data: map[string][]byte{secretName: secretData}, + Type: v1.SecretTypeTLS, + }, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + return secret.Name + }, + wantErr: false, + }, + { + name: "failure: secret not found", + given: func() string { + return uuid.NewString() + }, + wantErr: true, + }, + { + name: "failure: cannot find tls.crt", + given: func() string { + secretName, secretData := "invalid", []byte(uuid.NewString()) + secret, err := client.GenericClient.CoreV1().Secrets(utils.Config.LitmusPortalNamespace).Create(context.Background(), &v1.Secret{ + Data: map[string][]byte{secretName: secretData}, + Type: v1.SecretTypeTLS, + }, metaV1.CreateOptions{}) + if err != nil { + t.FailNow() + } + return secret.Name + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + objectName := tc.given() + t.Cleanup(func() { + client.GenericClient.CoreV1().Secrets(utils.Config.LitmusPortalNamespace).Delete(context.Background(), objectName, metaV1.DeleteOptions{}) + }) + // when + _, err := client.GetTLSCert(objectName) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/litmus-portal/graphql-server/pkg/projects/project_handler.go b/litmus-portal/graphql-server/pkg/projects/project_handler.go index f8fad5b7fd6..a1550aea6ad 100644 --- a/litmus-portal/graphql-server/pkg/projects/project_handler.go +++ b/litmus-portal/graphql-server/pkg/projects/project_handler.go @@ -12,6 +12,7 @@ import ( dbOperationsImageRegistry "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/image_registry" dbOperationsWorkflow "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/workflow" imageRegistry "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/image_registry" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" log "github.com/sirupsen/logrus" @@ -79,9 +80,14 @@ func ProjectInitializer(ctx context.Context, projectID string, role string, oper if strings.ToLower(selfCluster) == "true" && strings.ToLower(role) == "admin" { log.Info("starting self deployer") + kubeCluster, err := k8s.NewKubeCluster() + if err != nil { + log.Fatalf("error in getting kube client, err: %v", err) + } go selfDeployer.StartDeployer(cluster.NewService( dbSchemaCluster.NewClusterOperator(operator), dbOperationsWorkflow.NewChaosWorkflowOperator(operator), + kubeCluster, ), projectID) } diff --git a/litmus-portal/graphql-server/pkg/rest_handlers/file_handler.go b/litmus-portal/graphql-server/pkg/rest_handlers/file_handler.go index 40c56faa447..3fa01cc5fc0 100644 --- a/litmus-portal/graphql-server/pkg/rest_handlers/file_handler.go +++ b/litmus-portal/graphql-server/pkg/rest_handlers/file_handler.go @@ -8,6 +8,7 @@ import ( "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb" dbSchemaCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster" dbOperationsWorkflow "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/workflow" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" log 
"github.com/sirupsen/logrus" ) @@ -16,9 +17,14 @@ import ( func FileHandler(mongodbOperator mongodb.MongoOperator) gin.HandlerFunc { return func(c *gin.Context) { token := strings.TrimSuffix(c.Param("key"), ".yaml") + kubeCluster, err := k8s.NewKubeCluster() + if err != nil { + log.Fatalf("error while getting kube config: %v", err) + } response, statusCode, err := cluster.NewService( dbSchemaCluster.NewClusterOperator(mongodbOperator), dbOperationsWorkflow.NewChaosWorkflowOperator(mongodbOperator), + kubeCluster, ).GetManifest(token) if err != nil { log.WithError(err).Error("error while generating manifest file") diff --git a/litmus-portal/graphql-server/pkg/self-deployer/start.go b/litmus-portal/graphql-server/pkg/self-deployer/start.go index 47803f23ece..9a8841c9807 100644 --- a/litmus-portal/graphql-server/pkg/self-deployer/start.go +++ b/litmus-portal/graphql-server/pkg/self-deployer/start.go @@ -7,10 +7,8 @@ import ( "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" log "github.com/sirupsen/logrus" - "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster" - "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" - "github.com/litmuschaos/litmus/litmus-portal/graphql-server/graph/model" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster" ) // StartDeployer registers a new internal self-cluster and starts the deployer @@ -76,7 +74,7 @@ func StartDeployer(clusterService cluster.Service, projectID string) { manifests := strings.Split(string(response), "---") for _, manifest := range manifests { if len(strings.TrimSpace(manifest)) > 0 { - _, err = k8s.ClusterResource(manifest, deployerNamespace) + _, err = clusterService.GetClusterResource(manifest, deployerNamespace) if err != nil { log.Error(err) failedManifest = failedManifest + manifest diff --git a/litmus-portal/graphql-server/utils/variables.go b/litmus-portal/graphql-server/utils/variables.go index 9c323c13738..28ebfda4f19 100644 --- a/litmus-portal/graphql-server/utils/variables.go +++ b/litmus-portal/graphql-server/utils/variables.go @@ -42,3 +42,16 @@ type Configurations struct { } var Config Configurations + +// AgentScope is the scope of the agent +type AgentScope string + +// AgentType is the type of the agent +type AgentType string + +const ( + AgentScopeCluster AgentScope = "cluster" + AgentScopeNamespace AgentScope = "namespace" + AgentTypeInternal AgentType = "internal" + AgentTypeExternal AgentType = "external" +) From 53bcf9a440bd0f4126e9103d1d9279409d3844b9 Mon Sep 17 00:00:00 2001 From: namkyu1999 Date: Fri, 12 May 2023 12:01:21 +0900 Subject: [PATCH 2/6] feat: add unit tests to cluster package Signed-off-by: namkyu1999 --- .../graphql-server/pkg/cluster/cluster_jwt.go | 8 +- .../pkg/cluster/cluster_jwt_test.go | 69 ++ .../graphql-server/pkg/cluster/service.go | 24 +- .../pkg/cluster/service_test.go | 886 ++++++++++++++++++ 4 files changed, 971 insertions(+), 16 deletions(-) create mode 100644 litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go create mode 100644 litmus-portal/graphql-server/pkg/cluster/service_test.go diff --git a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go index cc47a560da1..697761ebcf9 100644 --- a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go +++ b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go @@ -8,8 +8,8 @@ import ( "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" ) -// ClusterCreateJWT generates jwt used in 
cluster registration -func ClusterCreateJWT(id string) (string, error) { +// CreateJWT generates jwt used in cluster registration +func CreateJWT(id string) (string, error) { claims := jwt.MapClaims{} claims["cluster_id"] = id token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) @@ -22,8 +22,8 @@ func ClusterCreateJWT(id string) (string, error) { return tokenString, nil } -// ClusterValidateJWT validates the cluster jwt -func ClusterValidateJWT(token string) (string, error) { +// ValidateJWT validates the cluster jwt +func ValidateJWT(token string) (string, error) { tkn, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) diff --git a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go new file mode 100644 index 00000000000..1e82e6b7f2e --- /dev/null +++ b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go @@ -0,0 +1,69 @@ +package cluster_test + +import ( + "testing" + + "github.com/google/uuid" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" + "github.com/stretchr/testify/assert" +) + +func TestCreateJwt(t *testing.T) { + // given + utils.Config.JwtSecret = uuid.NewString() + // when + _, err := cluster.CreateJWT(uuid.NewString()) + // then + assert.NoError(t, err) +} + +func TestValidateJWT(t *testing.T) { + // given + testcases := []struct { + name string + wantError bool + given func() string + }{ + { + name: "success", + wantError: false, + given: func() string { + utils.Config.JwtSecret = uuid.NewString() + token, _ := cluster.CreateJWT(uuid.NewString()) + return token + }, + }, + { + name: "failure: invalid token", + wantError: true, + given: func() string { + return uuid.NewString() + }, + }, + { + name: "failure: valid token but secret changed", + wantError: true, + given: func() string { + utils.Config.JwtSecret = uuid.NewString() + token, _ := cluster.CreateJWT(uuid.NewString()) + utils.Config.JwtSecret = uuid.NewString() + return token + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + token := tc.given() + // when + _, err := cluster.ValidateJWT(token) + // then + if tc.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/litmus-portal/graphql-server/pkg/cluster/service.go b/litmus-portal/graphql-server/pkg/cluster/service.go index d29625c456e..c5a231529cc 100644 --- a/litmus-portal/graphql-server/pkg/cluster/service.go +++ b/litmus-portal/graphql-server/pkg/cluster/service.go @@ -43,15 +43,15 @@ type Service interface { type clusterService struct { clusterOperator *dbSchemaCluster.Operator chaosWorkflowOperator *dbOperationsWorkflow.Operator - kubeCluster *k8s.KubeClients + kubeClients *k8s.KubeClients } // NewService returns a new instance of Service -func NewService(clusterOperator *dbSchemaCluster.Operator, chaosWorkflowOperator *dbOperationsWorkflow.Operator, kubeCluster *k8s.KubeClients) Service { +func NewService(clusterOperator *dbSchemaCluster.Operator, chaosWorkflowOperator *dbOperationsWorkflow.Operator, kubeClients *k8s.KubeClients) Service { return &clusterService{ clusterOperator: clusterOperator, chaosWorkflowOperator: chaosWorkflowOperator, - kubeCluster: kubeCluster, + kubeClients: kubeClients, } } @@ -67,7 +67,7 @@ func (c 
*clusterService) RegisterCluster(request model.RegisterClusterRequest) ( } clusterID := uuid.New().String() - token, err := ClusterCreateJWT(clusterID) + token, err := CreateJWT(clusterID) if err != nil { return &model.RegisterClusterResponse{}, err } @@ -209,7 +209,7 @@ func (c *clusterService) DeleteClusters(ctx context.Context, projectID string, c } clusters, err := c.clusterOperator.ListClusters(ctx, query) if err != nil { - return "", nil + return "", err } for _, cluster := range clusters { @@ -258,7 +258,7 @@ func (c *clusterService) ListClusters(projectID string, clusterType *string) ([] if err != nil { return nil, err } - newClusters := []*model.Cluster{} + var newClusters []*model.Cluster for _, cluster := range clusters { var totalNoOfSchedules int @@ -324,7 +324,7 @@ func (c *clusterService) GetManifestWithClusterID(clusterID string, accessKey st var scope = utils.Config.ChaosCenterScope if scope == string(utils.AgentScopeCluster) && utils.Config.TlsSecretName != "" { - config.TLSCert, err = c.kubeCluster.GetTLSCert(utils.Config.TlsSecretName) + config.TLSCert, err = c.kubeClients.GetTLSCert(utils.Config.TlsSecretName) if err != nil { return nil, fmt.Errorf("failed to retrieve the tls cert %v", err) } @@ -340,7 +340,7 @@ func (c *clusterService) GetManifestWithClusterID(clusterID string, accessKey st } else if reqCluster.AgentScope == string(utils.AgentScopeNamespace) { respData, err = manifestParser(reqCluster, "manifests/namespace", &config) } else { - log.Error("env AGENT_SCOPE is empty") + return nil, fmt.Errorf("env AGENT_SCOPE is empty") } if err != nil { @@ -395,7 +395,7 @@ func (c *clusterService) VerifyCluster(identity model.ClusterIdentity) (*dbSchem // GetManifest returns manifest for a given cluster func (c *clusterService) GetManifest(token string) ([]byte, int, error) { - clusterID, err := ClusterValidateJWT(token) + clusterID, err := ValidateJWT(token) if err != nil { return nil, http.StatusNotFound, err } @@ -413,7 +413,7 @@ func (c *clusterService) GetManifest(token string) ([]byte, int, error) { var scope = utils.Config.ChaosCenterScope if scope == string(utils.AgentScopeCluster) && utils.Config.TlsSecretName != "" { - config.TLSCert, err = c.kubeCluster.GetTLSCert(utils.Config.TlsSecretName) + config.TLSCert, err = c.kubeClients.GetTLSCert(utils.Config.TlsSecretName) if err != nil { return nil, http.StatusInternalServerError, err } @@ -455,7 +455,7 @@ func (c *clusterService) GetEndpoint(agentType utils.AgentType) (string, error) } // generating endpoint based on ChaosCenter Scope & AgentType (Self or External) - agentEndpoint, err := c.kubeCluster.GetServerEndpoint(utils.AgentScope(utils.Config.ChaosCenterScope), agentType) + agentEndpoint, err := c.kubeClients.GetServerEndpoint(utils.AgentScope(utils.Config.ChaosCenterScope), agentType) if agentEndpoint == "" || err != nil { return "", fmt.Errorf("failed to retrieve the server endpoint %v", err) @@ -466,5 +466,5 @@ func (c *clusterService) GetEndpoint(agentType utils.AgentType) (string, error) // GetClusterResource returns the cluster resource for a given manifest func (c *clusterService) GetClusterResource(manifest string, namespace string) (*unstructured.Unstructured, error) { - return c.kubeCluster.ClusterResource(manifest, namespace) + return c.kubeClients.ClusterResource(manifest, namespace) } diff --git a/litmus-portal/graphql-server/pkg/cluster/service_test.go b/litmus-portal/graphql-server/pkg/cluster/service_test.go new file mode 100644 index 00000000000..8a49e97ce5b --- /dev/null +++ 
b/litmus-portal/graphql-server/pkg/cluster/service_test.go @@ -0,0 +1,886 @@ +// Package cluster_test implements the unit test cases of cluster service functions +package cluster_test + +import ( + "context" + "errors" + "io/ioutil" + "os" + "testing" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/graph/model" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/authorization" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster" + store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb" + dbSchemaCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster" + mongodbMocks "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/model/mocks" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/workflow" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/k8s" + "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "k8s.io/client-go/kubernetes/fake" +) + +var ( + mongoOperator = new(mongodbMocks.MongoOperator) + clusterOperator = dbSchemaCluster.NewClusterOperator(mongoOperator) + chaosWorkflowOperator = workflow.NewChaosWorkflowOperator(mongoOperator) + kubeClients = new(k8s.KubeClients) + clusterService = cluster.NewService(clusterOperator, chaosWorkflowOperator, kubeClients) +) + +// TestMain is the entry point for testing +func TestMain(m *testing.M) { + gin.SetMode(gin.TestMode) + log.SetOutput(ioutil.Discard) + os.Exit(m.Run()) +} + +// TestClusterService_RegisterCluster tests the RegisterCluster function of cluster service +func TestClusterService_RegisterCluster(t *testing.T) { + // given + nodeSelector := "key1=value2,key2=value2" + testcases := []struct { + name string + wantErr bool + request model.RegisterClusterRequest + given func() + }{ + { + name: "failure: cannot get cluster endpoint", + wantErr: true, + given: func() { + kubeClients.GenericClient = fake.NewSimpleClientset() + t.Cleanup(func() { kubeClients.GenericClient = nil }) + }, + }, + { + name: "success", + wantErr: false, + request: model.RegisterClusterRequest{ + NodeSelector: &nodeSelector, + }, + given: func() { + utils.Config.ChaosCenterUiEndpoint = "1.2.3.4" + t.Cleanup(func() { utils.Config.ChaosCenterUiEndpoint = "" }) + mongoOperator.On("Create", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(nil).Once() + }, + }, + { + name: "failure: mongo create error", + wantErr: true, + given: func() { + utils.Config.ChaosCenterUiEndpoint = "1.2.3.4" + t.Cleanup(func() { utils.Config.ChaosCenterUiEndpoint = "" }) + mongoOperator.On("Create", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(errors.New("")).Once() + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + _, err := clusterService.RegisterCluster(tc.request) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_UpdateCluster tests the UpdateCluster function of cluster service +func TestClusterService_UpdateCluster(t *testing.T) { + // given + testcases := []struct { + name string 
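+		// wantErr marks the cases where the mocked mongo call is expected to make the service call fail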
+ wantErr bool + given func() + }{ + { + name: "success", + given: func() { + mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once() + }, + }, + { + name: "failure: mongo update error", + wantErr: true, + given: func() { + mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, errors.New("")).Once() + }, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + err := clusterService.UpdateCluster(bson.D{}, bson.D{}) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_ConfirmClusterRegistration tests the ConfirmClusterRegistration function of cluster service +func TestClusterService_ConfirmClusterRegistration(t *testing.T) { + // given + testcases := []struct { + name string + wantErr bool + request model.ClusterIdentity + given func(request model.ClusterIdentity) + }{ + { + name: "success: access key mismatch", + request: model.ClusterIdentity{ + Version: uuid.NewString(), + }, + given: func(request model.ClusterIdentity) { + utils.Config.Version = request.Version + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", uuid.NewString()}, {"access_key", uuid.NewString()}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + + }, + }, + { + name: "success: access key match", + request: model.ClusterIdentity{ + Version: uuid.NewString(), + AccessKey: uuid.NewString(), + }, + given: func(request model.ClusterIdentity) { + utils.Config.Version = request.Version + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", uuid.NewString()}, {"access_key", request.AccessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once() + }, + }, + { + name: "failure: version mismatch", + request: model.ClusterIdentity{ + Version: uuid.NewString(), + }, + given: func(request model.ClusterIdentity) { + utils.Config.Version = uuid.NewString() + }, + wantErr: true, + }, + { + name: "failure: mongo get error", + request: model.ClusterIdentity{ + Version: uuid.NewString(), + }, + given: func(request model.ClusterIdentity) { + utils.Config.Version = request.Version + singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once() + }, + wantErr: true, + }, + { + name: "failure: mongo update error", + request: model.ClusterIdentity{ + Version: uuid.NewString(), + AccessKey: uuid.NewString(), + }, + given: func(request model.ClusterIdentity) { + utils.Config.Version = request.Version + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", uuid.NewString()}, {"access_key", request.AccessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + 
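+				// the Get mock above succeeds with a matching access key, so the
+				// failure is forced at the subsequent Update call
+				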
mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, errors.New("")).Once() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given(tc.request) + // when + _, err := clusterService.ConfirmClusterRegistration(tc.request, *store.NewStore()) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_NewClusterEvent tests the NewClusterEvent function of cluster service +func TestClusterService_NewClusterEvent(t *testing.T) { + // given + testcases := []struct { + name string + wantErr bool + request model.NewClusterEventRequest + given func(request model.NewClusterEventRequest) + }{ + { + name: "success", + request: model.NewClusterEventRequest{ + AccessKey: uuid.NewString(), + }, + given: func(request model.NewClusterEventRequest) { + findResult := bson.D{{"project_id", uuid.NewString()}, {"is_registered", true}, {"access_key", request.AccessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + }, + { + name: "failure: mongo get error", + request: model.NewClusterEventRequest{}, + given: func(request model.NewClusterEventRequest) { + singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once() + }, + wantErr: true, + }, + { + name: "failure: cluster not registered", + request: model.NewClusterEventRequest{}, + given: func(request model.NewClusterEventRequest) { + findResult := bson.D{{"project_id", uuid.NewString()}, {"access_key", request.AccessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given(tc.request) + // when + _, err := clusterService.NewClusterEvent(tc.request, *store.NewStore()) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_DeleteClusters tests the DeleteClusters function of cluster service +func TestClusterService_DeleteClusters(t *testing.T) { + // given + ctx := context.WithValue(context.Background(), authorization.AuthKey, uuid.NewString()) + clusterID := uuid.NewString() + type args struct { + clusterIDs []*string + projectID string + } + testcases := []struct { + name string + wantErr bool + args args + given func() + }{ + { + name: "success", + args: args{ + clusterIDs: []*string{&clusterID}, + projectID: uuid.NewString(), + }, + given: func() { + mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once() + wants := make([]interface{}, 1) + wants[0] = bson.D{{"project_id", uuid.NewString()}, {"cluster_id", uuid.NewString()}, {"agent_namespace", uuid.NewString()}} + cursor, _ := mongo.NewCursorFromDocuments(wants, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(cursor, nil).Once() + }, + }, + { + name: "failure: mongo update error", + args: args{ + clusterIDs: []*string{&clusterID}, + 
projectID: uuid.NewString(), + }, + given: func() { + mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, errors.New("")).Once() + }, + wantErr: true, + }, + { + name: "failure: mongo list error", + args: args{ + clusterIDs: []*string{&clusterID}, + projectID: uuid.NewString(), + }, + given: func() { + mongoOperator.On("Update", mock.Anything, mongodb.ClusterCollection, mock.Anything, mock.Anything, mock.Anything).Return(&mongo.UpdateResult{}, nil).Once() + cursor, _ := mongo.NewCursorFromDocuments(nil, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(cursor, errors.New("")).Once() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + _, err := clusterService.DeleteClusters(ctx, tc.args.projectID, tc.args.clusterIDs, *store.NewStore()) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_ListClusters tests the ListClusters function of cluster service +func TestClusterService_ListClusters(t *testing.T) { + testcases := []struct { + name string + wantErr bool + given func(projectID string) + }{ + { + name: "success", + given: func(projectID string) { + clusters, workflows, clusterID := make([]interface{}, 1), make([]interface{}, 1), uuid.NewString() + clusters[0] = bson.D{{"project_id", projectID}, {"cluster_id", clusterID}} + cursor, _ := mongo.NewCursorFromDocuments(clusters, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(cursor, nil).Once() + workflows[0] = bson.D{{"project_id", projectID}, {"cluster_id", clusterID}} + workflowsCursor, _ := mongo.NewCursorFromDocuments(workflows, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.WorkflowCollection, mock.Anything).Return(workflowsCursor, nil).Once() + }, + }, + { + name: "failure: mongo cluster list error", + given: func(projectID string) { + cursor, _ := mongo.NewCursorFromDocuments(nil, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(cursor, errors.New("")).Once() + }, + wantErr: true, + }, + { + name: "failure: mongo workflow list error", + given: func(projectID string) { + clusters, clusterID := make([]interface{}, 1), uuid.NewString() + clusters[0] = bson.D{{"project_id", projectID}, {"cluster_id", clusterID}} + cursor, _ := mongo.NewCursorFromDocuments(clusters, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(cursor, nil).Once() + workflowsCursor, _ := mongo.NewCursorFromDocuments(nil, nil, nil) + mongoOperator.On("List", mock.Anything, mongodb.WorkflowCollection, mock.Anything).Return(workflowsCursor, errors.New("")).Once() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + projectID := uuid.NewString() + tc.given(projectID) + // when + clusterType := uuid.NewString() + _, err := clusterService.ListClusters(projectID, &clusterType) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_GetAgentDetails tests the GetAgentDetails function of cluster service +func TestClusterService_GetAgentDetails(t *testing.T) { + // given + projectID := uuid.NewString() + testcases := []struct { + name string + wantErr bool + given func() + }{ + { + 
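+			// the GetAgentDetails table only needs two cases: a document found by
+			// the mocked mongo Get, and the Get call itself returning an error
+			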
name: "success", + given: func() { + findResult := bson.D{{"project_id", projectID}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + }, + { + name: "failure: mongo get error", + given: func() { + singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + _, err := clusterService.GetAgentDetails(context.Background(), uuid.NewString(), projectID) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_GetManifestWithClusterID tests the GetManifestWithClusterID function of cluster service +func TestClusterService_GetManifestWithClusterID(t *testing.T) { + // given + clusterID, accessKey := uuid.NewString(), uuid.NewString() + testcases := []struct { + name string + wantErr bool + given func() + }{ + { + name: "success", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}, {"agent_scope", string(utils.AgentScopeCluster)}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + utils.Config.ChaosCenterUiEndpoint = "http://1.2.3.4" + utils.Config.ChaosCenterScope = string(utils.AgentScopeCluster) + t.Cleanup(func() { + utils.Config.ChaosCenterUiEndpoint = "" + utils.Config.ChaosCenterScope = "" + }) + + manifest := ` +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo + namespace: #{AGENT_NAMESPACE} +` + // not using filepath. 
see Dockerfile + err := os.MkdirAll("manifests/cluster", 0755) + if err != nil { + t.FailNow() + } + temp, err := os.Create("manifests/cluster/1b_argo_rbac.yaml") + if err != nil { + t.FailNow() + } + defer func(temp *os.File) { + err := temp.Close() + if err != nil { + t.FailNow() + } + }(temp) + _, err = temp.WriteString(manifest) + if err != nil { + t.FailNow() + } + t.Cleanup(func() { + _ = os.Remove("manifests/cluster/1b_argo_rbac.yaml") + _ = os.Remove("manifests/cluster/") + _ = os.Remove("manifests/") + }) + }, + }, + { + name: "failure: mongo get error", + given: func() { + singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once() + }, + wantErr: true, + }, + { + name: "failure: access key mismatch", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", uuid.NewString()}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + wantErr: true, + }, + { + name: "failure: endpoint not set", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + utils.Config.ChaosCenterUiEndpoint = "" + kubeClients.GenericClient = fake.NewSimpleClientset() + t.Cleanup(func() { kubeClients.GenericClient = nil }) + }, + wantErr: true, + }, + { + name: "failure: agent scope not set", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + utils.Config.ChaosCenterUiEndpoint = "http://1.2.3.4" + utils.Config.ChaosCenterScope = string(utils.AgentScopeNamespace) + t.Cleanup(func() { + utils.Config.ChaosCenterUiEndpoint = "" + utils.Config.ChaosCenterScope = "" + }) + }, + wantErr: true, + }, + { + name: "failure: failed to get tls.cert", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + utils.Config.ChaosCenterUiEndpoint = "http://1.2.3.4" + utils.Config.ChaosCenterScope = string(utils.AgentScopeCluster) + utils.Config.TlsSecretName = uuid.NewString() + t.Cleanup(func() { + utils.Config.ChaosCenterUiEndpoint = "" + utils.Config.ChaosCenterScope = "" + utils.Config.TlsSecretName = "" + }) + kubeClients.GenericClient = fake.NewSimpleClientset() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + _, err := clusterService.GetManifestWithClusterID(clusterID, accessKey) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_VerifyCluster tests the VerifyCluster function of cluster service +func TestClusterService_VerifyCluster(t *testing.T) 
{ + // given + testcases := []struct { + name string + wantErr bool + identity model.ClusterIdentity + given func(identity model.ClusterIdentity) + }{ + { + name: "success", + given: func(identity model.ClusterIdentity) { + utils.Config.Version = identity.Version + t.Cleanup(func() { utils.Config.Version = "" }) + findResult := bson.D{{"cluster_id", identity.ClusterID}, {"access_key", identity.AccessKey}, {"is_registered", true}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + identity: model.ClusterIdentity{ + ClusterID: uuid.NewString(), + AccessKey: uuid.NewString(), + Version: "1.0.0", + }, + }, + { + name: "failure: ci version mismatch", + given: func(identity model.ClusterIdentity) { + utils.Config.Version = cluster.CIVersion + t.Cleanup(func() { utils.Config.Version = "" }) + }, + identity: model.ClusterIdentity{ + ClusterID: uuid.NewString(), + AccessKey: uuid.NewString(), + Version: "1.0.0", + }, + wantErr: true, + }, + { + name: "failure: version mismatch", + given: func(identity model.ClusterIdentity) { + utils.Config.Version = "1.1.1" + t.Cleanup(func() { utils.Config.Version = "" }) + }, + identity: model.ClusterIdentity{ + ClusterID: uuid.NewString(), + AccessKey: uuid.NewString(), + Version: "1.0.0", + }, + wantErr: true, + }, + { + name: "failure: mongo get error", + given: func(identity model.ClusterIdentity) { + utils.Config.Version = identity.Version + t.Cleanup(func() { utils.Config.Version = "" }) + singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once() + }, + identity: model.ClusterIdentity{ + ClusterID: uuid.NewString(), + AccessKey: uuid.NewString(), + Version: "1.0.0", + }, + wantErr: true, + }, + { + name: "failure: cluster not registered", + given: func(identity model.ClusterIdentity) { + utils.Config.Version = identity.Version + t.Cleanup(func() { utils.Config.Version = "" }) + findResult := bson.D{{"cluster_id", identity.ClusterID}, {"access_key", identity.AccessKey}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + identity: model.ClusterIdentity{ + ClusterID: uuid.NewString(), + AccessKey: uuid.NewString(), + Version: "1.0.0", + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given(tc.identity) + // when + _, err := clusterService.VerifyCluster(tc.identity) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_GetManifest tests the GetManifest function of cluster service +func TestClusterService_GetManifest(t *testing.T) { + // given + clusterID := uuid.NewString() + accessKey, _ := cluster.CreateJWT(uuid.NewString()) + testcases := []struct { + name string + wantErr bool + given func() + }{ + { + name: "success", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}, {"agent_scope", string(utils.AgentScopeCluster)}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + 
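+				// the happy path pins both the UI endpoint and the cluster scope;
+				// the "failure: endpoint not set" case below clears the endpoint again
+				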
utils.Config.ChaosCenterUiEndpoint = "http://1.2.3.4"
+				utils.Config.ChaosCenterScope = string(utils.AgentScopeCluster)
+				t.Cleanup(func() {
+					utils.Config.ChaosCenterUiEndpoint = ""
+					utils.Config.ChaosCenterScope = ""
+				})
+
+				manifest := `
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argo
+  namespace: #{AGENT_NAMESPACE}
+`
+				// not using filepath. see Dockerfile
+				err := os.MkdirAll("manifests/cluster", 0755)
+				if err != nil {
+					t.FailNow()
+				}
+				temp, err := os.Create("manifests/cluster/1b_argo_rbac.yaml")
+				if err != nil {
+					t.FailNow()
+				}
+				defer func(temp *os.File) {
+					err := temp.Close()
+					if err != nil {
+						t.FailNow()
+					}
+				}(temp)
+				_, err = temp.WriteString(manifest)
+				if err != nil {
+					t.FailNow()
+				}
+				t.Cleanup(func() {
+					_ = os.Remove("manifests/cluster/1b_argo_rbac.yaml")
+					_ = os.Remove("manifests/cluster/")
+					_ = os.Remove("manifests/")
+				})
+			},
+		},
+		{
+			name: "success: namespace scope",
+			given: func() {
+				findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}, {"agent_scope", string(utils.AgentScopeNamespace)}}
+				singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil)
+				mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once()
+				utils.Config.ChaosCenterUiEndpoint = "http://1.2.3.4"
+				utils.Config.ChaosCenterScope = string(utils.AgentScopeNamespace)
+				t.Cleanup(func() {
+					utils.Config.ChaosCenterUiEndpoint = ""
+					utils.Config.ChaosCenterScope = ""
+				})
+
+				manifest := `
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argo
+  namespace: #{AGENT_NAMESPACE}
+`
+				// not using filepath. see Dockerfile
+				err := os.MkdirAll("manifests/namespace", 0755)
+				if err != nil {
+					t.FailNow()
+				}
+				temp, err := os.Create("manifests/namespace/1b_argo_rbac.yaml")
+				if err != nil {
+					t.FailNow()
+				}
+				defer func(temp *os.File) {
+					err := temp.Close()
+					if err != nil {
+						t.FailNow()
+					}
+				}(temp)
+				_, err = temp.WriteString(manifest)
+				if err != nil {
+					t.FailNow()
+				}
+				t.Cleanup(func() {
+					_ = os.Remove("manifests/namespace/1b_argo_rbac.yaml")
+					_ = os.Remove("manifests/namespace/")
+					_ = os.Remove("manifests/")
+				})
+			},
+		},
+		{
+			name: "failure: mongo get error",
+			given: func() {
+				singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil)
+				mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once()
+			},
+			wantErr: true,
+		},
+		{
+			name: "failure: endpoint not set",
+			given: func() {
+				findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}}
+				singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil)
+				mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once()
+				utils.Config.ChaosCenterUiEndpoint = ""
+				kubeClients.GenericClient = fake.NewSimpleClientset()
+				t.Cleanup(func() { kubeClients.GenericClient = nil })
+			},
+			wantErr: true,
+		},
+		{
+			name: "failure: failed to get tls.cert",
+			given: func() {
+				findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", clusterID}, {"access_key", accessKey}}
+				singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil)
+				mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once()
+				utils.Config.ChaosCenterUiEndpoint = "http://1.2.3.4"
+				utils.Config.ChaosCenterScope = string(utils.AgentScopeCluster)
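+				// reference a TLS secret that was never created in the fake clientset,
+				// so the GetTLSCert lookup inside GetManifest is forced to fail
+				utils.Config.TlsSecretName = 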
uuid.NewString() + t.Cleanup(func() { + utils.Config.ChaosCenterUiEndpoint = "" + utils.Config.ChaosCenterScope = "" + utils.Config.TlsSecretName = "" + }) + kubeClients.GenericClient = fake.NewSimpleClientset() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + _, _, err := clusterService.GetManifest(accessKey) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestClusterService_GetCluster tests the GetCluster function of cluster service +func TestClusterService_GetCluster(t *testing.T) { + testcases := []struct { + name string + given func() + wantErr bool + }{ + { + name: "success", + given: func() { + findResult := bson.D{{"project_id", uuid.NewString()}, {"cluster_id", uuid.NewString()}, {"access_key", uuid.NewString()}} + singleResult := mongo.NewSingleResultFromDocument(findResult, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, nil).Once() + }, + }, + { + name: "failure: mongo get error", + given: func() { + singleResult := mongo.NewSingleResultFromDocument(nil, nil, nil) + mongoOperator.On("Get", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(singleResult, errors.New("")).Once() + }, + wantErr: true, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // given + tc.given() + // when + _, err := clusterService.GetCluster(uuid.NewString()) + // then + if tc.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} From beff4af790f59bdcc032bd8c8d5d1f45618ef3f3 Mon Sep 17 00:00:00 2001 From: namkyu1999 Date: Fri, 12 May 2023 13:21:08 +0900 Subject: [PATCH 3/6] fix: fix codacy analysis Signed-off-by: namkyu1999 --- litmus-portal/graphql-server/utils/variables.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/litmus-portal/graphql-server/utils/variables.go b/litmus-portal/graphql-server/utils/variables.go index 28ebfda4f19..c0384b9fa69 100644 --- a/litmus-portal/graphql-server/utils/variables.go +++ b/litmus-portal/graphql-server/utils/variables.go @@ -50,8 +50,12 @@ type AgentScope string type AgentType string const ( - AgentScopeCluster AgentScope = "cluster" + // AgentScopeCluster is the cluster scope + AgentScopeCluster AgentScope = "cluster" + // AgentScopeNamespace is the namespace scope AgentScopeNamespace AgentScope = "namespace" - AgentTypeInternal AgentType = "internal" - AgentTypeExternal AgentType = "external" + // AgentTypeInternal is the internal agent + AgentTypeInternal AgentType = "internal" + // AgentTypeExternal is the external agent + AgentTypeExternal AgentType = "external" ) From c7315c5cfa0665bdc3e60a76df3c974c42ec1224 Mon Sep 17 00:00:00 2001 From: namkyu1999 Date: Sun, 14 May 2023 19:33:06 +0900 Subject: [PATCH 4/6] fix: fix test cases Signed-off-by: namkyu1999 --- litmus-portal/graphql-server/pkg/cluster/service_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/litmus-portal/graphql-server/pkg/cluster/service_test.go b/litmus-portal/graphql-server/pkg/cluster/service_test.go index 8a49e97ce5b..7664d1dc1bc 100644 --- a/litmus-portal/graphql-server/pkg/cluster/service_test.go +++ b/litmus-portal/graphql-server/pkg/cluster/service_test.go @@ -59,6 +59,7 @@ func TestClusterService_RegisterCluster(t *testing.T) { given: func() { kubeClients.GenericClient = fake.NewSimpleClientset() t.Cleanup(func() { kubeClients.GenericClient = nil }) + 
mongoOperator.On("Create", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(errors.New("")).Once() }, }, { From 1d17c3104b090c5cc4d6a258579b092406addeda Mon Sep 17 00:00:00 2001 From: namkyu1999 Date: Sun, 14 May 2023 19:58:23 +0900 Subject: [PATCH 5/6] fix: chore Signed-off-by: namkyu1999 --- litmus-portal/graphql-server/pkg/cluster/service_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/litmus-portal/graphql-server/pkg/cluster/service_test.go b/litmus-portal/graphql-server/pkg/cluster/service_test.go index 7664d1dc1bc..f60a0a6c20c 100644 --- a/litmus-portal/graphql-server/pkg/cluster/service_test.go +++ b/litmus-portal/graphql-server/pkg/cluster/service_test.go @@ -53,15 +53,6 @@ func TestClusterService_RegisterCluster(t *testing.T) { request model.RegisterClusterRequest given func() }{ - { - name: "failure: cannot get cluster endpoint", - wantErr: true, - given: func() { - kubeClients.GenericClient = fake.NewSimpleClientset() - t.Cleanup(func() { kubeClients.GenericClient = nil }) - mongoOperator.On("Create", mock.Anything, mongodb.ClusterCollection, mock.Anything).Return(errors.New("")).Once() - }, - }, { name: "success", wantErr: false, From a7e0dcd1869cc9b1cf608d23a30ff9f024e8fd87 Mon Sep 17 00:00:00 2001 From: namkyu1999 Date: Fri, 19 May 2023 20:08:59 +0900 Subject: [PATCH 6/6] fix: rename function Signed-off-by: namkyu1999 --- .../graphql-server/pkg/cluster/cluster_jwt.go | 8 ++++---- .../graphql-server/pkg/cluster/cluster_jwt_test.go | 14 ++++++++------ .../graphql-server/pkg/cluster/service.go | 4 ++-- .../graphql-server/pkg/cluster/service_test.go | 2 +- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go index 697761ebcf9..3ee2a9eb396 100644 --- a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go +++ b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt.go @@ -8,8 +8,8 @@ import ( "github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils" ) -// CreateJWT generates jwt used in cluster registration -func CreateJWT(id string) (string, error) { +// CreateClusterJWT generates jwt used in cluster registration +func CreateClusterJWT(id string) (string, error) { claims := jwt.MapClaims{} claims["cluster_id"] = id token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) @@ -22,8 +22,8 @@ func CreateJWT(id string) (string, error) { return tokenString, nil } -// ValidateJWT validates the cluster jwt -func ValidateJWT(token string) (string, error) { +// ValidateClusterJWT validates the cluster jwt +func ValidateClusterJWT(token string) (string, error) { tkn, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) diff --git a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go index 1e82e6b7f2e..6f697e94b1f 100644 --- a/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go +++ b/litmus-portal/graphql-server/pkg/cluster/cluster_jwt_test.go @@ -9,16 +9,18 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCreateJwt(t *testing.T) { +// TestCreateClusterJWT tests the CreateClusterJWT function +func TestCreateClusterJWT(t *testing.T) { // given utils.Config.JwtSecret = uuid.NewString() // when - _, err := cluster.CreateJWT(uuid.NewString()) + _, err := 
cluster.CreateClusterJWT(uuid.NewString()) // then assert.NoError(t, err) } -func TestValidateJWT(t *testing.T) { +// TestValidateClusterJWT tests the ValidateClusterJWT function +func TestValidateClusterJWT(t *testing.T) { // given testcases := []struct { name string @@ -30,7 +32,7 @@ func TestValidateJWT(t *testing.T) { wantError: false, given: func() string { utils.Config.JwtSecret = uuid.NewString() - token, _ := cluster.CreateJWT(uuid.NewString()) + token, _ := cluster.CreateClusterJWT(uuid.NewString()) return token }, }, @@ -46,7 +48,7 @@ func TestValidateJWT(t *testing.T) { wantError: true, given: func() string { utils.Config.JwtSecret = uuid.NewString() - token, _ := cluster.CreateJWT(uuid.NewString()) + token, _ := cluster.CreateClusterJWT(uuid.NewString()) utils.Config.JwtSecret = uuid.NewString() return token }, @@ -57,7 +59,7 @@ func TestValidateJWT(t *testing.T) { // given token := tc.given() // when - _, err := cluster.ValidateJWT(token) + _, err := cluster.ValidateClusterJWT(token) // then if tc.wantError { assert.Error(t, err) diff --git a/litmus-portal/graphql-server/pkg/cluster/service.go b/litmus-portal/graphql-server/pkg/cluster/service.go index c5a231529cc..84dd05a0734 100644 --- a/litmus-portal/graphql-server/pkg/cluster/service.go +++ b/litmus-portal/graphql-server/pkg/cluster/service.go @@ -67,7 +67,7 @@ func (c *clusterService) RegisterCluster(request model.RegisterClusterRequest) ( } clusterID := uuid.New().String() - token, err := CreateJWT(clusterID) + token, err := CreateClusterJWT(clusterID) if err != nil { return &model.RegisterClusterResponse{}, err } @@ -395,7 +395,7 @@ func (c *clusterService) VerifyCluster(identity model.ClusterIdentity) (*dbSchem // GetManifest returns manifest for a given cluster func (c *clusterService) GetManifest(token string) ([]byte, int, error) { - clusterID, err := ValidateJWT(token) + clusterID, err := ValidateClusterJWT(token) if err != nil { return nil, http.StatusNotFound, err } diff --git a/litmus-portal/graphql-server/pkg/cluster/service_test.go b/litmus-portal/graphql-server/pkg/cluster/service_test.go index f60a0a6c20c..723513f07fb 100644 --- a/litmus-portal/graphql-server/pkg/cluster/service_test.go +++ b/litmus-portal/graphql-server/pkg/cluster/service_test.go @@ -682,7 +682,7 @@ func TestClusterService_VerifyCluster(t *testing.T) { func TestClusterService_GetManifest(t *testing.T) { // given clusterID := uuid.NewString() - accessKey, _ := cluster.CreateJWT(uuid.NewString()) + accessKey, _ := cluster.CreateClusterJWT(uuid.NewString()) testcases := []struct { name string wantErr bool