Commit

Change as per review
Signed-off-by: Drumil Patel <[email protected]>
weastel committed Jul 6, 2020
1 parent acf4a18 commit 8aa54a9
Showing 6 changed files with 88 additions and 36 deletions.
18 changes: 9 additions & 9 deletions infra/infra.go
@@ -91,10 +91,10 @@ func main() {
k8sEKS.Flag("vars", "When provided it will substitute the token holders in the yaml file. Follows the standard golang template formating - {{ .hashStable }}.").
Short('v').
StringMapVar(&e.DeploymentVars)
k8sEKS.Flag("rangeVars", "Similar to vars but for range values which is stringified with seperator.").
k8sEKS.Flag("rangeVars", "Similar to vars, but for specifying range values. Usage `get_range .RANGE_VAR .SEPERATOR`.").
Short('r').
StringMapVar(&e.DeploymentRangeVars)
k8sEKS.Flag("sep", "Separator helps to split stringified list into corresponding ranges. Default separator is `_`.").
k8sEKS.Flag("seperator", "The separator used to split rangeVar into a []string. Defaults to `_`.").
Short('s').
Default("_").
StringVar(&e.Separator)
@@ -110,22 +110,22 @@ func main() {
// Cluster node-pool operations
k8sEKSNodeGroup := k8sEKS.Command("nodegroup", "manage EKS clusters nodegroups").
Action(e.EKSDeploymentParse)
k8sEKSNodeGroup.Command("create", "eks nodegroup create -a credentials -f FileOrFolder").
k8sEKSNodeGroup.Command("create", "eks nodegroup create -a credentials -f FileOrFolder -v REGION:europe-west1-b -v CLUSTER_NAME:test -r SUBNET_IDS: subnetId1_subnetId2_subnetId3").
Action(e.NodeGroupCreate)
k8sEKSNodeGroup.Command("delete", "eks nodegroup delete -a credentials -f FileOrFolder").
k8sEKSNodeGroup.Command("delete", "eks nodegroup delete -a credentials -f FileOrFolder -v REGION:europe-west1-b -v CLUSTER_NAME:test -r SUBNET_IDS: subnetId1_subnetId2_subnetId3").
Action(e.NodeGroupDelete)
k8sEKSNodeGroup.Command("check-running", "eks nodegroup check-running -a credentails -f FileOrFolder").
k8sEKSNodeGroup.Command("check-running", "eks nodegroup check-running -a credentails -f FileOrFolder -v REGION:europe-west1-b -v CLUSTER_NAME:test -r SUBNET_IDS: subnetId1_subnetId2_subnetId3").
Action(e.AllNodeGroupsRunning)
k8sEKSNodeGroup.Command("check-deleted", "eks nodegroup check-deleted -a credentials -f FileOrFolder").
k8sEKSNodeGroup.Command("check-deleted", "eks nodegroup check-deleted -a credentials -f FileOrFolder -v REGION:europe-west1-b -v CLUSTER_NAME:test -r SUBNET_IDS: subnetId1_subnetId2_subnetId3").
Action(e.AllNodeGroupsDeleted)

// K8s resource operations.
-k8sEKSResource := k8sEKS.Command("resource", `Apply and delete different k8s resources - deployments, services, config maps etc.Required variables -v PROJECT_ID, -v ZONE: -west1-b -v CLUSTER_NAME`).
+k8sEKSResource := k8sEKS.Command("resource", `Apply and delete different k8s resources - deployments, services, config maps, etc. Required variables: -v REGION:eu-west-1 -v CLUSTER_NAME:test`).
Action(e.NewK8sProvider).
Action(e.K8SDeploymentsParse)
k8sEKSResource.Command("apply", "eks resource apply -a credentials -f manifestsFileOrFolder -v PROJECT_ID:test -v ZONE:europe-west1-b -v CLUSTER_NAME:test -v hashStable:COMMIT1 -v hashTesting:COMMIT2").
k8sEKSResource.Command("apply", "eks resource apply -a credentials -f manifestsFileOrFolder -v hashStable:COMMIT1 -v hashTesting:COMMIT2").
Action(e.ResourceApply)
k8sEKSResource.Command("delete", "eks resource delete -a credentials -f manifestsFileOrFolder -v PROJECT_ID:test -v ZONE:europe-west1-b -v CLUSTER_NAME:test -v hashStable:COMMIT1 -v hashTesting:COMMIT2").
k8sEKSResource.Command("delete", "eks resource delete -a credentials -f manifestsFileOrFolder -v hashStable:COMMIT1 -v hashTesting:COMMIT2").
Action(e.ResourceDelete)

if _, err := app.Parse(os.Args[1:]); err != nil {
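
The diff wires the new rangeVars and separator flags into the EKS commands, but the `get_range` template function itself is not shown in this hunk. Below is a minimal, self-contained sketch, assuming get_range simply splits the stringified value on the separator, which is what the help text implies; the variable names and the manifest fragment are illustrative, not copied from the infra code:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Assumed inputs: -r SUBNET_IDS:subnetId1_subnetId2_subnetId3 plus the
	// default `_` from the separator flag.
	vars := map[string]string{
		"SUBNET_IDS": "subnetId1_subnetId2_subnetId3",
		"SEPARATOR":  "_",
	}

	// get_range reverses the stringification: it splits the joined value
	// back into a []string that templates can iterate over with range.
	funcs := template.FuncMap{
		"get_range": func(v, sep string) []string {
			return strings.Split(v, sep)
		},
	}

	// A fragment in the same shape as the subnet-ID range in the EKS
	// nodegroups manifest further down in this diff.
	const manifest = `subnetids:
{{ range $subnetId := get_range .SUBNET_IDS .SEPARATOR }}  - {{ $subnetId }}
{{ end }}`

	tmpl := template.Must(template.New("nodegroups").Funcs(funcs).Parse(manifest))
	// Prints the three subnet IDs as a YAML list.
	if err := tmpl.Execute(os.Stdout, vars); err != nil {
		panic(err)
	}
}
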
96 changes: 73 additions & 23 deletions prombench/Makefile
@@ -11,37 +11,87 @@ deploy: nodepool_create resource_apply
clean: resource_delete nodepool_delete

nodepool_create:
-	$(INFRA_CMD) gke nodepool create -a ${AUTH_FILE} \
-		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
-		-f manifests/environment/gke/nodepools.yaml
+ifeq (${PROVIDER}, gke)
+	${INFRA_CMD} ${PROVIDER} nodepool create -a ${AUTH_FILE} \
+		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif
+ifeq (${PROVIDER}, eks)
+	${INFRA_CMD} ${PROVIDER} nodegroup create -a ${CREDENTIALS} \
+		-v REGION:${REGION} -v NODE_ROLE:${NODE_ROLE} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-v ROLE_ARN:${ROLE_ARN} -r SUBNET_IDS:${SUBNET_IDS} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif

resource_apply:
-	$(INFRA_CMD) gke resource apply -a ${AUTH_FILE} \
-		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} \
-		-v PR_NUMBER:${PR_NUMBER} -v RELEASE:${RELEASE} -v DOMAIN_NAME:${DOMAIN_NAME} \
-		-v GITHUB_ORG:${GITHUB_ORG} -v GITHUB_REPO:${GITHUB_REPO} \
-		-f manifests/prombench/benchmark
+ifeq (${PROVIDER}, gke)
+	$(INFRA_CMD) ${PROVIDER} resource apply -a ${AUTH_FILE} \
+		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} \
+		-v PR_NUMBER:${PR_NUMBER} -v RELEASE:${RELEASE} -v DOMAIN_NAME:${DOMAIN_NAME} \
+		-v GITHUB_ORG:${GITHUB_ORG} -v GITHUB_REPO:${GITHUB_REPO} \
+		-f manifests/prombench/benchmark
+endif
+ifeq (${PROVIDER}, eks)
+	${INFRA_CMD} ${PROVIDER} resource apply -a ${CREDENTIALS} \
+		-v REGION:${REGION} -v NODE_ROLE:${NODE_ROLE} -v CLUSTER_NAME:${CLUSTER_NAME} \
+		-v PR_NUMBER:${PR_NUMBER} -v RELEASE:${RELEASE} -v DOMAIN_NAME:${DOMAIN_NAME} \
+		-v GITHUB_ORG:${GITHUB_ORG} -v GITHUB_REPO:${GITHUB_REPO} \
+		-f manifests/prombench/benchmark
+endif

# Required because namespace and cluster-role are not part of the created nodepools
resource_delete:
-	$(INFRA_CMD) gke resource delete -a ${AUTH_FILE} \
-		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
-		-f manifests/prombench/benchmark/1c_cluster-role-binding.yaml \
-		-f manifests/prombench/benchmark/1a_namespace.yaml
+ifeq (${PROVIDER}, gke)
+	$(INFRA_CMD) ${PROVIDER} resource delete -a ${AUTH_FILE} \
+		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-f manifests/prombench/benchmark/1c_cluster-role-binding.yaml \
+		-f manifests/prombench/benchmark/1a_namespace.yaml
+endif
+ifeq (${PROVIDER}, eks)
+	$(INFRA_CMD) ${PROVIDER} resource delete -a ${CREDENTIALS} \
+		-v REGION:${REGION} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-f manifests/prombench/benchmark/1c_cluster-role-binding.yaml \
+		-f manifests/prombench/benchmark/1a_namespace.yaml
+endif

nodepool_delete:
-	$(INFRA_CMD) gke nodepool delete -a ${AUTH_FILE} \
-		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
-		-f manifests/environment/gke/nodepools.yaml
+ifeq (${PROVIDER}, gke)
+	$(INFRA_CMD) ${PROVIDER} nodepool delete -a ${AUTH_FILE} \
+		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif
+ifeq (${PROVIDER}, eks)
+	${INFRA_CMD} ${PROVIDER} nodegroup delete -a ${CREDENTIALS} \
+		-v REGION:${REGION} -v NODE_ROLE:${NODE_ROLE} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-v ROLE_ARN:${ROLE_ARN} -r SUBNET_IDS:${SUBNET_IDS} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif


all_nodepools_running:
-	$(INFRA_CMD) gke nodepool check-running -a ${AUTH_FILE} \
-		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} \
-		-v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
-		-f manifests/environment/gke/nodepools.yaml
+ifeq (${PROVIDER}, gke)
+	$(INFRA_CMD) ${PROVIDER} nodepool check-running -a ${AUTH_FILE} \
+		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} \
+		-v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif
+ifeq (${PROVIDER}, eks)
+	${INFRA_CMD} ${PROVIDER} nodegroup check-running -a ${CREDENTIALS} \
+		-v REGION:${REGION} -v NODE_ROLE:${NODE_ROLE} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-v ROLE_ARN:${ROLE_ARN} -r SUBNET_IDS:${SUBNET_IDS} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif

all_nodepools_deleted:
-	$(INFRA_CMD) gke nodepool check-deleted -a ${AUTH_FILE} \
-		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} \
-		-v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
-		-f -f manifests/environment/gke/nodepools.yaml
+ifeq (${PROVIDER}, gke)
+	$(INFRA_CMD) ${PROVIDER} nodepool check-deleted -a ${AUTH_FILE} \
+		-v ZONE:${ZONE} -v PROJECT_ID:${PROJECT_ID} \
+		-v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif
+ifeq (${PROVIDER}, eks)
+	${INFRA_CMD} ${PROVIDER} nodegroup check-deleted -a ${CREDENTIALS} \
+		-v REGION:${REGION} -v NODE_ROLE:${NODE_ROLE} -v CLUSTER_NAME:${CLUSTER_NAME} -v PR_NUMBER:${PR_NUMBER} \
+		-v ROLE_ARN:${ROLE_ARN} -r SUBNET_IDS:${SUBNET_IDS} \
+		-f manifests/prombench/nodes_${PROVIDER}.yaml
+endif
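
For illustration, with every target above now switching on ${PROVIDER}, an EKS run would be invoked roughly as `make nodepool_create PROVIDER=eks CLUSTER_NAME=test PR_NUMBER=1234 REGION=eu-west-1 NODE_ROLE=... ROLE_ARN=... SUBNET_IDS=subnet-a_subnet-b CREDENTIALS=...` (all values here are placeholders), with SUBNET_IDS passed as a single `_`-joined string that get_range splits back into a list inside the manifests; a GKE run keeps the previous AUTH_FILE, ZONE, and PROJECT_ID variables.
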
File renamed without changes.
@@ -4,12 +4,12 @@ cluster:
  name: {{ .CLUSTER_NAME }}
  initialclusterversion: 1.14
  nodepools:
    # GKE creates a label on every node in a pool:
    # cloud.google.com/gke-nodepool: <NODEPOOL_NAME>
    # This node-pool will be used for running monitoring components.
    - name: main-node
      initialnodecount: 1
      config:
        machinetype: n1-standard-4
        imagetype: COS
        disksizegb: 300
      labels:
        node-name: main-node
@@ -9,7 +9,7 @@ nodegroups:
        - {{ $subnetId }}
      {{ end }}
      instancetypes:
-       - m5d.2xlarge
+       - r5d.2xlarge # This machine type has a local SSD, which gives fast lookups to the Prometheus servers being benchmarked.
      scalingconfig:
        desiredsize: 2
        maxsize: 2
@@ -13,6 +13,7 @@ cluster:
      localssdcount: 1 # SSD is used to give fast lookups to the Prometheus servers being benchmarked.
      labels:
        isolation: prometheus
+       node-name: prometheus-{{ .PR_NUMBER }}
  - name: nodes-{{ .PR_NUMBER }}
    initialnodecount: 1
    config:
@@ -21,4 +22,5 @@ cluster:
      disksizegb: 100
      localssdcount: 0 # Use a standard HDD; SSD is not needed for the fake webservers.
      labels:
        isolation: none
+       node-name: nodes-{{ .PR_NUMBER }}
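
The node-name labels added to both pools appear to give the benchmark manifests a provider-neutral way to pin workloads to a specific pool (for example via a nodeSelector on node-name), instead of relying on the GKE-only cloud.google.com/gke-nodepool label mentioned in the cluster manifest's comments above.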
