Skip to content
This repository has been archived by the owner on Jan 11, 2023. It is now read-only.

E2E Addons #2294

Merged
merged 29 commits into from
Feb 16, 2018
Merged
Show file tree
Hide file tree
Changes from 23 commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
82b5646
add features on and features off
Feb 13, 2018
e17abdd
Merge branch 'master' of https://github.com/CecileRobertMichon/acs-en…
Feb 13, 2018
b2e283a
fix off model
Feb 14, 2018
ddabe64
Merge branch 'master' of https://github.com/Azure/acs-engine into mor…
Feb 14, 2018
c567806
add separate tests for each feature disabled
Feb 14, 2018
94ef10a
Merge branch 'master' of https://github.com/Azure/acs-engine into mor…
Feb 14, 2018
913671d
move features off dir
Feb 14, 2018
c60e25e
rbac bool
Feb 14, 2018
6a17e0c
added clear containers
Feb 15, 2018
e93d361
added addons enabled test
Feb 15, 2018
e2e908b
fix typo in apimodel
Feb 15, 2018
974efa0
remove aci-connector
Feb 15, 2018
51dc9fd
wip add mem/cpu limits/requests checks
Feb 15, 2018
a28fbbf
Merge branch 'master' of https://github.com/Azure/acs-engine into add…
Feb 15, 2018
49c0ae2
add resources to container spec
Feb 15, 2018
3c059ad
fix resources type
Feb 15, 2018
72e05b8
add checks to tiller
Feb 15, 2018
098efb0
remove extra err var
Feb 16, 2018
e72a004
add check for dashboard and aci connector
Feb 16, 2018
5ae6b2e
update default definition
Feb 16, 2018
5a5bbd3
fmt
Feb 16, 2018
c47c3c5
fix typo
Feb 16, 2018
2d216ad
Merge branch 'master' of https://github.com/Azure/acs-engine into add…
Feb 16, 2018
e71f4dc
Refactor resources validation
Feb 16, 2018
4ce083c
fix error string
Feb 16, 2018
328a065
fix linter
Feb 16, 2018
47c7188
remove pointer
Feb 16, 2018
db5ea23
fix ineffassign
Feb 16, 2018
6b1626c
small fixes
Feb 16, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 12 additions & 5 deletions examples/e2e-tests/kubernetes/release/default/definition.json
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@
{
"name": "tiller",
"cpuRequests": "1",
"memoryRequests": "1024Mi",
"memoryRequests": "1Gi",
"cpuLimits": "1",
"memoryLimits": "1024Mi"
"memoryLimits": "1Gi"
}
]
},
Expand Down Expand Up @@ -66,8 +66,13 @@
"count": 3,
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"storageProfile" : "ManagedDisks",
"diskSizesGB": [128, 128, 128, 128],
"storageProfile": "ManagedDisks",
"diskSizesGB": [
128,
128,
128,
128
],
"availabilityProfile": "AvailabilitySet",
"vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME"
},
Expand All @@ -77,7 +82,9 @@
"vmSize": "Standard_D2_v2",
"OSDiskSizeGB": 200,
"storageProfile": "StorageAccount",
"diskSizesGB": [128],
"diskSizesGB": [
128
],
"availabilityProfile": "AvailabilitySet",
"vnetSubnetId": "/subscriptions/SUB_ID/resourceGroups/RG_NAME/providers/Microsoft.Network/virtualNetworks/VNET_NAME/subnets/SUBNET_NAME"
}
Expand Down
36 changes: 3 additions & 33 deletions test/e2e/engine/template.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,46 +158,16 @@ func (e *Engine) HasWindowsAgents() bool {
return false
}

// HasDashboard will return true if kubernetes-dashboard addon is enabled
func (e *Engine) HasDashboard() bool {
// HasAddon will return true if an addon is enabled
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💥

func (e *Engine) HasAddon(name string) (bool, api.KubernetesAddon) {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "kubernetes-dashboard" {
return *addon.Enabled
}
}
return false
}

// HasTiller will return true if tiller addon is enabled
func (e *Engine) HasTiller() (bool, api.KubernetesAddon) {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "tiller" {
if addon.Name == name {
return *addon.Enabled, addon
}
}
return false, api.KubernetesAddon{}
}

// HasACIConnector will return true if aci-connector addon is enabled
func (e *Engine) HasACIConnector() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "aci-connector" {
return *addon.Enabled
}
}
return false
}

// HasRescheduler will return true if rescheduler addon is enabled
func (e *Engine) HasRescheduler() bool {
for _, addon := range e.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.Addons {
if addon.Name == "rescheduler" {
return *addon.Enabled
}
}
return false
}

// OrchestratorVersion1Dot8AndUp will return true if the orchestrator version is 1.8 and up
func (e *Engine) OrchestratorVersion1Dot8AndUp() bool {
return e.ClusterDefinition.ContainerService.Properties.OrchestratorProfile.OrchestratorVersion >= "1.8"
Expand Down
94 changes: 80 additions & 14 deletions test/e2e/kubernetes/kubernetes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,27 +125,41 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
})

It("should have tiller running", func() {
if hasTiller, tillerAddon := eng.HasTiller(); hasTiller {
if hasTiller, tillerAddon := eng.HasAddon("tiller"); hasTiller {
running, err := pod.WaitOnReady("tiller", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
if tillerAddon.Config != nil {
By("Ensuring that the correct max-history has been applied")
maxHistory := tillerAddon.Config["max-history"]
pods, err := pod.GetAllByPrefix("tiller-deploy", "kube-system")
Expect(err).NotTo(HaveOccurred())
// There is only one tiller pod and one container in that pod.
actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
Expect(err).NotTo(HaveOccurred())
Expect(actualTillerMaxHistory).To(Equal(maxHistory))
}
pods, err := pod.GetAllByPrefix("tiller-deploy", "kube-system")
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the correct max-history has been applied")
maxHistory := tillerAddon.Config["max-history"]
// There is only one tiller pod and one container in that pod.
actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
Expect(err).NotTo(HaveOccurred())
Expect(actualTillerMaxHistory).To(Equal(maxHistory))
By("Ensuring that the correct resources have been applied")
c := tillerAddon.Containers[0]
cpuRequests := c.CPURequests
cpuLimits := c.CPULimits
memoryRequests := c.MemoryRequests
memoryLimits := c.MemoryLimits
Expect(err).NotTo(HaveOccurred())
// There is only one tiller pod and one container in that pod.
actualTillerCPURequests := pods[0].Spec.Containers[0].GetCPURequests()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this (and the 3 blocks below) easily generalizable in a convenience func?

Expect(actualTillerCPURequests).To(Equal(cpuRequests))
actualTillerCPULimits := pods[0].Spec.Containers[0].GetCPULimits()
Expect(actualTillerCPULimits).To(Equal(cpuLimits))
actualTillerMemoryRequests := pods[0].Spec.Containers[0].GetMemoryRequests()
Expect(actualTillerMemoryRequests).To(Equal(memoryRequests))
actualTillerMemoryLimits := pods[0].Spec.Containers[0].GetMemoryLimits()
Expect(actualTillerMemoryLimits).To(Equal(memoryLimits))
} else {
Skip("tiller disabled for this cluster, will not test")
}
})

It("should be able to access the dashboard from each node", func() {
if eng.HasDashboard() {
if hasDashboard, dashboardAddon := eng.HasAddon("kubernetes-dashboard"); hasDashboard {
By("Ensuring that the kubernetes-dashboard pod is Running")

running, err := pod.WaitOnReady("kubernetes-dashboard", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expand Down Expand Up @@ -201,27 +215,79 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu
}
Expect(success).To(BeTrue())
}
By("Ensuring that the correct resources have been applied")
pods, err := pod.GetAllByPrefix("kubernetes-dashboard", "kube-system")
for i, c := range dashboardAddon.Containers {
cpuRequests := c.CPURequests
cpuLimits := c.CPULimits
memoryRequests := c.MemoryRequests
memoryLimits := c.MemoryLimits
Expect(err).NotTo(HaveOccurred())
actualDashboardCPURequests := pods[0].Spec.Containers[i].GetCPURequests()
Expect(actualDashboardCPURequests).To(Equal(cpuRequests))
actualDashboardCPULimits := pods[0].Spec.Containers[i].GetCPULimits()
Expect(actualDashboardCPULimits).To(Equal(cpuLimits))
actualDashboardMemoryRequests := pods[0].Spec.Containers[i].GetMemoryRequests()
Expect(actualDashboardMemoryRequests).To(Equal(memoryRequests))
actualDashboardMemoryLimits := pods[0].Spec.Containers[i].GetMemoryLimits()
Expect(actualDashboardMemoryLimits).To(Equal(memoryLimits))
}
}
} else {
Skip("kubernetes-dashboard disabled for this cluster, will not test")
}
})

It("should have aci-connector running", func() {
if eng.HasACIConnector() {
if hasACIConnector, ACIConnectorAddon := eng.HasAddon("aci-connector"); hasACIConnector {
running, err := pod.WaitOnReady("aci-connector", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensuring that the correct resources have been applied")
pods, err := pod.GetAllByPrefix("aci-connector", "kube-system")
for i, c := range ACIConnectorAddon.Containers {
cpuRequests := c.CPURequests
cpuLimits := c.CPULimits
memoryRequests := c.MemoryRequests
memoryLimits := c.MemoryLimits
Expect(err).NotTo(HaveOccurred())
actualACIConnectorCPURequests := pods[0].Spec.Containers[i].GetCPURequests()
Expect(actualACIConnectorCPURequests).To(Equal(cpuRequests))
actualACIConnectorCPULimits := pods[0].Spec.Containers[i].GetCPULimits()
Expect(actualACIConnectorCPULimits).To(Equal(cpuLimits))
actualACIConnectorMemoryRequests := pods[0].Spec.Containers[i].GetMemoryRequests()
Expect(actualACIConnectorMemoryRequests).To(Equal(memoryRequests))
actualACIConnectorMemoryLimits := pods[0].Spec.Containers[i].GetMemoryLimits()
Expect(actualACIConnectorMemoryLimits).To(Equal(memoryLimits))
}

} else {
Skip("aci-connector disabled for this cluster, will not test")
}
})

It("should have rescheduler running", func() {
if eng.HasRescheduler() {
if hasRescheduler, reschedulerAddon := eng.HasAddon("rescheduler"); hasRescheduler {
running, err := pod.WaitOnReady("rescheduler", "kube-system", 3, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensuring that the correct resources have been applied")
pods, err := pod.GetAllByPrefix("rescheduler", "kube-system")
for i, c := range reschedulerAddon.Containers {
cpuRequests := c.CPURequests
cpuLimits := c.CPULimits
memoryRequests := c.MemoryRequests
memoryLimits := c.MemoryLimits
Expect(err).NotTo(HaveOccurred())
actualReschedulerCPURequests := pods[0].Spec.Containers[i].GetCPURequests()
Expect(actualReschedulerCPURequests).To(Equal(cpuRequests))
actualReschedulerCPULimits := pods[0].Spec.Containers[i].GetCPULimits()
Expect(actualReschedulerCPULimits).To(Equal(cpuLimits))
actualReschedulerMemoryRequests := pods[0].Spec.Containers[i].GetMemoryRequests()
Expect(actualReschedulerMemoryRequests).To(Equal(memoryRequests))
actualReschedulerMemoryLimits := pods[0].Spec.Containers[i].GetMemoryLimits()
Expect(actualReschedulerMemoryLimits).To(Equal(memoryLimits))
}
} else {
Skip("rescheduler disabled for this cluster, will not test")
}
Expand Down
45 changes: 42 additions & 3 deletions test/e2e/kubernetes/pod/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,10 @@ type Spec struct {

// Container holds information like image and ports
type Container struct {
Image string `json:"image"`
Ports []Port `json:"ports"`
Env []EnvVar `json:"env"`
Image string `json:"image"`
Ports []Port `json:"ports"`
Env []EnvVar `json:"env"`
Resources Resources `json:"resources"`
}

// EnvVar holds environment variables
Expand All @@ -62,6 +63,24 @@ type Port struct {
HostPort int `json:"hostPort"`
}

// Resources represents the "resources" section of a container spec as
// returned by the Kubernetes API: the requested and maximum (limit)
// compute resources for the container.
type Resources struct {
	Requests Requests `json:"requests"`
	Limits   Limits   `json:"limits"`
}

// Requests represents container resource requests ("resources.requests").
// Values are Kubernetes quantity strings as serialized by the API
// (e.g. "1" for CPU, "1Gi" for memory).
type Requests struct {
	CPU    string `json:"cpu"`
	Memory string `json:"memory"`
}

// Limits represents container resource limits ("resources.limits").
// Values are Kubernetes quantity strings as serialized by the API
// (e.g. "1" for CPU, "1Gi" for memory).
type Limits struct {
	CPU    string `json:"cpu"`
	Memory string `json:"memory"`
}

// Status holds information like hostIP and phase
type Status struct {
HostIP string `json:"hostIP"`
Expand Down Expand Up @@ -425,3 +444,23 @@ func (c *Container) GetEnvironmentVariable(varName string) (string, error) {
}
return "", errors.New("environment variable not found")
}

// GetCPURequests returns the CPU requests value (resources.requests.cpu)
// from a container within a pod. Returns "" when no request is set.
// NOTE(review): Go style would normally drop the Get prefix, but the
// name is kept for existing callers.
func (c *Container) GetCPURequests() string {
	return c.Resources.Requests.CPU
}

// GetCPULimits returns the CPU limits value (resources.limits.cpu)
// from a container within a pod. Returns "" when no limit is set.
func (c *Container) GetCPULimits() string {
	return c.Resources.Limits.CPU
}

// GetMemoryRequests returns the memory requests value
// (resources.requests.memory) from a container within a pod.
// Returns "" when no request is set.
func (c *Container) GetMemoryRequests() string {
	return c.Resources.Requests.Memory
}

// GetMemoryLimits returns the memory limits value
// (resources.limits.memory) from a container within a pod.
// Returns "" when no limit is set.
func (c *Container) GetMemoryLimits() string {
	return c.Resources.Limits.Memory
}