Skip to content

Commit

Permalink
Merge pull request hashicorp#280 from pmcatominey/integration
Browse files Browse the repository at this point in the history
test: add count and canary integration tests
  • Loading branch information
jrasell authored Apr 2, 2019
2 parents 33581e0 + aacc9ed commit 9362bac
Show file tree
Hide file tree
Showing 8 changed files with 271 additions and 15 deletions.
22 changes: 22 additions & 0 deletions test/acctest/acctest.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,28 @@ func CheckDeploymentStatus(status string) TestStateFunc {
}
}

// CheckTaskGroupCount returns a TestStateFunc asserting that the task group
// named groupName in the test job currently has the given count.
func CheckTaskGroupCount(groupName string, count int) TestStateFunc {
	return func(s *TestState) error {
		job, _, err := s.Nomad.Jobs().Info(s.JobName, nil)
		if err != nil {
			return err
		}

		for _, group := range job.TaskGroups {
			if *group.Name != groupName {
				continue
			}
			if *group.Count != count {
				return fmt.Errorf("task group %s count is %d, expected %d", groupName, *group.Count, count)
			}
			return nil
		}

		return fmt.Errorf("unable to find task group %s", groupName)
	}
}

// newNomadClient creates a Nomad API client configurable by NOMAD_
// env variables or returns an error if Nomad is in an unhealthy state
func newNomadClient() (*nomad.Client, error) {
Expand Down
20 changes: 13 additions & 7 deletions test/acctest/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,24 +12,30 @@ import (
type DeployTestStepRunner struct {
FixtureName string

Canary int
ForceBatch bool
ForceCounts bool
Vars map[string]string

Canary int
ForceBatch bool
ForceCount bool
}

// Run renders the job fixture and triggers a deployment
func (c DeployTestStepRunner) Run(s *TestState) error {
vars := map[string]string{
"job_name": s.JobName,
if c.Vars == nil {
c.Vars = map[string]string{}
}
job, err := template.RenderJob("fixtures/"+c.FixtureName, []string{}, "", &vars)
c.Vars["job_name"] = s.JobName

job, err := template.RenderJob("fixtures/"+c.FixtureName, []string{}, "", &c.Vars)
if err != nil {
return fmt.Errorf("error rendering template: %s", err)
}

cfg := &levant.DeployConfig{
Deploy: &structs.DeployConfig{
Canary: c.Canary,
Canary: c.Canary,
ForceBatch: c.ForceBatch,
ForceCount: c.ForceCount,
},
Client: &structs.ClientConfig{},
Template: &structs.TemplateConfig{
Expand Down
97 changes: 95 additions & 2 deletions test/deploy_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,12 @@ func TestDeploy_basic(t *testing.T) {
})
}

func TestDeploy_failure(t *testing.T) {
func TestDeploy_driverError(t *testing.T) {
acctest.Test(t, acctest.TestCase{
Steps: []acctest.TestStep{
{
Runner: acctest.DeployTestStepRunner{
FixtureName: "deploy_failure.nomad",
FixtureName: "deploy_driver_error.nomad",
},
ExpectErr: true,
CheckErr: func(err error) bool {
Expand All @@ -41,3 +41,96 @@ func TestDeploy_failure(t *testing.T) {
CleanupFunc: acctest.CleanupPurgeJob,
})
}

func TestDeploy_allocError(t *testing.T) {
	steps := []acctest.TestStep{
		{
			Runner: acctest.DeployTestStepRunner{
				FixtureName: "deploy_alloc_error.nomad",
			},
			ExpectErr: true,
			// Accept any error: levant does not bubble the alloc error up,
			// so there is nothing more specific to match on here.
			CheckErr: func(err error) bool {
				return true
			},
		},
		{
			// Confirms a job was actually registered, i.e. the previous
			// step's error was not a parse failure or similar.
			Check: acctest.CheckDeploymentStatus("failed"),
		},
	}

	acctest.Test(t, acctest.TestCase{
		Steps:       steps,
		CleanupFunc: acctest.CleanupPurgeJob,
	})
}

func TestDeploy_count(t *testing.T) {
	// deployStep builds a deployment step for the count fixture with the
	// given templated group count and ForceCount setting.
	deployStep := func(count string, force bool) acctest.TestStep {
		return acctest.TestStep{
			Runner: acctest.DeployTestStepRunner{
				FixtureName: "deploy_count.nomad",
				Vars:        map[string]string{"count": count},
				ForceCount:  force,
			},
			Check: acctest.CheckDeploymentStatus("successful"),
		}
	}

	acctest.Test(t, acctest.TestCase{
		Steps: []acctest.TestStep{
			deployStep("3", false),
			deployStep("1", false),
			{
				// Without ForceCount, levant is expected to read counts
				// from the API, so the group should still be at 3.
				Check: acctest.CheckTaskGroupCount("test", 3),
			},
			deployStep("1", true),
			{
				Check: acctest.CheckTaskGroupCount("test", 1),
			},
		},
		CleanupFunc: acctest.CleanupPurgeJob,
	})
}

func TestDeploy_canary(t *testing.T) {
	// canaryStep builds a canary deployment step for the canary fixture,
	// varying only the templated env_version value between deployments.
	canaryStep := func(version string) acctest.TestStep {
		return acctest.TestStep{
			Runner: acctest.DeployTestStepRunner{
				FixtureName: "deploy_canary.nomad",
				Canary:      10,
				Vars:        map[string]string{"env_version": version},
			},
			Check: acctest.CheckDeploymentStatus("successful"),
		}
	}

	acctest.Test(t, acctest.TestCase{
		Steps: []acctest.TestStep{
			canaryStep("1"),
			canaryStep("2"),
		},
		CleanupFunc: acctest.CleanupPurgeJob,
	})
}
39 changes: 39 additions & 0 deletions test/fixtures/deploy_alloc_error.nomad
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# test alloc error with a command failure

# Job name is templated; [[.job_name]] is rendered by the test harness.
job "[[.job_name]]" {
datacenters = ["dc1"]
type = "service"
# Short deadlines so the failing deployment resolves quickly in tests.
update {
max_parallel = 1
min_healthy_time = "10s"
healthy_deadline = "15s"
progress_deadline = "20s"
}

group "test" {
count = 1
# mode = "fail" with a single attempt stops retrying so the
# allocation failure surfaces instead of looping.
restart {
attempts = 1
interval = "10s"
delay = "5s"
mode = "fail"
}
ephemeral_disk {
size = 300
}
task "alpine" {
driver = "docker"
config {
image = "alpine"
# Nonexistent command triggers the alloc error under test.
command = "badcommandname"
}
resources {
cpu = 100
memory = 128
network {
mbits = 10
}
}
}
}
}
12 changes: 9 additions & 3 deletions test/fixtures/deploy_basic.nomad
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
# tests a healthy deployment

job "[[.job_name]]" {
datacenters = ["dc1"]
type = "service"
Expand All @@ -8,7 +10,7 @@ job "[[.job_name]]" {
auto_revert = true
}

group "cache" {
group "test" {
count = 1
restart {
attempts = 10
Expand All @@ -19,10 +21,14 @@ job "[[.job_name]]" {
ephemeral_disk {
size = 300
}
task "redis" {
task "alpine" {
driver = "docker"
config {
image = "redis:alpine"
image = "alpine"
command = "tail"
args = [
"-f", "/dev/null"
]
}
resources {
cpu = 100
Expand Down
46 changes: 46 additions & 0 deletions test/fixtures/deploy_canary.nomad
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# tests a canary deployment

# Job name is templated; [[.job_name]] is rendered by the test harness.
job "[[.job_name]]" {
datacenters = ["dc1"]
type = "service"
update {
max_parallel = 1
min_healthy_time = "2s"
healthy_deadline = "1m"
auto_revert = true
# canary = 1 makes each new version deploy via a canary allocation.
canary = 1
}

group "test" {
count = 1
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
ephemeral_disk {
size = 300
}
task "alpine" {
driver = "docker"
config {
image = "alpine"
# Keep the container alive indefinitely so the alloc stays healthy.
command = "tail"
args = [
"-f", "/dev/null"
]
}
# Templated env var; changing [[ .env_version ]] between test steps
# produces a new job version to canary.
env {
version = "[[ .env_version ]]"
}
resources {
cpu = 100
memory = 128
network {
mbits = 10
}
}
}
}
}
42 changes: 42 additions & 0 deletions test/fixtures/deploy_count.nomad
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# tests a healthy deployment with a count

# Job name is templated; [[.job_name]] is rendered by the test harness.
job "[[.job_name]]" {
datacenters = ["dc1"]
type = "service"
update {
max_parallel = 1
min_healthy_time = "10s"
healthy_deadline = "1m"
auto_revert = true
}

group "test" {
# Group count is templated so tests can vary it between deployments.
count = [[.count]]
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
ephemeral_disk {
size = 300
}
task "alpine" {
driver = "docker"
config {
image = "alpine"
# Keep the container alive indefinitely so the alloc stays healthy.
command = "tail"
args = [
"-f", "/dev/null"
]
}
resources {
cpu = 100
memory = 128
network {
mbits = 10
}
}
}
}
}
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
# tests driver error with an invalid docker image tag

job "[[.job_name]]" {
datacenters = ["dc1"]
type = "service"
Expand All @@ -8,7 +10,7 @@ job "[[.job_name]]" {
progress_deadline = "20s"
}

group "cache" {
group "test" {
count = 1
restart {
attempts = 1
Expand All @@ -19,10 +21,10 @@ job "[[.job_name]]" {
ephemeral_disk {
size = 300
}
task "redis" {
task "alpine" {
driver = "docker"
config {
image = "redis:badimagetag"
image = "alpine:badimagetag"
}
resources {
cpu = 100
Expand Down

0 comments on commit 9362bac

Please sign in to comment.