From efc80ecd3ecae9345437f8ba611dbefc86e03be4 Mon Sep 17 00:00:00 2001 From: James Rasell Date: Fri, 2 Feb 2018 16:24:21 +0000 Subject: [PATCH] Implement config struct for Levant to track config during run. This commit adds a config struct to Levant which is used during every Levant invocation. This allows for easier tracking of info and also easier future extensibility. Closes #95 --- command/deploy.go | 49 +++++------- command/meta.go | 2 +- levant/auto_revert.go | 12 +-- levant/deploy.go | 151 ++++++++++++++++++++++------------- levant/failure_inspector.go | 10 +-- levant/job_status_checker.go | 15 ++-- levant/structs/config.go | 33 ++++++++ 7 files changed, 166 insertions(+), 106 deletions(-) create mode 100644 levant/structs/config.go diff --git a/command/deploy.go b/command/deploy.go index c4e64f42..571c0eb7 100644 --- a/command/deploy.go +++ b/command/deploy.go @@ -8,6 +8,7 @@ import ( "github.com/jrasell/levant/helper" "github.com/jrasell/levant/levant" + "github.com/jrasell/levant/levant/structs" "github.com/jrasell/levant/logging" ) @@ -40,6 +41,10 @@ General Options: The time in seconds, after which Levant will auto-promote a canary job if all canaries within the deployment are healthy. + -force-count + Use the taskgroup count from the Nomad jobfile instead of the count that + is currently set in a running job. + -log-level= Specify the verbosity level of Levant's logs. Valid values include DEBUG, INFO, and WARN, in decreasing order of verbosity. The default is INFO. @@ -47,10 +52,6 @@ General Options: -var-file= Used in conjunction with the -job-file will deploy a templated job to your Nomad cluster. [default: levant.(yaml|yml|tf)] - - -force-count - Use the taskgroup count from the Nomad jobfile instead of the count that - is currently set in a running job. ` return strings.TrimSpace(helpText) } @@ -63,20 +64,17 @@ func (c *DeployCommand) Synopsis() string { // Run triggers a run of the Levant template and deploy functions. 
func (c *DeployCommand) Run(args []string) int { - var variables, addr, log, templateFile string var err error - var job *nomad.Job - var canary int - var forceCount bool + config := &structs.Config{} flags := c.Meta.FlagSet("deploy", FlagSetVars) flags.Usage = func() { c.UI.Output(c.Help()) } - flags.StringVar(&addr, "address", "", "") - flags.IntVar(&canary, "canary-auto-promote", 0, "") - flags.StringVar(&log, "log-level", "INFO", "") - flags.StringVar(&variables, "var-file", "", "") - flags.BoolVar(&forceCount, "force-count", false, "") + flags.StringVar(&config.Addr, "address", "", "") + flags.IntVar(&config.Canary, "canary-auto-promote", 0, "") + flags.BoolVar(&config.ForceCount, "force-count", false, "") + flags.StringVar(&config.LogLevel, "log-level", "INFO", "") + flags.StringVar(&config.VaiableFile, "var-file", "", "") if err = flags.Parse(args); err != nil { return 1 @@ -84,12 +82,12 @@ func (c *DeployCommand) Run(args []string) int { args = flags.Args() - logging.SetLevel(log) + logging.SetLevel(config.LogLevel) if len(args) == 1 { - templateFile = args[0] + config.TemplateFile = args[0] } else if len(args) == 0 { - if templateFile = helper.GetDefaultTmplFile(); templateFile == "" { + if config.TemplateFile = helper.GetDefaultTmplFile(); config.TemplateFile == "" { c.UI.Error(c.Help()) c.UI.Error("\nERROR: Template arg missing and no default template found") return 1 @@ -99,35 +97,26 @@ func (c *DeployCommand) Run(args []string) int { return 1 } - job, err = levant.RenderJob(templateFile, variables, &c.Meta.flagVars) + config.Job, err = levant.RenderJob(config.TemplateFile, config.VaiableFile, &c.Meta.flagVars) if err != nil { c.UI.Error(fmt.Sprintf("[ERROR] levant/command: %v", err)) return 1 } - if canary > 0 { - if err = c.checkCanaryAutoPromote(job, canary); err != nil { + if config.Canary > 0 { + if err = c.checkCanaryAutoPromote(config.Job, config.Canary); err != nil { c.UI.Error(fmt.Sprintf("[ERROR] levant/command: %v", err)) return 1 } - 
c.UI.Info(fmt.Sprintf("[INFO] levant/command: running canary-auto-update of %vs", canary)) + c.UI.Info(fmt.Sprintf("[INFO] levant/command: running canary-auto-update of %vs", config.Canary)) } - client, err := levant.NewNomadClient(addr) - if err != nil { - c.UI.Error(fmt.Sprintf("[ERROR] levant/command: %v", err)) - return 1 - } - - success := client.Deploy(job, canary, forceCount) + success := levant.TriggerDeployment(config) if !success { - c.UI.Error(fmt.Sprintf("[ERROR] levant/command: deployment of job %s failed", *job.Name)) return 1 } - c.UI.Info(fmt.Sprintf("[INFO] levant/command: deployment of job %s successful", *job.Name)) - return 0 } diff --git a/command/meta.go b/command/meta.go index f67050e8..0133aa83 100644 --- a/command/meta.go +++ b/command/meta.go @@ -21,7 +21,7 @@ const ( ) // Meta contains the meta-options and functionality that nearly every -// Packer command inherits. +// Levant command inherits. type Meta struct { UI cli.Ui diff --git a/levant/auto_revert.go b/levant/auto_revert.go index 4f11ac47..a9a85e45 100644 --- a/levant/auto_revert.go +++ b/levant/auto_revert.go @@ -5,28 +5,28 @@ import ( "github.com/jrasell/levant/logging" ) -func (c *nomadClient) autoRevert(jobID *string) { +func (l *levantDeployment) autoRevert(jobID *string) { - dep, _, err := c.nomad.Jobs().LatestDeployment(*jobID, nil) + dep, _, err := l.nomad.Jobs().LatestDeployment(*jobID, nil) if err != nil { logging.Error("levant/auto_revert: unable to query latest deployment of job %s", *jobID) return } logging.Info("levant/auto_revert: beginning deployment watcher for job %s", *jobID) - success := c.deploymentWatcher(dep.ID, 0) + success := l.deploymentWatcher(dep.ID) if success { logging.Info("levant/auto_revert: auto-revert of job %s was successful", *jobID) } else { logging.Error("levant/auto_revert: auto-revert of job %s failed; POTENTIAL OUTAGE SITUATION", *jobID) - c.checkFailedDeployment(&dep.ID) + l.checkFailedDeployment(&dep.ID) } } // checkAutoRevert inspects a 
Nomad deployment to determine if any TashGroups // have been auto-reverted. -func (c *nomadClient) checkAutoRevert(dep *nomad.Deployment) { +func (l *levantDeployment) checkAutoRevert(dep *nomad.Deployment) { var revert bool @@ -44,7 +44,7 @@ func (c *nomadClient) checkAutoRevert(dep *nomad.Deployment) { dep.JobID) // Run the levant autoRevert function. - c.autoRevert(&dep.JobID) + l.autoRevert(&dep.JobID) } else { logging.Info("levant/auto_revert: job %v is not in auto-revert; POTENTIAL OUTAGE SITUATION", dep.JobID) } diff --git a/levant/deploy.go b/levant/deploy.go index 55a51c57..b7d92bef 100644 --- a/levant/deploy.go +++ b/levant/deploy.go @@ -7,22 +7,19 @@ import ( nomad "github.com/hashicorp/nomad/api" nomadStructs "github.com/hashicorp/nomad/nomad/structs" + "github.com/jrasell/levant/levant/structs" "github.com/jrasell/levant/logging" ) -type nomadClient struct { - nomad *nomad.Client +// levantDeployment holds all deployment related objects for this Levant +// deployment invocation. +type levantDeployment struct { + nomad *nomad.Client + config *structs.Config } -// NomadClient is an interface to the Nomad API and deployment functions. -type NomadClient interface { - // Deploy triggers a register of the job resulting in a Nomad deployment which - // is monitored to determine the eventual state. - Deploy(*nomad.Job, int, bool) bool -} - -// NewNomadClient is used to create a new client to interact with Nomad. -func NewNomadClient(addr string) (NomadClient, error) { +// newNomadClient is used to create a new client to interact with Nomad. +func newNomadClient(addr string) (*nomad.Client, error) { config := nomad.DefaultConfig() if addr != "" { @@ -34,108 +31,148 @@ func NewNomadClient(addr string) (NomadClient, error) { return nil, err } - return &nomadClient{nomad: c}, nil + return c, nil +} + +// newLevantDeployment sets up the Levant deployment object and Nomad client +// to interact with the Nomad API. 
+func newLevantDeployment(config *structs.Config) (*levantDeployment, error) { + + var err error + + dep := &levantDeployment{} + dep.config = config + + dep.nomad, err = newNomadClient(config.Addr) + if err != nil { + return nil, err + } + + return dep, nil +} + +// TriggerDeployment provides the main entry point into a Levant deployment and +// is used to setup the clients before triggering the deployment process. +func TriggerDeployment(config *structs.Config) bool { + + // Create our new deployment object. + levantDep, err := newLevantDeployment(config) + if err != nil { + logging.Error("levant/deploy: unable to setup Levant deployment: %v", err) + return false + } + + // Start the main deployment function. + success := levantDep.deploy() + if !success { + logging.Error("levant/deploy: deployment of job %v failed", *config.Job.Name) + return false + } + + logging.Info("levant/deploy: deployment of job %v successful", *config.Job.Name) + return true } -// Deploy triggers a register of the job resulting in a Nomad deployment which +// deploy triggers a register of the job resulting in a Nomad deployment which // is monitored to determine the eventual state. -func (c *nomadClient) Deploy(job *nomad.Job, autoPromote int, forceCount bool) (success bool) { +func (l *levantDeployment) deploy() (success bool) { // Validate the job to check it is syntactically correct. 
- if _, _, err := c.nomad.Jobs().Validate(job, nil); err != nil { + if _, _, err := l.nomad.Jobs().Validate(l.config.Job, nil); err != nil { logging.Error("levant/deploy: job validation failed: %v", err) return } // If job.Type isn't set we can't continue - if job.Type == nil { + if l.config.Job.Type == nil { logging.Error("levant/deploy: Nomad job `type` is not set; should be set to `%s`, `%s` or `%s`", nomadStructs.JobTypeBatch, nomadStructs.JobTypeSystem, nomadStructs.JobTypeService) return } - if !forceCount { - logging.Debug("levant/deploy: running dynamic job count updater for job %s", *job.Name) - if err := c.dynamicGroupCountUpdater(job); err != nil { + if !l.config.ForceCount { + logging.Debug("levant/deploy: running dynamic job count updater for job %s", *l.config.Job.Name) + if err := l.dynamicGroupCountUpdater(); err != nil { return } } - logging.Info("levant/deploy: triggering a deployment of job %s", *job.Name) + logging.Info("levant/deploy: triggering a deployment of job %s", *l.config.Job.Name) - eval, _, err := c.nomad.Jobs().Register(job, nil) + eval, _, err := l.nomad.Jobs().Register(l.config.Job, nil) if err != nil { - logging.Error("levant/deploy: unable to register job %s with Nomad: %v", *job.Name, err) + logging.Error("levant/deploy: unable to register job %s with Nomad: %v", *l.config.Job.Name, err) return } // Periodic and parameterized jobs do not return an evaluation and therefore // can't perform the evaluationInspector. - if !job.IsPeriodic() && !job.IsParameterized() { + if !l.config.Job.IsPeriodic() && !l.config.Job.IsParameterized() { // Trigger the evaluationInspector to identify any potential errors in the // Nomad evaluation run. As far as I can tell from testing; a single alloc // failure in an evaluation means no allocs will be placed so we exit here. 
- err = c.evaluationInspector(&eval.EvalID) + err = l.evaluationInspector(&eval.EvalID) if err != nil { logging.Error("levant/deploy: %v", err) return } } - switch *job.Type { + switch *l.config.Job.Type { case nomadStructs.JobTypeService: // If the service job doesn't have an update stanza, the job will not use // Nomad deployments. - if job.Update == nil { + if l.config.Job.Update == nil { logging.Info("levant/deploy: job %s is not configured with update stanza, consider adding to use deployments", - *job.Name) - return c.checkJobStatus(job.Name) + *l.config.Job.Name) + return l.checkJobStatus() } - logging.Info("levant/deploy: beginning deployment watcher for job %s", *job.Name) + logging.Info("levant/deploy: beginning deployment watcher for job %s", *l.config.Job.Name) // Get the deploymentID from the evaluationID so that we can watch the // deployment for end status. - depID, err := c.getDeploymentID(eval.EvalID) + depID, err := l.getDeploymentID(eval.EvalID) if err != nil { logging.Error("levant/deploy: unable to get info of evaluation %s: %v", eval.EvalID, err) return } // Get the success of the deployment. - success = c.deploymentWatcher(depID, autoPromote) + success = l.deploymentWatcher(depID) // If the deployment has not been successful; check whether the job is // configured to auto-revert so that this can be tracked. 
if !success { - dep, _, err := c.nomad.Deployments().Info(depID, nil) + dep, _, err := l.nomad.Deployments().Info(depID, nil) if err != nil { logging.Error("levant/deploy: unable to query deployment %s for auto-revert check: %v", dep.ID, err) break } - c.checkAutoRevert(dep) + l.checkAutoRevert(dep) } case nomadStructs.JobTypeBatch: - return c.checkJobStatus(job.Name) + return l.checkJobStatus() case nomadStructs.JobTypeSystem: - return c.checkJobStatus(job.Name) + return l.checkJobStatus() default: - logging.Debug("levant/deploy: Levant does not support advanced deployments of job type %s", *job.Type) + logging.Debug("levant/deploy: Levant does not support advanced deployments of job type %s", + *l.config.Job.Type) success = true } return } -func (c *nomadClient) evaluationInspector(evalID *string) error { +func (l *levantDeployment) evaluationInspector(evalID *string) error { for { - evalInfo, _, err := c.nomad.Evaluations().Info(*evalID, nil) + evalInfo, _, err := l.nomad.Evaluations().Info(*evalID, nil) if err != nil { return err } @@ -192,7 +229,7 @@ func (c *nomadClient) evaluationInspector(evalID *string) error { } } -func (c *nomadClient) deploymentWatcher(depID string, autoPromote int) (success bool) { +func (l *levantDeployment) deploymentWatcher(depID string) (success bool) { var canaryChan chan interface{} deploymentChan := make(chan interface{}) @@ -202,16 +239,16 @@ func (c *nomadClient) deploymentWatcher(depID string, autoPromote int) (success // Setup the canaryChan and launch the autoPromote go routine if autoPromote // has been enabled. 
- if autoPromote > 0 { + if l.config.Canary > 0 { canaryChan = make(chan interface{}) - go c.canaryAutoPromote(depID, autoPromote, canaryChan, deploymentChan) + go l.canaryAutoPromote(depID, l.config.Canary, canaryChan, deploymentChan) } q := &nomad.QueryOptions{WaitIndex: 1, AllowStale: true, WaitTime: wt} for { - dep, meta, err := c.nomad.Deployments().Info(depID, q) + dep, meta, err := l.nomad.Deployments().Info(depID, q) logging.Debug("levant/deploy: deployment %v running for %.2fs", depID, time.Since(t).Seconds()) // Listen for the deploymentChan closing which indicates Levant should exit @@ -234,7 +271,7 @@ func (c *nomadClient) deploymentWatcher(depID string, autoPromote int) (success q.WaitIndex = meta.LastIndex - cont, err := c.checkDeploymentStatus(dep, canaryChan) + cont, err := l.checkDeploymentStatus(dep, canaryChan) if err != nil { return false } @@ -247,7 +284,7 @@ func (c *nomadClient) deploymentWatcher(depID string, autoPromote int) (success } } -func (c *nomadClient) checkDeploymentStatus(dep *nomad.Deployment, shutdownChan chan interface{}) (bool, error) { +func (l *levantDeployment) checkDeploymentStatus(dep *nomad.Deployment, shutdownChan chan interface{}) (bool, error) { switch dep.Status { case nomadStructs.DeploymentStatusSuccessful: @@ -264,14 +301,14 @@ func (c *nomadClient) checkDeploymentStatus(dep *nomad.Deployment, shutdownChan logging.Error("levant/deploy: deployment %v has status %s", dep.ID, dep.Status) // Launch the failure inspector. - c.checkFailedDeployment(&dep.ID) + l.checkFailedDeployment(&dep.ID) return false, fmt.Errorf("deployment failed") } } // canaryAutoPromote handles Levant's canary-auto-promote functionality. -func (c *nomadClient) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) { +func (l *levantDeployment) canaryAutoPromote(depID string, waitTime int, shutdownChan, deploymentChan chan interface{}) { // Setup the AutoPromote timer. 
autoPromote := time.After(time.Duration(waitTime) * time.Second) @@ -283,7 +320,7 @@ func (c *nomadClient) canaryAutoPromote(depID string, waitTime int, shutdownChan waitTime, depID) // Check the deployment is healthy before promoting. - if healthy := c.checkCanaryDeploymentHealth(depID); !healthy { + if healthy := l.checkCanaryDeploymentHealth(depID); !healthy { logging.Error("levant/deploy: the canary deployment %s has unhealthy allocations, unable to promote", depID) close(deploymentChan) return @@ -292,7 +329,7 @@ func (c *nomadClient) canaryAutoPromote(depID string, waitTime int, shutdownChan logging.Info("levant/deploy: triggering auto promote of deployment %s", depID) // Promote the deployment. - _, _, err := c.nomad.Deployments().PromoteAll(depID, nil) + _, _, err := l.nomad.Deployments().PromoteAll(depID, nil) if err != nil { logging.Error("levant/deploy: unable to promote deployment %s: %v", depID, err) close(deploymentChan) @@ -308,11 +345,11 @@ func (c *nomadClient) canaryAutoPromote(depID string, waitTime int, shutdownChan // checkCanaryDeploymentHealth is used to check the health status of each // task-group within a canary deployment. -func (c *nomadClient) checkCanaryDeploymentHealth(depID string) (healthy bool) { +func (l *levantDeployment) checkCanaryDeploymentHealth(depID string) (healthy bool) { var unhealthy int - dep, _, err := c.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: true}) + dep, _, err := l.nomad.Deployments().Info(depID, &nomad.QueryOptions{AllowStale: true}) if err != nil { logging.Error("levant/deploy: unable to query deployment %s for health: %v", depID, err) return @@ -346,12 +383,12 @@ func (c *nomadClient) checkCanaryDeploymentHealth(depID string) (healthy bool) { // evaluationID. This is only needed as sometimes Nomad initially returns eval // info with an empty deploymentID; and a retry is required in order to get the // updated response from Nomad. 
-func (c *nomadClient) getDeploymentID(evalID string) (depID string, err error) { +func (l *levantDeployment) getDeploymentID(evalID string) (depID string, err error) { var evalInfo *nomad.Evaluation for { - if evalInfo, _, err = c.nomad.Evaluations().Info(evalID, nil); err != nil { + if evalInfo, _, err = l.nomad.Evaluations().Info(evalID, nil); err != nil { return } @@ -369,17 +406,17 @@ func (c *nomadClient) getDeploymentID(evalID string) (depID string, err error) { // dynamicGroupCountUpdater takes the templated and rendered job and updates the // group counts based on the currently deployed job; if its running. -func (c *nomadClient) dynamicGroupCountUpdater(job *nomad.Job) error { +func (l *levantDeployment) dynamicGroupCountUpdater() error { // Gather information about the current state, if any, of the job on the // Nomad cluster. - rJob, _, err := c.nomad.Jobs().Info(*job.Name, &nomad.QueryOptions{}) + rJob, _, err := l.nomad.Jobs().Info(*l.config.Job.Name, &nomad.QueryOptions{}) // This is a hack due to GH-1849; we check the error string for 404 which // indicates the job is not running, not that there was an error in the API // call. if err != nil && strings.Contains(err.Error(), "404") { - logging.Info("levant/deploy: job %s not running, using template file group counts", *job.Name) + logging.Info("levant/deploy: job %s not running, using template file group counts", *l.config.Job.Name) return nil } else if err != nil { logging.Error("levant/deploy: unable to perform job evaluation: %v", err) @@ -389,10 +426,10 @@ func (c *nomadClient) dynamicGroupCountUpdater(job *nomad.Job) error { // Iterate the templated job and the Nomad returned job and update group count // based on matches. 
for _, rGroup := range rJob.TaskGroups { - for _, group := range job.TaskGroups { + for _, group := range l.config.Job.TaskGroups { if *rGroup.Name == *group.Name { logging.Info("levant/deploy: using dynamic count %v for job %s and group %s", - *rGroup.Count, *job.Name, *group.Name) + *rGroup.Count, *l.config.Job.Name, *group.Name) group.Count = rGroup.Count } } diff --git a/levant/failure_inspector.go b/levant/failure_inspector.go index bfea0b73..a3d25565 100644 --- a/levant/failure_inspector.go +++ b/levant/failure_inspector.go @@ -11,11 +11,11 @@ import ( ) // checkFailedDeployment helps log information about deployment failures. -func (c *nomadClient) checkFailedDeployment(depID *string) { +func (l *levantDeployment) checkFailedDeployment(depID *string) { var allocIDS []string - allocs, _, err := c.nomad.Deployments().Allocations(*depID, nil) + allocs, _, err := l.nomad.Deployments().Allocations(*depID, nil) if err != nil { logging.Error("levant/failure_inspector: unable to query deployment allocations for deployment %s", depID) @@ -37,7 +37,7 @@ func (c *nomadClient) checkFailedDeployment(depID *string) { // Inspect each allocation. for _, id := range allocIDS { logging.Debug("levant/failure_inspector: launching allocation inspector for alloc %v", id) - go c.allocInspector(id, &wg) + go l.allocInspector(id, &wg) } wg.Wait() @@ -45,12 +45,12 @@ func (c *nomadClient) checkFailedDeployment(depID *string) { // allocInspector inspects an allocations events to log any useful information // which may help debug deployment failures. -func (c *nomadClient) allocInspector(allocID string, wg *sync.WaitGroup) { +func (l *levantDeployment) allocInspector(allocID string, wg *sync.WaitGroup) { // Inform the wait group we have finished our task upon completion. 
defer wg.Done() - resp, _, err := c.nomad.Allocations().Info(allocID, nil) + resp, _, err := l.nomad.Allocations().Info(allocID, nil) if err != nil { logging.Error("levant/failure_inspector: unable to query alloc %v: %v", allocID, err) return diff --git a/levant/job_status_checker.go b/levant/job_status_checker.go index 65cef819..986acba2 100644 --- a/levant/job_status_checker.go +++ b/levant/job_status_checker.go @@ -11,9 +11,10 @@ import ( // checkJobStatus checks the status of a job at least reaches a status of // running. This is required as currently Nomad does not support deployments // across all job types. -func (c *nomadClient) checkJobStatus(jobName *string) bool { +func (l *levantDeployment) checkJobStatus() bool { - logging.Info("levant/job_status_checker: running job status checker for %s", *jobName) + j := l.config.Job.Name + logging.Info("levant/job_status_checker: running job status checker for %s", *j) // Initialiaze our WaitIndex var wi uint64 @@ -24,9 +25,9 @@ func (c *nomadClient) checkJobStatus(jobName *string) bool { for { - job, meta, err := c.nomad.Jobs().Info(*jobName, q) + job, meta, err := l.nomad.Jobs().Info(*j, q) if err != nil { - logging.Error("levant/job_status_checker: unable to query batch job %s: %v", *jobName, err) + logging.Error("levant/job_status_checker: unable to query batch job %s: %v", *j, err) return false } @@ -37,17 +38,17 @@ func (c *nomadClient) checkJobStatus(jobName *string) bool { } if *job.Status == nomadStructs.JobStatusRunning { - logging.Info("levant/job_status_checker: job %s has status %s", *jobName, *job.Status) + logging.Info("levant/job_status_checker: job %s has status %s", *j, *job.Status) return true } select { case <-timeout: logging.Error("levant/job_status_checker: timeout reached while verifying the status of job %s", - *jobName) + *j) return false default: - logging.Debug("levant/job_status_checker: job %s currently has status %s", *jobName, *job.Status) + logging.Debug("levant/job_status_checker: 
job %s currently has status %s", *j, *job.Status) q.WaitIndex = meta.LastIndex continue } diff --git a/levant/structs/config.go b/levant/structs/config.go new file mode 100644 index 00000000..446b8b2d --- /dev/null +++ b/levant/structs/config.go @@ -0,0 +1,33 @@ +package structs + +import nomad "github.com/hashicorp/nomad/api" + +// Config is the main struct used to configure and run a Levant deployment on +// a given target job. +type Config struct { + // Addr is the Nomad API address to use for all calls and must include both + // protocol and port. + Addr string + + // Canary enables canary autopromote and is the value in seconds to wait + // until attempting to perform autopromote. + Canary int + + // ForceCount is a boolean flag that can be used to ignore running job counts + // and force the count based on the rendered job file. + ForceCount bool + + // Job represents the Nomad Job definition that will be deployed. + Job *nomad.Job + + // LogLevel is the level at which Levant will log. + LogLevel string + + // TemplateFile is the job specification template which will be rendered + // before being deployed to the cluster. + TemplateFile string + + // VaiableFile contains the variables which will be substituted into the + // templateFile before deployment. + VaiableFile string +}