new module nomad_job & nomad_job_info (#867)

* nomad_job module

* Delete nomad_job.py

* new module nomad_job

* fix symlink

* disable test with centos6, not supported

* fix centos unsupported

* fix

* requested doc changes

* disable freebsd ci

* requested change docs + check_mode

* lint

* fix syntax

* update docs

* doc fix

Co-authored-by: Felix Fontein <felix@fontein.de>

* Update nomad_job.py

fix docs + ssl true default

* Update nomad_job.yml

disable ssl ci

* nomad_job_info

* Update nomad_job_info.py

fix token nomad job info

* Update nomad_job.py

idempotence + check_mode plan result

* Update nomad_job.py

fail if no id with json content

* Update nomad_job.yml

ci idempotence + check_mode, nomad_job and nomad_job_info

* Update nomad_job.yml

fix ci

* Update main.yml

add kill nomad ci

* Update main.yml

always kill

* fix check mode delete job

* ci with delete and check_mode

* lint

* force start in first deploy

* 12.4 nomad

* fix version nomad

* fix ci assert

* fix ci

* fix ci

* lint

* fix version job id None, import os unused

* lint job_info

* Update aliases

* docs fragment + info refactor

* lint

* ci

* jmespath

* fix ci

Co-authored-by: FERREIRA Christophe <christophe.ferreira@cnaf.fr>
Co-authored-by: Felix Fontein <felix@fontein.de>
chris93111 2020-10-19 13:40:07 +02:00 committed by GitHub
commit b2e075e6d3
10 changed files with 1255 additions and 0 deletions
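
For reference, a minimal usage sketch of the two new modules, built only from the options the integration tests below exercise (host, state, use_ssl, content, force_start, name); the spec path and register names are illustrative:

- name: Deploy a Nomad job from an HCL specification
  nomad_job:
    host: localhost
    state: present
    use_ssl: false
    content: "{{ lookup('file', 'example.hcl') }}"  # illustrative path to a spec like job.hcl below
    force_start: true
  register: job_deployed

- name: Query the deployed job by name
  nomad_job_info:
    host: localhost
    use_ssl: false
    name: example
  register: job_info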

@@ -0,0 +1,6 @@
shippable/posix/group2
nomad_job_info
destructive
skip/aix
skip/centos6
skip/freebsd

@@ -0,0 +1,396 @@
# There can only be a single job definition per file. This job is named
# "example" so it will create a job with the ID and Name "example".
# The "job" stanza is the top-most configuration option in the job
# specification. A job is a declarative specification of tasks that Nomad
# should run. Jobs have a globally unique name and one or many task groups,
# which are themselves collections of one or many tasks.
#
# For more information and examples on the "job" stanza, please see
# the online documentation at:
#
#
# https://www.nomadproject.io/docs/job-specification/job.html
#
job "example" {
# The "region" parameter specifies the region in which to execute the job.
# If omitted, this inherits the default region name of "global".
# region = "global"
#
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This must be provided.
datacenters = ["dc1"]
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
#
# For more information, please see the online documentation at:
#
# https://www.nomadproject.io/docs/jobspec/schedulers.html
#
type = "service"
# The "constraint" stanza defines additional constraints for placing this job,
# in addition to any resource or driver constraints. This stanza may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
#
# For more information and examples on the "constraint" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/constraint.html
#
# constraint {
# attribute = "${attr.kernel.name}"
# value = "linux"
# }
# The "update" stanza specifies the update strategy of task groups. The update
# strategy is used to control things like rolling upgrades, canaries, and
# blue/green deployments. If omitted, no update strategy is enforced. The
# "update" stanza may be placed at the job or task group. When placed at the
# job, it applies to all groups within the job. When placed at both the job and
# group level, the stanzas are merged with the group's taking precedence.
#
# For more information and examples on the "update" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/update.html
#
update {
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# at a time.
max_parallel = 1
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
auto_revert = false
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
#
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
canary = 0
}
# The migrate stanza specifies the group's strategy for migrating off of
# draining nodes. If omitted, a default migration strategy is applied.
#
# For more information on the "migrate" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/migrate.html
#
migrate {
# Specifies the number of task groups that can be migrated at the same
# time. This number must be less than the total count for the group as
# (count - max_parallel) will be left running during migrations.
max_parallel = 1
# Specifies the mechanism by which allocation health is determined. The
# potential values are "checks" or "task_states".
health_check = "checks"
# Specifies the minimum time the allocation must be in the healthy state
# before it is marked as healthy and unblocks further allocations from being
# migrated. This is specified using a label suffix like "30s" or "15m".
min_healthy_time = "10s"
# Specifies the deadline by which the allocation must be marked as healthy,
# after which it is automatically transitioned to unhealthy. This
# is specified using a label suffix like "2m" or "1h".
healthy_deadline = "5m"
}
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
# For more information and examples on the "group" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/group.html
#
group "cache" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = 1
# The "restart" stanza configures a group's behavior on task failure. If
# left unspecified, a default restart policy is used based on the job type.
#
# For more information and examples on the "restart" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/restart.html
#
restart {
# The number of attempts to run the job within the specified interval.
attempts = 2
interval = "30m"
# The "delay" parameter specifies the duration to wait before restarting
# a task after it has failed.
delay = "15s"
# The "mode" parameter controls what happens when a task has restarted
# "attempts" times within the interval. "delay" mode delays the next
# restart until the next interval. "fail" mode does not restart the task
# if "attempts" has been hit within the interval.
mode = "fail"
}
# The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
# instead of a hard disk requirement. Clients using this stanza should
# not specify disk requirements in the resources stanza of the task. All
# tasks in this group will share the same ephemeral disk.
#
# For more information and examples on the "ephemeral_disk" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
#
ephemeral_disk {
# When sticky is true and the task group is updated, the scheduler
# will prefer to place the updated allocation on the same node and
# will migrate the data. This is useful for tasks that store data
# that should persist across allocation updates.
# sticky = true
#
# Setting migrate to true causes the allocation directory of a
# sticky allocation to be migrated.
# migrate = true
#
# The "size" parameter specifies the size in MB of shared ephemeral disk
# between tasks in the group.
size = 300
}
# The "affinity" stanza enables operators to express placement preferences
# based on node attributes or metadata.
#
# For more information and examples on the "affinity" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/affinity.html
#
# affinity {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# value specifies the desired attribute value. In this example Nomad
# will prefer placement in the "us-west1" datacenter.
# value = "us-west1"
# weight can be used to indicate relative preference
# when the job has more than one affinity. It defaults to 50 if not set.
# weight = 100
# }
# The "spread" stanza allows operators to increase the failure tolerance of
# their applications by specifying a node attribute that allocations
# should be spread over.
#
# For more information and examples on the "spread" stanza, please
# see the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/spread.html
#
# spread {
# attribute specifies the name of a node attribute or metadata
# attribute = "${node.datacenter}"
# targets can be used to define desired percentages of allocations
# for each targeted attribute value.
#
# target "us-east1" {
# percent = 60
# }
# target "us-west1" {
# percent = 40
# }
# }
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
# For more information and examples on the "task" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/task.html
#
task "redis" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "docker"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
image = "redis:3.2"
port_map {
db = 6379
}
}
# The "artifact" stanza instructs Nomad to download an artifact from a
# remote source prior to starting the task. This provides a convenient
# mechanism for downloading configuration files or data needed to run the
# task. It is possible to specify the "artifact" stanza multiple times to
# download multiple artifacts.
#
# For more information and examples on the "artifact" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/artifact.html
#
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# The "logs" stanza instructs the Nomad client on how many log files and
# the maximum size of those logs files to retain. Logging is enabled by
# default, but the "logs" stanza allows for finer-grained control over
# the log rotation and storage configuration.
#
# For more information and examples on the "logs" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/logs.html
#
# logs {
# max_files = 10
# max_file_size = 15
# }
# The "resources" stanza describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
# For more information and examples on the "resources" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/resources.html
#
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 10
port "db" {}
}
}
# The "service" stanza instructs Nomad to register this task as a service
# in the service discovery engine, which is currently Consul. This will
# make the service addressable after Nomad has placed it on a host and
# port.
#
# For more information and examples on the "service" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/service.html
#
service {
name = "redis-cache"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# The "template" stanza instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
#
# For more information and examples on the "template" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/template.html
#
# template {
# data = "---\nkey: {{ key \"service/my-key\" }}"
# destination = "local/file.yml"
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# The "template" stanza can also be used to create environment variables
# for tasks that prefer those to config files. The task will be restarted
# when data pulled from Consul or Vault changes.
#
# template {
# data = "KEY={{ key \"service/my-key\" }}"
# destination = "local/file.env"
# env = true
# }
# The "vault" stanza instructs the Nomad client to acquire a token from
# a HashiCorp Vault server. The Nomad servers must be configured and
# authorized to communicate with Vault. By default, Nomad will inject
# the token into the job via an environment variable and make the token
# available to the "template" stanza. The Nomad client handles the renewal
# and revocation of the Vault token.
#
# For more information and examples on the "vault" stanza, please see
# the online documentation at:
#
# https://www.nomadproject.io/docs/job-specification/vault.html
#
# vault {
# policies = ["cdn", "frontend"]
# change_mode = "signal"
# change_signal = "SIGHUP"
# }
# Controls the timeout between signalling a task that it will be killed
# and actually killing it. If not set, a default is used.
# kill_timeout = "20s"
}
}
}
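
The integration tests below feed this exact spec to the module with a file lookup; a minimal sketch of that step (task name is illustrative):

- name: Register the example job defined above
  nomad_job:
    host: localhost
    state: present
    use_ssl: false
    content: "{{ lookup('file', 'job.hcl') }}"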

@@ -0,0 +1,4 @@
---
dependencies:
- setup_pkg_mgr
- setup_openssl

@@ -0,0 +1,106 @@
- name: Skip unsupported platforms
meta: end_play
when: ansible_distribution == 'CentOS' and ansible_distribution_major_version is not version('7', '>=')
- name: Install Nomad and test
vars:
nomad_version: 0.12.4
nomad_uri: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_{{ ansible_system | lower }}_{{ nomad_arch }}.zip
nomad_cmd: '{{ output_dir }}/nomad'
block:
- name: register pyOpenSSL version
command: '{{ ansible_python_interpreter }} -c ''import OpenSSL; print(OpenSSL.__version__)'''
register: pyopenssl_version
- name: Install requests<2.20 (CentOS/RHEL 6)
pip:
name: requests<2.20
register: result
until: result is success
when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
- name: Install python-nomad
pip:
name: python-nomad
register: result
until: result is success
- name: Install jmespath
pip:
name: jmespath
register: result
until: result is success
- when: pyopenssl_version.stdout is version('0.15', '>=')
block:
- name: Generate privatekey
community.crypto.openssl_privatekey:
path: '{{ output_dir }}/privatekey.pem'
- name: Generate CSR
community.crypto.openssl_csr:
path: '{{ output_dir }}/csr.csr'
privatekey_path: '{{ output_dir }}/privatekey.pem'
subject:
commonName: localhost
- name: Generate selfsigned certificate
register: selfsigned_certificate
community.crypto.openssl_certificate:
path: '{{ output_dir }}/cert.pem'
csr_path: '{{ output_dir }}/csr.csr'
privatekey_path: '{{ output_dir }}/privatekey.pem'
provider: selfsigned
selfsigned_digest: sha256
- name: Install unzip
package:
name: unzip
register: result
until: result is success
when: ansible_distribution != "MacOSX"
- assert:
that: ansible_architecture in ['i386', 'x86_64', 'amd64']
- set_fact:
nomad_arch: '386'
when: ansible_architecture == 'i386'
- set_fact:
nomad_arch: amd64
when: ansible_architecture in ['x86_64', 'amd64']
- name: Download nomad binary
unarchive:
src: '{{ nomad_uri }}'
dest: '{{ output_dir }}'
remote_src: true
register: result
until: result is success
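# Note: remote_dir below is templated lazily, so it can reference
# echo_output_dir even though that variable is only registered by the first
# task inside the block.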
- vars:
remote_dir: '{{ echo_output_dir.stdout }}'
block:
- command: echo {{ output_dir }}
register: echo_output_dir
- name: Run integration tests
block:
- name: Start nomad (dev mode enabled)
shell: nohup {{ nomad_cmd }} agent -dev </dev/null >/dev/null 2>&1 &
- name: wait for nomad to be up
wait_for:
host: localhost
port: 4646
delay: 10
timeout: 60
- import_tasks: nomad_job.yml
always:
- name: kill nomad
shell: pkill nomad

@@ -0,0 +1,90 @@
---
- name: run check deploy nomad job
nomad_job:
host: localhost
state: present
use_ssl: false
content: "{{ lookup('file', 'job.hcl') }}"
register: job_check_deployed
check_mode: true
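# The check-mode task above must report the planned change without deploying;
# the real deployment below sets force_start so Nomad starts the job
# immediately on the first deploy (see 'force start in first deploy' in the
# commit log above).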
- name: run create nomad job
nomad_job:
host: localhost
state: present
use_ssl: false
content: "{{ lookup('file', 'job.hcl') }}"
force_start: true
register: job_deployed
- name: get nomad job deployed
nomad_job_info:
host: localhost
use_ssl: false
name: example
register: get_nomad_job
- name: get list of nomad jobs
nomad_job_info:
host: localhost
use_ssl: false
register: list_nomad_jobs
- name: assert job is deployed and tasks are changed
assert:
that:
- job_check_deployed is changed
- job_deployed is changed
- get_nomad_job.result[0].ID == "example"
- list_nomad_jobs.result | length == 1
- name: run check deploy job idempotence
nomad_job:
host: localhost
state: present
use_ssl: false
content: "{{ lookup('file', 'job.hcl') }}"
register: job_check_deployed_idempotence
check_mode: true
- name: run create nomad job idempotence
nomad_job:
host: localhost
state: present
use_ssl: false
content: "{{ lookup('file', 'job.hcl') }}"
register: job_deployed_idempotence
- name: run check delete nomad job
nomad_job:
host: localhost
state: absent
use_ssl: false
content: "{{ lookup('file', 'job.hcl') }}"
register: job_deleted_check
check_mode: true
- name: run delete nomad job
nomad_job:
host: localhost
state: absent
use_ssl: false
content: "{{ lookup('file', 'job.hcl') }}"
register: job_deleted
- name: get job deleted
nomad_job_info:
host: localhost
use_ssl: false
name: example
register: get_job_delete
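# state=absent deregisters the job, but Nomad keeps it queryable with
# Stop=true until garbage collection, so the assert below checks the Stop
# flag instead of expecting an empty result.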
- name: assert idempotence
assert:
that:
- job_check_deployed_idempotence is not changed
- job_deployed_idempotence is not changed
- job_deleted_check is changed
- job_deleted is changed
- get_job_delete.result[0].Stop