Initial commit of Ansible support for the Consul clustering framework (http://consul.io).

This submission includes support for:
 - creating and registering services and checks
 - reading, writing and looking up values in consul's kv store
 - creating and manipulating sessions for distributed locking on values in the kv store
 - creating and manipulating ACLs to restrict access to the kv store
 - inventory support that reads the Consul catalog and groups nodes according to
     - datacenters
     - exposed services
     - service availability
     - arbitrary groupings from the kv store

This submission makes extensive use of the python-consul library, which is required
as a dependency and can be installed via pip.
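
For reference, the kind of python-consul calls these modules are built on looks
roughly like this; a minimal sketch only, assuming a local agent on Consul's
default port:

    # pip install python-consul
    import consul

    # Connect to a local agent on Consul's default HTTP port.
    client = consul.Consul(host='127.0.0.1', port=8500)

    # kv store: write a value, then read it back.
    client.kv.put('ansible/example', 'somevalue')
    index, data = client.kv.get('ansible/example')
    print(data['Value'])

    # catalog: list the nodes the agent knows about.
    index, nodes = client.catalog.nodes()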

The tests were written to target a Vagrant cluster, which can be set up by following
the instructions at http://github.com/sgargan/consul-vagrant
Steve Gargan 2015-01-24 01:09:03 +00:00
commit c02f114967
12 changed files with 1121 additions and 1 deletion

View file

@@ -19,6 +19,8 @@ TMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
VAULT_PASSWORD_FILE = vault-password
CONSUL_RUNNING := $(shell python consul_running.py)
all: parsing test_var_precedence unicode non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault
parsing:
@@ -30,7 +32,7 @@ parsing:
ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
includes:
ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS)
unicode:
ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS)
@@ -119,6 +121,16 @@ rackspace: $(CREDENTIALS_FILE)
CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make rackspace_cleanup ; \
exit $$RC;
consul:
ifeq ($(CONSUL_RUNNING), True)
ansible-playbook -i $(INVENTORY) consul.yml ; \
ansible-playbook -i ../../plugins/inventory/consul_io.py consul_inventory.yml
else
@echo "Consul agent is not running locally. To run a cluster locally see http://github.com/sgargan/consul-vagrant"
endif
test_galaxy: test_galaxy_spec test_galaxy_yaml
test_galaxy_spec:

View file

@@ -0,0 +1,82 @@
- hosts: localhost
connection: local
gather_facts: false
vars:
# these are the defaults from the consul-vagrant cluster setup
- mgmt_token: '4791402A-D875-4C18-8316-E652DBA53B18'
- acl_host: '11.0.0.2'
- metadata_json: '{"clearance": "top_secret"}'
pre_tasks:
# this works except for the KV lookups
- name: check that the consul agent is running locally
local_action: wait_for port=8500 timeout=5
ignore_errors: true
register: consul_running
roles:
- {role: test_consul_service,
when: not consul_running.failed is defined}
- {role: test_consul_kv,
when: not consul_running.failed is defined}
- {role: test_consul_acl,
when: not consul_running.failed is defined}
- {role: test_consul_session,
when: not consul_running.failed is defined}
tasks:
- name: setup services with passing check for consul inventory test
consul:
service_name: nginx
service_port: 80
script: "sh -c true"
interval: 5
token: '4791402A-D875-4C18-8316-E652DBA53B18'
tags:
- dev
- master
- name: setup failing service for inventory test
consul:
service_name: nginx
service_port: 443
script: "sh -c false"
interval: 5
tags:
- qa
- slave
- name: setup ssh service for inventory test
consul:
service_name: ssh
service_port: 2222
script: "sh -c true"
interval: 5
token: '4791402A-D875-4C18-8316-E652DBA53B18'
- name: update the Anonymous token to allow anon access to kv store
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
token: 'anonymous'
rules:
- key: ''
policy: write
register: inventory_token
- name: add metadata for the node through kv_store
consul_kv: "key=ansible/metadata/dc1/consul-1 value='{{metadata_json}}'"
- name: add groups for the node through kv_store
consul_kv: key=ansible/groups/dc1/consul-1 value='a_group, another_group'
- name: warn that tests are ignored if consul agent is not running
debug: msg="A consul agent needs to be running in order to run the tests. To set up a vagrant cluster for use in testing see http://github.com/sgargan/consul-vagrant"
when: consul_running.failed is defined

View file

@@ -0,0 +1,19 @@
- hosts: all:!localhost
gather_facts: false
pre_tasks:
- name: check that the consul agent is running locally
local_action: wait_for port=8500 timeout=5
ignore_errors: true
register: consul_running
roles:
- {role: test_consul_inventory,
when: not consul_running.failed is defined}
tasks:
- name: warn that tests are ignored if consul agent is not running
debug: msg="A consul agent needs to be running in order to run the tests. To set up a vagrant cluster for use in testing see http://github.com/sgargan/consul-vagrant"
when: consul_running.failed is defined

View file

@@ -0,0 +1,11 @@
''' Checks that the consul agent is running locally. '''

if __name__ == '__main__':
    try:
        import consul
        # Query the local agent's catalog; any response means it is up.
        client = consul.Consul(host='0.0.0.0', port=8500)
        client.catalog.nodes()
        print("True")
    except Exception:
        # Print nothing so the Makefile's $(shell ...) check sees no output.
        pass

View file

@@ -0,0 +1,42 @@
- name: create a new acl token
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
name: 'New ACL'
register: new_ruleless
- name: verify ruleless token created
assert:
that:
- new_ruleless.token | length == 36
- new_ruleless.name == 'New ACL'
- name: add rules to an acl token
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
name: 'With rule'
rules:
- key: 'foo'
policy: read
- key: 'private/foo'
policy: deny
register: with_rules
- name: verify rules created
assert:
that:
- with_rules.token | length == 36
- with_rules.name == 'With rule'
- with_rules.rules | match('.*"foo".*')
- with_rules.rules | match('.*"private/foo".*')
- name: clear up
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
token: '{{item}}'
state: absent
with_items:
- '{{new_ruleless.token}}'
- '{{with_rules.token}}'
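
For context, the consul_acl module drives python-consul's legacy ACL endpoints,
roughly as below. This is a sketch only: the HCL rule string and the
constructor-level management token are assumptions about how the module maps
its YAML options onto the library.

    import consul

    # The management token authorises ACL operations (assumption: supplied
    # at construction, matching mgmt_token/acl_host in the tasks above).
    client = consul.Consul(host='11.0.0.2',
                           token='4791402A-D875-4C18-8316-E652DBA53B18')

    # Create a client token with a rule, as the 'With rule' task does.
    rules = 'key "foo" { policy = "read" }'
    acl_id = client.acl.create(name='With rule', rules=rules)

    # Destroy it again, as the clean-up task does.
    client.acl.destroy(acl_id)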

View file

@@ -0,0 +1,39 @@
- name: there are three hosts with an available consul service
assert:
that:
- groups.consul_servers | length == 3
- name: there is one host with an available ssh service
assert:
that:
- groups.ssh_up | length == 1
- name: there is one host with a failing nginx service
assert:
that:
- groups.nginx_down | length == 1
- name: services get added to groups with their tags
assert:
that:
- groups.nginx_servers_qa | length == 1
- groups.nginx_servers_slave | length == 1
- name: metadata from the kv store gets added to the facts for a host
assert:
that:
- clearance | match('top_secret')
when: inventory_hostname == '11.0.0.2'
- name: extra groups a host should be added to can be loaded from kv
assert:
that:
- groups.a_group | length == 1
- groups.another_group | length == 1
- name: ansible_ssh_port is set if the ssh service is registered
assert:
that:
- ansible_ssh_port == 2222
when: not inventory_hostname in ['11.0.0.2', '11.0.0.3', '11.0.0.4']
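
These groups are built by the consul_io.py inventory script from the catalog;
the queries involved look roughly like the sketch below (illustrative only,
using python-consul):

    import consul

    client = consul.Consul(host='127.0.0.1', port=8500)
    groups = {}

    # One group per datacenter, from the nodes registered in each.
    for dc in client.catalog.datacenters():
        index, nodes = client.catalog.nodes(dc=dc)
        groups[dc] = [n['Node'] for n in nodes]

    # One group per exposed service; availability groups such as
    # 'ssh_up' or 'nginx_down' derive from each instance's check status.
    index, services = client.catalog.services()
    for name in services:
        index, instances = client.health.service(name)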

View file

@@ -0,0 +1,90 @@
- name: add rules to an acl token
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
name: 'ACL rule for testing'
rules:
- key: 'somekey'
policy: all
register: test_acl
- name: cleanup from previous failed runs
consul_kv: key={{item}} state=absent token='{{test_acl.token}}'
with_items:
- somekey
- name: add a kv pair to the kv store
consul_kv: key=somekey value=somevalue token='{{test_acl.token}}'
register: new_key
- name: verify new key
assert:
that:
- new_key.key == 'somekey'
- new_key.data.Value == 'somevalue'
- new_key.changed == true
- name: add an existing kv to the kv store
consul_kv: key=somekey value=somevalue token='{{test_acl.token}}'
register: existing_key
- name: verify existing key causes no change
assert:
that:
- existing_key.key == 'somekey'
- existing_key.data.Value == 'somevalue'
- existing_key.changed == False
- name: remove an existing kv from the kv store
consul_kv: key=somekey state=absent token='{{test_acl.token}}'
register: remove_key
- name: verify removal causes change and existing value is returned
assert:
that:
- remove_key.key == 'somekey'
- remove_key.data.Value == 'somevalue'
- remove_key.changed == True
- name: attempting to remove a non-existent kv from the kv store causes no change
consul_kv: key=not_present state=absent token='{{test_acl.token}}'
register: non_existent_key
- name: verify removal of a non-existent key causes no change
assert:
that:
- non_existent_key.key == 'not_present'
- non_existent_key.data == None
- non_existent_key.changed == False
- name: Add a key to lookup with the lookup capability
consul_kv: key='key/to/lookup_{{item}}' value='somevalue_{{item}}' token='{{test_acl.token}}'
with_items:
- one
- two
register: lookup_keys
# necessary to make the new token available to the lookups below
- set_fact: acl_token={{test_acl.token}}
- name: kv test
assert:
that:
- "{{item | match('somevalue_one')}}"
with_consul_kv:
- 'key/to/lookup_one token={{acl_token}}'
- name: recursive kv lookup test
assert:
that:
- "{{item| match('somevalue_(one|two)')}}"
with_consul_kv:
- 'key/to recurse=true token={{acl_token}}'
- name: remove test acl rule
consul_acl:
mgmt_token: '{{mgmt_token}}'
host: '{{acl_host}}'
token: '{{test_acl.token}}'
state: absent
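
The lookup cases at the end exercise single-key and recursive reads;
underneath, the with_consul_kv plugin amounts to python-consul calls along
these lines (a sketch; the token value is a placeholder for the one created
above):

    import consul

    client = consul.Consul()
    acl_token = 'token-from-the-consul_acl-task-above'  # placeholder

    # Single-key lookup, with the ACL token applied per request.
    index, data = client.kv.get('key/to/lookup_one', token=acl_token)
    value = data['Value']

    # Recursive lookup returns every entry under the prefix.
    index, entries = client.kv.get('key/to', recurse=True, token=acl_token)
    values = [e['Value'] for e in entries]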

View file

@@ -0,0 +1,156 @@
- name: cleanup any previous failed runs
consul:
service_id: '{{item}}'
state: absent
with_items:
- service1
- service2
- with_check
- with_tags
- name: register very basic service with consul gets default id
consul:
service_name: service1
service_port: 80
register: basic_result
- name: verify basic service registration
assert:
that:
- basic_result.changed
- basic_result.service_port == 80
- basic_result.service_id == 'service1'
- basic_result.service_name == 'service1'
- name: register very basic service with explicit id
consul:
service_name: Basic Service
service_id: service2
service_port: 80
register: basic2_result
- name: verify service2 registration
assert:
that:
- basic2_result.changed
- basic2_result.service_port == 80
- basic2_result.service_id == 'service2'
- basic2_result.service_name == 'Basic Service'
- name: register very basic service with check script
consul:
service_name: with_check
service_port: 80
script: "sh -c true"
interval: 60
register: script_result
- name: verify service with check registration
assert:
that:
- script_result.changed
- script_result.service_port == 80
- script_result.service_id == 'with_check'
- script_result.checks | length == 1
- name: register service with some service tags
consul:
service_name: with_tags
service_port: 80
tags:
- prod
- webservers
register: tags_result
- name: verify tags registration
assert:
that:
- tags_result.changed
- "tags_result.tags == ['prod', 'webservers']"
- name: create a node level check
consul:
check_name: Node Level Check
check_id: node_level
script: "sh -c true"
interval: 50m
register: nodelevel_result
- name: verify node level check registration
assert:
that:
- nodelevel_result.changed
- nodelevel_result.check_name == 'Node Level Check'
- nodelevel_result.check_id == 'node_level'
- nodelevel_result.script == 'sh -c true'
- nodelevel_result.interval == '50m'
- name: remove a service
consul:
service_id: 'service1'
state: absent
register: delete_service_result
- name: verify service removal
assert:
that:
- delete_service_result.changed
- name: removal of a non-existent service causes no change
consul:
service_id: 'service1'
state: absent
register: delete_service_result
- name: verify service removal caused no change
assert:
that:
- not delete_service_result.changed
- name: remove a check
consul:
check_id: 'node_level'
state: absent
register: delete_check_result
- name: verify check removal
assert:
that:
- delete_check_result.changed
- name: removal of a non-existent check causes no change
consul:
check_id: 'node_level'
state: absent
register: delete_check_result
- name: verify check removal caused no change
assert:
that:
- not delete_check_result.changed
- name: add service to test removal by name
consul:
service_name: by_name
service_port: 12345
- name: remove service by name
consul:
service_name: by_name
state: absent
register: delete_service_by_name_result
- name: verify service removal
assert:
that:
- delete_service_by_name_result.changed
- name: cleanup
consul:
service_id: '{{item}}'
state: absent
with_items:
- service2
- with_check
- with_tags
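
For comparison, service registration and removal through python-consul
directly; a sketch of the agent calls the consul module wraps:

    import consul

    client = consul.Consul()

    # Register a service with an explicit id, port and tags,
    # mirroring the 'Basic Service' and 'with_tags' tasks above.
    client.agent.service.register('Basic Service', service_id='service2',
                                  port=80, tags=['prod', 'webservers'])

    # Removal by id, as the cleanup tasks do.
    client.agent.service.deregister('service2')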

View file

@@ -0,0 +1,77 @@
- name: register basic session with consul
consul_session:
name: session1
register: basic_result
- name: verify basic session registration
assert:
that:
- basic_result.changed
- basic_result.session_id | length == 36
- basic_result.name == 'session1'
- name: add checks for session health check
consul:
check_name: session_check
script: /usr/bin/true
interval: 15
- name: register a session with check
consul_session:
name: session_with_check
checks:
- session_check
register: with_check
- name: verify session with check registration
assert:
that:
- with_check.changed
- with_check.session_id | length == 36
- with_check.name == 'session_with_check'
- with_check.checks == ['session_check']
- name: register a session with lock_delay
consul_session:
name: session_with_delay
delay: 20
register: with_delay
- name: verify registration of session with delay
assert:
that:
- with_delay.changed
- with_delay.session_id | length == 36
- with_delay.name == 'session_with_delay'
- with_delay.delay == 20
- name: retrieve session by id
consul_session: id='{{with_delay.session_id}}' state=info
register: retrieved_by_id
- name: verify retrieval by id
assert:
that:
- with_delay.session_id == retrieved_by_id.sessions[1].ID
- name: list all sessions
consul_session: state=list
register: retrieved_by_list
- name: verify retrieval by list
assert:
that:
- 3 <= retrieved_by_list.sessions[0]
- name: remove sessions
consul_session: id={{item}} state=absent
with_items:
- basic_result.session_id
- with_check.session_id
- with_delay.session_id
- name: remove check
consul:
check_name: session_check
state: absent
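
And the session endpoints the consul_session module builds on, again sketched
with python-consul; mapping the module's delay option onto the library's
lock_delay keyword is an assumption:

    import consul

    client = consul.Consul()

    # Create a session with a lock delay, then inspect, list and destroy it.
    session_id = client.session.create(name='session_with_delay', lock_delay=20)
    index, session = client.session.info(session_id)
    index, sessions = client.session.list()
    client.session.destroy(session_id)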