Initial commit
commit aebc1b03fd
4861 changed files with 812621 additions and 0 deletions
0
plugins/inventory/__init__.py
Normal file
211
plugins/inventory/cloudscale.py
Normal file
@@ -0,0 +1,211 @@
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright: (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
name: cloudscale
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Gaudenz Steinlin (@gaudenz)
|
||||
short_description: cloudscale.ch inventory source
|
||||
description:
|
||||
- Get inventory hosts from cloudscale.ch API
|
||||
- Uses a YAML configuration file ending with either I(cloudscale.yml) or I(cloudscale.yaml) to set parameter values (also see examples).
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
options:
|
||||
plugin:
|
||||
description: |
|
||||
Token that ensures this is a source file for the 'cloudscale'
|
||||
plugin.
|
||||
required: True
|
||||
choices: ['cloudscale']
|
||||
inventory_hostname:
|
||||
description: |
|
||||
What to register as the inventory hostname.
|
||||
If set to 'uuid' the uuid of the server will be used and a
|
||||
group will be created for the server name.
|
||||
If set to 'name' the name of the server will be used unless
|
||||
there are more than one server with the same name in which
|
||||
case the 'uuid' logic will be used.
|
||||
type: str
|
||||
choices:
|
||||
- name
|
||||
- uuid
|
||||
default: "name"
|
||||
ansible_host:
|
||||
description: |
|
||||
Which IP address to register as the ansible_host. If the
|
||||
requested value does not exist or this is set to 'none', no
|
||||
ansible_host will be set.
|
||||
type: str
|
||||
choices:
|
||||
- public_v4
|
||||
- public_v6
|
||||
- private
|
||||
- none
|
||||
default: public_v4
|
||||
api_token:
|
||||
description: cloudscale.ch API token
|
||||
env:
|
||||
- name: CLOUDSCALE_API_TOKEN
|
||||
type: str
|
||||
api_timeout:
|
||||
description: Timeout in seconds for calls to the cloudscale.ch API.
|
||||
default: 30
|
||||
type: int
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# cloudscale.yml name ending file in YAML format
|
||||
# Example command line: ansible-inventory --list -i inventory_cloudscale.yml
|
||||
|
||||
plugin: cloudscale
|
||||
|
||||
# Example grouping by tag key "project"
|
||||
plugin: cloudscale
|
||||
keyed_groups:
|
||||
- prefix: project
|
||||
key: cloudscale.tags.project
|
||||
|
||||
# Example grouping by key "operating_system" lowercased and prefixed with "os"
|
||||
plugin: cloudscale
|
||||
keyed_groups:
|
||||
- prefix: os
|
||||
key: cloudscale.image.operating_system | lower
|
||||
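# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): use private network addresses and register
# servers by UUID
plugin: cloudscale
inventory_hostname: uuid
ansible_host: private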
'''
|
||||
|
||||
from collections import defaultdict
|
||||
from json import loads
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible_collections.community.general.plugins.module_utils.cloudscale import API_URL
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.inventory.group import to_safe_group_name
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
|
||||
iface_type_map = {
|
||||
'public_v4': ('public', 4),
|
||||
'public_v6': ('public', 6),
|
||||
'private': ('private', 4),
|
||||
'none': (None, None),
|
||||
}
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
|
||||
NAME = 'community.general.cloudscale'
|
||||
|
||||
def _get_server_list(self):
|
||||
# Get list of servers from cloudscale.ch API
|
||||
response = open_url(
|
||||
API_URL + '/servers',
|
||||
headers={'Authorization': 'Bearer %s' % self._token}
|
||||
)
|
||||
return loads(response.read())
|
||||
|
||||
def verify_file(self, path):
|
||||
'''
|
||||
:param path: the path to the inventory config file
|
||||
:return the contents of the config file
|
||||
'''
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('cloudscale.yml', 'cloudscale.yaml')):
|
||||
return True
|
||||
self.display.debug(
|
||||
"cloudscale inventory filename must end with 'cloudscale.yml' or 'cloudscale.yaml'"
|
||||
)
|
||||
return False
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
|
||||
self._read_config_data(path)
|
||||
|
||||
self._token = self.get_option('api_token')
|
||||
if not self._token:
|
||||
raise AnsibleError('Could not find an API token. Set the '
|
||||
'CLOUDSCALE_API_TOKEN environment variable.')
|
||||
|
||||
inventory_hostname = self.get_option('inventory_hostname')
|
||||
if inventory_hostname not in ('name', 'uuid'):
|
||||
raise AnsibleError('Invalid value for option inventory_hostname: %s'
|
||||
% inventory_hostname)
|
||||
|
||||
ansible_host = self.get_option('ansible_host')
|
||||
if ansible_host not in iface_type_map:
|
||||
raise AnsibleError('Invalid value for option ansible_host: %s'
|
||||
% ansible_host)
|
||||
|
||||
# Merge servers with the same name
|
||||
firstpass = defaultdict(list)
|
||||
for server in self._get_server_list():
|
||||
firstpass[server['name']].append(server)
|
||||
|
||||
# Add servers to inventory
|
||||
for name, servers in firstpass.items():
|
||||
if len(servers) == 1 and inventory_hostname == 'name':
|
||||
self.inventory.add_host(name)
|
||||
servers[0]['inventory_hostname'] = name
|
||||
else:
|
||||
# Two servers with the same name exist, create a group
|
||||
# with this name and add the servers by UUID
|
||||
group_name = to_safe_group_name(name)
|
||||
if group_name not in self.inventory.groups:
|
||||
self.inventory.add_group(group_name)
|
||||
for server in servers:
|
||||
self.inventory.add_host(server['uuid'], group_name)
|
||||
server['inventory_hostname'] = server['uuid']
|
||||
|
||||
# Set variables
|
||||
iface_type, iface_version = iface_type_map[ansible_host]
|
||||
for server in servers:
|
||||
hostname = server.pop('inventory_hostname')
|
||||
if ansible_host != 'none':
|
||||
addresses = [address['address']
|
||||
for interface in server['interfaces']
|
||||
for address in interface['addresses']
|
||||
if interface['type'] == iface_type
|
||||
and address['version'] == iface_version]
|
||||
|
||||
if len(addresses) > 0:
|
||||
self.inventory.set_variable(
|
||||
hostname,
|
||||
'ansible_host',
|
||||
addresses[0],
|
||||
)
|
||||
self.inventory.set_variable(
|
||||
hostname,
|
||||
'cloudscale',
|
||||
server,
|
||||
)
|
||||
|
||||
variables = self.inventory.hosts[hostname].get_vars()
|
||||
# Set composed variables
|
||||
self._set_composite_vars(
|
||||
self.get_option('compose'),
|
||||
variables,
|
||||
hostname,
|
||||
self.get_option('strict'),
|
||||
)
|
||||
|
||||
# Add host to composed groups
|
||||
self._add_host_to_composed_groups(
|
||||
self.get_option('groups'),
|
||||
variables,
|
||||
hostname,
|
||||
self.get_option('strict'),
|
||||
)
|
||||
|
||||
# Add host to keyed groups
|
||||
self._add_host_to_keyed_groups(
|
||||
self.get_option('keyed_groups'),
|
||||
variables,
|
||||
hostname,
|
||||
self.get_option('strict'),
|
||||
)
|
256
plugins/inventory/docker_machine.py
Normal file
@@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_machine
|
||||
plugin_type: inventory
|
||||
author: Ximon Eighteen (@ximon18)
|
||||
short_description: Docker Machine inventory source
|
||||
requirements:
|
||||
- L(Docker Machine,https://docs.docker.com/machine/)
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Get inventory hosts from Docker Machine.
|
||||
- Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
|
||||
- The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
|
||||
- The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
|
||||
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the C(docker_machine) plugin.
|
||||
required: yes
|
||||
choices: ['docker_machine']
|
||||
daemon_env:
|
||||
description:
|
||||
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
|
||||
- With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
|
||||
A warning will be issued for any skipped host if the choice is C(require).
|
||||
- With C(optional) and C(optional-silently), fetch them but do not skip hosts for which they cannot be fetched.
|
||||
A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
|
||||
- With C(skip), do not attempt to fetch the docker daemon connection environment variables.
|
||||
- If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
|
||||
type: str
|
||||
choices:
|
||||
- require
|
||||
- require-silently
|
||||
- optional
|
||||
- optional-silently
|
||||
- skip
|
||||
default: require
|
||||
running_required:
|
||||
description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
|
||||
type: bool
|
||||
default: yes
|
||||
verbose_output:
|
||||
description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
|
||||
type: bool
|
||||
default: yes
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example
|
||||
plugin: docker_machine
|
||||
|
||||
# Example using constructed features to create a group per Docker Machine driver
|
||||
# (https://docs.docker.com/machine/drivers/), e.g.:
|
||||
# $ docker-machine create --driver digitalocean ... mymachine
|
||||
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
|
||||
# {
|
||||
# ...
|
||||
# "digitalocean": {
|
||||
# "hosts": [
|
||||
# "mymachine"
|
||||
# ]
|
||||
# ...
|
||||
# }
|
||||
strict: no
|
||||
keyed_groups:
|
||||
- separator: ''
|
||||
key: docker_machine_node_attributes.DriverName
|
||||
|
||||
# Example grouping hosts by Docker Machine tag
|
||||
strict: no
|
||||
keyed_groups:
|
||||
- prefix: tag
|
||||
key: 'dm_tags'
|
||||
|
||||
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
|
||||
compose:
|
||||
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
|
||||
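# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): include stopped machines and skip fetching the
# Docker daemon connection variables
plugin: docker_machine
daemon_env: skip
running_required: no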
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.utils.display import Display
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using Docker machine as source. '''
|
||||
|
||||
NAME = 'community.general.docker_machine'
|
||||
|
||||
DOCKER_MACHINE_PATH = None
|
||||
|
||||
def _run_command(self, args):
|
||||
if not self.DOCKER_MACHINE_PATH:
|
||||
try:
|
||||
self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
|
||||
except ValueError as e:
|
||||
raise AnsibleError(to_native(e))
|
||||
|
||||
command = [self.DOCKER_MACHINE_PATH]
|
||||
command.extend(args)
|
||||
display.debug('Executing command {0}'.format(command))
|
||||
try:
|
||||
result = subprocess.check_output(command)
|
||||
except subprocess.CalledProcessError as e:
|
||||
display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
|
||||
raise e
|
||||
|
||||
return to_text(result).strip()
|
||||
|
||||
def _get_docker_daemon_variables(self, machine_name):
|
||||
'''
|
||||
Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
|
||||
the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
|
||||
'''
|
||||
try:
|
||||
env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
|
||||
except subprocess.CalledProcessError:
|
||||
# This can happen when the machine is created but provisioning is incomplete
|
||||
return []
|
||||
|
||||
# example output of docker-machine env --shell=sh:
|
||||
# export DOCKER_TLS_VERIFY="1"
|
||||
# export DOCKER_HOST="tcp://134.209.204.160:2376"
|
||||
# export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
|
||||
# export DOCKER_MACHINE_NAME="routinator"
|
||||
# # Run this command to configure your shell:
|
||||
# # eval $(docker-machine env --shell=bash routinator)
|
||||
|
||||
# capture any of the DOCKER_xxx variables that were output and create Ansible host vars
|
||||
# with the same name and value but with a dm_ name prefix.
|
||||
vars = []
|
||||
for line in env_lines:
|
||||
match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
|
||||
if match:
|
||||
env_var_name = match.group(1)
|
||||
env_var_value = match.group(2)
|
||||
vars.append((env_var_name, env_var_value))
|
||||
|
||||
return vars
|
||||
|
||||
def _get_machine_names(self):
|
||||
# Filter out machines that are not in the Running state, as we probably can't perform any useful actions
|
||||
# with them.
|
||||
ls_command = ['ls', '-q']
|
||||
if self.get_option('running_required'):
|
||||
ls_command.extend(['--filter', 'state=Running'])
|
||||
|
||||
try:
|
||||
ls_lines = self._run_command(ls_command)
|
||||
except subprocess.CalledProcessError:
|
||||
return []
|
||||
|
||||
return ls_lines.splitlines()
|
||||
|
||||
def _inspect_docker_machine_host(self, node):
|
||||
try:
|
||||
inspect_lines = self._run_command(['inspect', node])
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
|
||||
return json.loads(inspect_lines)
|
||||
|
||||
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
|
||||
if not env_var_tuples:
|
||||
warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
|
||||
if daemon_env in ('require', 'require-silently'):
|
||||
if daemon_env == 'require':
|
||||
display.warning('{0}: host will be skipped'.format(warning_prefix))
|
||||
return True
|
||||
else: # 'optional', 'optional-silently'
|
||||
if daemon_env == 'optional':
|
||||
display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
|
||||
return False
|
||||
|
||||
def _populate(self):
|
||||
daemon_env = self.get_option('daemon_env')
|
||||
try:
|
||||
for self.node in self._get_machine_names():
|
||||
self.node_attrs = self._inspect_docker_machine_host(self.node)
|
||||
if not self.node_attrs:
|
||||
continue
|
||||
|
||||
machine_name = self.node_attrs['Driver']['MachineName']
|
||||
|
||||
# query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
|
||||
# that could be used to set environment variables to influence a local Docker client:
|
||||
if daemon_env == 'skip':
|
||||
env_var_tuples = []
|
||||
else:
|
||||
env_var_tuples = self._get_docker_daemon_variables(machine_name)
|
||||
if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
|
||||
continue
|
||||
|
||||
# add an entry in the inventory for this host
|
||||
self.inventory.add_host(machine_name)
|
||||
|
||||
# set standard Ansible remote host connection settings to details captured from `docker-machine`
|
||||
# see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
|
||||
self.inventory.set_variable(machine_name, 'ansible_host', self.node_attrs['Driver']['IPAddress'])
|
||||
self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
|
||||
self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
|
||||
self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
|
||||
|
||||
# set variables based on Docker Machine tags
|
||||
tags = self.node_attrs['Driver'].get('Tags') or ''
|
||||
self.inventory.set_variable(machine_name, 'dm_tags', tags)
|
||||
|
||||
# set variables based on Docker Machine env variables
|
||||
for kv in env_var_tuples:
|
||||
self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
|
||||
|
||||
if self.get_option('verbose_output'):
|
||||
self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
|
||||
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option('strict')
|
||||
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
|
||||
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
|
||||
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
|
||||
|
||||
except Exception as e:
|
||||
raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
|
||||
to_native(e), orig_exc=e)
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Return the possibility of a file being consumable by this plugin."""
|
||||
return (
|
||||
super(InventoryModule, self).verify_file(path) and
|
||||
path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
super(InventoryModule, self).parse(inventory, loader, path, cache)
|
||||
self._read_config_data(path)
|
||||
self._populate()
|
255
plugins/inventory/docker_swarm.py
Normal file
@@ -0,0 +1,255 @@
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: docker_swarm
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Reads inventories from the Docker swarm API.
|
||||
- Uses a YAML configuration file docker_swarm.[yml|yaml].
|
||||
- "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
|
||||
I(managers) - all manager nodes; I(leader) - the swarm leader node;
|
||||
I(nonleaders) - all nodes except the swarm leader."
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin. It should always be set to C(docker_swarm) for this plugin to
recognize it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices: [ docker_swarm ]
|
||||
docker_host:
|
||||
description:
|
||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||
- "Use C(unix://var/run/docker.sock) to connect via local socket."
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ docker_url ]
|
||||
verbose_output:
|
||||
description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
|
||||
C(EngineVersion))
|
||||
type: bool
|
||||
default: yes
|
||||
tls:
|
||||
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
||||
type: bool
|
||||
default: no
|
||||
validate_certs:
|
||||
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
|
||||
host server.
|
||||
type: bool
|
||||
default: no
|
||||
aliases: [ tls_verify ]
|
||||
client_key:
|
||||
description: Path to the client's TLS key file.
|
||||
type: path
|
||||
aliases: [ tls_client_key, key_path ]
|
||||
ca_cert:
|
||||
description: Use a CA certificate when performing server verification by providing the path to a CA
|
||||
certificate file.
|
||||
type: path
|
||||
aliases: [ tls_ca_cert, cacert_path ]
|
||||
client_cert:
|
||||
description: Path to the client's TLS certificate file.
|
||||
type: path
|
||||
aliases: [ tls_client_cert, cert_path ]
|
||||
tls_hostname:
|
||||
description: When verifying the authenticity of the Docker host server, provide the expected name of
|
||||
the server.
|
||||
type: str
|
||||
ssl_version:
|
||||
description: Provide a valid SSL version number. Default value determined by ssl.py module.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- The version of the Docker API running on the Docker Host.
|
||||
- Defaults to the latest version of the API supported by docker-py.
|
||||
type: str
|
||||
aliases: [ docker_api_version ]
|
||||
timeout:
|
||||
description:
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
|
||||
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
|
||||
will be used instead. If the environment variable is not set, the default value will be used.
|
||||
type: int
|
||||
default: 60
|
||||
aliases: [ time_out ]
|
||||
include_host_uri:
|
||||
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
|
||||
swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
|
||||
modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
|
||||
The port always defaults to C(2376).
|
||||
type: bool
|
||||
default: no
|
||||
include_host_uri_port:
|
||||
description: Override the detected port number included in I(ansible_host_uri)
|
||||
type: int
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Minimal example using local docker
|
||||
plugin: docker_swarm
|
||||
docker_host: unix://var/run/docker.sock
|
||||
|
||||
# Minimal example using remote docker
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: yes
|
||||
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: yes
|
||||
ca_cert: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: False
|
||||
keyed_groups:
|
||||
# add e.g. x86_64 hosts to an arch_x86_64 group
|
||||
- prefix: arch
|
||||
key: 'Description.Platform.Architecture'
|
||||
# add e.g. linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'Description.Platform.OS'
|
||||
# create a group per node label
|
||||
# e.g. a node labeled w/ "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'Spec.Labels'
|
||||
prefix: label
|
||||
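# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): also return ansible_host_uri for each node,
# overriding the detected API port
plugin: docker_swarm
docker_host: tcp://my-docker-host:2375
include_host_uri: yes
include_host_uri_port: 2376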
'''
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.parsing.utils.addresses import parse_address
|
||||
|
||||
try:
|
||||
import docker
|
||||
HAS_DOCKER = True
|
||||
except ImportError:
|
||||
HAS_DOCKER = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
''' Host inventory parser for ansible using Docker swarm as source. '''
|
||||
|
||||
NAME = 'community.general.docker_swarm'
|
||||
|
||||
def _fail(self, msg):
|
||||
raise AnsibleError(msg)
|
||||
|
||||
def _populate(self):
|
||||
raw_params = dict(
|
||||
docker_host=self.get_option('docker_host'),
|
||||
tls=self.get_option('tls'),
|
||||
tls_verify=self.get_option('validate_certs'),
|
||||
key_path=self.get_option('client_key'),
|
||||
cacert_path=self.get_option('ca_cert'),
|
||||
cert_path=self.get_option('client_cert'),
|
||||
tls_hostname=self.get_option('tls_hostname'),
|
||||
api_version=self.get_option('api_version'),
|
||||
timeout=self.get_option('timeout'),
|
||||
ssl_version=self.get_option('ssl_version'),
|
||||
debug=None,
|
||||
)
|
||||
update_tls_hostname(raw_params)
|
||||
connect_params = get_connect_params(raw_params, fail_function=self._fail)
|
||||
self.client = docker.DockerClient(**connect_params)
|
||||
self.inventory.add_group('all')
|
||||
self.inventory.add_group('manager')
|
||||
self.inventory.add_group('worker')
|
||||
self.inventory.add_group('leader')
|
||||
self.inventory.add_group('nonleaders')
|
||||
|
||||
if self.get_option('include_host_uri'):
|
||||
if self.get_option('include_host_uri_port'):
|
||||
host_uri_port = str(self.get_option('include_host_uri_port'))
|
||||
elif self.get_option('tls') or self.get_option('validate_certs'):
|
||||
host_uri_port = '2376'
|
||||
else:
|
||||
host_uri_port = '2375'
|
||||
|
||||
try:
|
||||
self.nodes = self.client.nodes.list()
|
||||
for self.node in self.nodes:
|
||||
self.node_attrs = self.client.nodes.get(self.node.id).attrs
|
||||
self.inventory.add_host(self.node_attrs['ID'])
|
||||
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
|
||||
self.node_attrs['Status']['Addr'])
|
||||
if self.get_option('include_host_uri'):
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
|
||||
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
|
||||
if self.get_option('verbose_output'):
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
|
||||
if 'ManagerStatus' in self.node_attrs:
|
||||
if self.node_attrs['ManagerStatus'].get('Leader'):
|
||||
# This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
|
||||
# Check moby/moby#35437 for details
|
||||
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
|
||||
self.node_attrs['Status']['Addr']
|
||||
if self.get_option('include_host_uri'):
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
|
||||
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
|
||||
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
|
||||
self.inventory.add_host(self.node_attrs['ID'], group='leader')
|
||||
else:
|
||||
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
|
||||
else:
|
||||
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option('strict')
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('compose'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
|
||||
self.node_attrs,
|
||||
self.node_attrs['ID'],
|
||||
strict=strict)
|
||||
except Exception as e:
|
||||
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
|
||||
to_native(e))
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Return the possibly of a file being consumable by this plugin."""
|
||||
return (
|
||||
super(InventoryModule, self).verify_file(path) and
|
||||
path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_DOCKER:
|
||||
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
|
||||
'https://github.com/docker/docker-py.')
|
||||
super(InventoryModule, self).parse(inventory, loader, path, cache)
|
||||
self._read_config_data(path)
|
||||
self._populate()
|
130
plugins/inventory/gitlab_runners.py
Normal file
@@ -0,0 +1,130 @@
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: gitlab_runners
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Stefan Heitmüller (stefan.heitmueller@gmx.com)
|
||||
short_description: Ansible dynamic inventory plugin for GitLab runners.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- python-gitlab > 1.8.0
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Reads inventories from the GitLab API.
|
||||
- Uses a YAML configuration file gitlab_runners.[yml|yaml].
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin. It should always be set to 'gitlab_runners' for this plugin to recognize it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices:
|
||||
- gitlab_runners
|
||||
server_url:
|
||||
description: The URL of the GitLab server, with protocol (i.e. http or https).
|
||||
type: str
|
||||
required: true
|
||||
default: https://gitlab.com
|
||||
api_token:
|
||||
description: GitLab token for logging in.
|
||||
type: str
|
||||
aliases:
|
||||
- private_token
|
||||
- access_token
|
||||
filter:
|
||||
description: Filter runners from the GitLab API.
|
||||
type: str
|
||||
choices: ['active', 'paused', 'online', 'specific', 'shared']
|
||||
verbose_output:
|
||||
description: Toggle to (not) include all available nodes metadata
|
||||
type: bool
|
||||
default: yes
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# gitlab_runners.yml
|
||||
plugin: gitlab_runners
|
||||
server_url: https://gitlab.com
|
||||
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: gitlab_runners
|
||||
server_url: https://gitlab.com
|
||||
strict: False
|
||||
keyed_groups:
|
||||
# add e.g. amd64 hosts to an arch_amd64 group
|
||||
- prefix: arch
|
||||
key: 'architecture'
|
||||
# add e.g. linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'platform'
|
||||
# create a group per runner tag
|
||||
# e.g. a runner tagged w/ "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'tag_list'
|
||||
prefix: tag
|
||||
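# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): authenticate with a token and only return
# runners reported as online
plugin: gitlab_runners
server_url: https://gitlab.com
api_token: some_api_token  # hypothetical placeholder value
filter: online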
'''
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleParserError
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
|
||||
try:
|
||||
import gitlab
|
||||
HAS_GITLAB = True
|
||||
except ImportError:
|
||||
HAS_GITLAB = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
''' Host inventory parser for ansible using GitLab API as source. '''
|
||||
|
||||
NAME = 'community.general.gitlab_runners'
|
||||
|
||||
def _populate(self):
|
||||
gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token'))
|
||||
self.inventory.add_group('gitlab_runners')
|
||||
try:
|
||||
if self.get_option('filter'):
|
||||
runners = gl.runners.all(scope=self.get_option('filter'))
|
||||
else:
|
||||
runners = gl.runners.all()
|
||||
for runner in runners:
|
||||
host = str(runner['id'])
|
||||
ip_address = runner['ip_address']
|
||||
host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
|
||||
self.inventory.add_host(host, group='gitlab_runners')
|
||||
self.inventory.set_variable(host, 'ansible_host', ip_address)
|
||||
if self.get_option('verbose_output'):
|
||||
self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
|
||||
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option('strict')
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict)
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict)
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
|
||||
except Exception as e:
|
||||
raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Return the possibly of a file being consumable by this plugin."""
|
||||
return (
|
||||
super(InventoryModule, self).verify_file(path) and
|
||||
path.endswith((self.NAME + ".yaml", self.NAME + ".yml")))
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
if not HAS_GITLAB:
|
||||
raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/')
|
||||
super(InventoryModule, self).parse(inventory, loader, path, cache)
|
||||
self._read_config_data(path)
|
||||
self._populate()
|
255
plugins/inventory/kubevirt.py
Normal file
@@ -0,0 +1,255 @@
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: kubevirt
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- KubeVirt Team (@kubevirt)
|
||||
|
||||
short_description: KubeVirt inventory source
|
||||
extends_documentation_fragment:
|
||||
- inventory_cache
|
||||
- constructed
|
||||
description:
|
||||
- Fetch running VirtualMachines for one or more namespaces.
|
||||
- Groups by namespace, namespace_vms and labels.
|
||||
- Uses a kubevirt.(yml|yaml) YAML configuration file to set parameter values.
|
||||
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'kubevirt' plugin.
|
||||
required: True
|
||||
choices: ['kubevirt']
|
||||
type: str
|
||||
host_format:
|
||||
description:
|
||||
- Specify the format of the host in the inventory group.
|
||||
default: "{namespace}-{name}-{uid}"
|
||||
connections:
|
||||
type: list
|
||||
description:
|
||||
- Optional list of cluster connection settings. If no connections are provided, the default
|
||||
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
|
||||
the active user is authorized to access.
|
||||
suboptions:
|
||||
name:
|
||||
description:
|
||||
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
|
||||
and port.
|
||||
type: str
|
||||
kubeconfig:
|
||||
description:
|
||||
- Path to an existing Kubernetes config file. If not provided, and no other connection
|
||||
options are provided, the OpenShift client will attempt to load the default
|
||||
configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
|
||||
environment variable.
|
||||
type: str
|
||||
context:
|
||||
description:
|
||||
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
|
||||
variable.
|
||||
type: str
|
||||
host:
|
||||
description:
|
||||
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
|
||||
type: str
|
||||
api_key:
|
||||
description:
|
||||
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
|
||||
variable.
|
||||
type: str
|
||||
username:
|
||||
description:
|
||||
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
|
||||
environment variable.
|
||||
type: str
|
||||
password:
|
||||
description:
|
||||
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
|
||||
environment variable.
|
||||
type: str
|
||||
cert_file:
|
||||
description:
|
||||
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
|
||||
environment variable.
|
||||
type: str
|
||||
key_file:
|
||||
description:
|
||||
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
|
||||
environment variable.
|
||||
type: str
|
||||
ssl_ca_cert:
|
||||
description:
|
||||
- Path to a CA certificate used to authenticate with the API. Can also be specified via
|
||||
K8S_AUTH_SSL_CA_CERT environment variable.
|
||||
type: str
|
||||
verify_ssl:
|
||||
description:
|
||||
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
|
||||
K8S_AUTH_VERIFY_SSL environment variable."
|
||||
type: bool
|
||||
namespaces:
|
||||
description:
|
||||
- List of namespaces. If not specified, will fetch all virtual machines for all namespaces user is authorized
|
||||
to access.
|
||||
type: list
|
||||
network_name:
|
||||
description:
|
||||
- In case of multiple networks attached to the virtual machine, define which interface should be returned as the primary IP
|
||||
address.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- "Specify the KubeVirt API version."
|
||||
type: str
|
||||
annotation_variable:
|
||||
description:
|
||||
- "Specify the name of the annotation which provides data, which should be used as inventory host variables."
|
||||
- "Note, that the value in ansible annotations should be json."
|
||||
type: str
|
||||
default: 'ansible'
|
||||
requirements:
|
||||
- "openshift >= 0.6"
|
||||
- "PyYAML >= 3.11"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# File must be named kubevirt.yaml or kubevirt.yml
|
||||
|
||||
# Authenticate with token, and return all virtual machines for all namespaces
|
||||
plugin: kubevirt
|
||||
connections:
|
||||
- host: https://kubevirt.io
|
||||
token: xxxxxxxxxxxxxxxx
|
||||
verify_ssl: false
|
||||
|
||||
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
|
||||
# connected to network myovsnetwork and from namespace vms
|
||||
plugin: kubevirt
|
||||
connections:
|
||||
- namespaces:
|
||||
- vms
|
||||
network_name: myovsnetwork
|
||||
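# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): use a custom host_format and restrict the
# query to a single namespace
plugin: kubevirt
host_format: "{namespace}-{name}"
connections:
  - namespaces:
      - default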
'''
|
||||
|
||||
import json
|
||||
|
||||
from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
|
||||
|
||||
try:
|
||||
from openshift.dynamic.exceptions import DynamicApiError
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
API_VERSION = 'kubevirt.io/v1alpha3'
|
||||
|
||||
|
||||
class InventoryModule(K8sInventoryModule):
|
||||
NAME = 'community.general.kubevirt'
|
||||
|
||||
def setup(self, config_data, cache, cache_key):
|
||||
self.config_data = config_data
|
||||
super(InventoryModule, self).setup(config_data, cache, cache_key)
|
||||
|
||||
def fetch_objects(self, connections):
|
||||
client = self.get_api_client()
|
||||
vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
|
||||
|
||||
if connections:
|
||||
for connection in connections:
|
||||
client = self.get_api_client(**connection)
|
||||
name = connection.get('name', self.get_default_host_name(client.configuration.host))
|
||||
if connection.get('namespaces'):
|
||||
namespaces = connection['namespaces']
|
||||
else:
|
||||
namespaces = self.get_available_namespaces(client)
|
||||
interface_name = connection.get('network_name')
|
||||
api_version = connection.get('api_version', API_VERSION)
|
||||
annotation_variable = connection.get('annotation_variable', 'ansible')
|
||||
for namespace in namespaces:
|
||||
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
|
||||
else:
|
||||
name = self.get_default_host_name(client.configuration.host)
|
||||
namespaces = self.get_available_namespaces(client)
|
||||
for namespace in namespaces:
|
||||
self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')
|
||||
|
||||
def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
|
||||
v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
|
||||
try:
|
||||
obj = v1_vm.get(namespace=namespace)
|
||||
except DynamicApiError as exc:
|
||||
self.display.debug(exc)
|
||||
raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
|
||||
|
||||
namespace_group = 'namespace_{0}'.format(namespace)
|
||||
namespace_vms_group = '{0}_vms'.format(namespace_group)
|
||||
|
||||
name = self._sanitize_group_name(name)
|
||||
namespace_group = self._sanitize_group_name(namespace_group)
|
||||
namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
|
||||
self.inventory.add_group(name)
|
||||
self.inventory.add_group(namespace_group)
|
||||
self.inventory.add_child(name, namespace_group)
|
||||
self.inventory.add_group(namespace_vms_group)
|
||||
self.inventory.add_child(namespace_group, namespace_vms_group)
|
||||
for vm in obj.items:
|
||||
if not (vm.status and vm.status.interfaces):
|
||||
continue
|
||||
|
||||
# Find interface by its name:
|
||||
if interface_name is None:
|
||||
interface = vm.status.interfaces[0]
|
||||
else:
|
||||
interface = next(
|
||||
(i for i in vm.status.interfaces if i.name == interface_name),
|
||||
None
|
||||
)
|
||||
|
||||
# If interface is not found or IP address is not reported skip this VM:
|
||||
if interface is None or interface.ipAddress is None:
|
||||
continue
|
||||
|
||||
vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
|
||||
vm_ip = interface.ipAddress
|
||||
vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
|
||||
|
||||
self.inventory.add_host(vm_name)
|
||||
|
||||
if vm.metadata.labels:
|
||||
# create a group for each label_value
|
||||
for key, value in vm.metadata.labels:
|
||||
group_name = 'label_{0}_{1}'.format(key, value)
|
||||
group_name = self._sanitize_group_name(group_name)
|
||||
self.inventory.add_group(group_name)
|
||||
self.inventory.add_child(group_name, vm_name)
|
||||
vm_labels = dict(vm.metadata.labels)
|
||||
else:
|
||||
vm_labels = {}
|
||||
|
||||
self.inventory.add_child(namespace_vms_group, vm_name)
|
||||
|
||||
# add hostvars
|
||||
self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
|
||||
self.inventory.set_variable(vm_name, 'labels', vm_labels)
|
||||
self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
|
||||
self.inventory.set_variable(vm_name, 'object_type', 'vm')
|
||||
self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
|
||||
self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
|
||||
|
||||
# Add all variables which are listed in 'ansible' annotation:
|
||||
annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
|
||||
for k, v in annotations_data.items():
|
||||
self.inventory.set_variable(vm_name, k, v)
|
||||
|
||||
def verify_file(self, path):
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
|
||||
return True
|
||||
return False
|
207
plugins/inventory/linode.py
Normal file
@@ -0,0 +1,207 @@
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
name: linode
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Luke Murphy (@decentral1se)
|
||||
short_description: Ansible dynamic inventory plugin for Linode.
|
||||
requirements:
|
||||
- python >= 2.7
|
||||
- linode_api4 >= 2.0.0
|
||||
description:
|
||||
- Reads inventories from the Linode API v4.
|
||||
- Uses a YAML configuration file that ends with linode.(yml|yaml).
|
||||
- Linode labels are used by default as the hostnames.
|
||||
- The inventory groups are built from groups and not tags.
|
||||
options:
|
||||
plugin:
|
||||
description: marks this as an instance of the 'linode' plugin
|
||||
required: true
|
||||
choices: ['linode']
|
||||
access_token:
|
||||
description: The Linode account personal access token.
|
||||
required: true
|
||||
env:
|
||||
- name: LINODE_ACCESS_TOKEN
|
||||
regions:
|
||||
description: Populate inventory with instances in this region.
|
||||
default: []
|
||||
type: list
|
||||
required: false
|
||||
types:
|
||||
description: Populate inventory with instances with this type.
|
||||
default: []
|
||||
type: list
|
||||
required: false
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
|
||||
plugin: linode
|
||||
|
||||
# Example with regions, types, groups and access token
|
||||
plugin: linode
|
||||
access_token: foobar
|
||||
regions:
|
||||
- eu-west
|
||||
types:
|
||||
- g5-standard-2
|
||||
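# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): rely on the LINODE_ACCESS_TOKEN environment
# variable and filter on several regions ("us-east" is a placeholder region)
plugin: linode
regions:
  - eu-west
  - us-east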
'''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.errors import AnsibleError, AnsibleParserError
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||
|
||||
|
||||
try:
|
||||
from linode_api4 import LinodeClient
|
||||
from linode_api4.errors import ApiError as LinodeApiError
|
||||
except ImportError:
|
||||
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin):
|
||||
|
||||
NAME = 'community.general.linode'
|
||||
|
||||
def _build_client(self):
|
||||
"""Build the Linode client."""
|
||||
|
||||
access_token = self.get_option('access_token')
|
||||
|
||||
if access_token is None:
|
||||
try:
|
||||
access_token = os.environ['LINODE_ACCESS_TOKEN']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if access_token is None:
|
||||
raise AnsibleError((
|
||||
'Could not retrieve Linode access token '
|
||||
'from plugin configuration or environment'
|
||||
))
|
||||
|
||||
self.client = LinodeClient(access_token)
|
||||
|
||||
def _get_instances_inventory(self):
|
||||
"""Retrieve Linode instance information from cloud inventory."""
|
||||
try:
|
||||
self.instances = self.client.linode.instances()
|
||||
except LinodeApiError as exception:
|
||||
raise AnsibleError('Linode client raised: %s' % exception)
|
||||
|
||||
def _add_groups(self):
|
||||
"""Add Linode instance groups to the dynamic inventory."""
|
||||
self.linode_groups = set(
|
||||
filter(None, [
|
||||
instance.group
|
||||
for instance
|
||||
in self.instances
|
||||
])
|
||||
)
|
||||
|
||||
for linode_group in self.linode_groups:
|
||||
self.inventory.add_group(linode_group)
|
||||
|
||||
def _filter_by_config(self, regions, types):
|
||||
"""Filter instances by user specified configuration."""
|
||||
if regions:
|
||||
self.instances = [
|
||||
instance for instance in self.instances
|
||||
if instance.region.id in regions
|
||||
]
|
||||
|
||||
if types:
|
||||
self.instances = [
|
||||
instance for instance in self.instances
|
||||
if instance.type.id in types
|
||||
]
|
||||
|
||||
def _add_instances_to_groups(self):
|
||||
"""Add instance names to their dynamic inventory groups."""
|
||||
for instance in self.instances:
|
||||
self.inventory.add_host(instance.label, group=instance.group)
|
||||
|
||||
def _add_hostvars_for_instances(self):
|
||||
"""Add hostvars for instances in the dynamic inventory."""
|
||||
for instance in self.instances:
|
||||
hostvars = instance._raw_json
|
||||
for hostvar_key in hostvars:
|
||||
self.inventory.set_variable(
|
||||
instance.label,
|
||||
hostvar_key,
|
||||
hostvars[hostvar_key]
|
||||
)
|
||||
|
||||
def _validate_option(self, name, desired_type, option_value):
|
||||
"""Validate user specified configuration data against types."""
|
||||
if isinstance(option_value, string_types) and desired_type == list:
|
||||
option_value = [option_value]
|
||||
|
||||
if option_value is None:
|
||||
option_value = desired_type()
|
||||
|
||||
if not isinstance(option_value, desired_type):
|
||||
raise AnsibleParserError(
|
||||
'The option %s (%s) must be a %s' % (
|
||||
name, option_value, desired_type
|
||||
)
|
||||
)
|
||||
|
||||
return option_value
|
||||
|
||||
def _get_query_options(self, config_data):
|
||||
"""Get user specified query options from the configuration."""
|
||||
options = {
|
||||
'regions': {
|
||||
'type_to_be': list,
|
||||
'value': config_data.get('regions', [])
|
||||
},
|
||||
'types': {
|
||||
'type_to_be': list,
|
||||
'value': config_data.get('types', [])
|
||||
},
|
||||
}
|
||||
|
||||
for name in options:
|
||||
options[name]['value'] = self._validate_option(
|
||||
name,
|
||||
options[name]['type_to_be'],
|
||||
options[name]['value']
|
||||
)
|
||||
|
||||
regions = options['regions']['value']
|
||||
types = options['types']['value']
|
||||
|
||||
return regions, types
|
||||
|
||||
def verify_file(self, path):
|
||||
"""Verify the Linode configuration file."""
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
endings = ('linode.yaml', 'linode.yml')
|
||||
if any((path.endswith(ending) for ending in endings)):
|
||||
return True
|
||||
return False
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
"""Dynamically parse Linode the cloud inventory."""
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
|
||||
self._build_client()
|
||||
|
||||
self._get_instances_inventory()
|
||||
|
||||
config_data = self._read_config_data(path)
|
||||
regions, types = self._get_query_options(config_data)
|
||||
self._filter_by_config(regions, types)
|
||||
|
||||
self._add_groups()
|
||||
self._add_instances_to_groups()
|
||||
self._add_hostvars_for_instances()
|
167
plugins/inventory/nmap.py
Normal file
@@ -0,0 +1,167 @@
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: nmap
|
||||
plugin_type: inventory
|
||||
short_description: Uses nmap to find hosts to target
|
||||
description:
|
||||
- Uses a YAML configuration file with a valid YAML extension.
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
- inventory_cache
|
||||
requirements:
|
||||
- nmap CLI installed
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'nmap' plugin.
|
||||
required: True
|
||||
choices: ['nmap']
|
||||
address:
|
||||
description: Network IP or range of IPs to scan. You can use a simple range (10.2.2.15-25) or CIDR notation.
|
||||
required: True
|
||||
exclude:
|
||||
description: list of addresses to exclude
|
||||
type: list
|
||||
ports:
|
||||
description: Enable/disable scanning for open ports
|
||||
type: boolean
|
||||
default: True
|
||||
ipv4:
|
||||
description: use IPv4 type addresses
|
||||
type: boolean
|
||||
default: True
|
||||
ipv6:
|
||||
description: use IPv6 type addresses
|
||||
type: boolean
|
||||
default: True
|
||||
notes:
|
||||
- At least one of ipv4 or ipv6 must be True; both can be True, but they cannot both be False.
|
||||
- 'TODO: add OS fingerprinting'
|
||||
'''
|
||||
EXAMPLES = '''
|
||||
# inventory.config file in YAML format
|
||||
plugin: nmap
|
||||
strict: False
|
||||
address: 192.168.0.0/24
|
||||
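# Additional illustrative example (a sketch based on the options documented above,
# not part of the upstream examples): ping scan only (no port scan) and exclude the
# gateway address
plugin: nmap
address: 192.168.0.0/24
ports: no
exclude:
  - 192.168.0.1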
'''
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
|
||||
NAME = 'community.general.nmap'
|
||||
find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
|
||||
find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')
|
||||
|
||||
def __init__(self):
|
||||
self._nmap = None
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
def verify_file(self, path):
|
||||
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
file_name, ext = os.path.splitext(path)
|
||||
|
||||
if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
|
||||
valid = True
|
||||
|
||||
return valid
|
||||
|
||||
def parse(self, inventory, loader, path, cache=False):
|
||||
|
||||
try:
|
||||
self._nmap = get_bin_path('nmap')
|
||||
except ValueError as e:
|
||||
raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
|
||||
|
||||
self._read_config_data(path)
|
||||
|
||||
# setup command
|
||||
cmd = [self._nmap]
|
||||
if not self._options['ports']:
|
||||
cmd.append('-sP')
|
||||
|
||||
if self._options['ipv4'] and not self._options['ipv6']:
|
||||
cmd.append('-4')
|
||||
elif self._options['ipv6'] and not self._options['ipv4']:
|
||||
cmd.append('-6')
|
||||
elif not self._options['ipv6'] and not self._options['ipv4']:
|
||||
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
|
||||
|
||||
if self._options['exclude']:
|
||||
cmd.append('--exclude')
|
||||
cmd.append(','.join(self._options['exclude']))
|
||||
|
||||
cmd.append(self._options['address'])
|
||||
try:
|
||||
# execute
|
||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
||||
stdout, stderr = p.communicate()
|
||||
if p.returncode != 0:
|
||||
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
|
||||
|
||||
# parse results
|
||||
host = None
|
||||
ip = None
|
||||
ports = []
|
||||
|
||||
try:
|
||||
t_stdout = to_text(stdout, errors='surrogate_or_strict')
|
||||
except UnicodeError as e:
|
||||
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
|
||||
|
||||
for line in t_stdout.splitlines():
|
||||
hits = self.find_host.match(line)
|
||||
if hits:
|
||||
if host is not None:
|
||||
self.inventory.set_variable(host, 'ports', ports)
|
||||
|
||||
# if dns only shows arpa, just use ip instead as hostname
|
||||
if hits.group(1).endswith('.in-addr.arpa'):
|
||||
host = hits.group(2)
|
||||
else:
|
||||
host = hits.group(1)
|
||||
|
||||
# if no reverse dns exists, just use ip instead as hostname
|
||||
if hits.group(2) is not None:
|
||||
ip = hits.group(2)
|
||||
else:
|
||||
ip = hits.group(1)
|
||||
|
||||
if host is not None:
|
||||
# update inventory
|
||||
self.inventory.add_host(host)
|
||||
self.inventory.set_variable(host, 'ip', ip)
|
||||
ports = []
|
||||
continue
|
||||
|
||||
host_ports = self.find_port.match(line)
|
||||
if host is not None and host_ports:
|
||||
ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
|
||||
continue
|
||||
|
||||
# TODO: parse more data, OS?
|
||||
|
||||
# if any leftovers
|
||||
if host and ports:
|
||||
self.inventory.set_variable(host, 'ports', ports)
|
||||
|
||||
except Exception as e:
|
||||
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
|
262
plugins/inventory/online.py
Normal file
|
@@ -0,0 +1,262 @@
|
|||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: online
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Remy Leone (@sieben)
|
||||
short_description: Online inventory source
|
||||
description:
|
||||
- Get inventory hosts from Online
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'online' plugin.
|
||||
required: True
|
||||
choices: ['online']
|
||||
oauth_token:
|
||||
required: True
|
||||
description: Online OAuth token.
|
||||
env:
|
||||
# in order of precedence
|
||||
- name: ONLINE_TOKEN
|
||||
- name: ONLINE_API_KEY
|
||||
- name: ONLINE_OAUTH_TOKEN
|
||||
hostnames:
|
||||
description: List of preferences for what to use as a hostname.
|
||||
type: list
|
||||
default:
|
||||
- public_ipv4
|
||||
choices:
|
||||
- public_ipv4
|
||||
- private_ipv4
|
||||
- hostname
|
||||
groups:
|
||||
description: List of groups.
|
||||
type: list
|
||||
choices:
|
||||
- location
|
||||
- offer
|
||||
- rpn
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# online_inventory.yml file in YAML format
|
||||
# Example command line: ansible-inventory --list -i online_inventory.yml
|
||||
|
||||
plugin: online
|
||||
hostnames:
|
||||
- public_ipv4
|
||||
groups:
|
||||
- location
|
||||
- offer
|
||||
- rpn
|
||||
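# Editor's note (not part of the original file): the OAuth token is usually
# supplied through one of the environment variables listed above rather than
# kept in this file, e.g.
#   export ONLINE_TOKEN=<your_token>
#   ansible-inventory --list -i online_inventory.yml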
'''
|
||||
|
||||
import json
|
||||
from sys import version as python_version
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin
|
||||
from ansible.module_utils._text import to_native, to_text
|
||||
from ansible.module_utils.ansible_release import __version__ as ansible_version
|
||||
from ansible.module_utils.six.moves.urllib.parse import urljoin
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin):
|
||||
NAME = 'community.general.online'
|
||||
API_ENDPOINT = "https://api.online.net"
|
||||
|
||||
def extract_public_ipv4(self, host_infos):
|
||||
try:
|
||||
return host_infos["network"]["ip"][0]
|
||||
except (KeyError, TypeError, IndexError):
|
||||
self.display.warning("An error happened while extracting public IPv4 address. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_private_ipv4(self, host_infos):
|
||||
try:
|
||||
return host_infos["network"]["private"][0]
|
||||
except (KeyError, TypeError, IndexError):
|
||||
self.display.warning("An error happened while extracting private IPv4 address. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_os_name(self, host_infos):
|
||||
try:
|
||||
return host_infos["os"]["name"]
|
||||
except (KeyError, TypeError):
|
||||
self.display.warning("An error happened while extracting OS name. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_os_version(self, host_infos):
|
||||
try:
|
||||
return host_infos["os"]["version"]
|
||||
except (KeyError, TypeError):
|
||||
self.display.warning("An error happened while extracting OS version. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_hostname(self, host_infos):
|
||||
try:
|
||||
return host_infos["hostname"]
|
||||
except (KeyError, TypeError):
|
||||
self.display.warning("An error happened while extracting hostname. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_location(self, host_infos):
|
||||
try:
|
||||
return host_infos["location"]["datacenter"]
|
||||
except (KeyError, TypeError):
|
||||
self.display.warning("An error happened while extracting datacenter location. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_offer(self, host_infos):
|
||||
try:
|
||||
return host_infos["offer"]
|
||||
except (KeyError, TypeError):
|
||||
self.display.warning("An error happened while extracting commercial offer. Information skipped.")
|
||||
return None
|
||||
|
||||
def extract_rpn(self, host_infos):
|
||||
try:
|
||||
return self.rpn_lookup_cache[host_infos["id"]]
|
||||
except (KeyError, TypeError):
|
||||
self.display.warning("An error happened while extracting RPN information. Information skipped.")
|
||||
return None
|
||||
|
||||
def _fetch_information(self, url):
|
||||
try:
|
||||
response = open_url(url, headers=self.headers)
|
||||
except Exception as e:
|
||||
self.display.warning("An error happened while fetching: %s" % url)
|
||||
return None
|
||||
|
||||
try:
|
||||
raw_data = to_text(response.read(), errors='surrogate_or_strict')
|
||||
except UnicodeError:
|
||||
raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
|
||||
|
||||
try:
|
||||
return json.loads(raw_data)
|
||||
except ValueError:
|
||||
raise AnsibleError("Incorrect JSON payload")
|
||||
|
||||
@staticmethod
|
||||
def extract_rpn_lookup_cache(rpn_list):
|
||||
lookup = {}
|
||||
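# Editor's note (illustrative, not from the original source): rpn_list is
# expected to look roughly like
#   [{"name": "rpn-group-1", "members": [{"id": 42}, {"id": 43}]}]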
for rpn in rpn_list:
|
||||
for member in rpn["members"]:
|
||||
lookup[member["id"]] = rpn["name"]
|
||||
return lookup
|
||||
|
||||
def _fill_host_variables(self, hostname, host_infos):
|
||||
targeted_attributes = (
|
||||
"offer",
|
||||
"id",
|
||||
"hostname",
|
||||
"location",
|
||||
"boot_mode",
|
||||
"power",
|
||||
"last_reboot",
|
||||
"anti_ddos",
|
||||
"hardware_watch",
|
||||
"support"
|
||||
)
|
||||
for attribute in targeted_attributes:
|
||||
self.inventory.set_variable(hostname, attribute, host_infos[attribute])
|
||||
|
||||
if self.extract_public_ipv4(host_infos=host_infos):
|
||||
self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
|
||||
self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
|
||||
|
||||
if self.extract_private_ipv4(host_infos=host_infos):
|
||||
self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
|
||||
|
||||
if self.extract_os_name(host_infos=host_infos):
|
||||
self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
|
||||
|
||||
if self.extract_os_version(host_infos=host_infos):
|
||||
self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
|
||||
|
||||
def _filter_host(self, host_infos, hostname_preferences):
|
||||
|
||||
for pref in hostname_preferences:
|
||||
if self.extractors[pref](host_infos):
|
||||
return self.extractors[pref](host_infos)
|
||||
|
||||
return None
|
||||
|
||||
def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
|
||||
|
||||
hostname = self._filter_host(host_infos=host_infos,
|
||||
hostname_preferences=hostname_preferences)
|
||||
|
||||
# No suitable hostname was found in the attributes, so the host will not be added to the inventory
|
||||
if not hostname:
|
||||
return
|
||||
|
||||
self.inventory.add_host(host=hostname)
|
||||
self._fill_host_variables(hostname=hostname, host_infos=host_infos)
|
||||
|
||||
for g in group_preferences:
|
||||
group = self.group_extractors[g](host_infos)
|
||||
|
||||
if not group:
|
||||
return
|
||||
|
||||
self.inventory.add_group(group=group)
|
||||
self.inventory.add_host(group=group, host=hostname)
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
self._read_config_data(path=path)
|
||||
|
||||
token = self.get_option("oauth_token")
|
||||
hostname_preferences = self.get_option("hostnames")
|
||||
|
||||
group_preferences = self.get_option("groups")
|
||||
if group_preferences is None:
|
||||
group_preferences = []
|
||||
|
||||
self.extractors = {
|
||||
"public_ipv4": self.extract_public_ipv4,
|
||||
"private_ipv4": self.extract_private_ipv4,
|
||||
"hostname": self.extract_hostname,
|
||||
}
|
||||
|
||||
self.group_extractors = {
|
||||
"location": self.extract_location,
|
||||
"offer": self.extract_offer,
|
||||
"rpn": self.extract_rpn
|
||||
}
|
||||
|
||||
self.headers = {
|
||||
'Authorization': "Bearer %s" % token,
|
||||
'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]),
|
||||
'Content-type': 'application/json'
|
||||
}
|
||||
|
||||
servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server")
|
||||
servers_api_path = self._fetch_information(url=servers_url)
|
||||
|
||||
if "rpn" in group_preferences:
|
||||
rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group")
|
||||
rpn_list = self._fetch_information(url=rpn_groups_url)
|
||||
self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list)
|
||||
|
||||
for server_api_path in servers_api_path:
|
||||
|
||||
server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path)
|
||||
raw_server_info = self._fetch_information(url=server_url)
|
||||
|
||||
if raw_server_info is None:
|
||||
continue
|
||||
|
||||
self.do_server_inventory(host_infos=raw_server_info,
|
||||
hostname_preferences=hostname_preferences,
|
||||
group_preferences=group_preferences)
|
279
plugins/inventory/scaleway.py
Normal file
|
@@ -0,0 +1,279 @@
|
|||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: scaleway
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Remy Leone (@sieben)
|
||||
short_description: Scaleway inventory source
|
||||
description:
|
||||
- Get inventory hosts from Scaleway
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'scaleway' plugin.
|
||||
required: True
|
||||
choices: ['scaleway']
|
||||
regions:
|
||||
description: Filter results on a specific Scaleway region
|
||||
type: list
|
||||
default:
|
||||
- ams1
|
||||
- par1
|
||||
tags:
|
||||
description: Filter results on a specific tag
|
||||
type: list
|
||||
oauth_token:
|
||||
required: True
|
||||
description: Scaleway OAuth token.
|
||||
env:
|
||||
# in order of precedence
|
||||
- name: SCW_TOKEN
|
||||
- name: SCW_API_KEY
|
||||
- name: SCW_OAUTH_TOKEN
|
||||
hostnames:
|
||||
description: List of preferences for what to use as a hostname.
|
||||
type: list
|
||||
default:
|
||||
- public_ipv4
|
||||
choices:
|
||||
- public_ipv4
|
||||
- private_ipv4
|
||||
- public_ipv6
|
||||
- hostname
|
||||
- id
|
||||
variables:
|
||||
description: 'set individual variables: keys are variable names and
|
||||
values are templates. Any value returned by the
|
||||
L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
|
||||
can be used.'
|
||||
type: dict
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# scaleway_inventory.yml file in YAML format
|
||||
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
|
||||
|
||||
# use hostname as inventory_hostname
|
||||
# use the private IP address to connect to the host
|
||||
plugin: scaleway
|
||||
regions:
|
||||
- ams1
|
||||
- par1
|
||||
tags:
|
||||
- foobar
|
||||
hostnames:
|
||||
- hostname
|
||||
variables:
|
||||
ansible_host: private_ip
|
||||
state: state
|
||||
|
||||
# use hostname as inventory_hostname and public IP address to connect to the host
|
||||
plugin: scaleway
|
||||
hostnames:
|
||||
- hostname
|
||||
regions:
|
||||
- par1
|
||||
variables:
|
||||
ansible_host: public_ip.address
|
||||
'''
|
||||
|
||||
import json
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils._text import to_native
|
||||
|
||||
import ansible.module_utils.six.moves.urllib.parse as urllib_parse
|
||||
|
||||
|
||||
def _fetch_information(token, url):
|
||||
results = []
|
||||
paginated_url = url
|
||||
while True:
|
||||
try:
|
||||
response = open_url(paginated_url,
|
||||
headers={'X-Auth-Token': token,
|
||||
'Content-type': 'application/json'})
|
||||
except Exception as e:
|
||||
raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
|
||||
try:
|
||||
raw_json = json.loads(response.read())
|
||||
except ValueError:
|
||||
raise AnsibleError("Incorrect JSON payload")
|
||||
|
||||
try:
|
||||
results.extend(raw_json["servers"])
|
||||
except KeyError:
|
||||
raise AnsibleError("Incorrect format from the Scaleway API response")
|
||||
|
||||
link = response.headers['Link']
|
||||
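# Editor's illustration (not from the original source): the pagination Link
# header consumed here typically looks like
#   Link: </servers?page=2&per_page=50>; rel="next", </servers?page=4&per_page=50>; rel="last"
# and the loop below follows rel="next" until it is absent.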
if not link:
|
||||
return results
|
||||
relations = parse_pagination_link(link)
|
||||
if 'next' not in relations:
|
||||
return results
|
||||
paginated_url = urllib_parse.urljoin(paginated_url, relations['next'])
|
||||
|
||||
|
||||
def _build_server_url(api_endpoint):
|
||||
return "/".join([api_endpoint, "servers"])
|
||||
|
||||
|
||||
def extract_public_ipv4(server_info):
|
||||
try:
|
||||
return server_info["public_ip"]["address"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def extract_private_ipv4(server_info):
|
||||
try:
|
||||
return server_info["private_ip"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def extract_hostname(server_info):
|
||||
try:
|
||||
return server_info["hostname"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def extract_server_id(server_info):
|
||||
try:
|
||||
return server_info["id"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def extract_public_ipv6(server_info):
|
||||
try:
|
||||
return server_info["ipv6"]["address"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def extract_tags(server_info):
|
||||
try:
|
||||
return server_info["tags"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def extract_zone(server_info):
|
||||
try:
|
||||
return server_info["location"]["zone_id"]
|
||||
except (KeyError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
extractors = {
|
||||
"public_ipv4": extract_public_ipv4,
|
||||
"private_ipv4": extract_private_ipv4,
|
||||
"public_ipv6": extract_public_ipv6,
|
||||
"hostname": extract_hostname,
|
||||
"id": extract_server_id
|
||||
}
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
NAME = 'community.general.scaleway'
|
||||
|
||||
def _fill_host_variables(self, host, server_info):
|
||||
targeted_attributes = (
|
||||
"arch",
|
||||
"commercial_type",
|
||||
"id",
|
||||
"organization",
|
||||
"state",
|
||||
"hostname",
|
||||
)
|
||||
for attribute in targeted_attributes:
|
||||
self.inventory.set_variable(host, attribute, server_info[attribute])
|
||||
|
||||
self.inventory.set_variable(host, "tags", server_info["tags"])
|
||||
|
||||
if extract_public_ipv6(server_info=server_info):
|
||||
self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info))
|
||||
|
||||
if extract_public_ipv4(server_info=server_info):
|
||||
self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info))
|
||||
|
||||
if extract_private_ipv4(server_info=server_info):
|
||||
self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info))
|
||||
|
||||
def _get_zones(self, config_zones):
|
||||
return set(SCALEWAY_LOCATION.keys()).intersection(config_zones)
|
||||
|
||||
def match_groups(self, server_info, tags):
|
||||
server_zone = extract_zone(server_info=server_info)
|
||||
server_tags = extract_tags(server_info=server_info)
|
||||
|
||||
# If a server does not have a zone, it means it is archived
|
||||
if server_zone is None:
|
||||
return set()
|
||||
|
||||
# If no filtering is defined, all tags are valid groups
|
||||
if tags is None:
|
||||
return set(server_tags).union((server_zone,))
|
||||
|
||||
matching_tags = set(server_tags).intersection(tags)
|
||||
|
||||
if not matching_tags:
|
||||
return set()
|
||||
else:
|
||||
return matching_tags.union((server_zone,))
|
||||
|
||||
def _filter_host(self, host_infos, hostname_preferences):
|
||||
|
||||
for pref in hostname_preferences:
|
||||
if extractors[pref](host_infos):
|
||||
return extractors[pref](host_infos)
|
||||
|
||||
return None
|
||||
|
||||
def do_zone_inventory(self, zone, token, tags, hostname_preferences):
|
||||
self.inventory.add_group(zone)
|
||||
zone_info = SCALEWAY_LOCATION[zone]
|
||||
|
||||
url = _build_server_url(zone_info["api_endpoint"])
|
||||
raw_zone_hosts_infos = _fetch_information(url=url, token=token)
|
||||
|
||||
for host_infos in raw_zone_hosts_infos:
|
||||
|
||||
hostname = self._filter_host(host_infos=host_infos,
|
||||
hostname_preferences=hostname_preferences)
|
||||
|
||||
# No suitable hostname was found in the attributes, so the host will not be added to the inventory
|
||||
if not hostname:
|
||||
continue
|
||||
|
||||
groups = self.match_groups(host_infos, tags)
|
||||
|
||||
for group in groups:
|
||||
self.inventory.add_group(group=group)
|
||||
self.inventory.add_host(group=group, host=hostname)
|
||||
self._fill_host_variables(host=hostname, server_info=host_infos)
|
||||
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
self._read_config_data(path=path)
|
||||
|
||||
config_zones = self.get_option("regions")
|
||||
tags = self.get_option("tags")
|
||||
token = self.get_option("oauth_token")
|
||||
hostname_preference = self.get_option("hostnames")
|
||||
|
||||
for zone in self._get_zones(config_zones):
|
||||
self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
|
282
plugins/inventory/virtualbox.py
Normal file
|
@@ -0,0 +1,282 @@
|
|||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
name: virtualbox
|
||||
plugin_type: inventory
|
||||
short_description: virtualbox inventory source
|
||||
description:
|
||||
- Get inventory hosts from the local virtualbox installation.
|
||||
- Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
|
||||
- The inventory_hostname is always the 'Name' of the virtualbox instance.
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
- inventory_cache
|
||||
options:
|
||||
plugin:
|
||||
description: token that ensures this is a source file for the 'virtualbox' plugin
|
||||
required: True
|
||||
choices: ['virtualbox']
|
||||
running_only:
|
||||
description: toggles showing all vms vs only those currently running
|
||||
type: boolean
|
||||
default: False
|
||||
settings_password_file:
|
||||
description: provide a file containing the settings password (equivalent to --settingspwfile)
|
||||
network_info_path:
|
||||
description: property path to query for network information (ansible_host)
|
||||
default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
|
||||
query:
|
||||
description: create vars from virtualbox properties
|
||||
type: dictionary
|
||||
default: {}
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# file must be named vbox.yaml or vbox.yml
|
||||
simple_config_file:
|
||||
plugin: virtualbox
|
||||
settings_password_file: /etc/virtualbox/secrets
|
||||
query:
|
||||
logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
|
||||
compose:
|
||||
ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
|
||||
|
||||
# add any host whose name contains 'minis' (for example the minishift vm) to the 'container' group
|
||||
plugin: virtualbox
|
||||
groups:
|
||||
container: "'minis' in (inventory_hostname)"
|
||||
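# For reference only (editor's illustration, not in the original file): the
# plugin parses "VBoxManage list -l vms" output, whose lines look roughly like
#   Name:            web01
#   Groups:          /prod
#   Guest OS:        Ubuntu (64-bit)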
'''
|
||||
|
||||
import os
|
||||
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from ansible.errors import AnsibleParserError
|
||||
from ansible.module_utils._text import to_bytes, to_native, to_text
|
||||
from ansible.module_utils.common._collections_compat import MutableMapping
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
|
||||
''' Host inventory parser for ansible using local virtualbox. '''
|
||||
|
||||
NAME = 'community.general.virtualbox'
|
||||
VBOX = "VBoxManage"
|
||||
|
||||
def __init__(self):
|
||||
self._vbox_path = None
|
||||
super(InventoryModule, self).__init__()
|
||||
|
||||
def _query_vbox_data(self, host, property_path):
|
||||
ret = None
|
||||
try:
|
||||
cmd = [self._vbox_path, b'guestproperty', b'get',
|
||||
to_bytes(host, errors='surrogate_or_strict'),
|
||||
to_bytes(property_path, errors='surrogate_or_strict')]
|
||||
x = Popen(cmd, stdout=PIPE)
|
||||
ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict')
|
||||
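# Editor's note (illustrative, not in the original file): "VBoxManage
# guestproperty get <vm> <property>" typically prints either
#   Value: 192.168.56.101
# or "No value set!", which is why the code below looks for 'Value' and splits
# on the first ':'.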
if 'Value' in ipinfo:
|
||||
a, ip = ipinfo.split(':', 1)
|
||||
ret = ip.strip()
|
||||
except Exception:
|
||||
pass
|
||||
return ret
|
||||
|
||||
def _set_variables(self, hostvars):
|
||||
|
||||
# set vars in inventory from hostvars
|
||||
for host in hostvars:
|
||||
|
||||
query = self.get_option('query')
|
||||
# create vars from vbox properties
|
||||
if query and isinstance(query, MutableMapping):
|
||||
for varname in query:
|
||||
hostvars[host][varname] = self._query_vbox_data(host, query[varname])
|
||||
|
||||
strict = self.get_option('strict')
|
||||
|
||||
# create composite vars
|
||||
self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict)
|
||||
|
||||
# actually update inventory
|
||||
for key in hostvars[host]:
|
||||
self.inventory.set_variable(host, key, hostvars[host][key])
|
||||
|
||||
# constructed groups based on conditionals
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict)
|
||||
|
||||
# constructed keyed_groups
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
|
||||
|
||||
def _populate_from_cache(self, source_data):
|
||||
hostvars = source_data.pop('_meta', {}).get('hostvars', {})
|
||||
for group in source_data:
|
||||
if group == 'all':
|
||||
continue
|
||||
else:
|
||||
group = self.inventory.add_group(group)
|
||||
hosts = source_data[group].get('hosts', [])
|
||||
for host in hosts:
|
||||
self._populate_host_vars([host], hostvars.get(host, {}), group)
|
||||
self.inventory.add_child('all', group)
|
||||
if not source_data:
|
||||
for host in hostvars:
|
||||
self.inventory.add_host(host)
|
||||
self._populate_host_vars([host], hostvars.get(host, {}))
|
||||
|
||||
def _populate_from_source(self, source_data, using_current_cache=False):
|
||||
if using_current_cache:
|
||||
self._populate_from_cache(source_data)
|
||||
return source_data
|
||||
|
||||
cacheable_results = {'_meta': {'hostvars': {}}}
|
||||
|
||||
hostvars = {}
|
||||
prevkey = pref_k = ''
|
||||
current_host = None
|
||||
|
||||
# needed to possibly set ansible_host
|
||||
netinfo = self.get_option('network_info_path')
|
||||
|
||||
for line in source_data:
|
||||
line = to_text(line)
|
||||
if ':' not in line:
|
||||
continue
|
||||
try:
|
||||
k, v = line.split(':', 1)
|
||||
except Exception:
|
||||
# skip lines that cannot be split
|
||||
continue
|
||||
|
||||
if k.strip() == '':
|
||||
# skip empty
|
||||
continue
|
||||
|
||||
v = v.strip()
|
||||
# found host
|
||||
if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
|
||||
current_host = v
|
||||
if current_host not in hostvars:
|
||||
hostvars[current_host] = {}
|
||||
self.inventory.add_host(current_host)
|
||||
|
||||
# try to get network info
|
||||
netdata = self._query_vbox_data(current_host, netinfo)
|
||||
if netdata:
|
||||
self.inventory.set_variable(current_host, 'ansible_host', netdata)
|
||||
|
||||
# found groups
|
||||
elif k == 'Groups':
|
||||
for group in v.split('/'):
|
||||
if group:
|
||||
group = self.inventory.add_group(group)
|
||||
self.inventory.add_child(group, current_host)
|
||||
if group not in cacheable_results:
|
||||
cacheable_results[group] = {'hosts': []}
|
||||
cacheable_results[group]['hosts'].append(current_host)
|
||||
continue
|
||||
|
||||
else:
|
||||
# found vars, accumulate in hostvars for clean inventory set
|
||||
pref_k = 'vbox_' + k.strip().replace(' ', '_')
|
||||
if k.startswith(' '):
|
||||
if prevkey not in hostvars[current_host]:
|
||||
hostvars[current_host][prevkey] = {}
|
||||
hostvars[current_host][prevkey][pref_k] = v
|
||||
else:
|
||||
if v != '':
|
||||
hostvars[current_host][pref_k] = v
|
||||
if self._ungrouped_host(current_host, cacheable_results):
|
||||
if 'ungrouped' not in cacheable_results:
|
||||
cacheable_results['ungrouped'] = {'hosts': []}
|
||||
cacheable_results['ungrouped']['hosts'].append(current_host)
|
||||
|
||||
prevkey = pref_k
|
||||
|
||||
self._set_variables(hostvars)
|
||||
for host in hostvars:
|
||||
h = self.inventory.get_host(host)
|
||||
cacheable_results['_meta']['hostvars'][h.name] = h.vars
|
||||
|
||||
return cacheable_results
|
||||
|
||||
def _ungrouped_host(self, host, inventory):
|
||||
def find_host(host, inventory):
|
||||
for k, v in inventory.items():
|
||||
if k == '_meta':
|
||||
continue
|
||||
if isinstance(v, dict):
|
||||
yield self._ungrouped_host(host, v)
|
||||
elif isinstance(v, list):
|
||||
yield host not in v
|
||||
yield True
|
||||
|
||||
return all([found_host for found_host in find_host(host, inventory)])
|
||||
|
||||
def verify_file(self, path):
|
||||
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')):
|
||||
valid = True
|
||||
return valid
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
|
||||
try:
|
||||
self._vbox_path = get_bin_path(self.VBOX)
|
||||
except ValueError as e:
|
||||
raise AnsibleParserError(e)
|
||||
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
|
||||
cache_key = self.get_cache_key(path)
|
||||
|
||||
config_data = self._read_config_data(path)
|
||||
|
||||
# set _options from config data
|
||||
self._consume_options(config_data)
|
||||
|
||||
source_data = None
|
||||
if cache:
|
||||
cache = self.get_option('cache')
|
||||
|
||||
update_cache = False
|
||||
if cache:
|
||||
try:
|
||||
source_data = self._cache[cache_key]
|
||||
except KeyError:
|
||||
update_cache = True
|
||||
|
||||
if not source_data:
|
||||
b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', nonstring='passthru')
|
||||
running = self.get_option('running_only')
|
||||
|
||||
# start getting data
|
||||
cmd = [self._vbox_path, b'list', b'-l']
|
||||
if running:
|
||||
cmd.append(b'runningvms')
|
||||
else:
|
||||
cmd.append(b'vms')
|
||||
|
||||
if b_pwfile and os.path.exists(b_pwfile):
|
||||
cmd.append(b'--settingspwfile')
|
||||
cmd.append(b_pwfile)
|
||||
|
||||
try:
|
||||
p = Popen(cmd, stdout=PIPE)
|
||||
except Exception as e:
|
||||
raise AnsibleParserError(to_native(e))
|
||||
|
||||
source_data = p.stdout.read().splitlines()
|
||||
|
||||
using_current_cache = cache and not update_cache
|
||||
cacheable_results = self._populate_from_source(source_data, using_current_cache)
|
||||
|
||||
if update_cache:
|
||||
self._cache[cache_key] = cacheable_results
|
201
plugins/inventory/vultr.py
Normal file
|
@@ -0,0 +1,201 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
|
||||
# Copyright (c) 2019, René Moser <mail@renemoser.net>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
name: vultr
|
||||
plugin_type: inventory
|
||||
author:
|
||||
- Yanis Guenane (@Spredzy)
|
||||
- René Moser (@resmo)
|
||||
short_description: Vultr inventory source
|
||||
extends_documentation_fragment:
|
||||
- constructed
|
||||
description:
|
||||
- Get inventory hosts from Vultr public cloud.
|
||||
- Uses an YAML configuration file ending with either I(vultr.yml) or I(vultr.yaml) to set parameter values (also see examples).
|
||||
- Uses I(api_config), I(~/.vultr.ini), I(./vultr.ini) or C(VULTR_API_CONFIG) pointing to a Vultr credentials INI file
|
||||
(see U(https://docs.ansible.com/ansible/latest/scenario_guides/guide_vultr.html)).
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the 'vultr' plugin.
|
||||
type: string
|
||||
required: True
|
||||
choices: [ vultr ]
|
||||
api_account:
|
||||
description: Specify the account to be used.
|
||||
type: string
|
||||
default: default
|
||||
api_config:
|
||||
description: Path to the vultr configuration file. If not specified will be taken from regular Vultr configuration.
|
||||
type: path
|
||||
env:
|
||||
- name: VULTR_API_CONFIG
|
||||
api_key:
|
||||
description: Vultr API key. If not specified will be taken from regular Vultr configuration.
|
||||
type: string
|
||||
env:
|
||||
- name: VULTR_API_KEY
|
||||
hostname:
|
||||
description: Field to use as the hostname. Note that v4_main_ip corresponds to the main_ip field returned by the API and name corresponds to label.
|
||||
type: string
|
||||
default: v4_main_ip
|
||||
choices:
|
||||
- v4_main_ip
|
||||
- v6_main_ip
|
||||
- name
|
||||
filter_by_tag:
|
||||
description: Only return servers filtered by this tag
|
||||
type: string
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
# inventory_vultr.yml file in YAML format
|
||||
# Example command line: ansible-inventory --list -i inventory_vultr.yml
|
||||
|
||||
# Group by region, lower-cased and prefixed (e.g. "vultr_region_amsterdam"), and by OS without a prefix (e.g. "CentOS_7_x64")
|
||||
plugin: vultr
|
||||
keyed_groups:
|
||||
- prefix: vultr_region
|
||||
key: region | lower
|
||||
- separator: ""
|
||||
key: os
|
||||
|
||||
# Pass a tag filter to the API
|
||||
plugin: vultr
|
||||
filter_by_tag: Cache
|
||||
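# Editor's illustration (not in the original file): a minimal vultr.ini as read
# via api_config / VULTR_API_CONFIG; the [default] section name matches the
# api_account default and "key" holds the API key this plugin looks up.
# [default]
# key = <your_api_key>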
'''
|
||||
|
||||
import json
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible.module_utils.six.moves import configparser
|
||||
from ansible.module_utils.urls import open_url
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible_collections.community.general.plugins.module_utils.vultr import Vultr, VULTR_API_ENDPOINT, VULTR_USER_AGENT
|
||||
from ansible.module_utils.six.moves.urllib.parse import quote
|
||||
|
||||
|
||||
SCHEMA = {
|
||||
'SUBID': dict(key='id'),
|
||||
'label': dict(key='name'),
|
||||
'date_created': dict(),
|
||||
'allowed_bandwidth_gb': dict(convert_to='int'),
|
||||
'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'),
|
||||
'current_bandwidth_gb': dict(),
|
||||
'kvm_url': dict(),
|
||||
'default_password': dict(),
|
||||
'internal_ip': dict(),
|
||||
'disk': dict(),
|
||||
'cost_per_month': dict(convert_to='float'),
|
||||
'location': dict(key='region'),
|
||||
'main_ip': dict(key='v4_main_ip'),
|
||||
'network_v4': dict(key='v4_network'),
|
||||
'gateway_v4': dict(key='v4_gateway'),
|
||||
'os': dict(),
|
||||
'pending_charges': dict(convert_to='float'),
|
||||
'power_status': dict(),
|
||||
'ram': dict(),
|
||||
'plan': dict(),
|
||||
'server_state': dict(),
|
||||
'status': dict(),
|
||||
'firewall_group': dict(),
|
||||
'tag': dict(),
|
||||
'v6_main_ip': dict(),
|
||||
'v6_network': dict(),
|
||||
'v6_network_size': dict(),
|
||||
'v6_networks': dict(),
|
||||
'vcpu_count': dict(convert_to='int'),
|
||||
}
|
||||
|
||||
|
||||
def _load_conf(path, account):
|
||||
|
||||
if path:
|
||||
conf = configparser.ConfigParser()
|
||||
conf.read(path)
|
||||
|
||||
if not conf._sections.get(account):
|
||||
return None
|
||||
|
||||
return dict(conf.items(account))
|
||||
else:
|
||||
return Vultr.read_ini_config(account)
|
||||
|
||||
|
||||
def _retrieve_servers(api_key, tag_filter=None):
|
||||
api_url = '%s/v1/server/list' % VULTR_API_ENDPOINT
|
||||
if tag_filter is not None:
|
||||
api_url = api_url + '?tag=%s' % quote(tag_filter)
|
||||
|
||||
try:
|
||||
response = open_url(
|
||||
api_url, headers={'API-Key': api_key, 'Content-type': 'application/json'},
|
||||
http_agent=VULTR_USER_AGENT,
|
||||
)
|
||||
servers_list = json.loads(response.read())
|
||||
|
||||
return servers_list.values() if servers_list else []
|
||||
except ValueError:
|
||||
raise AnsibleError("Incorrect JSON payload")
|
||||
except Exception as e:
|
||||
raise AnsibleError("Error while fetching %s: %s" % (api_url, to_native(e)))
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
|
||||
NAME = 'community.general.vultr'
|
||||
|
||||
def verify_file(self, path):
|
||||
valid = False
|
||||
if super(InventoryModule, self).verify_file(path):
|
||||
if path.endswith(('vultr.yaml', 'vultr.yml')):
|
||||
valid = True
|
||||
return valid
|
||||
|
||||
def parse(self, inventory, loader, path, cache=True):
|
||||
super(InventoryModule, self).parse(inventory, loader, path)
|
||||
self._read_config_data(path=path)
|
||||
|
||||
conf = _load_conf(self.get_option('api_config'), self.get_option('api_account'))
|
||||
try:
|
||||
api_key = self.get_option('api_key') or conf.get('key')
|
||||
except Exception:
|
||||
raise AnsibleError('Could not find an API key. Check inventory file and Vultr configuration files.')
|
||||
|
||||
hostname_preference = self.get_option('hostname')
|
||||
|
||||
# Add a top group 'vultr'
|
||||
self.inventory.add_group(group='vultr')
|
||||
|
||||
# Filter by tag is supported by the api with a query
|
||||
filter_by_tag = self.get_option('filter_by_tag')
|
||||
for server in _retrieve_servers(api_key, filter_by_tag):
|
||||
|
||||
server = Vultr.normalize_result(server, SCHEMA)
|
||||
|
||||
self.inventory.add_host(host=server['name'], group='vultr')
|
||||
|
||||
for attribute, value in server.items():
|
||||
self.inventory.set_variable(server['name'], attribute, value)
|
||||
|
||||
if hostname_preference != 'name':
|
||||
self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
|
||||
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option('strict')
|
||||
|
||||
# Composed variables
|
||||
self._set_composite_vars(self.get_option('compose'), server, server['name'], strict=strict)
|
||||
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(self.get_option('groups'), server, server['name'], strict=strict)
|
||||
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, server['name'], strict=strict)
|