Initial commit

Ansible Core Team 2020-03-09 09:11:07 +00:00
commit aebc1b03fd
4861 changed files with 812621 additions and 0 deletions


@@ -0,0 +1,522 @@
#!/usr/bin/python
"""short_description: Check or wait for migrations between nodes"""
# Copyright: (c) 2018, Albert Autin
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aerospike_migrations
short_description: Check or wait for migrations between nodes
description:
- This can be used to check for migrations in a cluster.
This makes it easy to do a rolling upgrade/update on Aerospike nodes.
    - If waiting for migrations is not desired, simply poll until
        port 3000 is available or C(asinfo -v status) returns ok
author: "Albert Autin (@Alb0t)"
options:
host:
description:
- Which host do we use as seed for info connection
required: False
type: str
default: localhost
port:
description:
- Which port to connect to Aerospike on (service port)
required: False
type: int
default: 3000
connect_timeout:
description:
- How long to try to connect before giving up (milliseconds)
required: False
type: int
default: 1000
consecutive_good_checks:
description:
- How many times should the cluster report "no migrations"
consecutively before returning OK back to ansible?
required: False
type: int
default: 3
sleep_between_checks:
description:
- How long to sleep between each check (seconds).
required: False
type: int
default: 60
tries_limit:
description:
- How many times do we poll before giving up and failing?
default: 300
required: False
type: int
local_only:
description:
- Do you wish to only check for migrations on the local node
before returning, or do you want all nodes in the cluster
to finish before returning?
required: True
type: bool
min_cluster_size:
description:
- Check will return bad until cluster size is met
or until tries is exhausted
required: False
type: int
default: 1
fail_on_cluster_change:
description:
            - Fail if the cluster key changes.
            - If something else is changing the cluster, we may want to fail.
required: False
type: bool
default: True
migrate_tx_key:
description:
- The metric key used to determine if we have tx migrations
                remaining. Configurable for backwards compatibility.
required: False
type: str
default: migrate_tx_partitions_remaining
migrate_rx_key:
description:
- The metric key used to determine if we have rx migrations
                remaining. Configurable for backwards compatibility.
required: False
type: str
default: migrate_rx_partitions_remaining
target_cluster_size:
description:
            - When all Aerospike builds in the cluster are 4.3 or newer,
                the C(cluster-stable) info command will be used. Inside this
                command, you can optionally specify what the target
                cluster size is - but it is not necessary. You can still rely on
                min_cluster_size if you don't want to use this option.
            - If this option is specified on a cluster that has at least 1
                host older than 4.3, it will be ignored until the minimum
                version reaches 4.3.
required: False
type: int
'''
EXAMPLES = '''
# check for migrations on local node
- name: wait for migrations on local node before proceeding
aerospike_migrations:
host: "localhost"
connect_timeout: 2000
consecutive_good_checks: 5
sleep_between_checks: 15
tries_limit: 600
    local_only: True
# example playbook:
---
- name: upgrade aerospike
hosts: all
become: true
serial: 1
tasks:
- name: Install dependencies
apt:
name:
- python
- python-pip
- python-setuptools
state: latest
- name: setup aerospike
pip:
name: aerospike
# Check for migrations every (sleep_between_checks) seconds.
# If at least (consecutive_good_checks) checks come back OK in a row, return OK.
# Will exit on any exception, which can be caused by bad nodes,
# nodes not returning data, or other reasons.
# Maximum runtime before giving up in this case is roughly:
# (tries_limit * sleep_between_checks + delay) * retries
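# For instance, with the assumed values in the task below
# (sleep_between_checks=1, tries_limit=5, delay=60, retries=120),
# the worst case is about (5 * 1 + 60) * 120 = 7800 seconds.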
- name: wait for aerospike migrations
aerospike_migrations:
local_only: True
sleep_between_checks: 1
tries_limit: 5
consecutive_good_checks: 3
fail_on_cluster_change: true
min_cluster_size: 3
target_cluster_size: 4
register: migrations_check
until: migrations_check is succeeded
changed_when: false
delay: 60
retries: 120
- name: another thing
shell: |
echo foo
- name: reboot
reboot:
'''
RETURN = '''
# Returns only a success/failure result. Changed is always false.
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
LIB_FOUND_ERR = None
try:
import aerospike
from time import sleep
import re
except ImportError as ie:
LIB_FOUND = False
LIB_FOUND_ERR = traceback.format_exc()
else:
LIB_FOUND = True
def run_module():
"""run ansible module"""
module_args = dict(
host=dict(type='str', required=False, default='localhost'),
port=dict(type='int', required=False, default=3000),
connect_timeout=dict(type='int', required=False, default=1000),
consecutive_good_checks=dict(type='int', required=False, default=3),
sleep_between_checks=dict(type='int', required=False, default=60),
tries_limit=dict(type='int', required=False, default=300),
local_only=dict(type='bool', required=True),
min_cluster_size=dict(type='int', required=False, default=1),
target_cluster_size=dict(type='int', required=False, default=None),
fail_on_cluster_change=dict(type='bool', required=False, default=True),
migrate_tx_key=dict(type='str', required=False,
default="migrate_tx_partitions_remaining"),
migrate_rx_key=dict(type='str', required=False,
default="migrate_rx_partitions_remaining")
)
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if not LIB_FOUND:
module.fail_json(msg=missing_required_lib('aerospike'),
exception=LIB_FOUND_ERR)
try:
if module.check_mode:
has_migrations, skip_reason = False, None
else:
migrations = Migrations(module)
has_migrations, skip_reason = migrations.has_migs(
module.params['local_only']
)
if has_migrations:
module.fail_json(msg="Failed.", skip_reason=skip_reason)
except Exception as e:
module.fail_json(msg="Error: {0}".format(e))
module.exit_json(**result)
class Migrations:
""" Check or wait for migrations between nodes """
def __init__(self, module):
self.module = module
self._client = self._create_client().connect()
self._nodes = {}
self._update_nodes_list()
self._cluster_statistics = {}
self._update_cluster_statistics()
self._namespaces = set()
self._update_cluster_namespace_list()
self._build_list = set()
self._update_build_list()
self._start_cluster_key = \
self._cluster_statistics[self._nodes[0]]['cluster_key']
def _create_client(self):
""" TODO: add support for auth, tls, and other special features
I won't use those features, so I'll wait until somebody complains
or does it for me (Cross fingers)
create the client object"""
config = {
'hosts': [
(self.module.params['host'], self.module.params['port'])
],
'policies': {
'timeout': self.module.params['connect_timeout']
}
}
return aerospike.client(config)
def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
"""delimiter is for separate stats that come back, NOT for kv
separation which is ="""
if node is None: # If no node passed, use the first one (local)
node = self._nodes[0]
data = self._client.info_node(cmd, node)
data = data.split("\t")
if len(data) != 1 and len(data) != 2:
self.module.fail_json(
msg="Unexpected number of values returned in info command: " +
str(len(data))
)
        # data will be in the format 'command\toutput'
data = data[-1]
data = data.rstrip("\n\r")
data_arr = data.split(delimiter)
        # some commands don't return in kv format
        # so we don't want a dict from those.
if '=' in data:
retval = dict(
metric.split("=", 1) for metric in data_arr
)
else:
# if only 1 element found, and not kv, return just the value.
if len(data_arr) == 1:
retval = data_arr[0]
else:
retval = data_arr
return retval
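    # Illustration of the parsing above, with assumed raw payloads from
    # info_node() (not part of the module logic):
    #   "statistics\tcluster_key=ABC;cluster_size=3"
    #       -> {'cluster_key': 'ABC', 'cluster_size': '3'}
    #   "namespaces\ttest;bar"  -> ['test', 'bar']
    #   "build\t4.3.0.2"        -> '4.3.0.2'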
def _update_build_list(self):
"""creates self._build_list which is a unique list
of build versions."""
self._build_list = set()
for node in self._nodes:
build = self._info_cmd_helper('build', node)
self._build_list.add(build)
# just checks to see if the version is 4.3 or greater
def _can_use_cluster_stable(self):
# if version <4.3 we can't use cluster-stable info cmd
# regex hack to check for versions beginning with 0-3 or
# beginning with 4.0,4.1,4.2
if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
return False
return True
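    # Illustration of the version gate above, under assumed build strings
    # (not part of the module logic):
    #   >>> import re
    #   >>> bool(re.search(r'^([0-3]\.|4\.[0-2])', '4.2.0.10'))
    #   True    # 4.2 is too old, cluster-stable cannot be used
    #   >>> bool(re.search(r'^([0-3]\.|4\.[0-2])', '4.3.0.2'))
    #   False   # 4.3+, cluster-stable is usable
    # min(self._build_list) compares strings, so the lowest-sorting build
    # gates the whole cluster.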
def _update_cluster_namespace_list(self):
""" make a unique list of namespaces
TODO: does this work on a rolling namespace add/deletion?
        thankfully if it doesn't, we don't need this on builds >=4.3"""
self._namespaces = set()
for node in self._nodes:
namespaces = self._info_cmd_helper('namespaces', node)
for namespace in namespaces:
self._namespaces.add(namespace)
def _update_cluster_statistics(self):
"""create a dict of nodes with their related stats """
self._cluster_statistics = {}
for node in self._nodes:
self._cluster_statistics[node] = \
self._info_cmd_helper('statistics', node)
def _update_nodes_list(self):
"""get a fresh list of all the nodes"""
self._nodes = self._client.get_nodes()
if not self._nodes:
self.module.fail_json("Failed to retrieve at least 1 node.")
def _namespace_has_migs(self, namespace, node=None):
"""returns a True or False.
Does the namespace have migrations for the node passed?
If no node passed, uses the local node or the first one in the list"""
namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
try:
namespace_tx = \
int(namespace_stats[self.module.params['migrate_tx_key']])
            namespace_rx = \
                int(namespace_stats[self.module.params['migrate_rx_key']])
except KeyError:
self.module.fail_json(
msg="Did not find partition remaining key:" +
self.module.params['migrate_tx_key'] +
" or key:" +
self.module.params['migrate_rx_key'] +
" in 'namespace/" +
namespace +
"' output."
)
except TypeError:
self.module.fail_json(
msg="namespace stat returned was not numerical"
)
return namespace_tx != 0 or namespace_rx != 0
def _node_has_migs(self, node=None):
"""just calls namespace_has_migs and
if any namespace has migs returns true"""
migs = 0
self._update_cluster_namespace_list()
for namespace in self._namespaces:
if self._namespace_has_migs(namespace, node):
migs += 1
return migs != 0
def _cluster_key_consistent(self):
"""create a dictionary to store what each node
returns the cluster key as. we should end up with only 1 dict key,
with the key being the cluster key."""
cluster_keys = {}
for node in self._nodes:
cluster_key = self._cluster_statistics[node][
'cluster_key']
if cluster_key not in cluster_keys:
cluster_keys[cluster_key] = 1
else:
cluster_keys[cluster_key] += 1
if len(cluster_keys.keys()) == 1 and \
self._start_cluster_key in cluster_keys:
return True
return False
def _cluster_migrates_allowed(self):
"""ensure all nodes have 'migrate_allowed' in their stats output"""
for node in self._nodes:
node_stats = self._info_cmd_helper('statistics', node)
allowed = node_stats['migrate_allowed']
if allowed == "false":
return False
return True
def _cluster_has_migs(self):
"""calls node_has_migs for each node"""
migs = 0
for node in self._nodes:
if self._node_has_migs(node):
migs += 1
if migs == 0:
return False
return True
def _has_migs(self, local):
if local:
return self._local_node_has_migs()
return self._cluster_has_migs()
def _local_node_has_migs(self):
return self._node_has_migs(None)
def _is_min_cluster_size(self):
"""checks that all nodes in the cluster are returning the
minimum cluster size specified in their statistics output"""
sizes = set()
for node in self._cluster_statistics:
sizes.add(int(self._cluster_statistics[node]['cluster_size']))
        if len(sizes) > 1:  # if we are getting more than 1 size, let's say no
            return False
        if min(sizes) >= self.module.params['min_cluster_size']:
            return True
        return False
def _cluster_stable(self):
"""Added 4.3:
cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
Returns the current 'cluster_key' when the following are satisfied:
If 'size' is specified then the target node's 'cluster-size'
must match size.
If 'ignore-migrations' is either unspecified or 'false' then
the target node's migrations counts must be zero for the provided
'namespace' or all namespaces if 'namespace' is not provided."""
cluster_key = set()
cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
cmd = "cluster-stable:"
target_cluster_size = self.module.params['target_cluster_size']
if target_cluster_size is not None:
cmd = cmd + "size=" + str(target_cluster_size) + ";"
for node in self._nodes:
cluster_key.add(self._info_cmd_helper(cmd, node))
if len(cluster_key) == 1:
return True
return False
def _cluster_good_state(self):
"""checks a few things to make sure we're OK to say the cluster
        has no migs. It could be in an unhealthy condition that does not allow
migs, or a split brain"""
if self._cluster_key_consistent() is not True:
return False, "Cluster key inconsistent."
if self._is_min_cluster_size() is not True:
return False, "Cluster min size not reached."
if self._cluster_migrates_allowed() is not True:
return False, "migrate_allowed is false somewhere."
return True, "OK."
def has_migs(self, local=True):
"""returns a boolean, False if no migrations otherwise True"""
consecutive_good = 0
try_num = 0
skip_reason = list()
while \
try_num < int(self.module.params['tries_limit']) and \
consecutive_good < \
int(self.module.params['consecutive_good_checks']):
self._update_nodes_list()
self._update_cluster_statistics()
            # These checks don't fail the run outright because
            # we probably want to skip & sleep instead of failing entirely
stable, reason = self._cluster_good_state()
if stable is not True:
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + reason
)
else:
if self._can_use_cluster_stable():
if self._cluster_stable():
consecutive_good += 1
else:
consecutive_good = 0
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + " cluster_stable"
)
elif self._has_migs(local):
# print("_has_migs")
skip_reason.append(
"Skipping on try#" + str(try_num) +
" for reason:" + " migrations"
)
consecutive_good = 0
else:
consecutive_good += 1
if consecutive_good == self.module.params[
'consecutive_good_checks']:
break
try_num += 1
sleep(self.module.params['sleep_between_checks'])
# print(skip_reason)
if consecutive_good == self.module.params['consecutive_good_checks']:
return False, None
return True, skip_reason
def main():
"""main method for ansible module"""
run_module()
if __name__ == '__main__':
main()


@@ -0,0 +1,146 @@
#!/usr/bin/python
# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: influxdb_database
short_description: Manage InfluxDB databases
description:
- Manage InfluxDB databases.
author: "Kamil Szczygiel (@kamsz)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9 & <= 1.2.4"
- requests
options:
database_name:
description:
- Name of the database.
required: true
type: str
state:
description:
- Determines if the database should be created or destroyed.
choices: [ absent, present ]
default: present
type: str
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
# Example influxdb_database command from Ansible Playbooks
- name: Create database
influxdb_database:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
- name: Destroy database
influxdb_database:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
state: absent
- name: Create database using custom credentials
influxdb_database:
hostname: "{{influxdb_ip_address}}"
username: "{{influxdb_username}}"
password: "{{influxdb_password}}"
database_name: "{{influxdb_database_name}}"
ssl: yes
validate_certs: yes
'''
RETURN = r'''
# only defaults
'''
try:
import requests.exceptions
from influxdb import exceptions
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
def find_database(module, client, database_name):
database = None
try:
databases = client.get_list_database()
for db in databases:
if db['name'] == database_name:
database = db
break
except requests.exceptions.ConnectionError as e:
module.fail_json(msg=str(e))
return database
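# Note (assumed influxdb-python behavior): get_list_database() returns a list
# of dicts such as [{'name': 'mydb'}, ...], hence the match on the 'name' key.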
def create_database(module, client, database_name):
if not module.check_mode:
try:
client.create_database(database_name)
except requests.exceptions.ConnectionError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=True)
def drop_database(module, client, database_name):
if not module.check_mode:
try:
client.drop_database(database_name)
except exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def main():
argument_spec = InfluxDb.influxdb_argument_spec()
argument_spec.update(
database_name=dict(required=True, type='str'),
state=dict(default='present', type='str', choices=['present', 'absent'])
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
influxdb = InfluxDb(module)
client = influxdb.connect_to_influxdb()
database_name = influxdb.database_name
database = find_database(module, client, database_name)
if state == 'present':
if database:
module.exit_json(changed=False)
else:
create_database(module, client, database_name)
if state == 'absent':
if database:
drop_database(module, client, database_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()


@@ -0,0 +1,106 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: influxdb_query
short_description: Query data points from InfluxDB
description:
- Query data points from InfluxDB.
author: "René Moser (@resmo)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
query:
description:
- Query to be executed.
required: true
type: str
database_name:
description:
- Name of the database.
required: true
type: str
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
- name: Query connections
influxdb_query:
hostname: "{{ influxdb_ip_address }}"
database_name: "{{ influxdb_database_name }}"
query: "select mean(value) from connections"
register: connection
- name: Query connections with tags filters
influxdb_query:
hostname: "{{ influxdb_ip_address }}"
database_name: "{{ influxdb_database_name }}"
query: "select mean(value) from connections where region='zue01' and host='server01'"
register: connection
- name: Print results from the query
debug:
var: connection.query_results
'''
RETURN = r'''
query_results:
description: Result from the query
returned: success
type: list
sample:
- mean: 1245.5333333333333
time: "1970-01-01T00:00:00Z"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
class AnsibleInfluxDBRead(InfluxDb):
def read_by_query(self, query):
client = self.connect_to_influxdb()
try:
rs = client.query(query)
if rs:
return list(rs.get_points())
except Exception as e:
self.module.fail_json(msg=to_native(e))
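    # Illustration (assumed influxdb-python behavior): client.query() returns
    # a ResultSet, and get_points() yields dicts such as
    # {'time': '1970-01-01T00:00:00Z', 'mean': 1245.53} - see the RETURN
    # sample above.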
def main():
argument_spec = InfluxDb.influxdb_argument_spec()
argument_spec.update(
query=dict(type='str', required=True),
database_name=dict(required=True, type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
influx = AnsibleInfluxDBRead(module)
query = module.params.get('query')
results = influx.read_by_query(query)
module.exit_json(changed=True, query_results=results)
if __name__ == '__main__':
main()


@@ -0,0 +1,201 @@
#!/usr/bin/python
# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: influxdb_retention_policy
short_description: Manage InfluxDB retention policies
description:
- Manage InfluxDB retention policies.
author: "Kamil Szczygiel (@kamsz)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
- requests
options:
database_name:
description:
- Name of the database.
required: true
type: str
policy_name:
description:
- Name of the retention policy.
required: true
type: str
duration:
description:
- Determines how long InfluxDB should keep the data.
required: true
type: str
replication:
description:
- Determines how many independent copies of each point are stored in the cluster.
required: true
type: int
default:
description:
- Sets the retention policy as default retention policy.
type: bool
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
# Example influxdb_retention_policy command from Ansible Playbooks
- name: create 1 hour retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: 1h
replication: 1
ssl: yes
validate_certs: yes
- name: create 1 day retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: 1d
replication: 1
- name: create 1 week retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: 1w
replication: 1
- name: create infinite retention policy
influxdb_retention_policy:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
policy_name: test
duration: INF
replication: 1
ssl: no
validate_certs: no
'''
RETURN = r'''
# only defaults
'''
import re
try:
import requests.exceptions
from influxdb import exceptions
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
from ansible.module_utils._text import to_native
def find_retention_policy(module, client):
database_name = module.params['database_name']
policy_name = module.params['policy_name']
hostname = module.params['hostname']
retention_policy = None
try:
retention_policies = client.get_list_retention_policies(database=database_name)
for policy in retention_policies:
if policy['name'] == policy_name:
retention_policy = policy
break
except requests.exceptions.ConnectionError as e:
module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
return retention_policy
def create_retention_policy(module, client):
database_name = module.params['database_name']
policy_name = module.params['policy_name']
duration = module.params['duration']
replication = module.params['replication']
default = module.params['default']
if not module.check_mode:
try:
client.create_retention_policy(policy_name, duration, replication, database_name, default)
except exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def alter_retention_policy(module, client, retention_policy):
database_name = module.params['database_name']
policy_name = module.params['policy_name']
duration = module.params['duration']
replication = module.params['replication']
default = module.params['default']
duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
changed = False
duration_lookup = duration_regexp.search(duration)
if duration_lookup.group(2) == 'h':
influxdb_duration_format = '%s0m0s' % duration
elif duration_lookup.group(2) == 'd':
influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
elif duration_lookup.group(2) == 'w':
influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
elif duration == 'INF':
influxdb_duration_format = '0'
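    # Normalization examples for the branches above (assumed inputs):
    #   '1h'  -> '1h0m0s'
    #   '1d'  -> '24h0m0s'
    #   '1w'  -> '168h0m0s'
    #   'INF' -> '0'
    # InfluxDB reports durations in this hour-based form, so the comparison
    # below is made against the normalized value.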
if (not retention_policy['duration'] == influxdb_duration_format or
not retention_policy['replicaN'] == int(replication) or
not retention_policy['default'] == default):
if not module.check_mode:
try:
client.alter_retention_policy(policy_name, database_name, duration, replication, default)
except exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
changed = True
module.exit_json(changed=changed)
def main():
argument_spec = InfluxDb.influxdb_argument_spec()
argument_spec.update(
database_name=dict(required=True, type='str'),
policy_name=dict(required=True, type='str'),
duration=dict(required=True, type='str'),
replication=dict(required=True, type='int'),
default=dict(default=False, type='bool')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
influxdb = InfluxDb(module)
client = influxdb.connect_to_influxdb()
retention_policy = find_retention_policy(module, client)
if retention_policy:
alter_retention_policy(module, client, retention_policy)
else:
create_retention_policy(module, client)
if __name__ == '__main__':
main()


@@ -0,0 +1,265 @@
#!/usr/bin/python
# Copyright: (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users.
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
type: str
user_password:
description:
- Password to be set for the user.
required: false
type: str
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ absent, present ]
default: present
type: str
grants:
description:
- Privileges to grant to this user.
- Takes a list of dicts containing the "database" and "privilege" keys.
- If this argument is not provided, the current grants will be left alone.
- If an empty list is provided, all grants for the user will be removed.
type: list
elements: dict
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
- name: Create a user on localhost using default login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
- name: Create a user on localhost using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create an admin user on a remote host using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create a user on localhost with privileges
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
grants:
- database: 'collectd'
privilege: 'WRITE'
- database: 'graphite'
privilege: 'READ'
- name: Destroy a user using custom login credentials
influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
state: absent
'''
RETURN = r'''
# only defaults
'''
from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.community.general.plugins.module_utils.influxdb as influx
def find_user(module, client, user_name):
user_result = None
try:
users = client.get_list_users()
for user in users:
if user['user'] == user_name:
user_result = user
break
except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
module.fail_json(msg=to_native(e))
return user_result
def check_user_password(module, client, user_name, user_password):
try:
client.switch_user(user_name, user_password)
client.get_list_users()
except influx.exceptions.InfluxDBClientError as e:
if e.code == 401:
return False
except ConnectionError as e:
module.fail_json(msg=to_native(e))
finally:
# restore previous user
client.switch_user(module.params['username'], module.params['password'])
return True
def set_user_password(module, client, user_name, user_password):
if not module.check_mode:
try:
client.set_user_password(user_name, user_password)
except ConnectionError as e:
module.fail_json(msg=to_native(e))
def create_user(module, client, user_name, user_password, admin):
if not module.check_mode:
try:
client.create_user(user_name, user_password, admin)
except ConnectionError as e:
module.fail_json(msg=to_native(e))
def drop_user(module, client, user_name):
if not module.check_mode:
try:
client.drop_user(user_name)
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
module.exit_json(changed=True)
def set_user_grants(module, client, user_name, grants):
changed = False
try:
current_grants = client.get_list_privileges(user_name)
        # Normalize privilege wording, building a new list so we do not
        # mutate current_grants while iterating over it
        parsed_grants = []
        for v in current_grants:
            if v['privilege'] == 'ALL PRIVILEGES':
                v['privilege'] = 'ALL'
            if v['privilege'] != 'NO PRIVILEGES':
                parsed_grants.append(v)
        current_grants = parsed_grants
# check if the current grants are included in the desired ones
for current_grant in current_grants:
if current_grant not in grants:
if not module.check_mode:
client.revoke_privilege(current_grant['privilege'],
current_grant['database'],
user_name)
changed = True
# check if the desired grants are included in the current ones
for grant in grants:
if grant not in current_grants:
if not module.check_mode:
client.grant_privilege(grant['privilege'],
grant['database'],
user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=e.content)
return changed
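# Example diff (assumed values): with current grants
#   [{'database': 'collectd', 'privilege': 'ALL'}]
# and desired grants
#   [{'database': 'collectd', 'privilege': 'WRITE'}]
# the function revokes ALL on collectd, grants WRITE on collectd, and
# returns changed=True.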
def main():
argument_spec = influx.InfluxDb.influxdb_argument_spec()
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
user_password=dict(required=False, type='str', no_log=True),
        admin=dict(default=False, type='bool'),
grants=dict(type='list', elements='dict'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
state = module.params['state']
user_name = module.params['user_name']
user_password = module.params['user_password']
admin = module.params['admin']
grants = module.params['grants']
influxdb = influx.InfluxDb(module)
client = influxdb.connect_to_influxdb()
user = find_user(module, client, user_name)
changed = False
if state == 'present':
if user:
            if user_password is not None and not check_user_password(module, client, user_name, user_password):
set_user_password(module, client, user_name, user_password)
changed = True
try:
if admin and not user['admin']:
client.grant_admin_privileges(user_name)
changed = True
elif not admin and user['admin']:
client.revoke_admin_privileges(user_name)
changed = True
except influx.exceptions.InfluxDBClientError as e:
module.fail_json(msg=to_native(e))
else:
user_password = user_password or ''
create_user(module, client, user_name, user_password, admin)
changed = True
if grants is not None:
if set_user_grants(module, client, user_name, grants):
changed = True
module.exit_json(changed=changed)
if state == 'absent':
if user:
drop_user(module, client, user_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()


@@ -0,0 +1,101 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: influxdb_write
short_description: Write data points into InfluxDB
description:
- Write data points into InfluxDB.
author: "René Moser (@resmo)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
data_points:
description:
- Data points as dict to write into the database.
required: true
type: list
elements: dict
database_name:
description:
- Name of the database.
required: true
type: str
extends_documentation_fragment:
- community.general.influxdb
'''
EXAMPLES = r'''
- name: Write points into database
influxdb_write:
hostname: "{{influxdb_ip_address}}"
database_name: "{{influxdb_database_name}}"
data_points:
- measurement: connections
tags:
host: server01
region: us-west
time: "{{ ansible_date_time.iso8601 }}"
fields:
value: 2000
- measurement: connections
tags:
host: server02
region: us-east
time: "{{ ansible_date_time.iso8601 }}"
fields:
value: 3000
'''
RETURN = r'''
# only defaults
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
class AnsibleInfluxDBWrite(InfluxDb):
def write_data_point(self, data_points):
client = self.connect_to_influxdb()
try:
client.write_points(data_points)
except Exception as e:
self.module.fail_json(msg=to_native(e))
def main():
argument_spec = InfluxDb.influxdb_argument_spec()
argument_spec.update(
data_points=dict(required=True, type='list', elements='dict'),
database_name=dict(required=True, type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
)
influx = AnsibleInfluxDBWrite(module)
data_points = module.params.get('data_points')
influx.write_data_point(data_points)
module.exit_json(changed=True)
if __name__ == '__main__':
main()


@@ -0,0 +1,298 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
# (c) 2017, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: elasticsearch_plugin
short_description: Manage Elasticsearch plugins
description:
- Manages Elasticsearch plugins.
author:
- Mathew Davies (@ThePixelDeveloper)
- Sam Doran (@samdoran)
options:
name:
description:
- Name of the plugin to install.
required: True
state:
description:
- Desired state of a plugin.
choices: ["present", "absent"]
default: present
src:
description:
- Optionally set the source location to retrieve the plugin from. This can be a file://
URL to install from a local file, or a remote URL. If this is not set, the plugin
location is just based on the name.
- The name parameter must match the descriptor in the plugin ZIP specified.
- Is only used if the state would change, which is solely checked based on the name
parameter. If, for example, the plugin is already installed, changing this has no
effect.
- For ES 1.x use url.
required: False
url:
description:
- Set exact URL to download the plugin from (Only works for ES 1.x).
- For ES 2.x and higher, use src.
required: False
timeout:
description:
- "Timeout setting: 30s, 1m, 1h..."
- Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
default: 1m
force:
description:
- "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
default: False
type: bool
plugin_bin:
description:
- Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
- The default changed in Ansible 2.4 to None.
plugin_dir:
description:
- Your configured plugin directory specified in Elasticsearch
default: /usr/share/elasticsearch/plugins/
proxy_host:
description:
- Proxy host to use during plugin installation
proxy_port:
description:
- Proxy port to use during plugin installation
version:
description:
- Version of the plugin to be installed.
If plugin exists with previous version, it will NOT be updated
'''
EXAMPLES = '''
# Install Elasticsearch Head plugin in Elasticsearch 2.x
- elasticsearch_plugin:
name: mobz/elasticsearch-head
state: present
# Install a specific version of Elasticsearch Head in Elasticsearch 2.x
- elasticsearch_plugin:
name: mobz/elasticsearch-head
version: 2.0.0
# Uninstall Elasticsearch head plugin in Elasticsearch 2.x
- elasticsearch_plugin:
name: mobz/elasticsearch-head
state: absent
# Install a specific plugin in Elasticsearch >= 5.0
- elasticsearch_plugin:
name: analysis-icu
state: present
# Install the ingest-geoip plugin with a forced installation
- elasticsearch_plugin:
name: ingest-geoip
state: present
force: yes
'''
import os
from ansible.module_utils.basic import AnsibleModule
PACKAGE_STATE_MAP = dict(
present="install",
absent="remove"
)
PLUGIN_BIN_PATHS = tuple([
'/usr/share/elasticsearch/bin/elasticsearch-plugin',
'/usr/share/elasticsearch/bin/plugin'
])
def parse_plugin_repo(string):
elements = string.split("/")
# We first consider the simplest form: pluginname
repo = elements[0]
# We consider the form: username/pluginname
if len(elements) > 1:
repo = elements[1]
# remove elasticsearch- prefix
# remove es- prefix
for string in ("elasticsearch-", "es-"):
if repo.startswith(string):
return repo[len(string):]
return repo
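# Illustration (assumed plugin names): 'mobz/elasticsearch-head' -> 'head',
# 'es-river-couchdb' -> 'river-couchdb', 'analysis-icu' -> 'analysis-icu'.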
def is_plugin_present(plugin_name, plugin_dir):
return os.path.isdir(os.path.join(plugin_dir, plugin_name))
def parse_error(string):
reason = "ERROR: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
is_old_command = (os.path.basename(plugin_bin) == 'plugin')
# Timeout and version are only valid for plugin, not elasticsearch-plugin
if is_old_command:
if timeout:
cmd_args.append("--timeout %s" % timeout)
if version:
plugin_name = plugin_name + '/' + version
cmd_args[2] = plugin_name
if proxy_host and proxy_port:
cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
# Legacy ES 1.x
if url:
cmd_args.append("--url %s" % url)
if force:
cmd_args.append("--batch")
if src:
cmd_args.append(src)
else:
cmd_args.append(plugin_name)
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
return True, cmd, out, err
def get_plugin_bin(module, plugin_bin=None):
# Use the plugin_bin that was supplied first before trying other options
valid_plugin_bin = None
if plugin_bin and os.path.isfile(plugin_bin):
valid_plugin_bin = plugin_bin
else:
# Add the plugin_bin passed into the module to the top of the list of paths to test,
# testing for that binary name first before falling back to the default paths.
bin_paths = list(PLUGIN_BIN_PATHS)
if plugin_bin and plugin_bin not in bin_paths:
bin_paths.insert(0, plugin_bin)
# Get separate lists of dirs and binary names from the full paths to the
# plugin binaries.
plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
# Check for the binary names in the default system paths as well as the path
# specified in the module arguments.
for bin_file in plugin_bins:
valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
if valid_plugin_bin:
break
if not valid_plugin_bin:
module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
return valid_plugin_bin
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
src=dict(default=None),
url=dict(default=None),
timeout=dict(default="1m"),
force=dict(type='bool', default=False),
plugin_bin=dict(type="path"),
plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
proxy_host=dict(default=None),
proxy_port=dict(default=None),
version=dict(default=None)
),
mutually_exclusive=[("src", "url")],
supports_check_mode=True
)
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
src = module.params["src"]
timeout = module.params["timeout"]
force = module.params["force"]
plugin_bin = module.params["plugin_bin"]
plugin_dir = module.params["plugin_dir"]
proxy_host = module.params["proxy_host"]
proxy_port = module.params["proxy_port"]
version = module.params["version"]
# Search provided path and system paths for valid binary
plugin_bin = get_plugin_bin(module, plugin_bin)
repo = parse_plugin_repo(name)
present = is_plugin_present(repo, plugin_dir)
# skip if the state is correct
if (present and state == "present") or (state == "absent" and not present):
module.exit_json(changed=False, name=name, state=state)
if state == "present":
changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
if __name__ == '__main__':
main()


@@ -0,0 +1,264 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Thierno IB. BARRY @barryib
# Sponsored by Polyconseil http://polyconseil.fr.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: kibana_plugin
short_description: Manage Kibana plugins
description:
- This module can be used to manage Kibana plugins.
author: Thierno IB. BARRY (@barryib)
options:
name:
description:
- Name of the plugin to install.
required: True
state:
description:
- Desired state of a plugin.
choices: ["present", "absent"]
default: present
url:
description:
- Set exact URL to download the plugin from.
- For local file, prefix its absolute path with file://
timeout:
description:
- "Timeout setting: 30s, 1m, 1h etc."
default: 1m
plugin_bin:
description:
- Location of the Kibana binary.
default: /opt/kibana/bin/kibana
plugin_dir:
description:
- Your configured plugin directory specified in Kibana.
default: /opt/kibana/installedPlugins/
version:
description:
- Version of the plugin to be installed.
- If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes.
force:
description:
- Delete and re-install the plugin. Can be useful for plugins update.
type: bool
default: 'no'
'''
EXAMPLES = '''
- name: Install Elasticsearch head plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
- name: Install specific version of a plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
version: '2.3.3'
- name: Uninstall Elasticsearch head plugin
kibana_plugin:
state: absent
name: elasticsearch/marvel
'''
RETURN = '''
cmd:
description: the launched command during plugin management (install / remove)
returned: success
type: str
name:
description: the plugin name to install or remove
returned: success
type: str
url:
description: the url from where the plugin is installed from
returned: success
type: str
timeout:
description: the timeout for plugin download
returned: success
type: str
stdout:
description: the command stdout
returned: success
type: str
stderr:
description: the command stderr
returned: success
type: str
state:
description: the state for the managed plugin
returned: success
type: str
'''
import os
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
PACKAGE_STATE_MAP = dict(
present="--install",
absent="--remove"
)
def parse_plugin_repo(string):
elements = string.split("/")
# We first consider the simplest form: pluginname
repo = elements[0]
# We consider the form: username/pluginname
if len(elements) > 1:
repo = elements[1]
# remove elasticsearch- prefix
# remove es- prefix
for string in ("elasticsearch-", "es-"):
if repo.startswith(string):
return repo[len(string):]
return repo
def is_plugin_present(plugin_dir, working_dir):
return os.path.isdir(os.path.join(working_dir, plugin_dir))
def parse_error(string):
reason = "reason: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
if LooseVersion(kibana_version) > LooseVersion('4.6'):
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
cmd_args = [kibana_plugin_bin, "install"]
if url:
cmd_args.append(url)
else:
cmd_args.append(plugin_name)
else:
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
if url:
cmd_args.append("--url %s" % url)
if timeout:
cmd_args.append("--timeout %s" % timeout)
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
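# Resulting commands (illustrative): for Kibana > 4.6 this builds
#   "<bin_dir>/kibana-plugin install <url-or-name>"
# while for 4.x it builds
#   "<plugin_bin> plugin --install <name> [--url <url>] [--timeout <timeout>]".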
def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
if LooseVersion(kibana_version) > LooseVersion('4.6'):
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
cmd_args = [kibana_plugin_bin, "remove", plugin_name]
else:
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def get_kibana_version(module, plugin_bin):
cmd_args = [plugin_bin, '--version']
cmd = " ".join(cmd_args)
rc, out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="Failed to get Kibana version : %s" % err)
return out.strip()
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
url=dict(default=None),
timeout=dict(default="1m"),
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
version=dict(default=None),
force=dict(default="no", type="bool")
),
supports_check_mode=True,
)
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
timeout = module.params["timeout"]
plugin_bin = module.params["plugin_bin"]
plugin_dir = module.params["plugin_dir"]
version = module.params["version"]
force = module.params["force"]
changed, cmd, out, err = False, '', '', ''
kibana_version = get_kibana_version(module, plugin_bin)
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
# skip if the state is correct
if (present and state == "present" and not force) or (state == "absent" and not present and not force):
module.exit_json(changed=False, name=name, state=state)
if version:
name = name + '/' + version
if state == "present":
if force:
            remove_plugin(module, plugin_bin, name, kibana_version)
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
if __name__ == '__main__':
main()


@@ -0,0 +1,312 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redis
short_description: Various redis commands, slave and flush
description:
- Unified utility to interact with redis instances.
options:
command:
description:
- The selected redis command
      - C(config) (new in 1.6) ensures a configuration setting on an instance.
- C(flush) flushes all the instance or a specified db.
- C(slave) sets a redis instance in slave or master mode.
required: true
choices: [ config, flush, slave ]
login_password:
description:
- The password used to authenticate with (usually not used)
login_host:
description:
- The host running the database
default: localhost
login_port:
description:
- The port to connect to
default: 6379
master_host:
description:
- The host of the master instance [slave command]
master_port:
description:
- The port of the master instance [slave command]
slave_mode:
description:
- the mode of the redis instance [slave command]
default: slave
choices: [ master, slave ]
db:
description:
- The database to flush (used in db mode) [flush command]
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one)
[flush command]
default: all
choices: [ all, db ]
name:
description:
- A redis config key.
value:
description:
- A redis config value. When memory size is needed, it is possible
        to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
Units are case insensitive i.e. 1m = 1mb = 1M = 1MB.
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
  - If the redis master instance that we are making a slave of is password
    protected, the password needs to be set in redis.conf via the masterauth variable.
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''
- name: Set local redis instance to be slave of melee.island on port 6377
redis:
command: slave
master_host: melee.island
master_port: 6377
- name: Deactivate slave mode
redis:
command: slave
slave_mode: master
- name: Flush all the redis db
redis:
command: flush
flush_mode: all
- name: Flush only one db in a redis instance
redis:
command: flush
db: 1
flush_mode: db
- name: Configure local redis to have 10000 max clients
redis:
command: config
name: maxclients
value: 10000
- name: Configure local redis maxmemory to 4GB
redis:
command: config
name: maxmemory
value: 4GB
- name: Configure local redis to have lua time limit of 100 ms
redis:
command: config
name: lua-time-limit
value: 100
'''
import traceback
REDIS_IMP_ERR = None
try:
import redis
except ImportError:
REDIS_IMP_ERR = traceback.format_exc()
redis_found = False
else:
redis_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible.module_utils._text import to_native
# Redis module specific support methods.
def set_slave_mode(client, master_host, master_port):
try:
return client.slaveof(master_host, master_port)
except Exception:
return False
def set_master_mode(client):
try:
return client.slaveof()
except Exception:
return False
def flush(client, db=None):
try:
if not isinstance(db, int):
return client.flushall()
else:
# The passed client has been connected to the database already
return client.flushdb()
except Exception:
return False
# Module execution.
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(type='str', choices=['config', 'flush', 'slave']),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=6379),
master_host=dict(type='str'),
master_port=dict(type='int'),
slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
db=dict(type='int'),
flush_mode=dict(type='str', default='all', choices=['all', 'db']),
name=dict(type='str'),
value=dict(type='str')
),
supports_check_mode=True,
)
if not redis_found:
module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
command = module.params['command']
# Slave Command section -----------
if command == "slave":
master_host = module.params['master_host']
master_port = module.params['master_port']
mode = module.params['slave_mode']
# Check if we have all the data
if mode == "slave": # Only need data if we want to be slave
if not master_host:
module.fail_json(msg='In slave mode master host must be provided')
if not master_port:
module.fail_json(msg='In slave mode master port must be provided')
# Connect and check
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
# Check if we are already in the mode that we want
info = r.info()
if mode == "master" and info["role"] == "master":
module.exit_json(changed=False, mode=mode)
elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
status = dict(
status=mode,
master_host=master_host,
master_port=master_port,
)
module.exit_json(changed=False, mode=status)
else:
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "slave":
if module.check_mode or\
set_slave_mode(r, master_host, master_port):
info = r.info()
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=True, mode=status)
else:
module.fail_json(msg='Unable to set slave mode')
else:
if module.check_mode or set_master_mode(r):
module.exit_json(changed=True, mode=mode)
else:
module.fail_json(msg='Unable to set master mode')
# flush Command section -----------
elif command == "flush":
db = module.params['db']
mode = module.params['flush_mode']
# Check if we have all the data
if mode == "db":
if db is None:
module.fail_json(msg="In db mode the db number must be provided")
# Connect and check
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "all":
if module.check_mode or flush(r):
module.exit_json(changed=True, flushed=True)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush all databases")
else:
if module.check_mode or flush(r, db):
module.exit_json(changed=True, flushed=True, db=db)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush '%d' database" % db)
elif command == 'config':
name = module.params['name']
try: # try to parse the value as if it were the memory size
value = str(human_to_bytes(module.params['value'].upper()))
except ValueError:
value = module.params['value']
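        # e.g. (assumed inputs) human_to_bytes('4GB') -> 4 * 1024 ** 3, so
        # value becomes '4294967296'; non-size values such as 'noeviction'
        # raise ValueError and fall through unchanged.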
r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
try:
r.ping()
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
try:
old_value = r.config_get(name)[name]
except Exception as e:
module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
changed = old_value != value
if module.check_mode or not changed:
module.exit_json(changed=changed, name=name, value=value)
else:
try:
r.config_set(name, value)
except Exception as e:
module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, name=name, value=value)
else:
module.fail_json(msg='A valid command must be provided')
if __name__ == '__main__':
main()


@ -0,0 +1,226 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: riak
short_description: This module handles some common Riak operations
description:
    - This module can be used to join nodes to a cluster and check
      the status of the cluster.
author:
- "James Martin (@jsmartin)"
- "Drew Kerrigan (@drewkerrigan)"
options:
command:
description:
- The command you would like to perform against the cluster.
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
config_dir:
description:
- The path to the riak configuration directory
default: /etc/riak
http_conn:
description:
      - The IP address and port that Riak is listening on for HTTP queries
default: 127.0.0.1:8098
target_node:
description:
- The target node for certain operations (join, ping)
default: riak@127.0.0.1
wait_for_handoffs:
description:
- Number of seconds to wait for handoffs to complete.
wait_for_ring:
description:
- Number of seconds to wait for all nodes to agree on the ring.
wait_for_service:
description:
- Waits for a riak service to come online before continuing.
choices: ['kv']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Joins a Riak node to another node
- riak:
command: join
target_node: riak@10.1.1.1
# Wait for handoffs to finish. Use with async and poll.
- riak:
wait_for_handoffs: yes
# Wait for riak_kv service to startup
- riak:
wait_for_service: kv
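
# Plan and commit staged cluster changes (illustrative sequence using the
# documented 'plan' and 'commit' commands)
- riak:
    command: plan

- riak:
    command: commit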
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def ring_check(module, riak_admin_bin):
cmd = '%s ringready' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0 and 'TRUE All nodes agree on the ring' in out:
return True
else:
return False
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(required=False, default=None, choices=[
'ping', 'kv_test', 'join', 'plan', 'commit']),
config_dir=dict(default='/etc/riak', type='path'),
http_conn=dict(required=False, default='127.0.0.1:8098'),
target_node=dict(default='riak@127.0.0.1', required=False),
            wait_for_handoffs=dict(default=0, type='int'),
            wait_for_ring=dict(default=0, type='int'),
wait_for_service=dict(
required=False, default=None, choices=['kv']),
validate_certs=dict(default='yes', type='bool'))
)
command = module.params.get('command')
http_conn = module.params.get('http_conn')
target_node = module.params.get('target_node')
wait_for_handoffs = module.params.get('wait_for_handoffs')
wait_for_ring = module.params.get('wait_for_ring')
wait_for_service = module.params.get('wait_for_service')
# make sure riak commands are on the path
riak_bin = module.get_bin_path('riak')
riak_admin_bin = module.get_bin_path('riak-admin')
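    # Poll Riak's HTTP /stats endpoint for up to 120 seconds before giving up.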
timeout = time.time() + 120
while True:
if time.time() > timeout:
module.fail_json(msg='Timeout, could not fetch Riak stats.')
(response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
if info['status'] == 200:
stats_raw = response.read()
break
time.sleep(5)
    # Attempt to parse the stats JSON we fetched above.
try:
stats = json.loads(stats_raw)
except Exception:
module.fail_json(msg='Could not parse Riak stats.')
node_name = stats['nodename']
nodes = stats['ring_members']
ring_size = stats['ring_creation_size']
rc, out, err = module.run_command([riak_bin, 'version'])
version = out.strip()
result = dict(node_name=node_name,
nodes=nodes,
ring_size=ring_size,
version=version)
if command == 'ping':
cmd = '%s ping %s' % (riak_bin, target_node)
rc, out, err = module.run_command(cmd)
if rc == 0:
result['ping'] = out
else:
module.fail_json(msg=out)
elif command == 'kv_test':
cmd = '%s test' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['kv_test'] = out
else:
module.fail_json(msg=out)
elif command == 'join':
if nodes.count(node_name) == 1 and len(nodes) > 1:
result['join'] = 'Node is already in cluster or staged to be in cluster.'
else:
cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
rc, out, err = module.run_command(cmd)
if rc == 0:
result['join'] = out
result['changed'] = True
else:
module.fail_json(msg=out)
elif command == 'plan':
cmd = '%s cluster plan' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['plan'] = out
if 'Staged Changes' in out:
result['changed'] = True
else:
module.fail_json(msg=out)
elif command == 'commit':
cmd = '%s cluster commit' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0:
result['commit'] = out
result['changed'] = True
else:
module.fail_json(msg=out)
# this could take a while, recommend to run in async mode
if wait_for_handoffs:
timeout = time.time() + wait_for_handoffs
while True:
cmd = '%s transfers' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if 'No transfers active' in out:
result['handoffs'] = 'No transfers active.'
break
time.sleep(10)
if time.time() > timeout:
module.fail_json(msg='Timeout waiting for handoffs.')
if wait_for_service:
cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
rc, out, err = module.run_command(cmd)
result['service'] = out
if wait_for_ring:
timeout = time.time() + wait_for_ring
while True:
if ring_check(module, riak_admin_bin):
break
time.sleep(10)
if time.time() > timeout:
module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
result['ring_ready'] = ring_check(module, riak_admin_bin)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,228 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
# Outline and parts are reused from Mark Theunissen's mysql_db module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mssql_db
short_description: Add or remove MSSQL databases from a remote host.
description:
- Add or remove MSSQL databases from a remote host.
options:
name:
description:
- name of the database to add or remove
required: true
aliases: [ db ]
login_user:
description:
- The username used to authenticate with
login_password:
description:
- The password used to authenticate with
login_host:
description:
- Host running the database
login_port:
description:
      - Port of the MSSQL server. Requires I(login_host) to be defined as something other than localhost if I(login_port) is used
default: 1433
state:
description:
- The database state
default: present
choices: [ "present", "absent", "import" ]
target:
description:
      - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
        files (C(.sql)) are supported.
autocommit:
description:
      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
        within a transaction.
type: bool
default: 'no'
notes:
- Requires the pymssql Python package on the remote host. For Ubuntu, this
is as easy as pip install pymssql (See M(pip).)
requirements:
- python >= 2.7
- pymssql
author: Vedit Firat Arig (@vedit)
'''
EXAMPLES = '''
# Create a new database with name 'jackdata'
- mssql_db:
name: jackdata
state: present
# Copy database dump file to remote host and restore it to database 'my_db'
- copy:
src: dump.sql
dest: /tmp
- mssql_db:
name: my_db
state: import
target: /tmp/dump.sql
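
# Remove the database 'jackdata' again (uses the documented 'absent' state)
- mssql_db:
    name: jackdata
    state: absent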
'''
RETURN = '''
#
'''
import os
import traceback
PYMSSQL_IMP_ERR = None
try:
import pymssql
except ImportError:
PYMSSQL_IMP_ERR = traceback.format_exc()
mssql_found = False
else:
mssql_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
def db_exists(conn, cursor, db):
cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
conn.commit()
return bool(cursor.rowcount)
def db_create(conn, cursor, db):
cursor.execute("CREATE DATABASE [%s]" % db)
return db_exists(conn, cursor, db)
def db_delete(conn, cursor, db):
try:
cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
except Exception:
pass
cursor.execute("DROP DATABASE [%s]" % db)
return not db_exists(conn, cursor, db)
def db_import(conn, cursor, module, db, target):
if os.path.isfile(target):
with open(target, 'r') as backup:
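            # T-SQL scripts separate batches with GO lines; statements are
            # accumulated and each batch is executed when a GO line is reached.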
sqlQuery = "USE [%s]\n" % db
for line in backup:
if line is None:
break
elif line.startswith('GO'):
cursor.execute(sqlQuery)
sqlQuery = "USE [%s]\n" % db
else:
sqlQuery += line
cursor.execute(sqlQuery)
conn.commit()
return 0, "import successful", ""
else:
return 1, "cannot find target file", "cannot find target file"
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['db']),
login_user=dict(default=''),
login_password=dict(default='', no_log=True),
login_host=dict(required=True),
login_port=dict(default='1433'),
target=dict(default=None),
autocommit=dict(type='bool', default=False),
state=dict(
default='present', choices=['present', 'absent', 'import'])
)
)
if not mssql_found:
module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
db = module.params['name']
state = module.params['state']
autocommit = module.params['autocommit']
target = module.params["target"]
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
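    # pymssql/FreeTDS accepts a 'host:port' server string, so append the port
    # when a non-default one is used.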
login_querystring = login_host
if login_port != "1433":
login_querystring = "%s:%s" % (login_host, login_port)
if login_user != "" and login_password == "":
module.fail_json(msg="when supplying login_user arguments login_password must be provided")
try:
conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
cursor = conn.cursor()
except Exception as e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
"@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
conn.autocommit(True)
changed = False
if db_exists(conn, cursor, db):
if state == "absent":
try:
changed = db_delete(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error deleting database: " + str(e))
elif state == "import":
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
else:
if state == "present":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
elif state == "import":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
module.exit_json(changed=changed, db=db)
if __name__ == '__main__':
main()


@ -0,0 +1,610 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mysql_db
short_description: Add or remove MySQL databases from a remote host
description:
- Add or remove MySQL databases from a remote host.
options:
name:
description:
- Name of the database to add or remove.
- I(name=all) may only be provided if I(state) is C(dump) or C(import).
- List of databases is provided with I(state=dump), I(state=present) and I(state=absent).
- If I(name=all) it works like --all-databases option for mysqldump (Added in 2.0).
required: true
type: list
elements: str
aliases: [db]
state:
description:
- The database state
type: str
default: present
choices: ['absent', 'dump', 'import', 'present']
collation:
description:
      - Collation mode (sorting). This only applies to new tables/databases and
        does not update existing ones; this is a limitation of MySQL.
type: str
default: ''
encoding:
description:
      - Encoding mode to use, for example C(utf8) or C(latin1_swedish_ci),
        at creation of the database, or during dump or import of an SQL script.
type: str
default: ''
target:
description:
- Location, on the remote host, of the dump file to read from or write to.
- Uncompressed SQL files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and
xz (Added in 2.0) compressed files are supported.
type: path
single_transaction:
description:
- Execute the dump in a single transaction.
type: bool
default: no
quick:
description:
- Option used for dumping large tables.
type: bool
default: yes
ignore_tables:
description:
- A list of table names that will be ignored in the dump
of the form database_name.table_name.
type: list
elements: str
required: no
default: []
hex_blob:
description:
- Dump binary columns using hexadecimal notation.
required: no
default: no
type: bool
force:
description:
- Continue dump or import even if we get an SQL error.
- Used only when I(state) is C(dump) or C(import).
required: no
type: bool
default: no
master_data:
description:
- Option to dump a master replication server to produce a dump file
that can be used to set up another server as a slave of the master.
- C(0) to not include master data.
- C(1) to generate a 'CHANGE MASTER TO' statement
required on the slave to start the replication process.
- C(2) to generate a commented 'CHANGE MASTER TO'.
- Can be used when I(state=dump).
required: no
type: int
choices: [0, 1, 2]
default: 0
skip_lock_tables:
description:
- Skip locking tables for read. Used when I(state=dump), ignored otherwise.
required: no
type: bool
default: no
dump_extra_args:
description:
- Provide additional arguments for mysqldump.
Used when I(state=dump) only, ignored otherwise.
required: no
type: str
seealso:
- module: mysql_info
- module: mysql_variables
- module: mysql_user
- module: mysql_replication
- name: MySQL command-line client reference
description: Complete reference of the MySQL command-line client documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/mysql.html
- name: mysqldump reference
description: Complete reference of the ``mysqldump`` client utility documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html
- name: CREATE DATABASE reference
description: Complete reference of the CREATE DATABASE command documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/create-database.html
- name: DROP DATABASE reference
description: Complete reference of the DROP DATABASE command documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/drop-database.html
author: "Ansible Core Team"
requirements:
- mysql (command line binary)
- mysqldump (command line binary)
notes:
- Requires the mysql and mysqldump binaries on the remote host.
- This module is B(not idempotent) when I(state) is C(import),
and will import the dump file each time if run more than once.
extends_documentation_fragment:
- community.general.mysql
'''
EXAMPLES = r'''
- name: Create a new database with name 'bobdata'
mysql_db:
name: bobdata
state: present
- name: Create new databases with names 'foo' and 'bar'
mysql_db:
name:
- foo
- bar
state: present
# Copy database dump file to remote host and restore it to database 'my_db'
- name: Copy database dump file
copy:
src: dump.sql.bz2
dest: /tmp
- name: Restore database
mysql_db:
name: my_db
state: import
target: /tmp/dump.sql.bz2
- name: Restore database ignoring errors
mysql_db:
name: my_db
state: import
target: /tmp/dump.sql.bz2
force: yes
- name: Dump multiple databases
mysql_db:
state: dump
name: db_1,db_2
target: /tmp/dump.sql
- name: Dump multiple databases
mysql_db:
state: dump
name:
- db_1
- db_2
target: /tmp/dump.sql
- name: Dump all databases to hostname.sql
mysql_db:
state: dump
name: all
target: /tmp/dump.sql
- name: Dump all databases to hostname.sql including master data
mysql_db:
state: dump
name: all
target: /tmp/dump.sql
master_data: 1
# Import of sql script with encoding option
- name: >
Import dump.sql with specific latin1 encoding,
similar to mysql -u <username> --default-character-set=latin1 -p <password> < dump.sql
mysql_db:
state: import
name: all
encoding: latin1
target: /tmp/dump.sql
# Dump of database with encoding option
- name: >
    Dump of database with specific latin1 encoding,
similar to mysqldump -u <username> --default-character-set=latin1 -p <password> <database>
mysql_db:
state: dump
name: db_1
encoding: latin1
target: /tmp/dump.sql
- name: Delete database with name 'bobdata'
mysql_db:
name: bobdata
state: absent
- name: Make sure there is neither a database with name 'foo', nor one with name 'bar'
mysql_db:
name:
- foo
- bar
state: absent
# Dump database with argument not directly supported by this module
# using dump_extra_args parameter
- name: Dump databases without including triggers
mysql_db:
state: dump
name: foo
target: /tmp/dump.sql
dump_extra_args: --skip-triggers
'''
RETURN = r'''
db:
description: Database names in string format delimited by white space.
returned: always
type: str
sample: "foo bar"
db_list:
description: List of database names.
returned: always
type: list
sample: ["foo", "bar"]
version_added: '2.9'
executed_commands:
  description: List of commands the module attempted to run.
returned: if executed
type: list
sample: ["CREATE DATABASE acme"]
version_added: '2.10'
'''
import os
import subprocess
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import mysql_quote_identifier
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_native
executed_commands = []
# ===========================================
# MySQL module specific support methods.
#
def db_exists(cursor, db):
res = 0
for each_db in db:
res += cursor.execute("SHOW DATABASES LIKE %s", (each_db.replace("_", r"\_"),))
return res == len(db)
def db_delete(cursor, db):
if not db:
return False
for each_db in db:
query = "DROP DATABASE %s" % mysql_quote_identifier(each_db, 'database')
executed_commands.append(query)
cursor.execute(query)
return True
def db_dump(module, host, user, password, db_name, target, all_databases, port,
config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
single_transaction=None, quick=None, ignore_tables=None, hex_blob=None,
encoding=None, force=False, master_data=0, skip_lock_tables=False, dump_extra_args=None):
cmd = module.get_bin_path('mysqldump', True)
# If defined, mysqldump demands --defaults-extra-file be the first option
if config_file:
cmd += " --defaults-extra-file=%s" % shlex_quote(config_file)
if user is not None:
cmd += " --user=%s" % shlex_quote(user)
if password is not None:
cmd += " --password=%s" % shlex_quote(password)
if ssl_cert is not None:
cmd += " --ssl-cert=%s" % shlex_quote(ssl_cert)
if ssl_key is not None:
cmd += " --ssl-key=%s" % shlex_quote(ssl_key)
if ssl_ca is not None:
cmd += " --ssl-ca=%s" % shlex_quote(ssl_ca)
if force:
cmd += " --force"
if socket is not None:
cmd += " --socket=%s" % shlex_quote(socket)
else:
cmd += " --host=%s --port=%i" % (shlex_quote(host), port)
if all_databases:
cmd += " --all-databases"
elif len(db_name) > 1:
cmd += " --databases {0}".format(' '.join(db_name))
else:
cmd += " %s" % shlex_quote(' '.join(db_name))
if skip_lock_tables:
cmd += " --skip-lock-tables"
if (encoding is not None) and (encoding != ""):
cmd += " --default-character-set=%s" % shlex_quote(encoding)
if single_transaction:
cmd += " --single-transaction=true"
if quick:
cmd += " --quick"
if ignore_tables:
for an_ignored_table in ignore_tables:
cmd += " --ignore-table={0}".format(an_ignored_table)
if hex_blob:
cmd += " --hex-blob"
if master_data:
cmd += " --master-data=%s" % master_data
if dump_extra_args is not None:
cmd += " " + dump_extra_args
path = None
if os.path.splitext(target)[-1] == '.gz':
path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
path = module.get_bin_path('xz', True)
if path:
cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target))
else:
cmd += " > %s" % shlex_quote(target)
executed_commands.append(cmd)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_import(module, host, user, password, db_name, target, all_databases, port, config_file,
socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, encoding=None, force=False):
if not os.path.exists(target):
return module.fail_json(msg="target %s does not exist on the host" % target)
cmd = [module.get_bin_path('mysql', True)]
    # --defaults-extra-file must go first, or mysql errors out
if config_file:
cmd.append("--defaults-extra-file=%s" % shlex_quote(config_file))
if user:
cmd.append("--user=%s" % shlex_quote(user))
if password:
cmd.append("--password=%s" % shlex_quote(password))
if ssl_cert is not None:
cmd.append("--ssl-cert=%s" % shlex_quote(ssl_cert))
if ssl_key is not None:
cmd.append("--ssl-key=%s" % shlex_quote(ssl_key))
if ssl_ca is not None:
cmd.append("--ssl-ca=%s" % shlex_quote(ssl_ca))
if force:
cmd.append("-f")
if socket is not None:
cmd.append("--socket=%s" % shlex_quote(socket))
else:
cmd.append("--host=%s" % shlex_quote(host))
cmd.append("--port=%i" % port)
if (encoding is not None) and (encoding != ""):
cmd.append("--default-character-set=%s" % shlex_quote(encoding))
if not all_databases:
cmd.append("--one-database")
cmd.append(shlex_quote(''.join(db_name)))
comp_prog_path = None
if os.path.splitext(target)[-1] == '.gz':
comp_prog_path = module.get_bin_path('gzip', required=True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzip2', required=True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xz', required=True)
if comp_prog_path:
        # The executed_commands entry below is recorded for return data only:
executed_commands.append('%s -dc %s | %s' % (comp_prog_path, target, ' '.join(cmd)))
p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout2, stderr2) = p2.communicate()
p1.stdout.close()
p1.wait()
if p1.returncode != 0:
stderr1 = p1.stderr.read()
return p1.returncode, '', stderr1
else:
return p2.returncode, stdout2, stderr2
else:
cmd = ' '.join(cmd)
cmd += " < %s" % shlex_quote(target)
executed_commands.append(cmd)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
if not db:
return False
query_params = dict(enc=encoding, collate=collation)
res = 0
for each_db in db:
query = ['CREATE DATABASE %s' % mysql_quote_identifier(each_db, 'database')]
if encoding:
query.append("CHARACTER SET %(enc)s")
if collation:
query.append("COLLATE %(collate)s")
query = ' '.join(query)
res += cursor.execute(query, query_params)
try:
executed_commands.append(cursor.mogrify(query, query_params))
except AttributeError:
executed_commands.append(cursor._executed)
except Exception:
executed_commands.append(query)
return res > 0
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(type='str'),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=3306),
login_unix_socket=dict(type='str'),
name=dict(type='list', required=True, aliases=['db']),
encoding=dict(type='str', default=''),
collation=dict(type='str', default=''),
target=dict(type='path'),
state=dict(type='str', default='present', choices=['absent', 'dump', 'import', 'present']),
client_cert=dict(type='path', aliases=['ssl_cert']),
client_key=dict(type='path', aliases=['ssl_key']),
ca_cert=dict(type='path', aliases=['ssl_ca']),
connect_timeout=dict(type='int', default=30),
config_file=dict(type='path', default='~/.my.cnf'),
single_transaction=dict(type='bool', default=False),
quick=dict(type='bool', default=True),
ignore_tables=dict(type='list', default=[]),
hex_blob=dict(default=False, type='bool'),
force=dict(type='bool', default=False),
master_data=dict(type='int', default=0, choices=[0, 1, 2]),
skip_lock_tables=dict(type='bool', default=False),
dump_extra_args=dict(type='str'),
),
supports_check_mode=True,
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
db = module.params["name"]
if not db:
module.exit_json(changed=False, db=db, db_list=[])
db = [each_db.strip() for each_db in db]
encoding = module.params["encoding"]
collation = module.params["collation"]
state = module.params["state"]
target = module.params["target"]
socket = module.params["login_unix_socket"]
login_port = module.params["login_port"]
if login_port < 0 or login_port > 65535:
module.fail_json(msg="login_port must be a valid unix port number (0-65535)")
ssl_cert = module.params["client_cert"]
ssl_key = module.params["client_key"]
ssl_ca = module.params["ca_cert"]
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
login_password = module.params["login_password"]
login_user = module.params["login_user"]
login_host = module.params["login_host"]
ignore_tables = module.params["ignore_tables"]
for a_table in ignore_tables:
if a_table == "":
module.fail_json(msg="Name of ignored table cannot be empty")
single_transaction = module.params["single_transaction"]
quick = module.params["quick"]
hex_blob = module.params["hex_blob"]
force = module.params["force"]
master_data = module.params["master_data"]
skip_lock_tables = module.params["skip_lock_tables"]
dump_extra_args = module.params["dump_extra_args"]
if len(db) > 1 and state == 'import':
module.fail_json(msg="Multiple databases are not supported with state=import")
db_name = ' '.join(db)
all_databases = False
if state in ['dump', 'import']:
if target is None:
module.fail_json(msg="with state=%s target is required" % state)
if db == ['all']:
all_databases = True
else:
if db == ['all']:
module.fail_json(msg="name is not allowed to equal 'all' unless state equals import, or dump.")
try:
cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca,
connect_timeout=connect_timeout)
except Exception as e:
if os.path.exists(config_file):
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
changed = False
if not os.path.exists(config_file):
config_file = None
existence_list = []
non_existence_list = []
if not all_databases:
for each_database in db:
if db_exists(cursor, [each_database]):
existence_list.append(each_database)
else:
non_existence_list.append(each_database)
if state == "absent":
if module.check_mode:
module.exit_json(changed=bool(existence_list), db=db_name, db_list=db)
try:
changed = db_delete(cursor, existence_list)
except Exception as e:
module.fail_json(msg="error deleting database: %s" % to_native(e))
module.exit_json(changed=changed, db=db_name, db_list=db, executed_commands=executed_commands)
elif state == "present":
if module.check_mode:
module.exit_json(changed=bool(non_existence_list), db=db_name, db_list=db)
changed = False
if non_existence_list:
try:
changed = db_create(cursor, non_existence_list, encoding, collation)
except Exception as e:
module.fail_json(msg="error creating database: %s" % to_native(e),
exception=traceback.format_exc())
module.exit_json(changed=changed, db=db_name, db_list=db, executed_commands=executed_commands)
elif state == "dump":
if non_existence_list and not all_databases:
module.fail_json(msg="Cannot dump database(s) %r - not found" % (', '.join(non_existence_list)))
if module.check_mode:
module.exit_json(changed=True, db=db_name, db_list=db)
rc, stdout, stderr = db_dump(module, login_host, login_user,
login_password, db, target, all_databases,
login_port, config_file, socket, ssl_cert, ssl_key,
ssl_ca, single_transaction, quick, ignore_tables,
hex_blob, encoding, force, master_data, skip_lock_tables,
dump_extra_args)
if rc != 0:
module.fail_json(msg="%s" % stderr)
module.exit_json(changed=True, db=db_name, db_list=db, msg=stdout,
executed_commands=executed_commands)
elif state == "import":
if module.check_mode:
module.exit_json(changed=True, db=db_name, db_list=db)
if non_existence_list and not all_databases:
try:
db_create(cursor, non_existence_list, encoding, collation)
except Exception as e:
module.fail_json(msg="error creating database: %s" % to_native(e),
exception=traceback.format_exc())
rc, stdout, stderr = db_import(module, login_host, login_user,
login_password, db, target,
all_databases,
login_port, config_file,
socket, ssl_cert, ssl_key, ssl_ca, encoding, force)
if rc != 0:
module.fail_json(msg="%s" % stderr)
module.exit_json(changed=True, db=db_name, db_list=db, msg=stdout,
executed_commands=executed_commands)
if __name__ == '__main__':
main()


@ -0,0 +1,529 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: mysql_info
short_description: Gather information about MySQL servers
description:
- Gathers information about MySQL servers.
options:
filter:
description:
      - Limit the collected information with a comma-separated string or a YAML list.
- Allowable values are C(version), C(databases), C(settings), C(global_status),
C(users), C(engines), C(master_status), C(slave_status), C(slave_hosts).
- By default, collects all subsets.
      - You can use '!' before a value (for example, C(!settings)) to exclude it from the information.
      - If you pass both including and excluding values to the filter, for example, I(filter=!settings,version),
        the excluding values, C(!settings) in this case, will be ignored.
type: list
elements: str
login_db:
description:
- Database name to connect to.
- It makes sense if I(login_user) is allowed to connect to a specific database only.
type: str
exclude_fields:
description:
      - List of fields to exclude from the collected information.
      - "Supported elements: C(db_size). Unsupported elements will be ignored."
type: list
elements: str
return_empty_dbs:
description:
      - Include names of empty databases in the returned dictionary.
type: bool
default: no
notes:
- Calculating the size of a database might be slow, depending on the number and size of tables in it.
To avoid this, use I(exclude_fields=db_size).
seealso:
- module: mysql_variables
- module: mysql_db
- module: mysql_user
- module: mysql_replication
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.mysql
'''
EXAMPLES = r'''
# Display info from mysql-hosts group (using creds from ~/.my.cnf to connect):
# ansible mysql-hosts -m mysql_info
# Display only databases and users info:
# ansible mysql-hosts -m mysql_info -a 'filter=databases,users'
# Display only slave status:
# ansible standby -m mysql_info -a 'filter=slave_status'
# Display all info from databases group except settings:
# ansible databases -m mysql_info -a 'filter=!settings'
- name: Collect all possible information using passwordless root access
mysql_info:
login_user: root
- name: Get MySQL version with non-default credentials
mysql_info:
login_user: mysuperuser
login_password: mysuperpass
filter: version
- name: Collect all info except settings and users by root
mysql_info:
login_user: root
login_password: rootpass
filter: "!settings,!users"
- name: Collect info about databases and version using ~/.my.cnf as a credential file
become: yes
mysql_info:
filter:
- databases
- version
- name: Collect info about databases and version using ~alice/.my.cnf as a credential file
become: yes
mysql_info:
config_file: /home/alice/.my.cnf
filter:
- databases
- version
- name: Collect info about databases including empty and excluding their sizes
become: yes
mysql_info:
config_file: /home/alice/.my.cnf
filter:
- databases
exclude_fields: db_size
return_empty_dbs: yes
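
- name: Collect only the global status subset (illustrative filter value)
  mysql_info:
    filter: global_status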
'''
RETURN = r'''
version:
description: Database server version.
returned: if not excluded by filter
type: dict
sample: { "version": { "major": 5, "minor": 5, "release": 60 } }
contains:
major:
description: Major server version.
returned: if not excluded by filter
type: int
sample: 5
minor:
description: Minor server version.
returned: if not excluded by filter
type: int
sample: 5
release:
description: Release server version.
returned: if not excluded by filter
type: int
sample: 60
databases:
description: Information about databases.
returned: if not excluded by filter
type: dict
sample:
- { "mysql": { "size": 656594 }, "information_schema": { "size": 73728 } }
contains:
size:
description: Database size in bytes.
returned: if not excluded by filter
type: dict
sample: { 'size': 656594 }
settings:
description: Global settings (variables) information.
returned: if not excluded by filter
type: dict
sample:
- { "innodb_open_files": 300, innodb_page_size": 16384 }
global_status:
description: Global status information.
returned: if not excluded by filter
type: dict
sample:
- { "Innodb_buffer_pool_read_requests": 123, "Innodb_buffer_pool_reads": 32 }
version_added: "2.10"
users:
description: Users information.
returned: if not excluded by filter
type: dict
sample:
- { "localhost": { "root": { "Alter_priv": "Y", "Alter_routine_priv": "Y" } } }
engines:
description: Information about the server's storage engines.
returned: if not excluded by filter
type: dict
sample:
- { "CSV": { "Comment": "CSV storage engine", "Savepoints": "NO", "Support": "YES", "Transactions": "NO", "XA": "NO" } }
master_status:
description: Master status information.
returned: if master
type: dict
sample:
- { "Binlog_Do_DB": "", "Binlog_Ignore_DB": "mysql", "File": "mysql-bin.000001", "Position": 769 }
slave_status:
description: Slave status information.
returned: if standby
type: dict
sample:
- { "192.168.1.101": { "3306": { "replication_user": { "Connect_Retry": 60, "Exec_Master_Log_Pos": 769, "Last_Errno": 0 } } } }
slave_hosts:
  description: Slave hosts information.
returned: if master
type: dict
sample:
- { "2": { "Host": "", "Master_id": 1, "Port": 3306 } }
'''
from decimal import Decimal
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import (
mysql_connect,
mysql_common_argument_spec,
mysql_driver,
mysql_driver_fail_msg,
)
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
# ===========================================
# MySQL module specific support methods.
#
class MySQL_Info(object):
"""Class for collection MySQL instance information.
Arguments:
module (AnsibleModule): Object of AnsibleModule class.
cursor (pymysql/mysql-python): Cursor class for interaction with
the database.
Note:
If you need to add a new subset:
1. add a new key with the same name to self.info attr in self.__init__()
2. add a new private method to get the information
3. add invocation of the new method to self.__collect()
4. add info about the new subset to the DOCUMENTATION block
5. add info about the new subset with an example to RETURN block
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.info = {
'version': {},
'databases': {},
'settings': {},
'global_status': {},
'engines': {},
'users': {},
'master_status': {},
'slave_hosts': {},
'slave_status': {},
}
def get_info(self, filter_, exclude_fields, return_empty_dbs):
"""Get MySQL instance information based on filter_.
Arguments:
filter_ (list): List of collected subsets (e.g., databases, users, etc.),
when it is empty, return all available information.
"""
self.__collect(exclude_fields, return_empty_dbs)
inc_list = []
exc_list = []
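        # Per the filter option documentation, including values take
        # precedence: if any non-'!' element is present, exclusions are ignored.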
if filter_:
partial_info = {}
for fi in filter_:
if fi.lstrip('!') not in self.info:
self.module.warn('filter element: %s is not allowable, ignored' % fi)
continue
if fi[0] == '!':
exc_list.append(fi.lstrip('!'))
else:
inc_list.append(fi)
if inc_list:
for i in self.info:
if i in inc_list:
partial_info[i] = self.info[i]
else:
for i in self.info:
if i not in exc_list:
partial_info[i] = self.info[i]
return partial_info
else:
return self.info
def __collect(self, exclude_fields, return_empty_dbs):
"""Collect all possible subsets."""
self.__get_databases(exclude_fields, return_empty_dbs)
self.__get_global_variables()
self.__get_global_status()
self.__get_engines()
self.__get_users()
self.__get_master_status()
self.__get_slave_status()
self.__get_slaves()
def __get_engines(self):
"""Get storage engines info."""
res = self.__exec_sql('SHOW ENGINES')
if res:
for line in res:
engine = line['Engine']
self.info['engines'][engine] = {}
for vname, val in iteritems(line):
if vname != 'Engine':
self.info['engines'][engine][vname] = val
def __convert(self, val):
"""Convert unserializable data."""
try:
if isinstance(val, Decimal):
val = float(val)
else:
val = int(val)
except ValueError:
pass
except TypeError:
pass
return val
def __get_global_variables(self):
"""Get global variables (instance settings)."""
res = self.__exec_sql('SHOW GLOBAL VARIABLES')
if res:
for var in res:
self.info['settings'][var['Variable_name']] = self.__convert(var['Value'])
ver = self.info['settings']['version'].split('.')
release = ver[2].split('-')[0]
self.info['version'] = dict(
major=int(ver[0]),
minor=int(ver[1]),
release=int(release),
)
def __get_global_status(self):
"""Get global status."""
res = self.__exec_sql('SHOW GLOBAL STATUS')
if res:
for var in res:
self.info['global_status'][var['Variable_name']] = self.__convert(var['Value'])
def __get_master_status(self):
"""Get master status if the instance is a master."""
res = self.__exec_sql('SHOW MASTER STATUS')
if res:
for line in res:
for vname, val in iteritems(line):
self.info['master_status'][vname] = self.__convert(val)
def __get_slave_status(self):
"""Get slave status if the instance is a slave."""
res = self.__exec_sql('SHOW SLAVE STATUS')
if res:
for line in res:
host = line['Master_Host']
if host not in self.info['slave_status']:
self.info['slave_status'][host] = {}
port = line['Master_Port']
if port not in self.info['slave_status'][host]:
self.info['slave_status'][host][port] = {}
user = line['Master_User']
if user not in self.info['slave_status'][host][port]:
self.info['slave_status'][host][port][user] = {}
for vname, val in iteritems(line):
if vname not in ('Master_Host', 'Master_Port', 'Master_User'):
self.info['slave_status'][host][port][user][vname] = self.__convert(val)
def __get_slaves(self):
"""Get slave hosts info if the instance is a master."""
res = self.__exec_sql('SHOW SLAVE HOSTS')
if res:
for line in res:
srv_id = line['Server_id']
if srv_id not in self.info['slave_hosts']:
self.info['slave_hosts'][srv_id] = {}
for vname, val in iteritems(line):
if vname != 'Server_id':
self.info['slave_hosts'][srv_id][vname] = self.__convert(val)
def __get_users(self):
"""Get user info."""
res = self.__exec_sql('SELECT * FROM mysql.user')
if res:
for line in res:
host = line['Host']
if host not in self.info['users']:
self.info['users'][host] = {}
user = line['User']
self.info['users'][host][user] = {}
for vname, val in iteritems(line):
if vname not in ('Host', 'User'):
self.info['users'][host][user][vname] = self.__convert(val)
def __get_databases(self, exclude_fields, return_empty_dbs):
"""Get info about databases."""
        if not exclude_fields or 'db_size' not in exclude_fields:
            query = ('SELECT table_schema AS "name", '
                     'SUM(data_length + index_length) AS "size" '
                     'FROM information_schema.TABLES GROUP BY table_schema')
        else:
            query = ('SELECT table_schema AS "name" '
                     'FROM information_schema.TABLES GROUP BY table_schema')
res = self.__exec_sql(query)
if res:
for db in res:
self.info['databases'][db['name']] = {}
if not exclude_fields or 'db_size' not in exclude_fields:
self.info['databases'][db['name']]['size'] = int(db['size'])
# If empty dbs are not needed in the returned dict, exit from the method
if not return_empty_dbs:
return None
# Add info about empty databases (issue #65727):
res = self.__exec_sql('SHOW DATABASES')
if res:
for db in res:
if db['Database'] not in self.info['databases']:
self.info['databases'][db['Database']] = {}
if not exclude_fields or 'db_size' not in exclude_fields:
self.info['databases'][db['Database']]['size'] = 0
def __exec_sql(self, query, ddl=False):
"""Execute SQL.
Arguments:
ddl (bool): If True, return True or False.
Used for queries that don't return any rows
(mainly for DDL queries) (default False).
"""
try:
self.cursor.execute(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except Exception as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = mysql_common_argument_spec()
argument_spec.update(
login_db=dict(type='str'),
filter=dict(type='list'),
exclude_fields=dict(type='list'),
return_empty_dbs=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
db = module.params['login_db']
connect_timeout = module.params['connect_timeout']
login_user = module.params['login_user']
login_password = module.params['login_password']
ssl_cert = module.params['client_cert']
ssl_key = module.params['client_key']
ssl_ca = module.params['ca_cert']
config_file = module.params['config_file']
filter_ = module.params['filter']
exclude_fields = module.params['exclude_fields']
return_empty_dbs = module.params['return_empty_dbs']
if filter_:
filter_ = [f.strip() for f in filter_]
if exclude_fields:
exclude_fields = set([f.strip() for f in exclude_fields])
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
try:
cursor, db_conn = mysql_connect(module, login_user, login_password,
config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout, cursor_class='DictCursor')
except Exception as e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
###############################
# Create object and do main job
mysql = MySQL_Info(module, cursor)
module.exit_json(changed=False, **mysql.get_info(filter_, exclude_fields, return_empty_dbs))
if __name__ == '__main__':
main()


@ -0,0 +1,238 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: mysql_query
short_description: Run MySQL queries
description:
- Runs arbitrary MySQL queries.
- Note that the module does not support check mode.
  Unless I(single_transaction) is enabled, all queries are executed in autocommit mode.
options:
query:
description:
- SQL query to run. Multiple queries can be passed using YAML list syntax.
type: list
elements: str
required: yes
positional_args:
description:
- List of values to be passed as positional arguments to the query.
- Mutually exclusive with I(named_args).
type: list
named_args:
description:
- Dictionary of key-value arguments to pass to the query.
- Mutually exclusive with I(positional_args).
type: dict
login_db:
description:
- Name of database to connect to and run queries against.
type: str
single_transaction:
description:
    - Whether to run the passed queries in a single transaction (C(yes)) or commit them one by one (C(no)).
type: bool
default: no
notes:
- To pass a query containing commas, use YAML list notation with hyphen (see EXAMPLES block).
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.mysql
'''
EXAMPLES = r'''
- name: Simple select query to acme db
mysql_query:
login_db: acme
query: SELECT * FROM orders
- name: Select query to db acme with positional arguments
mysql_query:
login_db: acme
query: SELECT * FROM acme WHERE id = %s AND story = %s
positional_args:
- 1
- test
- name: Select query to test_db with named_args
mysql_query:
login_db: test_db
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
named_args:
id_val: 1
story_val: test
- name: Run several insert queries against db test_db in single transaction
mysql_query:
login_db: test_db
query:
- INSERT INTO articles (id, story) VALUES (2, 'my_long_story')
- INSERT INTO prices (id, price) VALUES (123, '100.00')
single_transaction: yes
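
# A DDL statement is reported as changed even though it affects 0 rows
# (illustrative table definition)
- name: Create table in test_db
  mysql_query:
    login_db: test_db
    query: CREATE TABLE articles (id int, story text)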
'''
RETURN = r'''
executed_queries:
description: List of executed queries.
returned: always
type: list
sample: ['SELECT * FROM bar', 'UPDATE bar SET id = 1 WHERE id = 2']
query_result:
description:
- List of lists (sublist for each query) containing dictionaries
in column:value form representing returned rows.
returned: changed
type: list
sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"ID": 1}, {"ID": 2}]]
rowcount:
description: Number of affected rows for each subquery.
returned: changed
type: list
sample: [5, 1]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import (
mysql_connect,
mysql_common_argument_spec,
mysql_driver,
mysql_driver_fail_msg,
)
from ansible.module_utils._text import to_native
DML_QUERY_KEYWORDS = ('INSERT', 'UPDATE', 'DELETE')
# TRUNCATE is not a DDL query, but it also returns 0 rows affected:
DDL_QUERY_KEYWORDS = ('CREATE', 'DROP', 'ALTER', 'RENAME', 'TRUNCATE')
# ===========================================
# Module execution.
#
def main():
argument_spec = mysql_common_argument_spec()
argument_spec.update(
query=dict(type='list', elements='str', required=True),
login_db=dict(type='str'),
positional_args=dict(type='list'),
named_args=dict(type='dict'),
single_transaction=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(
('positional_args', 'named_args'),
),
)
db = module.params['login_db']
connect_timeout = module.params['connect_timeout']
login_user = module.params['login_user']
login_password = module.params['login_password']
ssl_cert = module.params['client_cert']
ssl_key = module.params['client_key']
ssl_ca = module.params['ca_cert']
config_file = module.params['config_file']
query = module.params["query"]
if module.params["single_transaction"]:
autocommit = False
else:
autocommit = True
# Prepare args:
if module.params.get("positional_args"):
arguments = module.params["positional_args"]
elif module.params.get("named_args"):
arguments = module.params["named_args"]
else:
arguments = None
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
# Connect to DB:
try:
cursor, db_connection = mysql_connect(module, login_user, login_password,
config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout,
cursor_class='DictCursor', autocommit=autocommit)
except Exception as e:
module.fail_json(msg="unable to connect to database, check login_user and "
"login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
# Set defaults:
changed = False
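    # The longest DML/DDL keyword determines how many leading characters
    # of each query need to be inspected for the changed detection below.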
max_keyword_len = len(max(DML_QUERY_KEYWORDS + DDL_QUERY_KEYWORDS, key=len))
# Execute query:
query_result = []
executed_queries = []
rowcount = []
for q in query:
try:
cursor.execute(q, arguments)
except Exception as e:
if not autocommit:
db_connection.rollback()
cursor.close()
module.fail_json(msg="Cannot execute SQL '%s' args [%s]: %s" % (q, arguments, to_native(e)))
try:
query_result.append([dict(row) for row in cursor.fetchall()])
except Exception as e:
if not autocommit:
db_connection.rollback()
module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
# Check DML or DDL keywords in query and set changed accordingly:
q = q.lstrip()[0:max_keyword_len].upper()
for keyword in DML_QUERY_KEYWORDS:
if keyword in q and cursor.rowcount > 0:
changed = True
for keyword in DDL_QUERY_KEYWORDS:
if keyword in q:
changed = True
executed_queries.append(cursor._last_executed)
rowcount.append(cursor.rowcount)
    # When run with single_transaction=yes, commit the transaction now:
if not autocommit:
db_connection.commit()
# Create dict with returned values:
kw = {
'changed': changed,
'executed_queries': executed_queries,
'query_result': query_result,
'rowcount': rowcount,
}
# Exit:
module.exit_json(**kw)
if __name__ == '__main__':
main()


@ -0,0 +1,573 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Balazs Pocze <banyek@gawker.com>
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# Certain parts are taken from Mark Theunissen's mysqldb module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mysql_replication
short_description: Manage MySQL replication
description:
    - Manages MySQL server replication: slave and master status, and getting and changing the master host.
author:
- Balazs Pocze (@banyek)
- Andrew Klychkov (@Andersson007)
options:
mode:
description:
- Module operating mode. Could be
C(changemaster) (CHANGE MASTER TO),
C(getmaster) (SHOW MASTER STATUS),
C(getslave) (SHOW SLAVE STATUS),
C(startslave) (START SLAVE),
C(stopslave) (STOP SLAVE),
C(resetmaster) (RESET MASTER) - supported from Ansible 2.10,
C(resetslave) (RESET SLAVE),
C(resetslaveall) (RESET SLAVE ALL).
type: str
choices:
- changemaster
- getmaster
- getslave
- startslave
- stopslave
- resetmaster
- resetslave
- resetslaveall
default: getslave
master_host:
description:
- Same as mysql variable.
type: str
master_user:
description:
- Same as mysql variable.
type: str
master_password:
description:
- Same as mysql variable.
type: str
master_port:
description:
- Same as mysql variable.
type: int
master_connect_retry:
description:
- Same as mysql variable.
type: int
master_log_file:
description:
- Same as mysql variable.
type: str
master_log_pos:
description:
- Same as mysql variable.
type: int
relay_log_file:
description:
- Same as mysql variable.
type: str
relay_log_pos:
description:
- Same as mysql variable.
type: int
master_ssl:
description:
- Same as mysql variable.
type: bool
master_ssl_ca:
description:
- Same as mysql variable.
type: str
master_ssl_capath:
description:
- Same as mysql variable.
type: str
master_ssl_cert:
description:
- Same as mysql variable.
type: str
master_ssl_key:
description:
- Same as mysql variable.
type: str
master_ssl_cipher:
description:
- Same as mysql variable.
type: str
master_auto_position:
description:
- Whether the host uses GTID based replication or not.
type: bool
master_use_gtid:
description:
- Configures the slave to use the MariaDB Global Transaction ID.
- C(disabled) equals MASTER_USE_GTID=no command.
- To find information about available values see
U(https://mariadb.com/kb/en/library/change-master-to/#master_use_gtid).
- Available since MariaDB 10.0.2.
choices: [current_pos, slave_pos, disabled]
type: str
master_delay:
description:
- Time lag behind the master's state (in seconds).
- Available from MySQL 5.6.
- For more information see U(https://dev.mysql.com/doc/refman/8.0/en/replication-delayed.html).
type: int
connection_name:
description:
- Name of the master connection.
- Supported from MariaDB 10.0.1.
- Mutually exclusive with I(channel).
- For more information see U(https://mariadb.com/kb/en/library/multi-source-replication/).
type: str
channel:
description:
- Name of replication channel.
- Multi-source replication is supported from MySQL 5.7.
- Mutually exclusive with I(connection_name).
- For more information see U(https://dev.mysql.com/doc/refman/8.0/en/replication-multi-source.html).
type: str
fail_on_error:
description:
- Fails on error when calling mysql.
type: bool
default: False
notes:
- If an empty value for the parameter of string type is needed, use an empty string.
extends_documentation_fragment:
- community.general.mysql
seealso:
- module: mysql_info
- name: MySQL replication reference
description: Complete reference of the MySQL replication documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/replication.html
- name: MariaDB replication reference
description: Complete reference of the MariaDB replication documentation.
link: https://mariadb.com/kb/en/library/setting-up-replication/
'''
EXAMPLES = r'''
- name: Stop mysql slave thread
mysql_replication:
mode: stopslave
- name: Get master binlog file name and binlog position
mysql_replication:
mode: getmaster
- name: Change master to master server 192.0.2.1 and use binary log 'mysql-bin.000009' with position 4578
mysql_replication:
mode: changemaster
master_host: 192.0.2.1
master_log_file: mysql-bin.000009
master_log_pos: 4578
- name: Check slave status using port 3308
mysql_replication:
mode: getslave
login_host: ansible.example.com
login_port: 3308
- name: On MariaDB change master to use GTID current_pos
mysql_replication:
mode: changemaster
master_use_gtid: current_pos
- name: Change master to use replication delay 3600 seconds
mysql_replication:
mode: changemaster
master_host: 192.0.2.1
master_delay: 3600
- name: Start MariaDB standby with connection name master-1
mysql_replication:
mode: startslave
connection_name: master-1
- name: Stop replication in channel master-1
mysql_replication:
mode: stopslave
channel: master-1
- name: >
Run RESET MASTER command which will delete all existing binary log files
and reset the binary log index file on the master
mysql_replication:
mode: resetmaster
- name: Run start slave and fail the task on errors
mysql_replication:
mode: startslave
connection_name: master-1
fail_on_error: yes
- name: Change master and fail on error (like when slave thread is running)
mysql_replication:
mode: changemaster
fail_on_error: yes
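
- name: Remove all replication settings from the standby (RESET SLAVE ALL)
  mysql_replication:
    mode: resetslaveall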
'''
RETURN = r'''
queries:
description: List of executed queries which modified DB's state.
returned: always
type: list
sample: ["CHANGE MASTER TO MASTER_HOST='master2.example.com',MASTER_PORT=3306"]
version_added: '2.10'
'''
import os
import warnings
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
executed_queries = []
def get_master_status(cursor):
cursor.execute("SHOW MASTER STATUS")
masterstatus = cursor.fetchone()
return masterstatus
def get_slave_status(cursor, connection_name='', channel=''):
if connection_name:
query = "SHOW SLAVE '%s' STATUS" % connection_name
else:
query = "SHOW SLAVE STATUS"
if channel:
query += " FOR CHANNEL '%s'" % channel
cursor.execute(query)
slavestatus = cursor.fetchone()
return slavestatus
def stop_slave(module, cursor, connection_name='', channel='', fail_on_error=False):
if connection_name:
query = "STOP SLAVE '%s'" % connection_name
else:
query = 'STOP SLAVE'
if channel:
query += " FOR CHANNEL '%s'" % channel
try:
executed_queries.append(query)
cursor.execute(query)
stopped = True
except mysql_driver.Warning as e:
stopped = False
except Exception as e:
if fail_on_error:
module.fail_json(msg="STOP SLAVE failed: %s" % to_native(e))
stopped = False
return stopped
def reset_slave(module, cursor, connection_name='', channel='', fail_on_error=False):
if connection_name:
query = "RESET SLAVE '%s'" % connection_name
else:
query = 'RESET SLAVE'
if channel:
query += " FOR CHANNEL '%s'" % channel
try:
executed_queries.append(query)
cursor.execute(query)
reset = True
except mysql_driver.Warning as e:
reset = False
except Exception as e:
if fail_on_error:
module.fail_json(msg="RESET SLAVE failed: %s" % to_native(e))
reset = False
return reset
def reset_slave_all(module, cursor, connection_name='', channel='', fail_on_error=False):
if connection_name:
query = "RESET SLAVE '%s' ALL" % connection_name
else:
query = 'RESET SLAVE ALL'
if channel:
query += " FOR CHANNEL '%s'" % channel
try:
executed_queries.append(query)
cursor.execute(query)
reset = True
except mysql_driver.Warning as e:
reset = False
except Exception as e:
if fail_on_error:
module.fail_json(msg="RESET SLAVE ALL failed: %s" % to_native(e))
reset = False
return reset
def reset_master(module, cursor, fail_on_error=False):
query = 'RESET MASTER'
try:
executed_queries.append(query)
cursor.execute(query)
reset = True
except mysql_driver.Warning as e:
reset = False
except Exception as e:
if fail_on_error:
module.fail_json(msg="RESET MASTER failed: %s" % to_native(e))
reset = False
return reset
def start_slave(module, cursor, connection_name='', channel='', fail_on_error=False):
if connection_name:
query = "START SLAVE '%s'" % connection_name
else:
query = 'START SLAVE'
if channel:
query += " FOR CHANNEL '%s'" % channel
try:
executed_queries.append(query)
cursor.execute(query)
started = True
except mysql_driver.Warning as e:
started = False
except Exception as e:
if fail_on_error:
module.fail_json(msg="START SLAVE failed: %s" % to_native(e))
started = False
return started
def changemaster(cursor, chm, connection_name='', channel=''):
if connection_name:
query = "CHANGE MASTER '%s' TO %s" % (connection_name, ','.join(chm))
else:
query = 'CHANGE MASTER TO %s' % ','.join(chm)
if channel:
query += " FOR CHANNEL '%s'" % channel
executed_queries.append(query)
cursor.execute(query)
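# Illustration of the statement assembled above from the chm fragments built
# in main(); the host/file/position values are hypothetical:
_chm_demo = ["MASTER_HOST='192.0.2.1'", "MASTER_LOG_FILE='mysql-bin.000009'",
             "MASTER_LOG_POS=4578"]
assert 'CHANGE MASTER TO %s' % ','.join(_chm_demo) == (
    "CHANGE MASTER TO MASTER_HOST='192.0.2.1',"
    "MASTER_LOG_FILE='mysql-bin.000009',MASTER_LOG_POS=4578")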
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(type='str'),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=3306),
login_unix_socket=dict(type='str'),
mode=dict(type='str', default='getslave', choices=[
'getmaster', 'getslave', 'changemaster', 'stopslave',
'startslave', 'resetmaster', 'resetslave', 'resetslaveall']),
master_auto_position=dict(type='bool', default=False),
master_host=dict(type='str'),
master_user=dict(type='str'),
master_password=dict(type='str', no_log=True),
master_port=dict(type='int'),
master_connect_retry=dict(type='int'),
master_log_file=dict(type='str'),
master_log_pos=dict(type='int'),
relay_log_file=dict(type='str'),
relay_log_pos=dict(type='int'),
master_ssl=dict(type='bool', default=False),
master_ssl_ca=dict(type='str'),
master_ssl_capath=dict(type='str'),
master_ssl_cert=dict(type='str'),
master_ssl_key=dict(type='str'),
master_ssl_cipher=dict(type='str'),
connect_timeout=dict(type='int', default=30),
config_file=dict(type='path', default='~/.my.cnf'),
client_cert=dict(type='path', aliases=['ssl_cert']),
client_key=dict(type='path', aliases=['ssl_key']),
ca_cert=dict(type='path', aliases=['ssl_ca']),
master_use_gtid=dict(type='str', choices=['current_pos', 'slave_pos', 'disabled']),
master_delay=dict(type='int'),
connection_name=dict(type='str'),
channel=dict(type='str'),
fail_on_error=dict(type='bool', default=False),
),
mutually_exclusive=[
['connection_name', 'channel']
],
)
mode = module.params["mode"]
master_host = module.params["master_host"]
master_user = module.params["master_user"]
master_password = module.params["master_password"]
master_port = module.params["master_port"]
master_connect_retry = module.params["master_connect_retry"]
master_log_file = module.params["master_log_file"]
master_log_pos = module.params["master_log_pos"]
relay_log_file = module.params["relay_log_file"]
relay_log_pos = module.params["relay_log_pos"]
master_ssl = module.params["master_ssl"]
master_ssl_ca = module.params["master_ssl_ca"]
master_ssl_capath = module.params["master_ssl_capath"]
master_ssl_cert = module.params["master_ssl_cert"]
master_ssl_key = module.params["master_ssl_key"]
master_ssl_cipher = module.params["master_ssl_cipher"]
master_auto_position = module.params["master_auto_position"]
ssl_cert = module.params["client_cert"]
ssl_key = module.params["client_key"]
ssl_ca = module.params["ca_cert"]
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
master_delay = module.params['master_delay']
if module.params.get("master_use_gtid") == 'disabled':
master_use_gtid = 'no'
else:
master_use_gtid = module.params["master_use_gtid"]
connection_name = module.params["connection_name"]
channel = module.params['channel']
fail_on_error = module.params['fail_on_error']
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
else:
warnings.filterwarnings('error', category=mysql_driver.Warning)
login_password = module.params["login_password"]
login_user = module.params["login_user"]
try:
cursor, db_conn = mysql_connect(module, login_user, login_password, config_file,
ssl_cert, ssl_key, ssl_ca, None, cursor_class='DictCursor',
connect_timeout=connect_timeout)
except Exception as e:
if os.path.exists(config_file):
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
if mode in "getmaster":
status = get_master_status(cursor)
if not isinstance(status, dict):
status = dict(Is_Master=False, msg="Server is not configured as mysql master")
else:
status['Is_Master'] = True
module.exit_json(queries=executed_queries, **status)
elif mode in "getslave":
status = get_slave_status(cursor, connection_name, channel)
if not isinstance(status, dict):
status = dict(Is_Slave=False, msg="Server is not configured as mysql slave")
else:
status['Is_Slave'] = True
module.exit_json(queries=executed_queries, **status)
elif mode in "changemaster":
chm = []
result = {}
if master_host is not None:
chm.append("MASTER_HOST='%s'" % master_host)
if master_user is not None:
chm.append("MASTER_USER='%s'" % master_user)
if master_password is not None:
chm.append("MASTER_PASSWORD='%s'" % master_password)
if master_port is not None:
chm.append("MASTER_PORT=%s" % master_port)
if master_connect_retry is not None:
chm.append("MASTER_CONNECT_RETRY=%s" % master_connect_retry)
if master_log_file is not None:
chm.append("MASTER_LOG_FILE='%s'" % master_log_file)
if master_log_pos is not None:
chm.append("MASTER_LOG_POS=%s" % master_log_pos)
if master_delay is not None:
chm.append("MASTER_DELAY=%s" % master_delay)
if relay_log_file is not None:
chm.append("RELAY_LOG_FILE='%s'" % relay_log_file)
if relay_log_pos is not None:
chm.append("RELAY_LOG_POS=%s" % relay_log_pos)
if master_ssl:
chm.append("MASTER_SSL=1")
if master_ssl_ca is not None:
chm.append("MASTER_SSL_CA='%s'" % master_ssl_ca)
if master_ssl_capath is not None:
chm.append("MASTER_SSL_CAPATH='%s'" % master_ssl_capath)
if master_ssl_cert is not None:
chm.append("MASTER_SSL_CERT='%s'" % master_ssl_cert)
if master_ssl_key is not None:
chm.append("MASTER_SSL_KEY='%s'" % master_ssl_key)
if master_ssl_cipher is not None:
chm.append("MASTER_SSL_CIPHER='%s'" % master_ssl_cipher)
if master_auto_position:
chm.append("MASTER_AUTO_POSITION=1")
if master_use_gtid is not None:
chm.append("MASTER_USE_GTID=%s" % master_use_gtid)
try:
changemaster(cursor, chm, connection_name, channel)
except mysql_driver.Warning as e:
result['warning'] = to_native(e)
except Exception as e:
module.fail_json(msg='%s. Query == CHANGE MASTER TO %s' % (to_native(e), chm))
result['changed'] = True
module.exit_json(queries=executed_queries, **result)
elif mode in "startslave":
started = start_slave(module, cursor, connection_name, channel, fail_on_error)
if started is True:
module.exit_json(msg="Slave started ", changed=True, queries=executed_queries)
else:
module.exit_json(msg="Slave already started (Or cannot be started)", changed=False, queries=executed_queries)
elif mode in "stopslave":
stopped = stop_slave(module, cursor, connection_name, channel, fail_on_error)
if stopped is True:
module.exit_json(msg="Slave stopped", changed=True, queries=executed_queries)
else:
module.exit_json(msg="Slave already stopped", changed=False, queries=executed_queries)
elif mode in "resetmaster":
reset = reset_master(module, cursor, fail_on_error)
if reset is True:
module.exit_json(msg="Master reset", changed=True, queries=executed_queries)
else:
module.exit_json(msg="Master already reset", changed=False, queries=executed_queries)
elif mode in "resetslave":
reset = reset_slave(module, cursor, connection_name, channel, fail_on_error)
if reset is True:
module.exit_json(msg="Slave reset", changed=True, queries=executed_queries)
else:
module.exit_json(msg="Slave already reset", changed=False, queries=executed_queries)
elif mode in "resetslaveall":
reset = reset_slave_all(module, cursor, connection_name, channel, fail_on_error)
if reset is True:
module.exit_json(msg="Slave reset", changed=True, queries=executed_queries)
else:
module.exit_json(msg="Slave already reset", changed=False, queries=executed_queries)
warnings.simplefilter("ignore")
if __name__ == '__main__':
main()


@@ -0,0 +1,807 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mysql_user
short_description: Adds or removes a user from a MySQL database
description:
- Adds or removes a user from a MySQL database.
options:
name:
description:
- Name of the user (role) to add or remove.
type: str
required: true
password:
description:
- Set the user's password.
type: str
encrypted:
description:
- Indicate that the 'password' field is a `mysql_native_password` hash.
type: bool
default: no
host:
description:
- The 'host' part of the MySQL username.
type: str
default: localhost
host_all:
description:
- Override the host option, making Ansible apply changes to all hostnames for a given user.
- This option cannot be used when creating users.
type: bool
default: no
priv:
description:
- "MySQL privileges string in the format: C(db.table:priv1,priv2)."
- "Multiple privileges can be specified by separating each one using
a forward slash: C(db.table:priv/db.table:priv)."
- The format is based on MySQL C(GRANT) statement.
- Database and table names can be quoted, MySQL-style.
- If column privileges are used, the C(priv1,priv2) part must be
exactly as returned by a C(SHOW GRANTS) statement. If not followed,
the module will always report changes. It includes grouping columns
by permission (C(SELECT(col1,col2)) instead of C(SELECT(col1),SELECT(col2))).
- Can be passed as a dictionary (see the examples).
type: raw
append_privs:
description:
- Append the privileges defined by priv to the existing ones for this
user instead of overwriting existing ones.
type: bool
default: no
sql_log_bin:
description:
- Whether binary logging should be enabled or disabled for the connection.
type: bool
default: yes
state:
description:
- Whether the user should exist.
- When C(absent), removes the user.
type: str
choices: [ absent, present ]
default: present
check_implicit_admin:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
type: bool
default: no
update_password:
description:
- C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users.
type: str
choices: [ always, on_create ]
default: always
plugin:
description:
- User's plugin to authenticate (``CREATE USER user IDENTIFIED WITH plugin``).
type: str
plugin_hash_string:
description:
- User's plugin hash string (``CREATE USER user IDENTIFIED WITH plugin AS plugin_hash_string``).
type: str
plugin_auth_string:
description:
- User's plugin auth_string (``CREATE USER user IDENTIFIED WITH plugin BY plugin_auth_string``).
type: str
notes:
- "MySQL server installs with default login_user of 'root' and no password. To secure this user
as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
- Currently, there is only support for the `mysql_native_password` encrypted password hash module.
seealso:
- module: mysql_info
- name: MySQL access control and account management reference
description: Complete reference of the MySQL access control and account management documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/access-control.html
- name: MySQL provided privileges reference
description: Complete reference of the MySQL provided privileges documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html
author:
- Jonathan Mainguy (@Jmainguy)
- Benjamin Malynovytch (@bmalynovytch)
- Lukasz Tomaszkiewicz (@tomaszkiewicz)
extends_documentation_fragment:
- community.general.mysql
'''
EXAMPLES = r'''
- name: Removes anonymous user account for localhost
mysql_user:
name: ''
host: localhost
state: absent
- name: Removes all anonymous user accounts
mysql_user:
name: ''
host_all: yes
state: absent
- name: Create database user with name 'bob' and password '12345' with all database privileges
mysql_user:
name: bob
password: 12345
priv: '*.*:ALL'
state: present
- name: Create database user using hashed password with all database privileges
mysql_user:
name: bob
password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4'
encrypted: yes
priv: '*.*:ALL'
state: present
- name: Create database user with password and all database privileges and 'WITH GRANT OPTION'
mysql_user:
name: bob
password: 12345
priv: '*.*:ALL,GRANT'
state: present
- name: Create user with password, all database privileges and 'WITH GRANT OPTION' in db1 and db2
mysql_user:
state: present
name: bob
password: 12345dd
priv:
'db1.*': 'ALL,GRANT'
'db2.*': 'ALL,GRANT'
# Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- name: Modify user to require SSL connections.
mysql_user:
name: bob
append_privs: yes
priv: '*.*:REQUIRESSL'
state: present
- name: Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
mysql_user:
login_user: root
login_password: 123456
name: sally
state: absent
- name: Ensure no user named 'sally' exists at all
mysql_user:
name: sally
host_all: yes
state: absent
- name: Specify grants composed of more than one word
mysql_user:
name: replication
password: 12345
priv: "*.*:REPLICATION CLIENT"
state: present
- name: Revoke all privileges for user 'bob' and password '12345'
mysql_user:
name: bob
password: 12345
priv: "*.*:USAGE"
state: present
# Example privileges string format
# mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
- name: Example using login_unix_socket to connect to server
mysql_user:
name: root
password: abc123
login_unix_socket: /var/run/mysqld/mysqld.sock
- name: Example of skipping binary logging while adding user 'bob'
mysql_user:
name: bob
password: 12345
priv: "*.*:USAGE"
state: present
sql_log_bin: no
- name: Create user 'bob' authenticated with plugin 'AWSAuthenticationPlugin'
mysql_user:
name: bob
plugin: AWSAuthenticationPlugin
plugin_hash_string: RDS
priv: '*.*:ALL'
state: present
# Example .my.cnf file for setting the root password
# [client]
# user=root
# password=n<_665{vS43y
'''
import re
import string
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import SQLParseError
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER',
'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE',
'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW',
'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL',
'CREATE ROLE', 'DROP ROLE', 'APPLICATION_PASSWORD_ADMIN',
'AUDIT_ADMIN', 'BACKUP_ADMIN', 'BINLOG_ADMIN',
'BINLOG_ENCRYPTION_ADMIN', 'CLONE_ADMIN', 'CONNECTION_ADMIN',
'ENCRYPTION_KEY_ADMIN', 'FIREWALL_ADMIN', 'FIREWALL_USER',
'GROUP_REPLICATION_ADMIN', 'INNODB_REDO_LOG_ARCHIVE',
'NDB_STORED_USER', 'PERSIST_RO_VARIABLES_ADMIN',
'REPLICATION_APPLIER', 'REPLICATION_SLAVE_ADMIN',
'RESOURCE_GROUP_ADMIN', 'RESOURCE_GROUP_USER',
'ROLE_ADMIN', 'SESSION_VARIABLES_ADMIN', 'SET_USER_ID',
'SYSTEM_USER', 'SYSTEM_VARIABLES_ADMIN', 'SYSTEM_USER',
'TABLE_ENCRYPTION_ADMIN', 'VERSION_TOKEN_ADMIN',
'XA_RECOVER_ADMIN', 'LOAD FROM S3', 'SELECT INTO S3'))
class InvalidPrivsError(Exception):
pass
# ===========================================
# MySQL module specific support methods.
#
# User Authentication Management changed in MySQL 5.7 and MariaDB 10.2.0
def use_old_user_mgmt(cursor):
cursor.execute("SELECT VERSION()")
result = cursor.fetchone()
version_str = result[0]
version = version_str.split('.')
if 'mariadb' in version_str.lower():
# Prior to MariaDB 10.2
if int(version[0]) * 1000 + int(version[1]) < 10002:
return True
else:
return False
else:
# Prior to MySQL 5.7
if int(version[0]) * 1000 + int(version[1]) < 5007:
return True
else:
return False
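# Sanity sketch of the version encoding used above (major * 1000 + minor),
# checked against a few hypothetical sample version strings:
for _ver, _old in [('10.1.38-MariaDB', True), ('10.2.21-MariaDB', False),
                   ('5.6.44', True), ('5.7.29', False), ('8.0.19', False)]:
    _major, _minor = _ver.split('.')[:2]
    _threshold = 10002 if 'mariadb' in _ver.lower() else 5007
    assert (int(_major) * 1000 + int(_minor) < _threshold) is _old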
def get_mode(cursor):
cursor.execute('SELECT @@GLOBAL.sql_mode')
result = cursor.fetchone()
mode_str = result[0]
if 'ANSI' in mode_str:
mode = 'ANSI'
else:
mode = 'NOTANSI'
return mode
def user_exists(cursor, user, host, host_all):
if host_all:
cursor.execute("SELECT count(*) FROM mysql.user WHERE user = %s", ([user]))
else:
cursor.execute("SELECT count(*) FROM mysql.user WHERE user = %s AND host = %s", (user, host))
count = cursor.fetchone()
return count[0] > 0
def user_add(cursor, user, host, host_all, password, encrypted,
plugin, plugin_hash_string, plugin_auth_string, new_priv, check_mode):
# we cannot create users without a proper hostname
if host_all:
return False
if check_mode:
return True
if password and encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user, host, password))
elif password and not encrypted:
cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user, host, password))
elif plugin and plugin_hash_string:
cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s AS %s", (user, host, plugin, plugin_hash_string))
elif plugin and plugin_auth_string:
cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s BY %s", (user, host, plugin, plugin_auth_string))
elif plugin:
cursor.execute("CREATE USER %s@%s IDENTIFIED WITH %s", (user, host, plugin))
else:
cursor.execute("CREATE USER %s@%s", (user, host))
if new_priv is not None:
for db_table, priv in iteritems(new_priv):
privileges_grant(cursor, user, host, db_table, priv)
return True
def is_hash(password):
ishash = False
if len(password) == 41 and password[0] == '*':
if frozenset(password[1:]).issubset(string.hexdigits):
ishash = True
return ishash
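# A standalone sketch of the mysql_native_password hash that the server-side
# expression CONCAT('*', UCASE(SHA1(UNHEX(SHA1(pw))))) computes in user_mod()
# below: a double SHA-1, upper-cased and prefixed with '*'. Assumes UTF-8 input.
import hashlib

def _native_password_hash_demo(password):
    return '*' + hashlib.sha1(
        hashlib.sha1(password.encode('utf-8')).digest()).hexdigest().upper()

assert is_hash(_native_password_hash_demo('password'))
assert _native_password_hash_demo('password') == '*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19'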
def user_mod(cursor, user, host, host_all, password, encrypted,
plugin, plugin_hash_string, plugin_auth_string, new_priv, append_privs, module):
changed = False
msg = "User unchanged"
grant_option = False
if host_all:
hostnames = user_get_hostnames(cursor, [user])
else:
hostnames = [host]
for host in hostnames:
# Handle clear text and hashed passwords.
if bool(password):
# Determine what user management method server uses
old_user_mgmt = use_old_user_mgmt(cursor)
# Get a list of valid columns in mysql.user table to check if Password and/or authentication_string exist
cursor.execute("""
SELECT COLUMN_NAME FROM information_schema.COLUMNS
WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME IN ('Password', 'authentication_string')
ORDER BY COLUMN_NAME DESC LIMIT 1
""")
colA = cursor.fetchone()
cursor.execute("""
SELECT COLUMN_NAME FROM information_schema.COLUMNS
WHERE TABLE_SCHEMA = 'mysql' AND TABLE_NAME = 'user' AND COLUMN_NAME IN ('Password', 'authentication_string')
ORDER BY COLUMN_NAME ASC LIMIT 1
""")
colB = cursor.fetchone()
# Select hash from either Password or authentication_string, depending which one exists and/or is filled
cursor.execute("""
SELECT COALESCE(
CASE WHEN %s = '' THEN NULL ELSE %s END,
CASE WHEN %s = '' THEN NULL ELSE %s END
)
FROM mysql.user WHERE user = %%s AND host = %%s
""" % (colA[0], colA[0], colB[0], colB[0]), (user, host))
current_pass_hash = cursor.fetchone()[0]
if isinstance(current_pass_hash, bytes):
current_pass_hash = current_pass_hash.decode('ascii')
if encrypted:
encrypted_password = password
if not is_hash(encrypted_password):
module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))")
else:
if old_user_mgmt:
cursor.execute("SELECT PASSWORD(%s)", (password,))
else:
cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,))
encrypted_password = cursor.fetchone()[0]
if current_pass_hash != encrypted_password:
msg = "Password updated"
if module.check_mode:
return (True, msg)
if old_user_mgmt:
cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, encrypted_password))
msg = "Password updated (old style)"
else:
try:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, encrypted_password))
msg = "Password updated (new style)"
except (mysql_driver.Error) as e:
# https://stackoverflow.com/questions/51600000/authentication-string-of-root-user-on-mysql
# Replacing empty root password with new authentication mechanisms fails with error 1396
if e.args[0] == 1396:
cursor.execute(
"UPDATE user SET plugin = %s, authentication_string = %s, Password = '' WHERE User = %s AND Host = %s",
('mysql_native_password', encrypted_password, user, host)
)
cursor.execute("FLUSH PRIVILEGES")
msg = "Password forced update"
else:
raise e
changed = True
# Handle plugin authentication
if plugin:
cursor.execute("SELECT plugin, authentication_string FROM mysql.user "
"WHERE user = %s AND host = %s", (user, host))
current_plugin = cursor.fetchone()
update = False
if current_plugin[0] != plugin:
update = True
if plugin_hash_string and current_plugin[1] != plugin_hash_string:
update = True
if plugin_auth_string and current_plugin[1] != plugin_auth_string:
# this case can cause more updates than expected,
# as plugin can hash auth_string in any way it wants
# and there's no way to figure it out for
# a check, so I prefer to update more often than never
update = True
if update:
if plugin_hash_string:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s AS %s", (user, host, plugin, plugin_hash_string))
elif plugin_auth_string:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s BY %s", (user, host, plugin, plugin_auth_string))
else:
cursor.execute("ALTER USER %s@%s IDENTIFIED WITH %s", (user, host, plugin))
changed = True
# Handle privileges
if new_priv is not None:
curr_priv = privileges_get(cursor, user, host)
# If the user has privileges on a db.table that doesn't appear at all in
# the new specification, then revoke all privileges on it.
for db_table, priv in iteritems(curr_priv):
# If the user has the GRANT OPTION on a db.table, revoke it first.
if "GRANT" in priv:
grant_option = True
if db_table not in new_priv:
if user != "root" and "PROXY" not in priv and not append_privs:
msg = "Privileges updated"
if module.check_mode:
return (True, msg)
privileges_revoke(cursor, user, host, db_table, priv, grant_option)
changed = True
# If the user doesn't currently have any privileges on a db.table, then
# we can perform a straight grant operation.
for db_table, priv in iteritems(new_priv):
if db_table not in curr_priv:
msg = "New privileges granted"
if module.check_mode:
return (True, msg)
privileges_grant(cursor, user, host, db_table, priv)
changed = True
# If the db.table specification exists in both the user's current privileges
# and in the new privileges, then we need to see if there's a difference.
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
for db_table in db_table_intersect:
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
if len(priv_diff) > 0:
msg = "Privileges updated"
if module.check_mode:
return (True, msg)
if not append_privs:
privileges_revoke(cursor, user, host, db_table, curr_priv[db_table], grant_option)
privileges_grant(cursor, user, host, db_table, new_priv[db_table])
changed = True
return (changed, msg)
def user_delete(cursor, user, host, host_all, check_mode):
if check_mode:
return True
if host_all:
hostnames = user_get_hostnames(cursor, [user])
for hostname in hostnames:
cursor.execute("DROP USER %s@%s", (user, hostname))
else:
cursor.execute("DROP USER %s@%s", (user, host))
return True
def user_get_hostnames(cursor, user):
cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user)
hostnames_raw = cursor.fetchall()
hostnames = []
for hostname_raw in hostnames_raw:
hostnames.append(hostname_raw[0])
return hostnames
def privileges_get(cursor, user, host):
""" MySQL doesn't have a better method of getting privileges aside from the
SHOW GRANTS query syntax, which requires us to then parse the returned string.
Here's an example of the string that is returned from MySQL:
GRANT USAGE ON *.* TO 'user'@'localhost' IDENTIFIED BY 'pass';
This function makes the query and returns a dictionary containing the results.
The dictionary format is the same as that returned by privileges_unpack() below.
"""
output = {}
cursor.execute("SHOW GRANTS FOR %s@%s", (user, host))
grants = cursor.fetchall()
def pick(x):
if x == 'ALL PRIVILEGES':
return 'ALL'
else:
return x
for grant in grants:
res = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\\6)? ?(.*)""", grant[0])
if res is None:
raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
privileges = res.group(1).split(", ")
privileges = [pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(7):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(7):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
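# A hedged, standalone check of the grant-parsing regex above against a
# typical SHOW GRANTS line; the grant string is an illustrative sample:
_grant_demo = "GRANT SELECT, INSERT ON `mydb`.* TO 'bob'@'localhost' WITH GRANT OPTION"
_res_demo = re.match("""GRANT (.+) ON (.+) TO (['`"]).*\\3@(['`"]).*\\4( IDENTIFIED BY PASSWORD (['`"]).+\\6)? ?(.*)""", _grant_demo)
assert _res_demo.group(1) == 'SELECT, INSERT'
assert _res_demo.group(2) == '`mydb`.*'
assert 'WITH GRANT OPTION' in _res_demo.group(7)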
def privileges_unpack(priv, mode):
""" Take a privileges string, typically passed as a parameter, and unserialize
it into a dictionary, the same format as privileges_get() above. We have this
custom format to avoid using YAML/JSON strings inside YAML playbooks. Example
of a privileges string:
mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanother.*:ALL
The privilege USAGE stands for no privileges, so we add that in on *.* if it's
not specified in the string, as MySQL will always provide this by default.
"""
if mode == 'ANSI':
quote = '"'
else:
quote = '`'
output = {}
privs = []
for item in priv.strip().split('/'):
pieces = item.strip().rsplit(':', 1)
dbpriv = pieces[0].rsplit(".", 1)
# Check for FUNCTION or PROCEDURE object types
parts = dbpriv[0].split(" ", 1)
object_type = ''
if len(parts) > 1 and (parts[0] == 'FUNCTION' or parts[0] == 'PROCEDURE'):
object_type = parts[0] + ' '
dbpriv[0] = parts[1]
# Do not escape if privilege is for database or table, i.e.
# neither quote *. nor .*
for i, side in enumerate(dbpriv):
if side.strip('`') != '*':
dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote)
pieces[0] = object_type + '.'.join(dbpriv)
if '(' in pieces[1]:
output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
for i in output[pieces[0]]:
privs.append(re.sub(r'\s*\(.*\)', '', i))
else:
output[pieces[0]] = pieces[1].upper().split(',')
privs = output[pieces[0]]
new_privs = frozenset(privs)
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
if '*.*' not in output:
output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL and/or GRANT (=WITH GRANT OPTION) in *.*
# we still need to add USAGE as a privilege to avoid syntax errors
if 'REQUIRESSL' in priv and not set(output['*.*']).difference(set(['GRANT', 'REQUIRESSL'])):
output['*.*'].append('USAGE')
return output
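# Minimal sketch of the priv-string format unpacked above. The helper below is
# hypothetical and deliberately simplified: it ignores identifier quoting,
# column privileges and the implicit *.* USAGE entry the real function adds.
def _split_priv_string_demo(priv):
    out = {}
    for item in priv.strip().split('/'):
        db_table, privs = item.strip().rsplit(':', 1)
        out[db_table] = privs.upper().split(',')
    return out

assert _split_priv_string_demo('mydb.*:INSERT,UPDATE/anotherdb.*:SELECT') == {
    'mydb.*': ['INSERT', 'UPDATE'], 'anotherdb.*': ['SELECT']}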
def privileges_revoke(cursor, user, host, db_table, priv, grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option:
query = ["REVOKE GRANT OPTION ON %s" % db_table]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["REVOKE %s ON %s" % (priv_string, db_table)]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
def privileges_grant(cursor, user, host, db_table, priv):
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["GRANT %s ON %s" % (priv_string, db_table)]
query.append("TO %s@%s")
if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
if 'GRANT' in priv:
query.append("WITH GRANT OPTION")
query = ' '.join(query)
cursor.execute(query, (user, host))
def convert_priv_dict_to_str(priv):
"""Converts privs dictionary to string of certain format.
Args:
priv (dict): Dict of privileges that needs to be converted to string.
Returns:
priv (str): String representation of input argument.
"""
priv_list = ['%s:%s' % (key, val) for key, val in iteritems(priv)]
return '/'.join(priv_list)
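# Quick illustration of the dict-to-string conversion above; the second check
# is order-independent since dict ordering is only guaranteed on Python 3.7+:
assert convert_priv_dict_to_str({'db1.*': 'ALL,GRANT'}) == 'db1.*:ALL,GRANT'
assert set(convert_priv_dict_to_str(
    {'db1.*': 'ALL', 'db2.*': 'SELECT'}).split('/')) == {'db1.*:ALL', 'db2.*:SELECT'}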
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(type='str'),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=3306),
login_unix_socket=dict(type='str'),
user=dict(type='str', required=True, aliases=['name']),
password=dict(type='str', no_log=True),
encrypted=dict(type='bool', default=False),
host=dict(type='str', default='localhost'),
host_all=dict(type="bool", default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
priv=dict(type='raw'),
append_privs=dict(type='bool', default=False),
check_implicit_admin=dict(type='bool', default=False),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
connect_timeout=dict(type='int', default=30),
config_file=dict(type='path', default='~/.my.cnf'),
sql_log_bin=dict(type='bool', default=True),
client_cert=dict(type='path', aliases=['ssl_cert']),
client_key=dict(type='path', aliases=['ssl_key']),
ca_cert=dict(type='path', aliases=['ssl_ca']),
plugin=dict(default=None, type='str'),
plugin_hash_string=dict(default=None, type='str'),
plugin_auth_string=dict(default=None, type='str'),
),
supports_check_mode=True,
)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
user = module.params["user"]
password = module.params["password"]
encrypted = module.boolean(module.params["encrypted"])
host = module.params["host"].lower()
host_all = module.params["host_all"]
state = module.params["state"]
priv = module.params["priv"]
check_implicit_admin = module.params['check_implicit_admin']
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
append_privs = module.boolean(module.params["append_privs"])
update_password = module.params['update_password']
ssl_cert = module.params["client_cert"]
ssl_key = module.params["client_key"]
ssl_ca = module.params["ca_cert"]
db = ''
sql_log_bin = module.params["sql_log_bin"]
plugin = module.params["plugin"]
plugin_hash_string = module.params["plugin_hash_string"]
plugin_auth_string = module.params["plugin_auth_string"]
if priv and not (isinstance(priv, str) or isinstance(priv, dict)):
module.fail_json(msg="priv parameter must be str or dict but %s was passed" % type(priv))
if priv and isinstance(priv, dict):
priv = convert_priv_dict_to_str(priv)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
cursor = None
try:
if check_implicit_admin:
try:
cursor, db_conn = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception:
pass
if not cursor:
cursor, db_conn = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception as e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
if not sql_log_bin:
cursor.execute("SET SQL_LOG_BIN=0;")
if priv is not None:
try:
mode = get_mode(cursor)
except Exception as e:
module.fail_json(msg=to_native(e))
try:
priv = privileges_unpack(priv, mode)
except Exception as e:
module.fail_json(msg="invalid privileges string: %s" % to_native(e))
if state == "present":
if user_exists(cursor, user, host, host_all):
try:
if update_password == 'always':
changed, msg = user_mod(cursor, user, host, host_all, password, encrypted,
plugin, plugin_hash_string, plugin_auth_string,
priv, append_privs, module)
else:
changed, msg = user_mod(cursor, user, host, host_all, None, encrypted,
plugin, plugin_hash_string, plugin_auth_string,
priv, append_privs, module)
except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
module.fail_json(msg=to_native(e))
else:
if host_all:
module.fail_json(msg="host_all parameter cannot be used when adding a user")
try:
changed = user_add(cursor, user, host, host_all, password, encrypted,
plugin, plugin_hash_string, plugin_auth_string,
priv, module.check_mode)
if changed:
msg = "User added"
except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
module.fail_json(msg=to_native(e))
elif state == "absent":
if user_exists(cursor, user, host, host_all):
changed = user_delete(cursor, user, host, host_all, module.check_mode)
msg = "User deleted"
else:
changed = False
msg = "User doesn't exist"
module.exit_json(changed=changed, user=user, msg=msg)
if __name__ == '__main__':
main()


@@ -0,0 +1,275 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Balazs Pocze <banyek@gawker.com>
# Certain parts are taken from Mark Theunissen's mysqldb module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mysql_variables
short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables.
author:
- Balazs Pocze (@banyek)
options:
variable:
description:
- Variable name to operate on
type: str
required: yes
value:
description:
- If set, then sets the variable value to this
type: str
mode:
description:
- C(global) assigns C(value) to a global system variable which will be changed at runtime
but won't persist across server restarts.
- C(persist) assigns C(value) to a global system variable and persists it to
the mysqld-auto.cnf option file in the data directory
(the variable will survive service restarts).
- C(persist_only) persists C(value) to the mysqld-auto.cnf option file in the data directory
but without setting the global variable runtime value
(the value will be changed after the next service restart).
- Supported by MySQL 8.0 or later.
- For more information see U(https://dev.mysql.com/doc/refman/8.0/en/set-variable.html).
type: str
choices: ['global', 'persist', 'persist_only']
default: global
seealso:
- module: mysql_info
- name: MySQL SET command reference
description: Complete reference of the MySQL SET command documentation.
link: https://dev.mysql.com/doc/refman/8.0/en/set-statement.html
extends_documentation_fragment:
- community.general.mysql
'''
EXAMPLES = r'''
- name: Check for sync_binlog setting
mysql_variables:
variable: sync_binlog
- name: Set read_only variable to 1 persistently
mysql_variables:
variable: read_only
value: 1
mode: persist
'''
RETURN = r'''
queries:
description: List of executed queries which modified DB's state.
returned: if executed
type: list
sample: ["SET GLOBAL `read_only` = 1"]
version_added: '2.10'
'''
import os
import warnings
from re import match
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import SQLParseError, mysql_quote_identifier
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
executed_queries = []
def check_mysqld_auto(module, cursor, mysqlvar):
"""Check variable's value in mysqld-auto.cnf."""
query = ("SELECT VARIABLE_VALUE "
"FROM performance_schema.persisted_variables "
"WHERE VARIABLE_NAME = %s")
    try:
        cursor.execute(query, (mysqlvar,))
        res = cursor.fetchone()
    except Exception as e:
        if "Table 'performance_schema.persisted_variables' doesn't exist" in str(e):
            module.fail_json(msg='Server version must be 8.0 or greater.')
        # Fail on any other error too, otherwise 'res' would be unbound below:
        module.fail_json(msg=to_native(e))
if res:
return res[0]
else:
return None
def typedvalue(value):
"""
Convert value to number whenever possible, return same value
otherwise.
>>> typedvalue('3')
3
>>> typedvalue('3.0')
3.0
>>> typedvalue('foobar')
'foobar'
"""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall()
if len(mysqlvar_val) == 1:
return mysqlvar_val[0][1]
else:
return None
def setvariable(cursor, mysqlvar, value, mode='global'):
""" Set a global mysql variable to a given value
The DB driver will handle quoting of the given value based on its
type; thus numeric strings like '3.0' or '8' are illegal and
should be passed as numeric literals.
"""
if mode == 'persist':
query = "SET PERSIST %s = " % mysql_quote_identifier(mysqlvar, 'vars')
elif mode == 'global':
query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
elif mode == 'persist_only':
query = "SET PERSIST_ONLY %s = " % mysql_quote_identifier(mysqlvar, 'vars')
try:
cursor.execute(query + "%s", (value,))
executed_queries.append(query + "%s" % value)
cursor.fetchall()
result = True
except Exception as e:
result = to_native(e)
return result
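# Standalone sketch of the SQL keyword each mode maps to; identifier quoting
# is normally done by mysql_quote_identifier, the backticks below are assumed:
_mode_kw_demo = {'global': 'SET GLOBAL', 'persist': 'SET PERSIST',
                 'persist_only': 'SET PERSIST_ONLY'}
assert "%s `read_only` = %%s" % _mode_kw_demo['persist'] == "SET PERSIST `read_only` = %s"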
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(type='str'),
login_password=dict(type='str', no_log=True),
login_host=dict(type='str', default='localhost'),
login_port=dict(type='int', default=3306),
login_unix_socket=dict(type='str'),
variable=dict(type='str'),
value=dict(type='str'),
client_cert=dict(type='path', aliases=['ssl_cert']),
client_key=dict(type='path', aliases=['ssl_key']),
ca_cert=dict(type='path', aliases=['ssl_ca']),
connect_timeout=dict(type='int', default=30),
config_file=dict(type='path', default='~/.my.cnf'),
mode=dict(type='str', choices=['global', 'persist', 'persist_only'], default='global'),
),
)
user = module.params["login_user"]
password = module.params["login_password"]
connect_timeout = module.params['connect_timeout']
ssl_cert = module.params["client_cert"]
ssl_key = module.params["client_key"]
ssl_ca = module.params["ca_cert"]
config_file = module.params['config_file']
db = 'mysql'
mysqlvar = module.params["variable"]
value = module.params["value"]
mode = module.params["mode"]
if mysqlvar is None:
module.fail_json(msg="Cannot run without variable to operate with")
if match('^[0-9a-z_.]+$', mysqlvar) is None:
module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
else:
warnings.filterwarnings('error', category=mysql_driver.Warning)
try:
cursor, db_conn = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception as e:
if os.path.exists(config_file):
module.fail_json(msg=("unable to connect to database, check login_user and "
"login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e))))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
mysqlvar_val = None
var_in_mysqld_auto_cnf = None
mysqlvar_val = getvariable(cursor, mysqlvar)
if mysqlvar_val is None:
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
if value is None:
module.exit_json(msg=mysqlvar_val)
if mode in ('persist', 'persist_only'):
var_in_mysqld_auto_cnf = check_mysqld_auto(module, cursor, mysqlvar)
if mode == 'persist_only':
if var_in_mysqld_auto_cnf is None:
mysqlvar_val = False
else:
mysqlvar_val = var_in_mysqld_auto_cnf
# Type values before using them
value_wanted = typedvalue(value)
value_actual = typedvalue(mysqlvar_val)
value_in_auto_cnf = None
if var_in_mysqld_auto_cnf is not None:
value_in_auto_cnf = typedvalue(var_in_mysqld_auto_cnf)
if value_wanted == value_actual and mode in ('global', 'persist'):
if mode == 'persist' and value_wanted == value_in_auto_cnf:
module.exit_json(msg="Variable is already set to requested value globally "
"and stored into mysqld-auto.cnf file.", changed=False)
elif mode == 'global':
module.exit_json(msg="Variable is already set to requested value.", changed=False)
if mode == 'persist_only' and value_wanted == value_in_auto_cnf:
module.exit_json(msg="Variable is already stored into mysqld-auto.cnf "
"with requested value.", changed=False)
try:
result = setvariable(cursor, mysqlvar, value_wanted, mode)
except SQLParseError as e:
result = to_native(e)
if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual,
changed=True, queries=executed_queries)
else:
module.fail_json(msg=result, changed=False)
if __name__ == '__main__':
main()


@@ -0,0 +1,401 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_copy
short_description: Copy data between a file/program and a PostgreSQL table
description:
- Copy data between a file/program and a PostgreSQL table.
options:
copy_to:
description:
- Copy the contents of a table to a file.
- Can also copy the results of a SELECT query.
- Mutually exclusive with I(copy_from) and I(dst).
type: path
aliases: [ to ]
copy_from:
description:
- Copy data from a file to a table (appending the data to whatever is in the table already).
- Mutually exclusive with I(copy_to) and I(src).
type: path
aliases: [ from ]
src:
description:
- Copy data from I(copy_from) to I(src=tablename).
- Used with I(copy_to) only.
type: str
aliases: [ source ]
dst:
description:
- Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
- Used with I(copy_from) only.
type: str
aliases: [ destination ]
columns:
description:
- List of column names for the src/dst table to COPY FROM/TO.
type: list
elements: str
aliases: [ column ]
program:
description:
- Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
- See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
type: bool
default: no
options:
description:
- Options of COPY command.
- See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
type: dict
db:
description:
- Name of database to connect to.
type: str
aliases: [ login_db ]
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
notes:
- Supports PostgreSQL version 9.4+.
- The COPY command is only allowed for database superusers.
- If I(check_mode=yes), the module just checks the src/dst table availability
  and returns the COPY query that has not actually been executed.
- If I(check_mode=yes) and the source has been passed as SQL, the module
  will execute it and roll the transaction back, but pay attention that
  it can affect database performance (e.g., if the SQL collects a lot of data).
seealso:
- name: COPY command reference
description: Complete reference of the COPY command documentation.
link: https://www.postgresql.org/docs/current/sql-copy.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
postgresql_copy:
copy_from: /tmp/data.txt
dst: acme
- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
postgresql_copy:
copy_from: /tmp/data.csv
dst: acme
columns: id,name
options:
format: csv
- name: >
Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
The NULL values are specified as N
postgresql_copy:
copy_from: /tmp/data.csv
dst: bar
options:
delimiter: '|'
null: 'N'
- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
postgresql_copy:
src: acme
copy_to: /tmp/data.txt
- name: Copy data from SELECT query to /tmp/data.csv in CSV format
postgresql_copy:
src: 'SELECT * FROM acme'
copy_to: /tmp/data.csv
options:
format: csv
- name: Copy CSV data from my_table to gzip
postgresql_copy:
src: my_table
copy_to: 'gzip > /tmp/data.csv.gz'
program: yes
options:
format: csv
- name: >
Copy data from columns id, name of table bar to /tmp/data.txt.
Output format is text, vertical-bar-separated, NULL as N
postgresql_copy:
src: bar
columns:
- id
- name
copy_to: /tmp/data.csv
options:
delimiter: '|'
null: 'N'
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
src:
description: Data source.
returned: always
type: str
sample: "mytable"
dst:
description: Data destination.
returned: always
type: str
sample: "/tmp/data.csv"
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
class PgCopyData(object):
"""Implements behavior of COPY FROM, COPY TO PostgreSQL command.
Arguments:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
Attributes:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
changed (bool) -- something was changed after execution or not
executed_queries (list) -- executed queries
dst (str) -- data destination table (when copy_from)
src (str) -- data source table (when copy_to)
opt_need_quotes (tuple) -- values of these options must be passed
to SQL in quotes
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.executed_queries = []
self.changed = False
self.dst = ''
self.src = ''
self.opt_need_quotes = (
'DELIMITER',
'NULL',
'QUOTE',
'ESCAPE',
'ENCODING',
)
def copy_from(self):
"""Implements COPY FROM command behavior."""
self.src = self.module.params['copy_from']
self.dst = self.module.params['dst']
query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
if self.module.params.get('columns'):
query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
query_fragments.append('FROM')
if self.module.params.get('program'):
query_fragments.append('PROGRAM')
query_fragments.append("'%s'" % self.src)
if self.module.params.get('options'):
query_fragments.append(self.__transform_options())
# Note: check mode is implemented here:
if self.module.check_mode:
self.changed = self.__check_table(self.dst)
if self.changed:
self.executed_queries.append(' '.join(query_fragments))
else:
if exec_sql(self, ' '.join(query_fragments), ddl=True):
self.changed = True
def copy_to(self):
"""Implements COPY TO command behavior."""
self.src = self.module.params['src']
self.dst = self.module.params['copy_to']
if 'SELECT ' in self.src.upper():
# If src is SQL SELECT statement:
query_fragments = ['COPY (%s)' % self.src]
else:
# If src is a table:
query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
if self.module.params.get('columns'):
query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
query_fragments.append('TO')
if self.module.params.get('program'):
query_fragments.append('PROGRAM')
query_fragments.append("'%s'" % self.dst)
if self.module.params.get('options'):
query_fragments.append(self.__transform_options())
# Note: check mode is implemented here:
if self.module.check_mode:
self.changed = self.__check_table(self.src)
if self.changed:
self.executed_queries.append(' '.join(query_fragments))
else:
if exec_sql(self, ' '.join(query_fragments), ddl=True):
self.changed = True
def __transform_options(self):
"""Transform options dict into a suitable string."""
for (key, val) in iteritems(self.module.params['options']):
if key.upper() in self.opt_need_quotes:
self.module.params['options'][key] = "'%s'" % val
opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
return '(%s)' % ', '.join(opt)
def __check_table(self, table):
"""Check table or SQL in transaction mode for check_mode.
Return True if it is OK.
Arguments:
table (str) - Table name that needs to be checked.
It can be SQL SELECT statement that was passed
instead of the table name.
"""
if 'SELECT ' in table.upper():
# In this case table is actually SQL SELECT statement.
# If SQL fails, it's handled by exec_sql():
exec_sql(self, table, add_to_executed=False)
# If exec_sql did not fail, it means all is OK:
return True
exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
add_to_executed=False)
# If SQL was executed successfully:
return True
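# A standalone sketch of the options transformation performed by
# __transform_options() above; the input dict is a hypothetical sample and
# the check relies on dict insertion order (Python 3.7+):
def _transform_options_demo(options, need_quotes=('DELIMITER', 'NULL', 'QUOTE',
                                                  'ESCAPE', 'ENCODING')):
    parts = []
    for key, val in options.items():
        if key.upper() in need_quotes:
            val = "'%s'" % val  # these option values must reach SQL in quotes
        parts.append('%s %s' % (key, val))
    return '(%s)' % ', '.join(parts)

assert _transform_options_demo({'format': 'csv', 'delimiter': '|'}) == "(format csv, delimiter '|')"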
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
copy_to=dict(type='path', aliases=['to']),
copy_from=dict(type='path', aliases=['from']),
src=dict(type='str', aliases=['source']),
dst=dict(type='str', aliases=['destination']),
columns=dict(type='list', elements='str', aliases=['column']),
options=dict(type='dict'),
program=dict(type='bool', default=False),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['copy_from', 'copy_to'],
['copy_from', 'src'],
['copy_to', 'dst'],
]
)
# Note: we don't need to check mutually exclusive params here, because they are
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
if module.params.get('copy_from') and not module.params.get('dst'):
module.fail_json(msg='dst param is necessary with copy_from')
elif module.params.get('copy_to') and not module.params.get('src'):
module.fail_json(msg='src param is necessary with copy_to')
# Connect to DB and make cursor object:
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
data = PgCopyData(module, cursor)
# Note: parameters like dst, src, etc. are taken from the module object
# into the PgCopyData object, therefore there is no need to pass args
# to the methods below.
# Note: check mode is implemented inside the methods below
# by checking passed module.check_mode arg.
if module.params.get('copy_to'):
data.copy_to()
elif module.params.get('copy_from'):
data.copy_from()
# Finish:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Return some values:
module.exit_json(
changed=data.changed,
queries=data.executed_queries,
src=data.src,
dst=data.dst,
)
if __name__ == '__main__':
main()


@@ -0,0 +1,650 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_db
short_description: Add or remove PostgreSQL databases from a remote host.
description:
- Add or remove PostgreSQL databases from a remote host.
options:
name:
description:
- Name of the database to add or remove
type: str
required: true
aliases: [ db ]
port:
description:
- Database port to connect (if needed)
type: int
default: 5432
aliases:
- login_port
owner:
description:
- Name of the role to set as owner of the database
type: str
template:
description:
- Template used to create the database
type: str
encoding:
description:
- Encoding of the database
type: str
lc_collate:
description:
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
type: str
lc_ctype:
description:
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
is used as template.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
state:
description:
- The database state.
- C(present) implies that the database should be created if necessary.
- C(absent) implies that the database should be removed if present.
- C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
  Note that in some PostgreSQL versions, pg_dump, an embedded PostgreSQL utility used by the module,
  returns rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf, etc.),
  so the module returns changed=True even though the dump has not actually been done. Please make sure
  that your version of pg_dump returns rc 1 in this case.
- C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
- The format of the backup will be detected based on the target name.
- Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
- Supported formats for dump and restore include C(.sql) and C(.tar)
type: str
choices: [ absent, dump, present, restore ]
default: present
target:
description:
- File to back up or restore from.
- Used when I(state) is C(dump) or C(restore).
type: path
target_opts:
description:
- Further arguments for pg_dump or pg_restore.
- Used when I(state) is C(dump) or C(restore).
type: str
maintenance_db:
description:
- The value specifies the initial database (which is also called the maintenance DB) that Ansible connects to.
type: str
default: postgres
conn_limit:
description:
- Specifies the database connection limit.
type: str
tablespace:
description:
- The tablespace to set for the database
U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
- If you want to move the database back to the default tablespace,
explicitly set this to pg_default.
type: path
dump_extra_args:
description:
- Provides additional arguments when I(state) is C(dump).
- Cannot be used with dump-file-format-related arguments like C(--format=d).
type: str
seealso:
- name: CREATE DATABASE reference
description: Complete reference of the CREATE DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-createdatabase.html
- name: DROP DATABASE reference
description: Complete reference of the DROP DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
- name: pg_dump reference
description: Complete reference of pg_dump documentation.
link: https://www.postgresql.org/docs/current/app-pgdump.html
- name: pg_restore reference
description: Complete reference of pg_restore documentation.
link: https://www.postgresql.org/docs/current/app-pgrestore.html
- module: postgresql_tablespace
- module: postgresql_info
- module: postgresql_ping
notes:
- States C(dump) and C(restore) don't require I(psycopg2) since Ansible 2.8.
author: "Ansible Core Team"
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create a new database with name "acme"
postgresql_db:
name: acme
# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
- name: Create a new database with name "acme" and specific encoding and locale settings
postgresql_db:
name: acme
encoding: UTF-8
lc_collate: de_DE.UTF-8
lc_ctype: de_DE.UTF-8
template: template0
# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
postgresql_db:
name: acme
conn_limit: "100"
- name: Dump an existing database to a file
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
- name: Dump an existing database to a file excluding the test table
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
dump_extra_args: --exclude-table=test
- name: Dump an existing database to a file (with compression)
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql.gz
- name: Dump a single schema for an existing database
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
target_opts: "-n public"
# Note: In the example below, if database foo exists and has another tablespace
# the tablespace will be changed to foo. Access to the database will be locked
# until the copying of database files is finished.
- name: Create a new database called foo in tablespace bar
postgresql_db:
name: foo
tablespace: bar
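# The task below is a hedged sketch of a restore, assuming /tmp/acme.sql.gz was
# produced by a dump task like the one above; the compression format is
# detected from the target file extension.
- name: Restore database acme from a gzip-compressed dump
  postgresql_db:
    name: acme
    state: restore
    target: /tmp/acme.sql.gz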
'''
RETURN = r'''
executed_commands:
  description: List of commands the module tried to run.
returned: always
type: list
sample: ["CREATE DATABASE acme"]
version_added: '2.10'
'''
import os
import subprocess
import traceback
try:
import psycopg2
import psycopg2.extras
except ImportError:
HAS_PSYCOPG2 = False
else:
HAS_PSYCOPG2 = True
import ansible_collections.community.general.plugins.module_utils.postgres as pgutils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_native
executed_commands = []
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, db, owner):
query = 'ALTER DATABASE %s OWNER TO "%s"' % (
pg_quote_identifier(db, 'database'),
owner)
executed_commands.append(query)
cursor.execute(query)
return True
def set_conn_limit(cursor, db, conn_limit):
query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
pg_quote_identifier(db, 'database'),
conn_limit)
executed_commands.append(query)
cursor.execute(query)
return True
def get_encoding_id(cursor, encoding):
query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
cursor.execute(query, {'encoding': encoding})
return cursor.fetchone()['encoding_id']
def get_db_info(cursor, db):
query = """
SELECT rolname AS owner,
pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
spcname AS tablespace
FROM pg_database
JOIN pg_roles ON pg_roles.oid = pg_database.datdba
JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
WHERE datname = %(db)s
"""
cursor.execute(query, {'db': db})
return cursor.fetchone()
def db_exists(cursor, db):
query = "SELECT * FROM pg_database WHERE datname=%(db)s"
cursor.execute(query, {'db': db})
return cursor.rowcount == 1
def db_delete(cursor, db):
if db_exists(cursor, db):
query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
executed_commands.append(query)
cursor.execute(query)
return True
else:
return False
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
if not db_exists(cursor, db):
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
if owner:
query_fragments.append('OWNER "%s"' % owner)
if template:
query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
if encoding:
query_fragments.append('ENCODING %(enc)s')
if lc_collate:
query_fragments.append('LC_COLLATE %(collate)s')
if lc_ctype:
query_fragments.append('LC_CTYPE %(ctype)s')
if tablespace:
query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
if conn_limit:
query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query = ' '.join(query_fragments)
executed_commands.append(cursor.mogrify(query, params))
cursor.execute(query, params)
return True
else:
db_info = get_db_info(cursor, db)
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
raise NotSupportedError(
'Changing database encoding is not supported. '
'Current encoding: %s' % db_info['encoding']
)
elif lc_collate and lc_collate != db_info['lc_collate']:
raise NotSupportedError(
'Changing LC_COLLATE is not supported. '
'Current LC_COLLATE: %s' % db_info['lc_collate']
)
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
raise NotSupportedError(
                'Changing LC_CTYPE is not supported. '
'Current LC_CTYPE: %s' % db_info['lc_ctype']
)
else:
changed = False
if owner and owner != db_info['owner']:
changed = set_owner(cursor, db, owner)
if conn_limit and conn_limit != str(db_info['conn_limit']):
changed = set_conn_limit(cursor, db, conn_limit)
if tablespace and tablespace != db_info['tablespace']:
changed = set_tablespace(cursor, db, tablespace)
return changed
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
if not db_exists(cursor, db):
return False
else:
db_info = get_db_info(cursor, db)
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
return False
elif lc_collate and lc_collate != db_info['lc_collate']:
return False
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
return False
elif owner and owner != db_info['owner']:
return False
elif conn_limit and conn_limit != str(db_info['conn_limit']):
return False
elif tablespace and tablespace != db_info['tablespace']:
return False
else:
return True
def db_dump(module, target, target_opts="",
db=None,
dump_extra_args=None,
user=None,
password=None,
host=None,
port=None,
**kw):
flags = login_flags(db, host, port, user, db_prefix=False)
cmd = module.get_bin_path('pg_dump', True)
comp_prog_path = None
if os.path.splitext(target)[-1] == '.tar':
flags.append(' --format=t')
elif os.path.splitext(target)[-1] == '.pgc':
flags.append(' --format=c')
if os.path.splitext(target)[-1] == '.gz':
if module.get_bin_path('pigz'):
comp_prog_path = module.get_bin_path('pigz', True)
else:
comp_prog_path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xz', True)
cmd += "".join(flags)
if dump_extra_args:
cmd += " {0} ".format(dump_extra_args)
if target_opts:
cmd += " {0} ".format(target_opts)
if comp_prog_path:
# Use a fifo to be notified of an error in pg_dump
# Using shell pipe has no way to return the code of the first command
# in a portable way.
fifo = os.path.join(module.tmpdir, 'pg_fifo')
os.mkfifo(fifo)
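        # The assembled shell command has the shape (illustrative):
        #   <compressor> < <fifo> > <target> & <pg_dump ...> > <fifo>
        # The compressor runs in the background, so the shell's exit status
        # returned by do_with_password() is pg_dump's, not the compressor's.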
cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
else:
cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
return do_with_password(module, cmd, password)
def db_restore(module, target, target_opts="",
db=None,
user=None,
password=None,
host=None,
port=None,
**kw):
flags = login_flags(db, host, port, user)
comp_prog_path = None
cmd = module.get_bin_path('psql', True)
if os.path.splitext(target)[-1] == '.sql':
flags.append(' --file={0}'.format(target))
elif os.path.splitext(target)[-1] == '.tar':
flags.append(' --format=Tar')
cmd = module.get_bin_path('pg_restore', True)
elif os.path.splitext(target)[-1] == '.pgc':
flags.append(' --format=Custom')
cmd = module.get_bin_path('pg_restore', True)
elif os.path.splitext(target)[-1] == '.gz':
comp_prog_path = module.get_bin_path('zcat', True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzcat', True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xzcat', True)
cmd += "".join(flags)
if target_opts:
cmd += " {0} ".format(target_opts)
if comp_prog_path:
env = os.environ.copy()
if password:
env = {"PGPASSWORD": password}
p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
(stdout2, stderr2) = p2.communicate()
p1.stdout.close()
p1.wait()
if p1.returncode != 0:
stderr1 = p1.stderr.read()
return p1.returncode, '', stderr1, 'cmd: ****'
else:
return p2.returncode, '', stderr2, 'cmd: ****'
else:
cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
return do_with_password(module, cmd, password)
def login_flags(db, host, port, user, db_prefix=True):
"""
returns a list of connection argument strings each prefixed
with a space and quoted where necessary to later be combined
in a single shell string with `"".join(rv)`
db_prefix determines if "--dbname" is prefixed to the db argument,
since the argument was introduced in 9.3.
"""
flags = []
if db:
if db_prefix:
flags.append(' --dbname={0}'.format(shlex_quote(db)))
else:
flags.append(' {0}'.format(shlex_quote(db)))
if host:
flags.append(' --host={0}'.format(host))
if port:
flags.append(' --port={0}'.format(port))
if user:
flags.append(' --username={0}'.format(user))
return flags
def do_with_password(module, cmd, password):
env = {}
if password:
env = {"PGPASSWORD": password}
executed_commands.append(cmd)
    # module.run_command() returns (rc, stdout, stderr):
    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
    return rc, stdout, stderr, cmd
def set_tablespace(cursor, db, tablespace):
query = "ALTER DATABASE %s SET TABLESPACE %s" % (
pg_quote_identifier(db, 'database'),
pg_quote_identifier(tablespace, 'tablespace'))
executed_commands.append(query)
cursor.execute(query)
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = pgutils.postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', required=True, aliases=['name']),
owner=dict(type='str', default=''),
template=dict(type='str', default=''),
encoding=dict(type='str', default=''),
lc_collate=dict(type='str', default=''),
lc_ctype=dict(type='str', default=''),
state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
target=dict(type='path', default=''),
target_opts=dict(type='str', default=''),
maintenance_db=dict(type='str', default="postgres"),
session_role=dict(type='str'),
conn_limit=dict(type='str', default=''),
tablespace=dict(type='path', default=''),
dump_extra_args=dict(type='str', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
db = module.params["db"]
owner = module.params["owner"]
template = module.params["template"]
encoding = module.params["encoding"]
lc_collate = module.params["lc_collate"]
lc_ctype = module.params["lc_ctype"]
target = module.params["target"]
target_opts = module.params["target_opts"]
state = module.params["state"]
changed = False
maintenance_db = module.params['maintenance_db']
session_role = module.params["session_role"]
conn_limit = module.params['conn_limit']
tablespace = module.params['tablespace']
dump_extra_args = module.params['dump_extra_args']
raw_connection = state in ("dump", "restore")
if not raw_connection:
pgutils.ensure_required_libs(module)
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != '' and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if target == "":
target = "{0}/{1}.sql".format(os.getcwd(), db)
target = os.path.expanduser(target)
if not raw_connection:
try:
db_connection = psycopg2.connect(database=maintenance_db, **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
exception=traceback.format_exc())
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
if session_role:
try:
cursor.execute('SET ROLE "%s"' % session_role)
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = db_exists(cursor, db)
elif state == "present":
changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
if state == "absent":
try:
changed = db_delete(cursor, db)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state in ("dump", "restore"):
            method = db_dump if state == "dump" else db_restore
try:
if state == 'dump':
rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
else:
rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
if rc != 0:
module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
else:
module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
executed_commands=executed_commands)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,409 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database
description:
- Add or remove PostgreSQL extensions from a database.
options:
name:
description:
- Name of the extension to add or remove.
required: true
type: str
aliases:
- ext
db:
description:
- Name of the database to add or remove the extension to/from.
required: true
type: str
aliases:
- login_db
schema:
description:
- Name of the schema to add the extension to.
type: str
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
state:
description:
- The database extension state.
default: present
choices: [ absent, present ]
type: str
cascade:
description:
- Automatically install/remove any extensions that this extension depends on
that are not already installed/removed (supported since PostgreSQL 9.6).
type: bool
default: no
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
version:
description:
- Extension version to add or update to. Has effect with I(state=present) only.
- If not specified, the latest extension version will be created.
- It can't downgrade an extension version.
When version downgrade is needed, remove the extension and create new one with appropriate version.
- Set I(version=latest) to update the extension to the latest available version.
type: str
seealso:
- name: PostgreSQL extensions
description: General information about PostgreSQL extensions.
link: https://www.postgresql.org/docs/current/external-extensions.html
- name: CREATE EXTENSION reference
description: Complete reference of the CREATE EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-createextension.html
- name: ALTER EXTENSION reference
description: Complete reference of the ALTER EXTENSION command documentation.
link: https://www.postgresql.org/docs/current/sql-alterextension.html
- name: DROP EXTENSION reference
description: Complete reference of the DROP EXTENSION command documentation.
  link: https://www.postgresql.org/docs/current/sql-dropextension.html
notes:
- The default authentication assumes that you are either logging in as
or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter.
- You must ensure that C(psycopg2) is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case),
then PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Daniel Schep (@dschep)
- Thomas O'Donnell (@andytom)
- Sandro Santilli (@strk)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Adds postgis extension to the database acme in the schema foo
postgresql_ext:
name: postgis
db: acme
schema: foo
- name: Removes postgis extension from the database acme
postgresql_ext:
name: postgis
db: acme
state: absent
- name: Adds earthdistance extension to the database template1 cascade
postgresql_ext:
name: earthdistance
db: template1
cascade: true
# In the example below, if earthdistance extension is installed,
# it will be removed too because it depends on cube:
- name: Removes cube extension from the database acme cascade
postgresql_ext:
name: cube
db: acme
cascade: yes
state: absent
- name: Create extension foo of version 1.2 or update it if it's already created
postgresql_ext:
db: acme
name: foo
version: 1.2
- name: Assuming extension foo is created, update it to the latest version
postgresql_ext:
db: acme
name: foo
version: latest
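# The two tasks below are a hedged sketch of the documented downgrade
# workaround (direct version downgrade is not supported): drop the extension,
# then re-create it at the desired lower version. Names and versions are
# illustrative.
- name: Remove extension foo before downgrading it
  postgresql_ext:
    db: acme
    name: foo
    state: absent
- name: Re-create extension foo at the lower version 1.1
  postgresql_ext:
    db: acme
    name: foo
    version: 1.1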
'''
RETURN = r'''
query:
description: List of executed queries.
returned: always
type: list
sample: ["DROP EXTENSION \"acme\""]
'''
import traceback
from distutils.version import LooseVersion
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
executed_queries = []
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
def ext_delete(cursor, ext, cascade):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
if cascade:
query += " CASCADE"
cursor.execute(query)
executed_queries.append(query)
return True
else:
return False
def ext_update_version(cursor, ext, version):
"""Update extension version.
Return True if success.
Args:
cursor (cursor) -- cursor object of psycopg2 library
ext (str) -- extension name
version (str) -- extension version
"""
if version != 'latest':
query = ("ALTER EXTENSION \"%s\"" % ext)
cursor.execute(query + " UPDATE TO %(ver)s", {'ver': version})
executed_queries.append(cursor.mogrify(query + " UPDATE TO %(ver)s", {'ver': version}))
else:
query = ("ALTER EXTENSION \"%s\" UPDATE" % ext)
cursor.execute(query)
executed_queries.append(query)
return True
def ext_create(cursor, ext, schema, cascade, version):
query = "CREATE EXTENSION \"%s\"" % ext
if schema:
query += " WITH SCHEMA \"%s\"" % schema
if version:
query += " VERSION %(ver)s"
if cascade:
query += " CASCADE"
if version:
cursor.execute(query, {'ver': version})
executed_queries.append(cursor.mogrify(query, {'ver': version}))
else:
cursor.execute(query)
executed_queries.append(query)
return True
def ext_get_versions(cursor, ext):
"""
Get the current created extension version and available versions.
Return tuple (current_version, [list of available versions]).
    Note: the list of available versions contains only versions
    that are higher than the currently created version.
If the extension is not created, this list will contain all
available versions.
Args:
cursor (cursor) -- cursor object of psycopg2 library
ext (str) -- extension name
"""
# 1. Get the current extension version:
query = ("SELECT extversion FROM pg_catalog.pg_extension "
"WHERE extname = %(ext)s")
current_version = '0'
cursor.execute(query, {'ext': ext})
res = cursor.fetchone()
if res:
current_version = res[0]
# 2. Get available versions:
query = ("SELECT version FROM pg_available_extension_versions "
"WHERE name = %(ext)s")
cursor.execute(query, {'ext': ext})
res = cursor.fetchall()
available_versions = []
if res:
# Make the list of available versions:
for line in res:
if LooseVersion(line[0]) > LooseVersion(current_version):
available_versions.append(line['version'])
if current_version == '0':
current_version = False
return (current_version, available_versions)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type="str", required=True, aliases=["login_db"]),
ext=dict(type="str", required=True, aliases=["name"]),
schema=dict(type="str"),
state=dict(type="str", default="present", choices=["absent", "present"]),
cascade=dict(type="bool", default=False),
session_role=dict(type="str"),
version=dict(type="str"),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
ext = module.params["ext"]
schema = module.params["schema"]
state = module.params["state"]
cascade = module.params["cascade"]
version = module.params["version"]
changed = False
if version and state == 'absent':
module.warn("Parameter version is ignored when state=absent")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
try:
# Get extension info and available versions:
curr_version, available_versions = ext_get_versions(cursor, ext)
if state == "present":
if version == 'latest':
if available_versions:
version = available_versions[-1]
else:
version = ''
if version:
# If the specific version is passed and it is not available for update:
if version not in available_versions:
if not curr_version:
module.fail_json(msg="Passed version '%s' is not available" % version)
elif LooseVersion(curr_version) == LooseVersion(version):
changed = False
else:
module.fail_json(msg="Passed version '%s' is lower than "
"the current created version '%s' or "
"the passed version is not available" % (version, curr_version))
                # If the specific version is passed and it is higher than the current version:
if curr_version and version:
if LooseVersion(curr_version) < LooseVersion(version):
if module.check_mode:
changed = True
else:
changed = ext_update_version(cursor, ext, version)
                # If the passed version is already the current one:
if curr_version == version:
changed = False
                # If the ext doesn't exist but is available for installation:
elif not curr_version and available_versions:
if module.check_mode:
changed = True
else:
changed = ext_create(cursor, ext, schema, cascade, version)
# If version is not passed:
else:
if not curr_version:
                    # If the ext doesn't exist but is available for installation:
if available_versions:
if module.check_mode:
changed = True
else:
changed = ext_create(cursor, ext, schema, cascade, version)
                    # If the ext doesn't exist and is not available:
else:
module.fail_json(msg="Extension %s is not installed" % ext)
elif state == "absent":
if curr_version:
if module.check_mode:
changed = True
else:
changed = ext_delete(cursor, ext, cascade)
else:
changed = False
except Exception as e:
db_connection.close()
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
db_connection.close()
module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,586 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_idx
short_description: Create or drop indexes from a PostgreSQL database
description:
- Create or drop indexes from a PostgreSQL database.
options:
idxname:
description:
- Name of the index to create or drop.
type: str
required: true
aliases:
- name
db:
description:
- Name of database to connect to and where the index will be created/dropped.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
schema:
description:
- Name of a database schema where the index will be created.
type: str
state:
description:
- Index state.
- C(present) implies the index will be created if it does not exist.
- C(absent) implies the index will be dropped if it exists.
type: str
default: present
choices: [ absent, present ]
table:
description:
    - Table to create the index on.
- Mutually exclusive with I(state=absent).
type: str
columns:
description:
    - List of index columns that need to be covered by the index.
- Mutually exclusive with I(state=absent).
type: list
elements: str
aliases:
- column
cond:
description:
- Index conditions.
- Mutually exclusive with I(state=absent).
type: str
idxtype:
description:
- Index type (like btree, gist, gin, etc.).
- Mutually exclusive with I(state=absent).
type: str
aliases:
- type
concurrent:
description:
- Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
    - Pay attention: if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
      For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
    - If the building process was interrupted for any reason when I(concurrent=yes), the index becomes invalid.
      In this case it should be dropped and created again.
- Mutually exclusive with I(cascade=yes).
type: bool
default: yes
unique:
description:
- Enable unique index.
- Only btree currently supports unique indexes.
type: bool
default: no
tablespace:
description:
- Set a tablespace for the index.
- Mutually exclusive with I(state=absent).
required: false
type: str
storage_params:
description:
- Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
- Mutually exclusive with I(state=absent).
type: list
elements: str
cascade:
description:
- Automatically drop objects that depend on the index,
and in turn all objects that depend on those objects.
    - It is used only with I(state=absent).
    - Mutually exclusive with I(concurrent=yes).
type: bool
default: no
seealso:
- module: postgresql_table
- module: postgresql_tablespace
- name: PostgreSQL indexes reference
description: General information about PostgreSQL indexes.
link: https://www.postgresql.org/docs/current/indexes.html
- name: CREATE INDEX reference
description: Complete reference of the CREATE INDEX command documentation.
link: https://www.postgresql.org/docs/current/sql-createindex.html
- name: ALTER INDEX reference
description: Complete reference of the ALTER INDEX command documentation.
link: https://www.postgresql.org/docs/current/sql-alterindex.html
- name: DROP INDEX reference
description: Complete reference of the DROP INDEX command documentation.
link: https://www.postgresql.org/docs/current/sql-dropindex.html
notes:
- The index building process can affect database performance.
- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
postgresql_idx:
db: acme
table: products
columns: id,name
name: test_idx
- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
postgresql_idx:
db: acme
table: products
columns:
- id
- name
idxname: test_idx
tablespace: ssd
storage_params:
- fillfactor=90
- name: Create gist index test_gist_idx concurrently on column geo_data of table map
postgresql_idx:
db: somedb
table: map
idxtype: gist
columns: geo_data
idxname: test_gist_idx
# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
- name: Create gin index gin0_idx not concurrently on column comment of table test
postgresql_idx:
idxname: gin0_idx
table: test
columns: comment gin_trgm_ops
concurrent: no
idxtype: gin
- name: Drop btree test_idx concurrently
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
- name: Drop test_idx cascade
postgresql_idx:
db: mydb
idxname: test_idx
state: absent
cascade: yes
concurrent: no
- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
postgresql_idx:
db: mydb
table: test
columns: id,comment
idxname: test_idx
cond: id > 1
- name: Create unique btree index if not exists test_unique_idx on column name of table products
postgresql_idx:
db: acme
table: products
columns: name
name: test_unique_idx
unique: yes
concurrent: no
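# The two tasks below are a hedged sketch of the documented recovery path for
# an index left invalid by an interrupted concurrent build: drop it, then
# create it again (names are illustrative).
- name: Drop invalid index test_idx
  postgresql_idx:
    db: acme
    idxname: test_idx
    state: absent
- name: Re-create index test_idx concurrently
  postgresql_idx:
    db: acme
    table: products
    columns: id
    idxname: test_idx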
'''
RETURN = r'''
name:
description: Index name.
returned: always
type: str
sample: 'foo_idx'
state:
description: Index state.
returned: always
type: str
sample: 'present'
schema:
description: Schema where index exists.
returned: always
type: str
sample: 'public'
tablespace:
description: Tablespace where index exists.
returned: always
type: str
sample: 'ssd'
query:
  description: Query that the module tried to execute.
returned: always
type: str
sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
storage_params:
description: Index storage parameters.
returned: always
type: list
sample: [ "fillfactor=90" ]
valid:
description: Index validity.
returned: always
type: bool
sample: true
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
# ===========================================
# PostgreSQL module specific support methods.
#
class Index(object):
"""Class for working with PostgreSQL indexes.
TODO:
1. Add possibility to change ownership
2. Add possibility to change tablespace
3. Add list called executed_queries (executed_query should be left too)
4. Use self.module instead of passing arguments to the methods whenever possible
Args:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
schema (str) -- name of the index schema
name (str) -- name of the index
Attrs:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
schema (str) -- name of the index schema
name (str) -- name of the index
exists (bool) -- flag the index exists in the DB or not
        info (dict) -- dict that contains information about the index
executed_query (str) -- executed query
"""
def __init__(self, module, cursor, schema, name):
self.name = name
if schema:
self.schema = schema
else:
self.schema = 'public'
self.module = module
self.cursor = cursor
self.info = {
'name': self.name,
'state': 'absent',
'schema': '',
'tblname': '',
'tblspace': '',
'valid': True,
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_query = ''
def get_info(self):
"""Refresh index info.
Return self.info dict.
"""
self.__exists_in_db()
return self.info
def __exists_in_db(self):
"""Check index existence, collect info, add it to self.info dict.
Return True if the index exists, otherwise, return False.
"""
query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
"pi.indisvalid, c.reloptions "
"FROM pg_catalog.pg_indexes AS i "
"JOIN pg_catalog.pg_class AS c "
"ON i.indexname = c.relname "
"JOIN pg_catalog.pg_index AS pi "
"ON c.oid = pi.indexrelid "
"WHERE i.indexname = %(name)s")
res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
if res:
self.exists = True
self.info = dict(
name=self.name,
state='present',
schema=res[0][0],
tblname=res[0][1],
tblspace=res[0][2] if res[0][2] else '',
valid=res[0][3],
storage_params=res[0][4] if res[0][4] else [],
)
return True
else:
self.exists = False
return False
def create(self, tblname, idxtype, columns, cond, tblspace, storage_params, concurrent=True, unique=False):
"""Create PostgreSQL index.
Return True if success, otherwise, return False.
Args:
tblname (str) -- name of a table for the index
idxtype (str) -- type of the index like BTREE, BRIN, etc
            columns (str) -- string of comma-separated columns that need to be covered by the index
            cond (str) -- condition (WHERE clause) for a partial index, may be None
            tblspace (str) -- tablespace for storing the index
            storage_params (str) -- string of comma-separated storage parameters
        Kwargs:
            concurrent (bool) -- build index in concurrent mode, default True
            unique (bool) -- create a unique index, default False
"""
if self.exists:
return False
if idxtype is None:
idxtype = "BTREE"
query = 'CREATE'
if unique:
query += ' UNIQUE'
query += ' INDEX'
if concurrent:
query += ' CONCURRENTLY'
query += ' %s' % self.name
        if self.schema:
            query += ' ON %s.%s ' % (self.schema, tblname)
        else:
            query += ' ON public.%s ' % tblname
query += 'USING %s (%s)' % (idxtype, columns)
if storage_params:
query += ' WITH (%s)' % storage_params
if tblspace:
query += ' TABLESPACE %s' % tblspace
if cond:
query += ' WHERE %s' % cond
self.executed_query = query
if exec_sql(self, query, ddl=True, add_to_executed=False):
return True
return False
def drop(self, schema, cascade=False, concurrent=True):
"""Drop PostgreSQL index.
Return True if success, otherwise, return False.
Args:
schema (str) -- name of the index schema
Kwargs:
cascade (bool) -- automatically drop objects that depend on the index,
default False
            concurrent (bool) -- drop the index in concurrent mode, default True
"""
if not self.exists:
return False
query = 'DROP INDEX'
if concurrent:
query += ' CONCURRENTLY'
if not schema:
query += ' public.%s' % self.name
else:
query += ' %s.%s' % (schema, self.name)
if cascade:
query += ' CASCADE'
self.executed_query = query
if exec_sql(self, query, ddl=True, add_to_executed=False):
return True
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
idxname=dict(type='str', required=True, aliases=['name']),
db=dict(type='str', aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present']),
concurrent=dict(type='bool', default=True),
unique=dict(type='bool', default=False),
table=dict(type='str'),
idxtype=dict(type='str', aliases=['type']),
columns=dict(type='list', elements='str', aliases=['column']),
cond=dict(type='str'),
session_role=dict(type='str'),
tablespace=dict(type='str'),
storage_params=dict(type='list', elements='str'),
cascade=dict(type='bool', default=False),
schema=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
idxname = module.params["idxname"]
state = module.params["state"]
concurrent = module.params["concurrent"]
unique = module.params["unique"]
table = module.params["table"]
idxtype = module.params["idxtype"]
columns = module.params["columns"]
cond = module.params["cond"]
tablespace = module.params["tablespace"]
storage_params = module.params["storage_params"]
cascade = module.params["cascade"]
schema = module.params["schema"]
if concurrent and cascade:
module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
if unique and (idxtype and idxtype != 'btree'):
module.fail_json(msg="Only btree currently supports unique indexes")
if state == 'present':
if not table:
module.fail_json(msg="Table must be specified")
if not columns:
module.fail_json(msg="At least one column must be specified")
else:
if table or columns or cond or idxtype or tablespace:
module.fail_json(msg="Index %s is going to be removed, so it does not "
"make sense to pass a table name, columns, conditions, "
"index type, or tablespace" % idxname)
if cascade and state != 'absent':
module.fail_json(msg="cascade parameter used only with state=absent")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Set defaults:
changed = False
# Do job:
index = Index(module, cursor, schema, idxname)
kw = index.get_info()
kw['query'] = ''
#
# check_mode start
    if module.check_mode:
        # The index needs changing exactly when its existence
        # does not match the desired state:
        kw['changed'] = (state == 'present') != index.exists
        module.exit_json(**kw)
# check_mode end
#
if state == "present":
if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
columns = ','.join(columns)
if storage_params:
storage_params = ','.join(storage_params)
changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
if changed:
kw = index.get_info()
kw['state'] = 'present'
kw['query'] = index.executed_query
else:
changed = index.drop(schema, cascade, concurrent)
if changed:
kw['state'] = 'absent'
kw['query'] = index.executed_query
if not kw['valid']:
db_connection.rollback()
module.warn("Index %s is invalid! ROLLBACK" % idxname)
if not concurrent:
db_connection.commit()
kw['changed'] = changed
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()

File diff suppressed because it is too large

View file

@ -0,0 +1,350 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database
description:
- Adds, removes or changes procedural languages with a PostgreSQL database.
    - This module allows you to add a language, remove a language or change the trust
      relationship with a PostgreSQL database.
- The module can be used on the machine where executed or on a remote host.
- When removing a language from a database, it is possible that dependencies prevent
the database from being removed. In that case, you can specify I(cascade=yes) to
automatically drop objects that depend on the language (such as functions in the
language).
- In case the language can't be deleted because it is required by the
database system, you can specify I(fail_on_drop=no) to ignore the error.
- Be careful when marking a language as trusted since this could be a potential
security breach. Untrusted languages allow only users with the PostgreSQL superuser
privilege to use this language to create new functions.
options:
lang:
description:
- Name of the procedural language to add, remove or change.
required: true
type: str
aliases:
- name
trust:
description:
- Make this language trusted for the selected db.
type: bool
default: 'no'
db:
description:
- Name of database to connect to and where the language will be added, removed or changed.
type: str
aliases:
- login_db
required: true
force_trust:
description:
- Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
- Use with care!
type: bool
default: 'no'
fail_on_drop:
description:
- If C(yes), fail when removing a language. Otherwise just log and continue.
- In some cases, it is not possible to remove a language (used by the db-system).
- When dependencies block the removal, consider using I(cascade).
type: bool
default: 'yes'
cascade:
description:
    - When dropping a language, also delete objects that depend on this language.
- Only used when I(state=absent).
type: bool
default: 'no'
session_role:
description:
- Switch to session_role after connecting.
- The specified I(session_role) must be a role that the current I(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though the I(session_role) were the one that had logged in originally.
type: str
state:
description:
- The state of the language for the selected database.
type: str
default: present
choices: [ absent, present ]
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
owner:
description:
- Set an owner for the language.
- Ignored when I(state=absent).
type: str
seealso:
- name: PostgreSQL languages
description: General information about PostgreSQL languages.
link: https://www.postgresql.org/docs/current/xplang.html
- name: CREATE LANGUAGE reference
description: Complete reference of the CREATE LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-createlanguage.html
- name: ALTER LANGUAGE reference
description: Complete reference of the ALTER LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
- name: DROP LANGUAGE reference
description: Complete reference of the DROP LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-droplanguage.html
author:
- Jens Depuydt (@jensdepuydt)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Add language pltclu to database testdb if it doesn't exist
  postgresql_lang:
    db: testdb
    lang: pltclu
    state: present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
# Marks the language as trusted if it exists but isn't trusted yet.
# force_trust makes sure that the language will be marked as trusted
- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
postgresql_lang:
db: testdb
lang: pltclu
state: present
trust: yes
force_trust: yes
- name: Remove language pltclu from database testdb
postgresql_lang:
db: testdb
lang: pltclu
state: absent
- name: Remove language pltclu from database testdb and remove all dependencies
postgresql_lang:
db: testdb
lang: pltclu
state: absent
cascade: yes
- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
postgresql_lang:
db: testdb
lang: pltclu
state: absent
fail_on_drop: no
- name: In testdb change owner of mylang to alice
postgresql_lang:
db: testdb
lang: mylang
owner: alice
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: ['CREATE LANGUAGE "acme"']
version_added: '2.8'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
executed_queries = []
def lang_exists(cursor, lang):
"""Checks if language exists for db"""
query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
cursor.execute(query, {'lang': lang})
return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
"""Checks if language is trusted for db"""
query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
cursor.execute(query, {'lang': lang})
return cursor.fetchone()[0]
def lang_altertrust(cursor, lang, trust):
"""Changes if language is trusted for db"""
query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
cursor.execute(query, {'trust': trust, 'lang': lang})
executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
return True
def lang_add(cursor, lang, trust):
"""Adds language for db"""
if trust:
query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
else:
query = 'CREATE LANGUAGE "%s"' % lang
executed_queries.append(query)
cursor.execute(query)
return True
def lang_drop(cursor, lang, cascade):
"""Drops language for db"""
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
try:
if cascade:
query = "DROP LANGUAGE \"%s\" CASCADE" % lang
else:
query = "DROP LANGUAGE \"%s\"" % lang
executed_queries.append(query)
cursor.execute(query)
except Exception:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return True
def get_lang_owner(cursor, lang):
"""Get language owner.
Args:
cursor (cursor): psycopg2 cursor object.
lang (str): language name.
"""
query = ("SELECT r.rolname FROM pg_language l "
"JOIN pg_roles r ON l.lanowner = r.oid "
"WHERE l.lanname = %(lang)s")
cursor.execute(query, {'lang': lang})
return cursor.fetchone()[0]
def set_lang_owner(cursor, lang, owner):
"""Set language owner.
Args:
cursor (cursor): psycopg2 cursor object.
lang (str): language name.
owner (str): name of new owner.
"""
query = "ALTER LANGUAGE \"%s\" OWNER TO %s" % (lang, owner)
executed_queries.append(query)
cursor.execute(query)
return True
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type="str", required=True, aliases=["login_db"]),
lang=dict(type="str", required=True, aliases=["name"]),
state=dict(type="str", default="present", choices=["absent", "present"]),
trust=dict(type="bool", default="no"),
force_trust=dict(type="bool", default="no"),
cascade=dict(type="bool", default="no"),
fail_on_drop=dict(type="bool", default="yes"),
session_role=dict(type="str"),
owner=dict(type="str"),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
db = module.params["db"]
lang = module.params["lang"]
state = module.params["state"]
trust = module.params["trust"]
force_trust = module.params["force_trust"]
cascade = module.params["cascade"]
fail_on_drop = module.params["fail_on_drop"]
owner = module.params["owner"]
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor()
changed = False
kw = {'db': db, 'lang': lang, 'trust': trust}
if state == "present":
if lang_exists(cursor, lang):
lang_trusted = lang_istrusted(cursor, lang)
if (lang_trusted and not trust) or (not lang_trusted and trust):
if module.check_mode:
changed = True
else:
changed = lang_altertrust(cursor, lang, trust)
else:
if module.check_mode:
changed = True
else:
changed = lang_add(cursor, lang, trust)
if force_trust:
changed = lang_altertrust(cursor, lang, trust)
else:
if lang_exists(cursor, lang):
if module.check_mode:
changed = True
kw['lang_dropped'] = True
else:
changed = lang_drop(cursor, lang, cascade)
if fail_on_drop and not changed:
msg = ("unable to drop language, use cascade "
"to delete dependencies or fail_on_drop=no to ignore")
module.fail_json(msg=msg)
kw['lang_dropped'] = changed
if owner and state == 'present':
if lang_exists(cursor, lang):
if owner != get_lang_owner(cursor, lang):
changed = set_lang_owner(cursor, lang, owner)
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
kw['queries'] = executed_queries
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,220 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_membership
short_description: Add or remove PostgreSQL roles from groups
description:
- Adds or removes PostgreSQL roles from groups (other roles).
- Users are roles with login privilege.
- Groups are PostgreSQL roles usually without LOGIN privilege.
- "Common use case:"
- 1) Create a new group (or groups) with the M(postgresql_user) module using I(role_attr_flags=NOLOGIN).
- 2) Grant the desired privileges to it with the M(postgresql_privs) module.
- 3) Add the desired PostgreSQL users to the new group (or groups) with this module
  (see EXAMPLES below for a sketch of these steps).
options:
groups:
description:
- The list of groups (roles) that need to be granted to or revoked from I(target_roles).
required: yes
type: list
elements: str
aliases:
- group
- source_role
- source_roles
target_roles:
description:
- The list of target roles (groups will be granted to them).
required: yes
type: list
elements: str
aliases:
- target_role
- users
- user
fail_on_role:
description:
- If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
default: yes
type: bool
state:
description:
- Membership state.
    - I(state=present) implies the I(groups) must be granted to I(target_roles).
- I(state=absent) implies the I(groups) must be revoked from I(target_roles).
type: str
default: present
choices: [ absent, present ]
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
seealso:
- module: postgresql_user
- module: postgresql_privs
- module: postgresql_owner
- name: PostgreSQL role membership reference
description: Complete reference of the PostgreSQL role membership documentation.
link: https://www.postgresql.org/docs/current/role-membership.html
- name: PostgreSQL role attributes reference
description: Complete reference of the PostgreSQL role attributes documentation.
link: https://www.postgresql.org/docs/current/role-attributes.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Grant role read_only to alice and bob
postgresql_membership:
group: read_only
target_roles:
- alice
- bob
state: present
# You can also pass the role list as a comma-separated string, e.g. target_roles: alice,bob
- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
postgresql_membership:
groups:
- read_only
- exec_func
target_role: bob
fail_on_role: no
state: absent
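# Below is a hedged sketch of the "common use case" from the description.
# Group, user, and database names are illustrative, and the postgresql_user
# and postgresql_privs parameters are assumed from those modules' docs.
- name: 1) Create group reporting_ro without LOGIN
  postgresql_user:
    name: reporting_ro
    role_attr_flags: NOLOGIN
- name: 2) Grant SELECT on all tables in schema public of database acme to reporting_ro
  postgresql_privs:
    database: acme
    privs: SELECT
    type: table
    objs: ALL_IN_SCHEMA
    schema: public
    roles: reporting_ro
- name: 3) Add users alice and bob to group reporting_ro
  postgresql_membership:
    group: reporting_ro
    target_roles:
      - alice
      - bob
    state: present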
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
  type: list
sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
granted:
description: Dict of granted groups and roles.
returned: if I(state=present)
type: dict
sample: { "ro_group": [ "alice", "bob" ] }
revoked:
description: Dict of revoked groups and roles.
returned: if I(state=absent)
type: dict
sample: { "ro_group": [ "alice", "bob" ] }
state:
description: Membership state that tried to be set.
returned: always
type: str
sample: "present"
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
PgMembership,
postgres_common_argument_spec,
)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
fail_on_role=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
groups = module.params['groups']
target_roles = module.params['target_roles']
fail_on_role = module.params['fail_on_role']
state = module.params['state']
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
    # Create the object and do the main job:
pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
if state == 'present':
pg_membership.grant()
elif state == 'absent':
pg_membership.revoke()
    # Roll back if possible and in check_mode:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
return_dict = dict(
changed=pg_membership.changed,
state=state,
groups=pg_membership.groups,
target_roles=pg_membership.target_roles,
queries=pg_membership.executed_queries,
)
if state == 'present':
return_dict['granted'] = pg_membership.granted
elif state == 'absent':
return_dict['revoked'] = pg_membership.revoked
module.exit_json(**return_dict)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,445 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_owner
short_description: Change an owner of PostgreSQL database object
description:
- Change an owner of PostgreSQL database object.
- Also allows reassigning the ownership of database objects owned by a database role to another role.
options:
new_owner:
description:
- Role (user/group) to set as an I(obj_name) owner.
type: str
required: yes
obj_name:
description:
    - Name of the database object whose ownership should be changed.
- Mutually exclusive with I(reassign_owned_by).
type: str
obj_type:
description:
- Type of a database object.
- Mutually exclusive with I(reassign_owned_by).
type: str
choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
aliases:
- type
reassign_owned_by:
description:
    - The list of role names. The ownership of all the objects within the current database,
      and of all shared objects (databases, tablespaces), owned by these roles will be reassigned to I(new_owner).
    - Pay attention - it reassigns all objects owned by these roles in the I(db)!
    - If a role exists, this always returns C(changed=true).
    - Cannot reassign ownership of objects that are required by the database system.
    - Mutually exclusive with I(obj_type).
type: list
elements: str
fail_on_role:
description:
- If C(yes), fail when I(reassign_owned_by) role does not exist.
Otherwise just warn and continue.
- Mutually exclusive with I(obj_name) and I(obj_type).
default: yes
type: bool
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
seealso:
- module: postgresql_user
- module: postgresql_privs
- module: postgresql_membership
- name: PostgreSQL REASSIGN OWNED command reference
description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
- name: The same as above by playbook
postgresql_owner:
db: bar
new_owner: alice
obj_name: myfunc
obj_type: function
- name: Set owner as bob for table acme in database bar
postgresql_owner:
db: bar
new_owner: bob
obj_name: acme
obj_type: table
- name: Set owner as alice for view test_view in database bar
postgresql_owner:
db: bar
new_owner: alice
obj_name: test_view
obj_type: view
- name: Set owner as bob for tablespace ssd in database foo
postgresql_owner:
db: foo
new_owner: bob
obj_name: ssd
obj_type: tablespace
- name: Reassign all objects in database bar owned by bob to alice
postgresql_owner:
db: bar
new_owner: alice
reassign_owned_by: bob
- name: Reassign all objects in database bar owned by bob and bill to alice
postgresql_owner:
db: bar
new_owner: alice
reassign_owned_by:
- bob
- bill
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
  type: list
sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
class PgOwnership(object):
"""Class for changing ownership of PostgreSQL objects.
Arguments:
module (AnsibleModule): Object of Ansible module class.
cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
role (str): Role name to set as a new owner of objects.
Important:
    If you want to add handling of a new type of database object:
1. Add a specific method for this like self.__set_db_owner(), etc.
2. Add a condition with a check of ownership for new type objects to self.__is_owner()
3. Add a condition with invocation of the specific method to self.set_owner()
4. Add the information to the module documentation
That's all.
"""
def __init__(self, module, cursor, role):
self.module = module
self.cursor = cursor
self.check_role_exists(role)
self.role = role
self.changed = False
self.executed_queries = []
self.obj_name = ''
self.obj_type = ''
def check_role_exists(self, role, fail_on_role=True):
"""Check the role exists or not.
Arguments:
role (str): Role name.
fail_on_role (bool): If True, fail when the role does not exist.
Otherwise just warn and continue.
"""
if not self.__role_exists(role):
if fail_on_role:
self.module.fail_json(msg="Role '%s' does not exist" % role)
else:
self.module.warn("Role '%s' does not exist, pass" % role)
return False
else:
return True
def reassign(self, old_owners, fail_on_role):
"""Implements REASSIGN OWNED BY command.
If success, set self.changed as True.
Arguments:
old_owners (list): The ownership of all the objects within
the current database, and of all shared objects (databases, tablespaces),
owned by these roles will be reassigned to self.role.
fail_on_role (bool): If True, fail when a role from old_owners does not exist.
Otherwise just warn and continue.
"""
roles = []
for r in old_owners:
if self.check_role_exists(r, fail_on_role):
roles.append(pg_quote_identifier(r, 'role'))
# Roles do not exist, nothing to do, exit:
if not roles:
return False
old_owners = ','.join(roles)
query = ['REASSIGN OWNED BY']
query.append(old_owners)
query.append('TO %s' % pg_quote_identifier(self.role, 'role'))
query = ' '.join(query)
self.changed = exec_sql(self, query, ddl=True)
def set_owner(self, obj_type, obj_name):
"""Change owner of a database object.
Arguments:
obj_type (str): Type of object (like database, table, view, etc.).
obj_name (str): Object name.
"""
self.obj_name = obj_name
self.obj_type = obj_type
        # If the new owner already owns the object,
        # there is nothing to do:
if self.__is_owner():
return False
if obj_type == 'database':
self.__set_db_owner()
elif obj_type == 'function':
self.__set_func_owner()
elif obj_type == 'sequence':
self.__set_seq_owner()
elif obj_type == 'schema':
self.__set_schema_owner()
elif obj_type == 'table':
self.__set_table_owner()
elif obj_type == 'tablespace':
self.__set_tablespace_owner()
elif obj_type == 'view':
self.__set_view_owner()
elif obj_type == 'matview':
self.__set_mat_view_owner()
def __is_owner(self):
"""Return True if self.role is the current object owner."""
if self.obj_type == 'table':
query = ("SELECT 1 FROM pg_tables "
"WHERE tablename = %(obj_name)s "
"AND tableowner = %(role)s")
elif self.obj_type == 'database':
query = ("SELECT 1 FROM pg_database AS d "
"JOIN pg_roles AS r ON d.datdba = r.oid "
"WHERE d.datname = %(obj_name)s "
"AND r.rolname = %(role)s")
elif self.obj_type == 'function':
query = ("SELECT 1 FROM pg_proc AS f "
"JOIN pg_roles AS r ON f.proowner = r.oid "
"WHERE f.proname = %(obj_name)s "
"AND r.rolname = %(role)s")
elif self.obj_type == 'sequence':
query = ("SELECT 1 FROM pg_class AS c "
"JOIN pg_roles AS r ON c.relowner = r.oid "
"WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
"AND r.rolname = %(role)s")
elif self.obj_type == 'schema':
query = ("SELECT 1 FROM information_schema.schemata "
"WHERE schema_name = %(obj_name)s "
"AND schema_owner = %(role)s")
elif self.obj_type == 'tablespace':
query = ("SELECT 1 FROM pg_tablespace AS t "
"JOIN pg_roles AS r ON t.spcowner = r.oid "
"WHERE t.spcname = %(obj_name)s "
"AND r.rolname = %(role)s")
elif self.obj_type == 'view':
query = ("SELECT 1 FROM pg_views "
"WHERE viewname = %(obj_name)s "
"AND viewowner = %(role)s")
elif self.obj_type == 'matview':
query = ("SELECT 1 FROM pg_matviews "
"WHERE matviewname = %(obj_name)s "
"AND matviewowner = %(role)s")
query_params = {'obj_name': self.obj_name, 'role': self.role}
return exec_sql(self, query, query_params, add_to_executed=False)
def __set_db_owner(self):
"""Set the database owner."""
query = "ALTER DATABASE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'database'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_func_owner(self):
"""Set the function owner."""
query = "ALTER FUNCTION %s OWNER TO %s" % (self.obj_name,
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_seq_owner(self):
"""Set the sequence owner."""
query = "ALTER SEQUENCE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_schema_owner(self):
"""Set the schema owner."""
query = "ALTER SCHEMA %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'schema'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_table_owner(self):
"""Set the table owner."""
query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_tablespace_owner(self):
"""Set the tablespace owner."""
query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'database'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_view_owner(self):
"""Set the view owner."""
query = "ALTER VIEW %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
def __set_mat_view_owner(self):
"""Set the materialized view owner."""
query = "ALTER MATERIALIZED VIEW %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'),
pg_quote_identifier(self.role, 'role'))
self.changed = exec_sql(self, query, ddl=True)
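    # A hypothetical sketch of wiring in a new object type, following the steps
    # from the class docstring (not part of the module; the FOREIGN TABLE type
    # and method name are illustrative only):
    #
    # def __set_foreign_table_owner(self):
    #     """Set the foreign table owner."""
    #     query = "ALTER FOREIGN TABLE %s OWNER TO %s" % (pg_quote_identifier(self.obj_name, 'table'),
    #                                                     pg_quote_identifier(self.role, 'role'))
    #     self.changed = exec_sql(self, query, ddl=True)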
def __role_exists(self, role):
"""Return True if role exists, otherwise return False."""
query_params = {'role': role}
query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
return exec_sql(self, query, query_params, add_to_executed=False)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
new_owner=dict(type='str', required=True),
obj_name=dict(type='str'),
obj_type=dict(type='str', aliases=['type'], choices=[
'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
reassign_owned_by=dict(type='list', elements='str'),
fail_on_role=dict(type='bool', default=True),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['obj_name', 'reassign_owned_by'],
['obj_type', 'reassign_owned_by'],
['obj_name', 'fail_on_role'],
['obj_type', 'fail_on_role'],
],
supports_check_mode=True,
)
new_owner = module.params['new_owner']
obj_name = module.params['obj_name']
obj_type = module.params['obj_type']
reassign_owned_by = module.params['reassign_owned_by']
fail_on_role = module.params['fail_on_role']
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
    # Create the object and do the main job:
pg_ownership = PgOwnership(module, cursor, new_owner)
# if we want to change ownership:
if obj_name:
pg_ownership.set_owner(obj_type, obj_name)
# if we want to reassign objects owned by roles:
elif reassign_owned_by:
pg_ownership.reassign(reassign_owned_by, fail_on_role)
    # Roll back if possible and in check_mode:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
module.exit_json(
changed=pg_ownership.changed,
queries=pg_ownership.executed_queries,
)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,748 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
'''
This module is used to manage postgres pg_hba files with Ansible.
'''
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_pg_hba
short_description: Add, remove or modify a rule in a pg_hba file
description:
- The fundamental function of the module is to create, or delete lines in pg_hba files.
- The lines in the file should be in a typical pg_hba form and should be unique per key (type, databases, users, source).
  If they are not unique and the SID is 'the one to change', only one of the SIDs will remain for C(state=present), or none for C(state=absent).
extends_documentation_fragment: files
options:
address:
description:
- The source address/net where the connections could come from.
- Will not be used for entries of I(type)=C(local).
- You can also use keywords C(all), C(samehost), and C(samenet).
default: samehost
type: str
aliases: [ source, src ]
backup:
description:
    - If set, create a backup of the C(pg_hba) file before it is modified.
      The location of the backup is returned in the C(backup_file) return value by this module.
default: false
type: bool
backup_file:
description:
    - Write the backup to a specific backup file rather than a temp file.
type: str
create:
description:
    - Create a C(pg_hba) file if none exists.
- When set to false, an error is raised when the C(pg_hba) file doesn't exist.
default: false
type: bool
contype:
description:
    - Type of the rule. If not set, C(postgresql_pg_hba) will only return the file contents.
type: str
choices: [ local, host, hostnossl, hostssl ]
databases:
description:
- Databases this line applies to.
default: all
type: str
dest:
description:
- Path to C(pg_hba) file to modify.
type: path
required: true
method:
description:
- Authentication method to be used.
type: str
    choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
default: md5
netmask:
description:
- The netmask of the source address.
type: str
options:
description:
- Additional options for the authentication I(method).
type: str
order:
description:
- The entries will be written out in a specific order.
With this option you can control by which field they are ordered first, second and last.
s=source, d=databases, u=users.
      This option is deprecated since 2.9 and will be removed in 2.11.
      The sort order is now hardcoded to C(sdu).
type: str
default: sdu
choices: [ sdu, sud, dsu, dus, usd, uds ]
state:
description:
- The lines will be added/modified when C(state=present) and removed when C(state=absent).
type: str
default: present
choices: [ absent, present ]
users:
description:
- Users this line applies to.
type: str
default: all
notes:
- The default authentication assumes that on the host, you are either logging in as or
sudo'ing to an account with appropriate permissions to read and modify the file.
- This module also returns the pg_hba info. You can use this module to only retrieve it by specifying I(dest) alone.
  The info can be found in the returned data under key pg_hba, being a list, containing a dict per rule.
- This module will sort the resulting C(pg_hba) file if a rule change is required.
  This could give unexpected results with manually created hba files that were improperly sorted.
  For example, if a rule was created for a net first and for an IP in that net range next,
  the IP-specific rule will never hit and is effectively obsolete in the C(pg_hba) file.
  After the C(pg_hba) file is rewritten by the M(postgresql_pg_hba) module, the IP-specific rule will be sorted above the range rule.
  It will then hit, which may give unexpected results.
- With the 'order' parameter you can control which field is used to sort first, next and last.
- The module supports a check mode and a diff mode.
seealso:
- name: PostgreSQL pg_hba.conf file reference
description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
requirements:
- ipaddress
author: Sebastiaan Mannem (@sebasmannem)
'''
EXAMPLES = '''
- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: host
users: joe,simon
source: ::1
databases: sales,logistics
method: peer
create: true
- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: host
users: replication
source: 192.168.0.100/24
databases: replication
method: cert
- name: Revoke access from local user mary on database mydb.
postgresql_pg_hba:
dest: /var/lib/postgres/data/pg_hba.conf
contype: local
users: mary
databases: mydb
state: absent
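
# A minimal sketch of keeping a backup of the previous file (the backup path is illustrative):
- name: Grant md5 access to user pgadmin from net 10.0.0.0/8 and back up the old file.
  postgresql_pg_hba:
    dest: /var/lib/postgres/data/pg_hba.conf
    contype: host
    users: pgadmin
    source: 10.0.0.0/8
    databases: all
    method: md5
    backup: true
    backup_file: /tmp/pg_hba.conf.bak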
'''
RETURN = r'''
msgs:
  description: List of textual messages describing what was done
returned: always
type: list
sample:
"msgs": [
"Removing",
"Changed",
"Writing"
]
backup_file:
description: File that the original pg_hba file was backed up to
returned: changed
type: str
sample: /tmp/pg_hba_jxobj_p
pg_hba:
description: List of the pg_hba rules as they are configured in the specified hba file
returned: always
type: list
sample:
"pg_hba": [
{
"db": "all",
"method": "md5",
"src": "samehost",
"type": "host",
"usr": "all"
}
]
'''
import os
import re
import traceback
IPADDRESS_IMP_ERR = None
try:
import ipaddress
except ImportError:
IPADDRESS_IMP_ERR = traceback.format_exc()
import tempfile
import shutil
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# from ansible.module_utils.postgres import postgres_common_argument_spec
PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
"ldap", "radius", "cert", "pam", "scram-sha-256"]
PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
WHITESPACES_RE = re.compile(r'\s+')
class PgHbaError(Exception):
'''
This exception is raised when parsing the pg_hba file ends in an error.
'''
class PgHbaRuleError(PgHbaError):
    '''
    This exception is raised when a rule in the pg_hba file is invalid or cannot be parsed.
    '''
class PgHbaRuleChanged(PgHbaRuleError):
'''
This exception is raised when a new parsed rule is a changed version of an existing rule.
'''
class PgHbaValueError(PgHbaError):
    '''
    This exception is raised when a value in the pg_hba file is invalid.
    '''
class PgHbaRuleValueError(PgHbaRuleError):
    '''
    This exception is raised when a value in a rule is invalid.
    '''
class PgHba(object):
"""
PgHba object to read/write entries to/from.
    pg_hba_file - the pg_hba file, usually located in the PostgreSQL data directory (for example /var/lib/pgsql/data/pg_hba.conf)
"""
def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
if order not in PG_HBA_ORDERS:
msg = "invalid order setting {0} (should be one of '{1}')."
raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
self.pg_hba_file = pg_hba_file
self.rules = None
self.comment = None
self.order = order
self.backup = backup
self.last_backup = None
self.create = create
self.unchanged()
        # self.databases will be updated by add_rule and gives some idea of the number of databases
        # (at least the ones that are handled by this pg_hba)
        self.databases = set(['postgres', 'template0', 'template1'])
        # self.users will be updated by add_rule and gives some idea of the number of users
        # (at least the ones that are handled by this pg_hba); since these might also be groups with
        # multiple users, this might be totally off, but at least it is some info...
self.users = set(['postgres'])
self.read()
def unchanged(self):
'''
        This method resets self.diff to an empty default
'''
self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
'after': {'file': self.pg_hba_file, 'pg_hba': []}}
def read(self):
'''
Read in the pg_hba from the system
'''
self.rules = {}
self.comment = []
        # read the pg_hba file
try:
with open(self.pg_hba_file, 'r') as file:
for line in file:
line = line.strip()
                    # split off any trailing comment
if '#' in line:
line, comment = line.split('#', 1)
self.comment.append('#' + comment)
try:
self.add_rule(PgHbaRule(line=line))
except PgHbaRuleError:
pass
self.unchanged()
except IOError:
pass
def write(self, backup_file=''):
'''
This method writes the PgHba rules (back) to a file.
'''
if not self.changed():
return False
contents = self.render()
if self.pg_hba_file:
if not (os.path.isfile(self.pg_hba_file) or self.create):
raise PgHbaError("pg_hba file '{0}' doesn't exist. "
"Use create option to autocreate.".format(self.pg_hba_file))
if self.backup and os.path.isfile(self.pg_hba_file):
if backup_file:
self.last_backup = backup_file
else:
__backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
shutil.copy(self.pg_hba_file, self.last_backup)
fileh = open(self.pg_hba_file, 'w')
else:
filed, __path = tempfile.mkstemp(prefix='pg_hba')
fileh = os.fdopen(filed, 'w')
fileh.write(contents)
self.unchanged()
fileh.close()
return True
def add_rule(self, rule):
'''
This method can be used to add a rule to the list of rules in this PgHba object
'''
key = rule.key()
try:
try:
oldrule = self.rules[key]
except KeyError:
raise PgHbaRuleChanged
ekeys = set(list(oldrule.keys()) + list(rule.keys()))
ekeys.remove('line')
for k in ekeys:
if oldrule[k] != rule[k]:
raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
except PgHbaRuleChanged:
self.rules[key] = rule
self.diff['after']['pg_hba'].append(rule.line())
if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
databases = set(rule['db'].split(','))
self.databases.update(databases)
if rule['usr'] != 'all':
user = rule['usr']
if user[0] == '+':
user = user[1:]
self.users.add(user)
def remove_rule(self, rule):
'''
This method can be used to find and remove a rule. It doesn't look for the exact rule, only
the rule with the same key.
'''
keys = rule.key()
try:
del self.rules[keys]
self.diff['before']['pg_hba'].append(rule.line())
except KeyError:
pass
def get_rules(self, with_lines=False):
'''
This method returns all the rules of the PgHba object
'''
rules = sorted(self.rules.values())
for rule in rules:
ret = {}
for key, value in rule.items():
ret[key] = value
if not with_lines:
if 'line' in ret:
del ret['line']
else:
ret['line'] = rule.line()
yield ret
def render(self):
'''
This method renders the content of the PgHba rules and comments.
        The returned value can be used directly to write to a new file.
'''
comment = '\n'.join(self.comment)
rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
result = comment + '\n' + rule_lines
# End it properly with a linefeed (if not already).
if result and result[-1] not in ['\n', '\r']:
result += '\n'
return result
def changed(self):
'''
This method can be called to detect if the PgHba file has been changed.
'''
return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
class PgHbaRule(dict):
'''
This class represents one rule as defined in a line in a PgHbaFile.
'''
def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
method=None, options=None, line=None):
'''
        This function can be called with a comma separated list of databases and a comma separated
        list of users and it will act as a generator that returns an expanded list of rules one by
        one.
'''
super(PgHbaRule, self).__init__()
if line:
# Read values from line if parsed
self.fromline(line)
# read rule cols from parsed items
rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
for key, value in rule.items():
if value:
self[key] = value
# Some sanity checks
for key in ['method', 'type']:
if key not in self:
raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
if self['method'] not in PG_HBA_METHODS:
msg = "invalid method {0} (should be one of '{1}')."
raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
if self['type'] not in PG_HBA_TYPES:
msg = "invalid connection type {0} (should be one of '{1}')."
raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
if self['type'] == 'local':
self.unset('src')
self.unset('mask')
elif 'src' not in self:
            raise PgHbaRuleError('Missing src in rule {0}'.format(self))
elif '/' in self['src']:
self.unset('mask')
else:
self['src'] = str(self.source())
self.unset('mask')
def unset(self, key):
'''
This method is used to unset certain columns if they exist
'''
if key in self:
del self[key]
def line(self):
'''
This method can be used to return (or generate) the line
'''
try:
return self['line']
except KeyError:
self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
return self['line']
def fromline(self, line):
'''
split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
'''
if WHITESPACES_RE.sub('', line) == '':
# empty line. skip this one...
return
cols = WHITESPACES_RE.split(line)
if len(cols) < 4:
msg = "Rule {0} has too few columns."
raise PgHbaValueError(msg.format(line))
if cols[0] not in PG_HBA_TYPES:
msg = "Rule {0} has unknown type: {1}."
raise PgHbaValueError(msg.format(line, cols[0]))
if cols[0] == 'local':
cols.insert(3, None) # No address
cols.insert(3, None) # No IP-mask
if len(cols) < 6:
cols.insert(4, None) # No IP-mask
elif cols[5] not in PG_HBA_METHODS:
cols.insert(4, None) # No IP-mask
if cols[5] not in PG_HBA_METHODS:
raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
if len(cols) < 7:
cols.insert(6, None) # No auth-options
else:
cols[6] = " ".join(cols[6:]) # combine all auth-options
rule = dict(zip(PG_HBA_HDR, cols[:7]))
for key, value in rule.items():
if value:
self[key] = value
def key(self):
'''
This method can be used to get the key from a rule.
'''
if self['type'] == 'local':
source = 'local'
else:
source = str(self.source())
return (source, self['db'], self['usr'])
def source(self):
'''
This method is used to get the source of a rule as an ipaddress object if possible.
'''
if 'mask' in self.keys():
try:
ipaddress.ip_address(u'{0}'.format(self['src']))
except ValueError:
                raise PgHbaValueError('Mask was specified, but source "{0}" '
                                      'is not a valid ip'.format(self['src']))
            # The ipaddress module cannot work with an ipv6 netmask, so let's convert it to a prefixlen.
            # Furthermore, ipv4 with a bad netmask throws a "Rule {} doesn't seem to be an ip, but has a
            # mask" error that doesn't really describe what is going on.
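            # Worked example: mask 255.255.255.0 -> int 4294967040 ->
            # binary '11111111111111111111111100000000' -> 24 ones -> prefixlen 24.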
try:
mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
except ValueError:
raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
binvalue = "{0:b}".format(int(mask_as_ip))
if '01' in binvalue:
raise PgHbaValueError('IP mask {0} seems invalid '
'(binary value has 1 after 0)'.format(self['mask']))
prefixlen = binvalue.count('1')
sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
try:
return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
except ValueError:
                raise PgHbaValueError('{0} is not a valid address range'.format(sourcenw))
try:
return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
except ValueError:
return self['src']
def __lt__(self, other):
"""This function helps sorted to decide how to sort.
It just checks itself against the other and decides on some key values
if it should be sorted higher or lower in the list.
The way it works:
For networks, every 1 in 'netmask in binary' makes the subnet more specific.
        Therefore I chose to use the prefix length as the weight.
So a single IP (/32) should have twice the weight of a /16 network.
To keep everything in the same weight scale,
- for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
- for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
which corresponds to ipv6 (0-128).
"""
myweight = self.source_weight()
hisweight = other.source_weight()
if myweight != hisweight:
return myweight > hisweight
myweight = self.db_weight()
hisweight = other.db_weight()
if myweight != hisweight:
return myweight < hisweight
myweight = self.user_weight()
hisweight = other.user_weight()
if myweight != hisweight:
return myweight < hisweight
try:
return self['src'] < other['src']
except TypeError:
return self.source_type_weight() < other.source_type_weight()
except Exception:
# When all else fails, just compare the exact line.
return self.line() < other.line()
def source_weight(self):
"""Report the weight of this source net.
Basically this is the netmask, where IPv4 is normalized to IPv6
(IPv4/32 has the same weight as IPv6/128).
"""
if self['type'] == 'local':
return 130
sourceobj = self.source()
if isinstance(sourceobj, ipaddress.IPv4Network):
return sourceobj.prefixlen * 4
if isinstance(sourceobj, ipaddress.IPv6Network):
return sourceobj.prefixlen
if isinstance(sourceobj, str):
# You can also write all to match any IP address,
# samehost to match any of the server's own IP addresses,
# or samenet to match any address in any subnet that the server is connected to.
if sourceobj == 'all':
# (all is considered the full range of all ips, which has a weight of 0)
return 0
if sourceobj == 'samehost':
# (sort samehost second after local)
return 129
if sourceobj == 'samenet':
                # Might write some fancy code to determine all prefixes
# from all interfaces and find a sane value for this one.
# For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
return 96
if sourceobj[0] == '.':
# suffix matching (domain name), let's assume a very large scale
# and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
return 64
# hostname, let's assume only one host matches, which is
# IPv4/32 or IPv6/128 (both have weight 128)
return 128
        raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
def source_type_weight(self):
"""Give a weight on the type of this source.
Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
This is a 'when all else fails' solution in __lt__.
"""
if self['type'] == 'local':
return 3
sourceobj = self.source()
if isinstance(sourceobj, ipaddress.IPv4Network):
return 2
if isinstance(sourceobj, ipaddress.IPv6Network):
return 1
if isinstance(sourceobj, str):
return 0
raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
def db_weight(self):
"""Report the weight of the database.
        Normally just 1, but for replication this is 0, and for 'all' this is much larger (100000).
"""
if self['db'] == 'all':
return 100000
if self['db'] == 'replication':
return 0
if self['db'] in ['samerole', 'samegroup']:
return 1
return 1 + self['db'].count(',')
def user_weight(self):
"""Report weight when comparing users."""
if self['usr'] == 'all':
return 1000000
return 1
def main():
'''
This function is the main function of this module
'''
# argument_spec = postgres_common_argument_spec()
argument_spec = dict()
argument_spec.update(
address=dict(type='str', default='samehost', aliases=['source', 'src']),
backup=dict(type='bool', default=False),
backup_file=dict(type='str'),
contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
create=dict(type='bool', default=False),
databases=dict(type='str', default='all'),
dest=dict(type='path', required=True),
method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
netmask=dict(type='str'),
options=dict(type='str'),
order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS),
state=dict(type='str', default="present", choices=["absent", "present"]),
users=dict(type='str', default='all')
)
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True
)
if IPADDRESS_IMP_ERR is not None:
module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
contype = module.params["contype"]
create = bool(module.params["create"] or module.check_mode)
if module.check_mode:
backup = False
else:
backup = module.params['backup']
backup_file = module.params['backup_file']
databases = module.params["databases"]
dest = module.params["dest"]
method = module.params["method"]
netmask = module.params["netmask"]
options = module.params["options"]
order = module.params["order"]
source = module.params["address"]
state = module.params["state"]
users = module.params["users"]
ret = {'msgs': []}
try:
pg_hba = PgHba(dest, order, backup=backup, create=create)
except PgHbaError as error:
module.fail_json(msg='Error reading file:\n{0}'.format(error))
if contype:
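        # Each (database, user) combination becomes its own rule; for example,
        # databases 'sales,logistics' with users 'joe,simon' expand to 4 rules.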
try:
for database in databases.split(','):
for user in users.split(','):
rule = PgHbaRule(contype, database, user, source, netmask, method, options)
if state == "present":
ret['msgs'].append('Adding')
pg_hba.add_rule(rule)
else:
ret['msgs'].append('Removing')
pg_hba.remove_rule(rule)
except PgHbaError as error:
module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
file_args = module.load_file_common_arguments(module.params)
ret['changed'] = changed = pg_hba.changed()
if changed:
ret['msgs'].append('Changed')
ret['diff'] = pg_hba.diff
if not module.check_mode:
ret['msgs'].append('Writing')
try:
if pg_hba.write(backup_file):
module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
expand=False)
except PgHbaError as error:
module.fail_json(msg='Error writing file:\n{0}'.format(error))
if pg_hba.last_backup:
ret['backup_file'] = pg_hba.last_backup
ret['pg_hba'] = list(pg_hba.get_rules())
module.exit_json(**ret)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,152 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_ping
short_description: Check remote PostgreSQL server availability
description:
- Simple module to check remote PostgreSQL server availability.
options:
db:
description:
- Name of a database to connect to.
type: str
aliases:
- login_db
seealso:
- module: postgresql_info
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
# PostgreSQL ping dbsrv server from the shell:
# ansible dbsrv -m postgresql_ping
# In the example below you need to have generated the certificates beforehand.
# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
postgresql_ping:
db: protected_db
login_host: dbsrv
login_user: secret
login_password: secret_pass
ca_cert: /root/root.crt
ssl_mode: verify-full
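
# A minimal sketch of acting on the returned facts (the variable name is arbitrary):
- name: Ping the server and register the result
  postgresql_ping:
    db: protected_db
  register: ping_result

- name: Fail early when the server is not available
  fail:
    msg: PostgreSQL server is not available
  when: not ping_result.is_available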
'''
RETURN = r'''
is_available:
description: PostgreSQL server availability.
returned: always
type: bool
sample: true
server_version:
description: PostgreSQL server version.
returned: always
type: dict
sample: { major: 10, minor: 1 }
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class PgPing(object):
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.is_available = False
self.version = {}
def do(self):
self.get_pg_version()
return (self.is_available, self.version)
def get_pg_version(self):
query = "SELECT version()"
raw = exec_sql(self, query, add_to_executed=False)[0][0]
if raw:
self.is_available = True
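            # e.g. "PostgreSQL 10.1 on x86_64-pc-linux-gnu ..." -> "10.1" -> ['10', '1']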
raw = raw.split()[1].split('.')
self.version = dict(
major=int(raw[0]),
minor=int(raw[1]),
)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
# Set some default values:
cursor = False
db_connection = False
result = dict(
changed=False,
is_available=False,
server_version=dict(),
)
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
if db_connection is not None:
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Do job:
pg_ping = PgPing(module, cursor)
if cursor:
# If connection established:
result["is_available"], result["server_version"] = pg_ping.do()
db_connection.rollback()
module.exit_json(**result)
if __name__ == '__main__':
main()

File diff suppressed because it is too large

View file

@@ -0,0 +1,655 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_publication
short_description: Add, update, or remove PostgreSQL publication
description:
- Add, update, or remove PostgreSQL publication.
options:
name:
description:
- Name of the publication to add, update, or remove.
required: true
type: str
db:
description:
- Name of the database to connect to and where
the publication state will be changed.
aliases: [ login_db ]
type: str
tables:
description:
- List of tables to add to the publication.
    - If no value is set, all tables are targeted.
    - If the publication already exists for specific tables and I(tables) is not passed,
      nothing will be changed. If you need to add all tables to the publication with the same name,
      drop the existent publication and create a new one without passing I(tables).
type: list
elements: str
state:
description:
- The publication state.
default: present
choices: [ absent, present ]
type: str
parameters:
description:
- Dictionary with optional publication parameters.
- Available parameters depend on PostgreSQL version.
type: dict
owner:
description:
- Publication owner.
    - If I(owner) is not defined, the owner will be set to I(login_user) or I(session_role).
type: str
cascade:
description:
- Drop publication dependencies. Has effect with I(state=absent) only.
type: bool
default: false
notes:
- PostgreSQL version must be 10 or greater.
seealso:
- name: CREATE PUBLICATION reference
description: Complete reference of the CREATE PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-createpublication.html
- name: ALTER PUBLICATION reference
description: Complete reference of the ALTER PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-alterpublication.html
- name: DROP PUBLICATION reference
description: Complete reference of the DROP PUBLICATION command documentation.
link: https://www.postgresql.org/docs/current/sql-droppublication.html
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create a new publication with name "acme" targeting all tables in database "test".
postgresql_publication:
db: test
name: acme
- name: Create publication "acme" publishing only prices and vehicles tables.
postgresql_publication:
name: acme
tables:
- prices
- vehicles
- name: >
Create publication "acme", set user alice as an owner, targeting all tables.
Allowable DML operations are INSERT and UPDATE only
postgresql_publication:
name: acme
owner: alice
parameters:
publish: 'insert,update'
- name: >
Assuming publication "acme" exists and there are targeted
tables "prices" and "vehicles", add table "stores" to the publication.
postgresql_publication:
name: acme
tables:
- prices
- vehicles
- stores
- name: Remove publication "acme" if exists in database "test".
postgresql_publication:
db: test
name: acme
state: absent
'''
RETURN = r'''
exists:
description:
  - Flag that indicates whether the publication exists at the end of runtime.
returned: always
type: bool
sample: true
queries:
description: List of executed queries.
returned: always
  type: list
sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
owner:
description: Owner of the publication at the end of runtime.
returned: if publication exists
type: str
sample: "alice"
tables:
description:
- List of tables in the publication at the end of runtime.
  - If all tables are published, returns an empty list.
returned: if publication exists
type: list
sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
alltables:
description:
  - Flag that indicates whether all tables are published.
returned: if publication exists
type: bool
sample: false
parameters:
description: Publication parameters at the end of runtime.
returned: if publication exists
type: dict
sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
SUPPORTED_PG_VERSION = 10000
################################
# Module functions and classes #
################################
def transform_tables_representation(tbl_list):
"""Add 'public.' to names of tables where a schema identifier is absent
and add quotes to each element.
Args:
tbl_list (list): List of table names.
Returns:
tbl_list (list): Changed list.
"""
for i, table in enumerate(tbl_list):
if '.' not in table:
tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
else:
tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
return tbl_list
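# A minimal usage sketch (table names are illustrative):
#   transform_tables_representation(['prices', 'store.vehicles'])
#   -> ['"public"."prices"', '"store"."vehicles"']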
class PgPublication():
"""Class to work with PostgreSQL publication.
Args:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
name (str): The name of the publication.
Attributes:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
name (str): Name of the publication.
executed_queries (list): List of executed queries.
attrs (dict): Dict with publication attributes.
exists (bool): Flag indicates the publication exists or not.
"""
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.executed_queries = []
self.attrs = {
'alltables': False,
'tables': [],
'parameters': {},
'owner': '',
}
self.exists = self.check_pub()
def get_info(self):
"""Refresh the publication information.
Returns:
``self.attrs``.
"""
self.exists = self.check_pub()
return self.attrs
def check_pub(self):
"""Check the publication and refresh ``self.attrs`` publication attribute.
Returns:
True if the publication with ``self.name`` exists, False otherwise.
"""
pub_info = self.__get_general_pub_info()
if not pub_info:
# Publication does not exist:
return False
self.attrs['owner'] = pub_info.get('pubowner')
# Publication DML operations:
self.attrs['parameters']['publish'] = {}
self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
if pub_info.get('pubtruncate'):
self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
# If alltables flag is False, get the list of targeted tables:
if not pub_info.get('puballtables'):
table_info = self.__get_tables_pub_info()
# Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
# for better representation:
for i, schema_and_table in enumerate(table_info):
table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
self.attrs['tables'] = table_info
else:
self.attrs['alltables'] = True
# Publication exists:
return True
def create(self, tables, params, owner, check_mode=True):
"""Create the publication.
Args:
tables (list): List with names of the tables that need to be added to the publication.
params (dict): Dict contains optional publication parameters and their values.
owner (str): Name of the publication owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if publication has been created, otherwise False.
"""
changed = True
query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
if tables:
query_fragments.append("FOR TABLE %s" % ', '.join(tables))
else:
query_fragments.append("FOR ALL TABLES")
if params:
params_list = []
# Make list ["param = 'value'", ...] from params dict:
for (key, val) in iteritems(params):
params_list.append("%s = '%s'" % (key, val))
# Add the list to query_fragments:
query_fragments.append("WITH (%s)" % ', '.join(params_list))
changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
if owner:
# If check_mode, just add possible SQL to
# executed_queries and return:
self.__pub_set_owner(owner, check_mode=check_mode)
return changed
def update(self, tables, params, owner, check_mode=True):
"""Update the publication.
Args:
            tables (list): List with names of the tables that need to be present in the publication.
params (dict): Dict contains optional publication parameters and their values.
owner (str): Name of the publication owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if publication has been updated, otherwise False.
"""
changed = False
        # Add or drop tables from the set of published tables:
if tables and not self.attrs['alltables']:
            # 1. If a table needs to be added to the publication:
            for tbl in tables:
                if tbl not in self.attrs['tables']:
                    changed = self.__pub_add_table(tbl, check_mode=check_mode)
            # 2. If a table is currently published but not present
            # in the passed tables, drop it from the publication:
for tbl in self.attrs['tables']:
if tbl not in tables:
changed = self.__pub_drop_table(tbl, check_mode=check_mode)
elif tables and self.attrs['alltables']:
changed = self.__pub_set_tables(tables, check_mode=check_mode)
# Update pub parameters:
if params:
for key, val in iteritems(params):
if self.attrs['parameters'].get(key):
                    # In PostgreSQL 10/11 only the 'publish' optional parameter is present.
if key == 'publish':
# 'publish' value can be only a string with comma-separated items
# of allowed DML operations like 'insert,update' or
# 'insert,update,delete', etc.
# Make dictionary to compare with current attrs later:
val_dict = self.attrs['parameters']['publish'].copy()
val_list = val.split(',')
for v in val_dict:
if v in val_list:
val_dict[v] = True
else:
val_dict[v] = False
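                        # e.g. val = 'insert,update' with current publish dict
                        # {'insert': False, 'update': True, 'delete': True} yields
                        # val_dict {'insert': True, 'update': True, 'delete': False}.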
# Compare val_dict and the dict with current 'publish' parameters,
# if they're different, set new values:
if val_dict != self.attrs['parameters']['publish']:
changed = self.__pub_set_param(key, val, check_mode=check_mode)
# Default behavior for other cases:
elif self.attrs['parameters'][key] != val:
changed = self.__pub_set_param(key, val, check_mode=check_mode)
else:
# If the parameter was not set before:
changed = self.__pub_set_param(key, val, check_mode=check_mode)
# Update pub owner:
if owner:
if owner != self.attrs['owner']:
changed = self.__pub_set_owner(owner, check_mode=check_mode)
return changed
def drop(self, cascade=False, check_mode=True):
"""Drop the publication.
Kwargs:
cascade (bool): Flag indicates that publication needs to be deleted
with its dependencies.
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
            changed (bool): True if the publication has been dropped, otherwise False.
"""
if self.exists:
query_fragments = []
query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
if cascade:
query_fragments.append("CASCADE")
return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
def __get_general_pub_info(self):
"""Get and return general publication information.
Returns:
Dict with publication information if successful, False otherwise.
"""
# Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_publication' "
"AND column_name = 'pubtruncate'"), add_to_executed=False)
if pgtrunc_sup:
query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
"p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
"JOIN pg_catalog.pg_roles AS r "
"ON p.pubowner = r.oid "
"WHERE p.pubname = %(pname)s")
else:
query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
"p.pubupdate , p.pubdelete FROM pg_publication AS p "
"JOIN pg_catalog.pg_roles AS r "
"ON p.pubowner = r.oid "
"WHERE p.pubname = %(pname)s")
result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
if result:
return result[0]
else:
return False
def __get_tables_pub_info(self):
"""Get and return tables that are published by the publication.
Returns:
List of dicts with published tables.
"""
query = ("SELECT schemaname, tablename "
"FROM pg_publication_tables WHERE pubname = %(pname)s")
return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
def __pub_add_table(self, table, check_mode=False):
"""Add a table to the publication.
Args:
table (str): Table name.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
pg_quote_identifier(table, 'table')))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_drop_table(self, table, check_mode=False):
"""Drop a table from the publication.
Args:
table (str): Table name.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
pg_quote_identifier(table, 'table')))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_set_tables(self, tables, check_mode=False):
"""Set a table suit that need to be published by the publication.
Args:
tables (list): List of tables.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
', '.join(quoted_tables)))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_set_param(self, param, value, check_mode=False):
"""Set an optional publication parameter.
Args:
param (str): Name of the parameter.
value (str): Parameter value.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
param, value))
return self.__exec_sql(query, check_mode=check_mode)
def __pub_set_owner(self, role, check_mode=False):
"""Set a publication owner.
Args:
role (str): Role (user) name that needs to be set as a publication owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = ("ALTER PUBLICATION %s OWNER TO %s" % (pg_quote_identifier(self.name, 'publication'),
pg_quote_identifier(role, 'role')))
return self.__exec_sql(query, check_mode=check_mode)
def __exec_sql(self, query, check_mode=False):
"""Execute SQL query.
        Note: If we just need to get information from the database,
        we use the ``exec_sql`` function directly.
Args:
query (str): Query that needs to be executed.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just add ``query`` to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
if check_mode:
self.executed_queries.append(query)
return True
else:
return exec_sql(self, query, ddl=True)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
name=dict(required=True),
db=dict(type='str', aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present']),
tables=dict(type='list', elements='str'),
parameters=dict(type='dict'),
owner=dict(type='str'),
cascade=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
# Parameters handling:
name = module.params['name']
state = module.params['state']
tables = module.params['tables']
params = module.params['parameters']
owner = module.params['owner']
cascade = module.params['cascade']
if state == 'absent':
if tables:
module.warn('parameter "tables" is ignored when "state=absent"')
if params:
module.warn('parameter "parameters" is ignored when "state=absent"')
if owner:
module.warn('parameter "owner" is ignored when "state=absent"')
if state == 'present' and cascade:
module.warn('parameter "cascade" is ignored when "state=present"')
# Connect to DB and make cursor object:
conn_params = get_conn_params(module, module.params)
# We check publication state without DML queries execution, so set autocommit:
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Check version:
if cursor.connection.server_version < SUPPORTED_PG_VERSION:
module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
# Nothing was changed by default:
changed = False
###################################
# Create object and do rock'n'roll:
publication = PgPublication(module, cursor, name)
if tables:
tables = transform_tables_representation(tables)
# If module.check_mode=True, nothing will be changed:
if state == 'present':
if not publication.exists:
changed = publication.create(tables, params, owner, check_mode=module.check_mode)
else:
changed = publication.update(tables, params, owner, check_mode=module.check_mode)
elif state == 'absent':
changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
# Get final publication info:
pub_fin_info = {}
if state == 'present' or (state == 'absent' and module.check_mode):
pub_fin_info = publication.get_info()
elif state == 'absent' and not module.check_mode:
publication.exists = False
# Connection is not needed any more:
cursor.close()
db_connection.close()
# Update publication info and return ret values:
module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,363 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Felix Archambault
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_query
short_description: Run PostgreSQL queries
description:
- Runs arbitrary PostgreSQL queries.
- Can run queries from SQL script files.
- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
to run queries on files made by pg_dump/pg_dumpall utilities.
options:
query:
description:
- SQL query to run. Variables can be escaped with psycopg2 syntax
U(http://initd.org/psycopg/docs/usage.html).
type: str
positional_args:
description:
- List of values to be passed as positional arguments to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(named_args).
type: list
elements: raw
named_args:
description:
- Dictionary of key-value arguments to pass to the query.
When the value is a list, it will be converted to PostgreSQL array.
- Mutually exclusive with I(positional_args).
type: dict
path_to_script:
description:
- Path to SQL script on the remote host.
- Returns result of the last query in the script.
- Mutually exclusive with I(query).
type: path
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
autocommit:
description:
- Execute in autocommit mode when the query can't be run inside a transaction block
(e.g., VACUUM).
- Mutually exclusive with I(check_mode).
type: bool
default: no
encoding:
description:
- Set the client encoding for the current session (e.g. C(UTF-8)).
- The default is the encoding defined by the database.
type: str
seealso:
- module: postgresql_db
author:
- Felix Archambault (@archf)
- Andrew Klychkov (@Andersson007)
- Will Rouesnel (@wrouesnel)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Simple select query to acme db
postgresql_query:
db: acme
query: SELECT version()
- name: Select query to db acme with positional arguments and non-default credentials
postgresql_query:
db: acme
login_user: django
login_password: mysecretpass
query: SELECT * FROM acme WHERE id = %s AND story = %s
positional_args:
- 1
- test
- name: Select query to test_db with named_args
postgresql_query:
db: test_db
query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
named_args:
id_val: 1
story_val: test
- name: Insert query to test_table in db test_db
postgresql_query:
db: test_db
query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
- name: Run queries from SQL script using UTF-8 client encoding for session
postgresql_query:
db: test_db
path_to_script: /var/lib/pgsql/test.sql
positional_args:
- 1
encoding: UTF-8
- name: Example of using autocommit parameter
postgresql_query:
db: test_db
query: VACUUM
autocommit: yes
- name: >
Insert data to the column of array type using positional_args.
Note that we use quotes here, the same as for passing JSON, etc.
postgresql_query:
query: INSERT INTO test_table (array_column) VALUES (%s)
positional_args:
- '{1,2,3}'
# Pass list and string vars as positional_args
- name: Set vars
set_fact:
my_list:
- 1
- 2
- 3
my_arr: '{1, 2, 3}'
- name: Select from test table by passing positional_args as arrays
postgresql_query:
query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
positional_args:
- '{{ my_list }}'
- '{{ my_arr|string }}'
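# A hypothetical illustration of using the returned rowcount value;
# the table and the filter value are assumptions for the example:
- name: Delete stale rows and report how many were removed
  postgresql_query:
    db: test_db
    query: DELETE FROM test_table WHERE story = 'stale'
  register: result
- debug:
    msg: "Removed {{ result.rowcount }} rows"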
'''
RETURN = r'''
query:
  description: Query that the module tried to execute.
returned: always
type: str
sample: 'SELECT * FROM bar'
statusmessage:
description: Attribute containing the message returned by the command.
returned: always
type: str
sample: 'INSERT 0 1'
query_result:
description:
- List of dictionaries in column:value form representing returned rows.
returned: changed
type: list
sample: [{"Column": "Value1"},{"Column": "Value2"}]
rowcount:
description: Number of affected rows.
returned: changed
type: int
sample: 5
'''
try:
from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
from psycopg2.extras import DictCursor
except ImportError:
# It is needed for checking 'no results to fetch' in main();
# psycopg2 availability itself is checked by connect_to_db() in
# ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# Module execution.
#
def list_to_pg_array(elem):
"""Convert the passed list to PostgreSQL array
represented as a string.
Args:
elem (list): List that needs to be converted.
Returns:
elem (str): String representation of PostgreSQL array.
"""
elem = str(elem).strip('[]')
elem = '{' + elem + '}'
return elem
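# Illustration (hypothetical values, not executed by the module):
#   list_to_pg_array([1, 2, 3]) returns '{1, 2, 3}'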
def convert_elements_to_pg_arrays(obj):
"""Convert list elements of the passed object
to PostgreSQL arrays represented as strings.
Args:
obj (dict or list): Object whose elements need to be converted.
Returns:
obj (dict or list): Object with converted elements.
"""
if isinstance(obj, dict):
for (key, elem) in iteritems(obj):
if isinstance(elem, list):
obj[key] = list_to_pg_array(elem)
elif isinstance(obj, list):
for i, elem in enumerate(obj):
if isinstance(elem, list):
obj[i] = list_to_pg_array(elem)
return obj
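# Illustration (hypothetical values):
#   convert_elements_to_pg_arrays({'ids': [1, 2]}) returns {'ids': '{1, 2}'},
#   leaving non-list values untouched.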
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
query=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
positional_args=dict(type='list', elements='raw'),
named_args=dict(type='dict'),
session_role=dict(type='str'),
path_to_script=dict(type='path'),
autocommit=dict(type='bool', default=False),
encoding=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
query = module.params["query"]
positional_args = module.params["positional_args"]
named_args = module.params["named_args"]
path_to_script = module.params["path_to_script"]
autocommit = module.params["autocommit"]
encoding = module.params["encoding"]
if autocommit and module.check_mode:
module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
if path_to_script and query:
module.fail_json(msg="path_to_script is mutually exclusive with query")
if positional_args:
positional_args = convert_elements_to_pg_arrays(positional_args)
elif named_args:
named_args = convert_elements_to_pg_arrays(named_args)
if path_to_script:
try:
with open(path_to_script, 'rb') as f:
query = to_native(f.read())
except Exception as e:
module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
if encoding is not None:
db_connection.set_client_encoding(encoding)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Prepare args:
if module.params.get("positional_args"):
arguments = module.params["positional_args"]
elif module.params.get("named_args"):
arguments = module.params["named_args"]
else:
arguments = None
# Set defaults:
changed = False
# Execute query:
try:
cursor.execute(query, arguments)
except Exception as e:
if not autocommit:
db_connection.rollback()
cursor.close()
db_connection.close()
module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
statusmessage = cursor.statusmessage
rowcount = cursor.rowcount
    try:
        query_result = [dict(row) for row in cursor.fetchall()]
    except Psycopg2ProgrammingError as e:
        if to_native(e) == 'no results to fetch':
            # The query returned no result set (e.g. INSERT/UPDATE);
            # per the RETURN docs, query_result is a list:
            query_result = []
        else:
            module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
    except Exception as e:
        module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
    if 'SELECT' not in statusmessage:
        if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
            s = statusmessage.split()
            if len(s) == 3:
                if s[2] != '0':
                    changed = True
            elif len(s) == 2:
                if s[1] != '0':
                    changed = True
            else:
                changed = True
        else:
            changed = True
if module.check_mode:
db_connection.rollback()
else:
if not autocommit:
db_connection.commit()
kw = dict(
changed=changed,
query=cursor.query,
statusmessage=statusmessage,
query_result=query_result,
rowcount=rowcount if rowcount >= 0 else 0,
)
cursor.close()
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,279 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema
description:
- Add or remove PostgreSQL schema.
options:
name:
description:
- Name of the schema to add or remove.
required: true
type: str
aliases:
- schema
database:
description:
- Name of the database to connect to and add or remove the schema.
type: str
default: postgres
aliases:
- db
- login_db
owner:
description:
- Name of the role to set as owner of the schema.
type: str
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role
were the one that had logged in originally.
type: str
state:
description:
- The schema state.
type: str
default: present
choices: [ absent, present ]
cascade_drop:
description:
- Drop schema with CASCADE to remove child objects.
type: bool
default: false
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
seealso:
- name: PostgreSQL schemas
description: General information about PostgreSQL schemas.
link: https://www.postgresql.org/docs/current/ddl-schemas.html
- name: CREATE SCHEMA reference
description: Complete reference of the CREATE SCHEMA command documentation.
link: https://www.postgresql.org/docs/current/sql-createschema.html
- name: ALTER SCHEMA reference
description: Complete reference of the ALTER SCHEMA command documentation.
link: https://www.postgresql.org/docs/current/sql-alterschema.html
- name: DROP SCHEMA reference
description: Complete reference of the DROP SCHEMA command documentation.
link: https://www.postgresql.org/docs/current/sql-dropschema.html
author:
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
- Thomas O'Donnell (@andytom)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create a new schema with name acme in test database
postgresql_schema:
db: test
name: acme
- name: Create a new schema acme with a user bob who will own it
postgresql_schema:
name: acme
owner: bob
- name: Drop schema "acme" with cascade
postgresql_schema:
name: acme
state: absent
cascade_drop: yes
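# A hypothetical illustration combining a non-default database with an owner;
# the database and role names are assumptions for the example:
- name: Ensure schema reporting exists in database analytics, owned by bob
  postgresql_schema:
    db: analytics
    name: reporting
    owner: bob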
'''
RETURN = r'''
schema:
description: Name of the schema.
returned: success, changed
type: str
sample: "acme"
queries:
description: List of executed queries.
returned: always
type: list
sample: ["CREATE SCHEMA \"acme\""]
'''
import traceback
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible_collections.community.general.plugins.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils._text import to_native
executed_queries = []
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
query = "ALTER SCHEMA %s OWNER TO %s" % (
pg_quote_identifier(schema, 'schema'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
executed_queries.append(query)
return True
def get_schema_info(cursor, schema):
query = ("SELECT schema_owner AS owner "
"FROM information_schema.schemata "
"WHERE schema_name = %(schema)s")
cursor.execute(query, {'schema': schema})
return cursor.fetchone()
def schema_exists(cursor, schema):
query = ("SELECT schema_name FROM information_schema.schemata "
"WHERE schema_name = %(schema)s")
cursor.execute(query, {'schema': schema})
return cursor.rowcount == 1
def schema_delete(cursor, schema, cascade):
if schema_exists(cursor, schema):
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
if cascade:
query += " CASCADE"
cursor.execute(query)
executed_queries.append(query)
return True
else:
return False
def schema_create(cursor, schema, owner):
if not schema_exists(cursor, schema):
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
if owner:
query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
query = ' '.join(query_fragments)
cursor.execute(query)
executed_queries.append(query)
return True
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return set_owner(cursor, schema, owner)
else:
return False
def schema_matches(cursor, schema, owner):
if not schema_exists(cursor, schema):
return False
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return False
else:
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
schema=dict(type="str", required=True, aliases=['name']),
owner=dict(type="str", default=""),
database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
cascade_drop=dict(type="bool", default=False),
state=dict(type="str", default="present", choices=["absent", "present"]),
session_role=dict(type="str"),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
schema = module.params["schema"]
owner = module.params["owner"]
state = module.params["state"]
cascade_drop = module.params["cascade_drop"]
changed = False
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
try:
if module.check_mode:
if state == "absent":
changed = not schema_exists(cursor, schema)
elif state == "present":
changed = not schema_matches(cursor, schema, owner)
module.exit_json(changed=changed, schema=schema)
if state == "absent":
try:
changed = schema_delete(cursor, schema, cascade_drop)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = schema_create(cursor, schema, owner)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
db_connection.close()
module.exit_json(changed=changed, schema=schema, queries=executed_queries)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,611 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_sequence
short_description: Create, drop, or alter a PostgreSQL sequence
description:
- Allows creating, dropping, or changing the definition of a sequence generator.
options:
sequence:
description:
- The name of the sequence.
required: true
type: str
aliases:
- name
state:
description:
- The sequence state.
      - If I(state=absent), other options will be ignored except I(name) and
        I(schema).
default: present
choices: [ absent, present ]
type: str
data_type:
description:
- Specifies the data type of the sequence. Valid types are bigint, integer,
and smallint. bigint is the default. The data type determines the default
minimum and maximum values of the sequence. For more info see the
documentation
U(https://www.postgresql.org/docs/current/sql-createsequence.html).
- Supported from PostgreSQL 10.
choices: [ bigint, integer, smallint ]
type: str
increment:
description:
- Increment specifies which value is added to the current sequence value
to create a new value.
- A positive value will make an ascending sequence, a negative one a
descending sequence. The default value is 1.
type: int
minvalue:
description:
- Minvalue determines the minimum value a sequence can generate. The
default for an ascending sequence is 1. The default for a descending
sequence is the minimum value of the data type.
type: int
aliases:
- min
maxvalue:
description:
- Maxvalue determines the maximum value for the sequence. The default for
an ascending sequence is the maximum
value of the data type. The default for a descending sequence is -1.
type: int
aliases:
- max
start:
description:
- Start allows the sequence to begin anywhere. The default starting value
is I(minvalue) for ascending sequences and I(maxvalue) for descending
ones.
type: int
cache:
description:
- Cache specifies how many sequence numbers are to be preallocated and
stored in memory for faster access. The minimum value is 1 (only one
value can be generated at a time, i.e., no cache), and this is also
the default.
type: int
cycle:
description:
- The cycle option allows the sequence to wrap around when the I(maxvalue)
or I(minvalue) has been reached by an ascending or descending sequence
respectively. If the limit is reached, the next number generated will be
the minvalue or maxvalue, respectively.
- If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
has reached its maximum value will return an error. False (NO CYCLE) is
the default.
type: bool
default: no
cascade:
description:
- Automatically drop objects that depend on the sequence, and in turn all
objects that depend on those objects.
- Ignored if I(state=present).
- Only used with I(state=absent).
type: bool
default: no
rename_to:
description:
- The new name for the I(sequence).
- Works only for existing sequences.
type: str
owner:
description:
- Set the owner for the I(sequence).
type: str
schema:
description:
      - The schema of the I(sequence). This is used to create and relocate
        a I(sequence) in the given schema.
default: public
type: str
newschema:
description:
- The new schema for the I(sequence). Will be used for moving a
I(sequence) to another I(schema).
- Works only for existing sequences.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified I(session_role)
must be a role that the current I(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though
the I(session_role) were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- database
- login_db
notes:
- If you do not pass the db parameter, the sequence will be created in the database
  named postgres.
seealso:
- module: postgresql_table
- module: postgresql_owner
- module: postgresql_privs
- module: postgresql_tablespace
- name: CREATE SEQUENCE reference
description: Complete reference of the CREATE SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-createsequence.html
- name: ALTER SEQUENCE reference
description: Complete reference of the ALTER SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-altersequence.html
- name: DROP SEQUENCE reference
description: Complete reference of the DROP SEQUENCE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropsequence.html
author:
- Tobias Birkefeld (@tcraxs)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create an ascending bigint sequence called foobar in the default
database
postgresql_sequence:
name: foobar
- name: Create an ascending integer sequence called foobar, starting at 101
postgresql_sequence:
name: foobar
data_type: integer
start: 101
- name: Create a descending sequence called foobar, starting at 101 and
    preallocating 10 sequence numbers in cache
postgresql_sequence:
name: foobar
increment: -1
cache: 10
start: 101
- name: Create an ascending sequence called foobar, which cycles between 1 and 10
postgresql_sequence:
name: foobar
cycle: yes
min: 1
max: 10
- name: Create an ascending bigint sequence called foobar in the default
database with owner foobar
postgresql_sequence:
name: foobar
owner: foobar
- name: Rename an existing sequence named foo to bar
postgresql_sequence:
name: foo
rename_to: bar
- name: Change the schema of an existing sequence to foobar
postgresql_sequence:
name: foobar
newschema: foobar
- name: Change the owner of an existing sequence to foobar
postgresql_sequence:
name: foobar
owner: foobar
- name: Drop a sequence called foobar
postgresql_sequence:
name: foobar
state: absent
- name: Drop a sequence called foobar with cascade
postgresql_sequence:
name: foobar
cascade: yes
state: absent
'''
RETURN = r'''
state:
description: Sequence state at the end of execution.
returned: always
type: str
sample: 'present'
sequence:
description: Sequence name.
returned: always
type: str
sample: 'foobar'
queries:
    description: List of queries that the module tried to execute.
    returned: always
    type: list
sample: [ "CREATE SEQUENCE \"foo\"" ]
schema:
    description: Name of the schema of the sequence.
returned: always
type: str
sample: 'foo'
data_type:
description: Shows the current data type of the sequence.
returned: always
type: str
sample: 'bigint'
increment:
description: The value of increment of the sequence. A positive value will
make an ascending sequence, a negative one a descending
sequence.
returned: always
type: int
sample: '-1'
minvalue:
description: The value of minvalue of the sequence.
returned: always
type: int
sample: '1'
maxvalue:
description: The value of maxvalue of the sequence.
returned: always
type: int
sample: '9223372036854775807'
start:
description: The value of start of the sequence.
returned: always
type: int
sample: '12'
cycle:
    description: Shows whether the sequence cycles or not.
returned: always
type: str
sample: 'NO'
owner:
description: Shows the current owner of the sequence
after the successful run of the task.
returned: always
type: str
sample: 'postgres'
newname:
description: Shows the new sequence name after rename.
returned: on success
type: str
sample: 'barfoo'
newschema:
description: Shows the new schema of the sequence after schema change.
returned: on success
type: str
sample: 'foobar'
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
class Sequence(object):
"""Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
Arguments:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
Attributes:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
changed (bool) -- something was changed after execution or not
executed_queries (list) -- executed queries
name (str) -- name of the sequence
owner (str) -- name of the owner of the sequence
schema (str) -- name of the schema (default: public)
data_type (str) -- data type of the sequence
start_value (int) -- value of the sequence start
minvalue (int) -- minimum value of the sequence
maxvalue (int) -- maximum value of the sequence
increment (int) -- increment value of the sequence
cycle (bool) -- sequence can cycle or not
new_name (str) -- name of the renamed sequence
new_schema (str) -- name of the new schema
exists (bool) -- sequence exists or not
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.executed_queries = []
self.name = self.module.params['sequence']
self.owner = ''
self.schema = self.module.params['schema']
self.data_type = ''
self.start_value = ''
self.minvalue = ''
self.maxvalue = ''
self.increment = ''
self.cycle = ''
self.new_name = ''
self.new_schema = ''
self.exists = False
# Collect info
self.get_info()
def get_info(self):
"""Getter to refresh and get sequence info"""
query = ("SELECT "
"s.sequence_schema AS schemaname, "
"s.sequence_name AS sequencename, "
"pg_get_userbyid(c.relowner) AS sequenceowner, "
"s.data_type::regtype AS data_type, "
"s.start_value AS start_value, "
"s.minimum_value AS min_value, "
"s.maximum_value AS max_value, "
"s.increment AS increment_by, "
"s.cycle_option AS cycle "
"FROM information_schema.sequences s "
"JOIN pg_class c ON c.relname = s.sequence_name "
"LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE NOT pg_is_other_temp_schema(n.oid) "
"AND c.relkind = 'S'::\"char\" "
"AND sequence_name = %(name)s "
"AND sequence_schema = %(schema)s")
res = exec_sql(self, query,
query_params={'name': self.name, 'schema': self.schema},
add_to_executed=False)
        if not res:
            self.exists = False
            return False
        self.exists = True
self.schema = res[0]['schemaname']
self.name = res[0]['sequencename']
self.owner = res[0]['sequenceowner']
self.data_type = res[0]['data_type']
self.start_value = res[0]['start_value']
self.minvalue = res[0]['min_value']
self.maxvalue = res[0]['max_value']
self.increment = res[0]['increment_by']
self.cycle = res[0]['cycle']
def create(self):
"""Implements CREATE SEQUENCE command behavior."""
query = ['CREATE SEQUENCE']
query.append(self.__add_schema())
if self.module.params.get('data_type'):
query.append('AS %s' % self.module.params['data_type'])
if self.module.params.get('increment'):
query.append('INCREMENT BY %s' % self.module.params['increment'])
if self.module.params.get('minvalue'):
query.append('MINVALUE %s' % self.module.params['minvalue'])
if self.module.params.get('maxvalue'):
query.append('MAXVALUE %s' % self.module.params['maxvalue'])
if self.module.params.get('start'):
query.append('START WITH %s' % self.module.params['start'])
if self.module.params.get('cache'):
query.append('CACHE %s' % self.module.params['cache'])
if self.module.params.get('cycle'):
query.append('CYCLE')
return exec_sql(self, ' '.join(query), ddl=True)
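    # Illustration (assumed parameters): with name 'foobar', schema 'public',
    # data_type 'integer' and start 101, the generated statement would be roughly:
    #   CREATE SEQUENCE "public"."foobar" AS integer START WITH 101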
def drop(self):
"""Implements DROP SEQUENCE command behavior."""
query = ['DROP SEQUENCE']
query.append(self.__add_schema())
if self.module.params.get('cascade'):
query.append('CASCADE')
return exec_sql(self, ' '.join(query), ddl=True)
def rename(self):
"""Implements ALTER SEQUENCE RENAME TO command behavior."""
query = ['ALTER SEQUENCE']
query.append(self.__add_schema())
query.append('RENAME TO %s' % pg_quote_identifier(self.module.params['rename_to'], 'sequence'))
return exec_sql(self, ' '.join(query), ddl=True)
def set_owner(self):
"""Implements ALTER SEQUENCE OWNER TO command behavior."""
query = ['ALTER SEQUENCE']
query.append(self.__add_schema())
query.append('OWNER TO %s' % pg_quote_identifier(self.module.params['owner'], 'role'))
return exec_sql(self, ' '.join(query), ddl=True)
def set_schema(self):
"""Implements ALTER SEQUENCE SET SCHEMA command behavior."""
query = ['ALTER SEQUENCE']
query.append(self.__add_schema())
query.append('SET SCHEMA %s' % pg_quote_identifier(self.module.params['newschema'], 'schema'))
return exec_sql(self, ' '.join(query), ddl=True)
def __add_schema(self):
return '.'.join([pg_quote_identifier(self.schema, 'schema'),
pg_quote_identifier(self.name, 'sequence')])
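    # Illustration: with schema 'public' and name 'foobar', __add_schema()
    # yields the quoted identifier '"public"."foobar"'.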
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
sequence=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default='present', choices=['absent', 'present']),
data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
increment=dict(type='int'),
minvalue=dict(type='int', aliases=['min']),
maxvalue=dict(type='int', aliases=['max']),
start=dict(type='int'),
cache=dict(type='int'),
cycle=dict(type='bool', default=False),
schema=dict(type='str', default='public'),
cascade=dict(type='bool', default=False),
rename_to=dict(type='str'),
owner=dict(type='str'),
newschema=dict(type='str'),
db=dict(type='str', default='', aliases=['login_db', 'database']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['rename_to', 'data_type'],
['rename_to', 'increment'],
['rename_to', 'minvalue'],
['rename_to', 'maxvalue'],
['rename_to', 'start'],
['rename_to', 'cache'],
['rename_to', 'cycle'],
['rename_to', 'cascade'],
['rename_to', 'owner'],
['rename_to', 'newschema'],
['cascade', 'data_type'],
['cascade', 'increment'],
['cascade', 'minvalue'],
['cascade', 'maxvalue'],
['cascade', 'start'],
['cascade', 'cache'],
['cascade', 'cycle'],
['cascade', 'owner'],
['cascade', 'newschema'],
]
)
# Note: we don't need to check mutually exclusive params here, because they are
# checked automatically by AnsibleModule (mutually_exclusive=[] list above).
# Change autocommit to False if check_mode:
autocommit = not module.check_mode
# Connect to DB and make cursor object:
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##############
# Create the object and do main job:
data = Sequence(module, cursor)
# Set defaults:
changed = False
# Create new sequence
if not data.exists and module.params['state'] == 'present':
if module.params.get('rename_to'):
module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
if module.params.get('newschema'):
module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
changed = data.create()
# Drop non-existing sequence
elif not data.exists and module.params['state'] == 'absent':
# Nothing to do
changed = False
# Drop existing sequence
elif data.exists and module.params['state'] == 'absent':
changed = data.drop()
# Rename sequence
if data.exists and module.params.get('rename_to'):
if data.name != module.params['rename_to']:
changed = data.rename()
if changed:
data.new_name = module.params['rename_to']
# Refresh information
if module.params['state'] == 'present':
data.get_info()
# Change owner, schema and settings
if module.params['state'] == 'present' and data.exists:
# change owner
if module.params.get('owner'):
if data.owner != module.params['owner']:
changed = data.set_owner()
# Set schema
if module.params.get('newschema'):
if data.schema != module.params['newschema']:
changed = data.set_schema()
if changed:
data.new_schema = module.params['newschema']
# Rollback if it's possible and check_mode:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
sequence=data.name,
queries=data.executed_queries,
schema=data.schema,
data_type=data.data_type,
increment=data.increment,
minvalue=data.minvalue,
maxvalue=data.maxvalue,
start=data.start_value,
cycle=data.cycle,
owner=data.owner,
)
if module.params['state'] == 'present':
if data.new_name:
kw['newname'] = data.new_name
if data.new_schema:
kw['newschema'] = data.new_schema
elif module.params['state'] == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,435 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_set
short_description: Change a PostgreSQL server configuration parameter
description:
- Allows changing a PostgreSQL server configuration parameter.
- The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
- ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
- It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
- ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
which is read in addition to postgresql.conf.
- The module allows resetting a parameter to its boot_val (cluster initial value) with I(reset=yes), or removing the parameter
  string from postgresql.auto.conf and reloading with I(value=default) (for settings with postmaster context a restart is required).
- After a change you can see the previous and
  the new parameter value and other information in the Ansible output, using the returned values and the M(debug) module.
options:
name:
description:
- Name of PostgreSQL server parameter.
type: str
required: true
value:
description:
- Parameter value to set.
  - To remove the parameter string from postgresql.auto.conf and
    reload the server configuration you must pass I(value=default).
    With I(value=default) the playbook always returns C(changed=true).
type: str
reset:
description:
- Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
type: bool
default: false
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
notes:
- Supported version of PostgreSQL is 9.4 and later.
- Pay attention that changing a setting with 'postmaster' context can return C(changed=true)
  when actually nothing changes, because the same value may be presented in
  several different forms, for example 1024MB, 1GB, etc., while in the pg_settings
  system view it can be stored as 131072 8kB pages.
  The final check cannot compare the parameter values because the server was
  not restarted and the value in pg_settings is not updated yet.
- For some parameters a restart of the PostgreSQL server is required.
See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
seealso:
- module: postgresql_info
- name: PostgreSQL server configuration
description: General information about PostgreSQL server configuration.
link: https://www.postgresql.org/docs/current/runtime-config.html
- name: PostgreSQL view pg_settings reference
description: Complete reference of the pg_settings view documentation.
link: https://www.postgresql.org/docs/current/view-pg-settings.html
- name: PostgreSQL ALTER SYSTEM command reference
description: Complete reference of the ALTER SYSTEM command documentation.
link: https://www.postgresql.org/docs/current/sql-altersystem.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Restore wal_keep_segments parameter to initial state
postgresql_set:
name: wal_keep_segments
reset: yes
# Set the work_mem parameter to 32MB and show what has been changed and whether a restart is required
# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
- name: Set work mem parameter
postgresql_set:
name: work_mem
value: 32mb
register: set
- debug:
msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
when: set.changed
# Note that a restart of the PostgreSQL server is required for some parameters.
# In this situation you will see the same value in prev_val_pretty and value_pretty, but 'changed=True'
# (if you passed a value that was different from the current server setting).
- name: Set log_min_duration_statement parameter to 1 second
postgresql_set:
name: log_min_duration_statement
value: 1s
- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
postgresql_set:
name: wal_log_hints
value: default
'''
RETURN = r'''
name:
description: Name of PostgreSQL server parameter.
returned: always
type: str
sample: 'shared_buffers'
restart_required:
  description: Whether a restart of the PostgreSQL server is required for the parameter change to take effect.
returned: always
type: bool
sample: true
prev_val_pretty:
description: Information about previous state of the parameter.
returned: always
type: str
sample: '4MB'
value_pretty:
description: Information about current state of the parameter.
returned: always
type: str
sample: '64MB'
value:
description:
- Dictionary that contains the current parameter value (at the time of playbook finish).
    - Pay attention that for some parameters a restart of the PostgreSQL server is required for the change to really take effect.
- Returns the current value in the check mode.
returned: always
type: dict
sample: { "value": 67108864, "unit": "b" }
context:
description:
- PostgreSQL setting context.
returned: always
type: str
sample: user
'''
try:
from psycopg2.extras import DictCursor
except Exception:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native
PG_REQ_VER = 90400
# To allow setting a value like 1mb instead of 1MB, etc:
POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
# ===========================================
# PostgreSQL module specific support methods.
#
def param_get(cursor, module, name):
query = ("SELECT name, setting, unit, context, boot_val "
"FROM pg_settings WHERE name = %(name)s")
try:
cursor.execute(query, {'name': name})
info = cursor.fetchall()
cursor.execute("SHOW %s" % name)
val = cursor.fetchone()
except Exception as e:
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
raw_val = info[0][1]
unit = info[0][2]
context = info[0][3]
boot_val = info[0][4]
if val[0] == 'True':
val[0] = 'on'
elif val[0] == 'False':
val[0] = 'off'
if unit == 'kB':
if int(raw_val) > 0:
raw_val = int(raw_val) * 1024
if int(boot_val) > 0:
boot_val = int(boot_val) * 1024
unit = 'b'
elif unit == 'MB':
if int(raw_val) > 0:
raw_val = int(raw_val) * 1024 * 1024
if int(boot_val) > 0:
boot_val = int(boot_val) * 1024 * 1024
unit = 'b'
return (val[0], raw_val, unit, boot_val, context)
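# Illustration (assumed values): for work_mem set to 4MB,
#   param_get(cursor, module, 'work_mem')
# might return ('4MB', 4194304, 'b', 4194304, 'user'),
# i.e. (pretty value, raw value in bytes, unit, boot value in bytes, context).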
def pretty_to_bytes(pretty_val):
    # The function returns a value in bytes
    # if the value contains 'B', 'kB', 'MB', 'GB', or 'TB'.
    # Otherwise it returns the passed argument unchanged.
    if 'B' not in pretty_val.upper():
        return pretty_val
    num_part = int(''.join(d for d in pretty_val if d.isdigit()))
    if 'kB' in pretty_val:
        return num_part * 1024
    elif 'MB' in pretty_val.upper():
        return num_part * 1024 * 1024
    elif 'GB' in pretty_val.upper():
        return num_part * 1024 * 1024 * 1024
    elif 'TB' in pretty_val.upper():
        return num_part * 1024 * 1024 * 1024 * 1024
    return num_part
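# Illustration (hypothetical values): pretty_to_bytes('4MB') returns 4194304,
# while pretty_to_bytes('100') returns '100' unchanged.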
def param_set(cursor, module, name, value, context):
try:
if str(value).lower() == 'default':
query = "ALTER SYSTEM SET %s = DEFAULT" % name
else:
query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
cursor.execute(query)
if context != 'postmaster':
cursor.execute("SELECT pg_reload_conf()")
except Exception as e:
module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
name=dict(type='str', required=True),
db=dict(type='str', aliases=['login_db']),
value=dict(type='str'),
reset=dict(type='bool'),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params["name"]
value = module.params["value"]
reset = module.params["reset"]
    # Allow passing values like 1mb instead of 1MB, etc:
if value:
for unit in POSSIBLE_SIZE_UNITS:
if value[:-2].isdigit() and unit in value[-2:]:
value = value.upper()
if value and reset:
module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
if not value and not reset:
module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
kw = {}
# Check server version (needs 9.4 or later):
ver = db_connection.server_version
if ver < PG_REQ_VER:
module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
kw = dict(
changed=False,
restart_required=False,
value_pretty="",
prev_val_pretty="",
value={"value": "", "unit": ""},
)
kw['name'] = name
db_connection.close()
module.exit_json(**kw)
# Set default returned values:
restart_required = False
changed = False
kw['name'] = name
kw['restart_required'] = False
# Get info about param state:
res = param_get(cursor, module, name)
current_value = res[0]
raw_val = res[1]
unit = res[2]
boot_val = res[3]
context = res[4]
if value == 'True':
value = 'on'
elif value == 'False':
value = 'off'
kw['prev_val_pretty'] = current_value
kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
kw['context'] = context
# Do job
if context == "internal":
module.fail_json(msg="%s: cannot be changed (internal context). See "
"https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
if context == "postmaster":
restart_required = True
# If check_mode, just compare and exit:
if module.check_mode:
if pretty_to_bytes(value) == pretty_to_bytes(current_value):
kw['changed'] = False
else:
kw['value_pretty'] = value
kw['changed'] = True
        # In any case, return the current raw value in check_mode:
kw['value'] = dict(
value=raw_val,
unit=unit,
)
kw['restart_required'] = restart_required
module.exit_json(**kw)
# Set param:
if value and value != current_value:
changed = param_set(cursor, module, name, value, context)
kw['value_pretty'] = value
# Reset param:
elif reset:
if raw_val == boot_val:
# nothing to change, exit:
kw['value'] = dict(
value=raw_val,
unit=unit,
)
module.exit_json(**kw)
changed = param_set(cursor, module, name, boot_val, context)
if restart_required:
module.warn("Restart of PostgreSQL is required for setting %s" % name)
cursor.close()
db_connection.close()
# Reconnect and recheck current value:
if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
res = param_get(cursor, module, name)
# f_ means 'final'
f_value = res[0]
f_raw_val = res[1]
if raw_val == f_raw_val:
changed = False
else:
changed = True
kw['value_pretty'] = f_value
kw['value'] = dict(
value=f_raw_val,
unit=unit,
)
cursor.close()
db_connection.close()
kw['changed'] = changed
kw['restart_required'] = restart_required
module.exit_json(**kw)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,293 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_slot
short_description: Add or remove replication slots from a PostgreSQL database
description:
- Add or remove physical or logical replication slots from a PostgreSQL database.
options:
name:
description:
- Name of the replication slot to add or remove.
type: str
required: yes
aliases:
- slot_name
slot_type:
description:
- Slot type.
type: str
default: physical
choices: [ logical, physical ]
state:
description:
- The slot state.
- I(state=present) implies the slot must be present in the system.
    - I(state=absent) implies the slot must be removed from the system.
type: str
default: present
choices: [ absent, present ]
immediately_reserve:
description:
- Optional parameter that when C(yes) specifies that the LSN for this replication slot be reserved
immediately, otherwise the default, C(no), specifies that the LSN is reserved on the first connection
from a streaming replication client.
    - Available from PostgreSQL version 9.6.
    - Used only with I(slot_type=physical).
- Mutually exclusive with I(slot_type=logical).
type: bool
default: no
output_plugin:
description:
- All logical slots must indicate which output plugin decoder they're using.
- This parameter does not apply to physical slots.
- It will be ignored with I(slot_type=physical).
type: str
default: "test_decoding"
db:
description:
- Name of database to connect to.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
notes:
- Physical replication slots were introduced to PostgreSQL with version 9.4,
while logical replication slots were added beginning with version 10.0.
seealso:
- name: PostgreSQL pg_replication_slots view reference
description: Complete reference of the PostgreSQL pg_replication_slots view.
link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
- name: PostgreSQL streaming replication protocol reference
description: Complete reference of the PostgreSQL streaming replication protocol documentation.
link: https://www.postgresql.org/docs/current/protocol-replication.html
- name: PostgreSQL logical replication protocol reference
description: Complete reference of the PostgreSQL logical replication protocol documentation.
link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
author:
- John Scalia (@jscalia)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create physical_one physical slot if it doesn't exist
become_user: postgres
postgresql_slot:
slot_name: physical_one
db: ansible
- name: Remove physical_one slot if it exists
become_user: postgres
postgresql_slot:
slot_name: physical_one
db: ansible
state: absent
- name: Create logical_slot_one logical slot in the database acme if it doesn't exist
postgresql_slot:
name: logical_slot_one
slot_type: logical
state: present
output_plugin: custom_decoder_one
db: "acme"
- name: Remove logical_one slot, if it exists, from the cluster running on another host and non-standard port
postgresql_slot:
name: logical_one
login_host: mydatabase.example.org
port: 5433
login_user: ourSuperuser
login_password: thePassword
state: absent
'''
RETURN = r'''
name:
  description: Name of the slot.
returned: always
type: str
sample: "physical_one"
queries:
description: List of executed queries.
returned: always
  type: list
sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class PgSlot(object):
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.exists = False
self.kind = ''
self.__slot_exists()
self.changed = False
self.executed_queries = []
def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
if self.exists:
if self.kind == kind:
return False
else:
self.module.warn("slot with name '%s' already exists "
"but has another type '%s'" % (self.name, self.kind))
return False
if just_check:
return None
if kind == 'physical':
            # Check server version (immediately_reserve needs 9.6+):
if self.cursor.connection.server_version < 96000:
query = "SELECT pg_create_physical_replication_slot(%(name)s)"
else:
query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
self.changed = exec_sql(self, query,
query_params={'name': self.name, 'i_reserve': immediately_reserve},
ddl=True)
elif kind == 'logical':
query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
self.changed = exec_sql(self, query,
query_params={'name': self.name, 'o_plugin': output_plugin}, ddl=True)
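    # Illustration (assumed names): for a logical slot the executed query
    # would be rendered like:
    #   SELECT pg_create_logical_replication_slot('logical_one', 'test_decoding')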
def drop(self):
if not self.exists:
return False
query = "SELECT pg_drop_replication_slot(%(name)s)"
self.changed = exec_sql(self, query, query_params={'name': self.name}, ddl=True)
def __slot_exists(self):
query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
if res:
self.exists = True
self.kind = res[0][0]
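    # Illustration: for an existing physical slot the query returns something
    # like [['physical']], so self.kind becomes 'physical'.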
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type="str", aliases=["login_db"]),
name=dict(type="str", required=True, aliases=["slot_name"]),
slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
immediately_reserve=dict(type="bool", default=False),
session_role=dict(type="str"),
output_plugin=dict(type="str", default="test_decoding"),
state=dict(type="str", default="present", choices=["absent", "present"]),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params["name"]
slot_type = module.params["slot_type"]
immediately_reserve = module.params["immediately_reserve"]
state = module.params["state"]
output_plugin = module.params["output_plugin"]
if immediately_reserve and slot_type == 'logical':
module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
# When slot_type is logical and parameter db is not passed,
# the default database will be used to create the slot and
# the user should know about this.
# When the slot type is physical,
# it doesn't matter which database will be used
# because physical slots are global objects.
if slot_type == 'logical':
warn_db_default = True
else:
warn_db_default = False
conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
##################################
# Create an object and do main job
pg_slot = PgSlot(module, cursor, name)
changed = False
if module.check_mode:
if state == "present":
if not pg_slot.exists:
changed = True
pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
elif state == "absent":
if pg_slot.exists:
changed = True
else:
if state == "absent":
pg_slot.drop()
elif state == "present":
pg_slot.create(slot_type, immediately_reserve, output_plugin)
changed = pg_slot.changed
db_connection.close()
module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,685 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_subscription
short_description: Add, update, or remove PostgreSQL subscription
description:
- Add, update, or remove PostgreSQL subscription.
options:
name:
description:
- Name of the subscription to add, update, or remove.
type: str
required: yes
db:
description:
- Name of the database to connect to and where
the subscription state will be changed.
aliases: [ login_db ]
type: str
required: yes
state:
description:
- The subscription state.
- C(present) implies that if I(name) subscription doesn't exist, it will be created.
- C(absent) implies that if I(name) subscription exists, it will be removed.
- C(refresh) implies that if I(name) subscription exists, it will be refreshed.
        Fetches missing table information from the publisher. Always returns C(changed=True).
This will start replication of tables that were added to the subscribed-to publications
since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
The existing data in the publications that are being subscribed to
should be copied once the replication starts.
- For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
type: str
choices: [ absent, present, refresh ]
default: present
owner:
description:
- Subscription owner.
- If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
- Ignored when I(state) is not C(present).
type: str
publications:
description:
- The publication names on the publisher to use for the subscription.
- Ignored when I(state) is not C(present).
type: list
elements: str
connparams:
description:
- Dictionary of connection parameters (param: value pairs) used to connect to the publisher.
- For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
- Ignored when I(state) is not C(present).
type: dict
cascade:
description:
- Drop subscription dependencies. Has effect with I(state=absent) only.
- Ignored when I(state) is not C(absent).
type: bool
default: false
subsparams:
description:
- Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
- For updating the subscription, the allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
- See available parameters to create a new subscription
on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
- Ignored when I(state) is not C(present).
type: dict
notes:
- PostgreSQL version must be 10 or greater.
seealso:
- module: postgresql_publication
- module: postgresql_info
- name: CREATE SUBSCRIPTION reference
description: Complete reference of the CREATE SUBSCRIPTION command documentation.
link: https://www.postgresql.org/docs/current/sql-createsubscription.html
- name: ALTER SUBSCRIPTION reference
description: Complete reference of the ALTER SUBSCRIPTION command documentation.
link: https://www.postgresql.org/docs/current/sql-altersubscription.html
- name: DROP SUBSCRIPTION reference
description: Complete reference of the DROP SUBSCRIPTION command documentation.
link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
author:
- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: >
Create acme subscription in mydb database using acme_publication and
the following connection parameters to connect to the publisher.
Set the subscription owner as alice.
postgresql_subscription:
db: mydb
name: acme
state: present
publications: acme_publication
owner: alice
connparams:
host: 127.0.0.1
port: 5432
user: repl
password: replpass
dbname: mydb
- name: Assuming that acme subscription exists, try to change conn parameters
postgresql_subscription:
db: mydb
name: acme
connparams:
host: 127.0.0.1
port: 5432
user: repl
password: replpass
connect_timeout: 100
- name: Refresh acme publication
postgresql_subscription:
db: mydb
name: acme
state: refresh
- name: Drop acme subscription from mydb with dependencies (cascade=yes)
postgresql_subscription:
db: mydb
name: acme
state: absent
cascade: yes
- name: Assuming that acme subscription exists and enabled, disable the subscription
postgresql_subscription:
db: mydb
name: acme
state: present
subsparams:
enabled: no
'''
RETURN = r'''
name:
description:
- Name of the subscription.
returned: always
type: str
sample: acme
exists:
description:
- Flag indicating whether the subscription exists at the end of runtime.
returned: always
type: bool
sample: true
queries:
description: List of executed queries.
returned: always
type: list
sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
initial_state:
description: Subscription configuration at the beginning of runtime.
returned: always
type: dict
sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
final_state:
description: Subscription configuration at the end of runtime.
returned: always
type: dict
sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
'''
from copy import deepcopy
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
SUPPORTED_PG_VERSION = 10000
SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
################################
# Module functions and classes #
################################
def convert_conn_params(conn_dict):
"""Converts the passed connection dictionary to string.
Args:
conn_dict (dict): Dictionary that needs to be converted.
Returns:
Connection string.
"""
conn_list = []
for (param, val) in iteritems(conn_dict):
conn_list.append('%s=%s' % (param, val))
return ' '.join(conn_list)
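# Illustrative sketch only (hypothetical values): given
#   convert_conn_params({'host': '127.0.0.1', 'port': 5432, 'user': 'repl'})
# the function returns a libpq-style string such as
#   'host=127.0.0.1 port=5432 user=repl'
# (pair order follows the dict's iteration order).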
def convert_subscr_params(params_dict):
"""Converts the passed params dictionary to string.
Args:
params_dict (dict): Dictionary that needs to be converted.
Returns:
Parameters string.
"""
params_list = []
for (param, val) in iteritems(params_dict):
if val is False:
val = 'false'
elif val is True:
val = 'true'
params_list.append('%s = %s' % (param, val))
return ', '.join(params_list)
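# Illustrative sketch only (hypothetical values): booleans become SQL literals, so
#   convert_subscr_params({'enabled': False, 'copy_data': True})
# yields a WITH () clause body such as 'enabled = false, copy_data = true'.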
class PgSubscription():
"""Class to work with PostgreSQL subscription.
Args:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
name (str): The name of the subscription.
db (str): The database name the subscription will be associated with.
Attributes:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
name (str): Name of subscription.
executed_queries (list): List of executed queries.
attrs (dict): Dict with subscription attributes.
exists (bool): Flag indicating whether the subscription exists.
"""
def __init__(self, module, cursor, name, db):
self.module = module
self.cursor = cursor
self.name = name
self.db = db
self.executed_queries = []
self.attrs = {
'owner': None,
'enabled': None,
'synccommit': None,
'conninfo': {},
'slotname': None,
'publications': [],
}
self.empty_attrs = deepcopy(self.attrs)
self.exists = self.check_subscr()
def get_info(self):
"""Refresh the subscription information.
Returns:
``self.attrs``.
"""
self.exists = self.check_subscr()
return self.attrs
def check_subscr(self):
"""Check the subscription and refresh ``self.attrs`` subscription attribute.
Returns:
True if the subscription with ``self.name`` exists, False otherwise.
"""
subscr_info = self.__get_general_subscr_info()
if not subscr_info:
# The subscription does not exist:
self.attrs = deepcopy(self.empty_attrs)
return False
self.attrs['owner'] = subscr_info.get('rolname')
self.attrs['enabled'] = subscr_info.get('subenabled')
self.attrs['synccommit'] = subscr_info.get('subsynccommit')
self.attrs['slotname'] = subscr_info.get('subslotname')
self.attrs['publications'] = subscr_info.get('subpublications')
if subscr_info.get('subconninfo'):
for param in subscr_info['subconninfo'].split(' '):
tmp = param.split('=')
try:
self.attrs['conninfo'][tmp[0]] = int(tmp[1])
except ValueError:
self.attrs['conninfo'][tmp[0]] = tmp[1]
return True
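# Parsing sketch for the loop above (hypothetical values): a subconninfo string like
#   'host=127.0.0.1 port=5432 user=repl'
# is stored in self.attrs['conninfo'] as
#   {'host': '127.0.0.1', 'port': 5432, 'user': 'repl'};
# values that parse as integers are kept as int, everything else as str.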
def create(self, connparams, publications, subsparams, check_mode=True):
"""Create the subscription.
Args:
connparams (str): Connection string in libpq style.
publications (list): Publications on the master to use.
subsparams (str): Parameters string in WITH () clause style.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if the subscription has been created, otherwise False.
"""
query_fragments = []
query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
"PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
if subsparams:
query_fragments.append("WITH (%s)" % subsparams)
changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
return changed
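# Assembly sketch (hypothetical values): with self.name == 'acme',
#   create("host=127.0.0.1 port=5432", ['acme_publication'], 'copy_data = true')
# builds and executes roughly:
#   CREATE SUBSCRIPTION acme CONNECTION 'host=127.0.0.1 port=5432'
#   PUBLICATION acme_publication WITH (copy_data = true)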
def update(self, connparams, publications, subsparams, check_mode=True):
"""Update the subscription.
Args:
connparams (str): Connection string in libpq style.
publications (list): Publications on the master to use.
subsparams (dict): Dictionary of optional parameters.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if subscription has been updated, otherwise False.
"""
changed = False
if connparams:
if connparams != self.attrs['conninfo']:
changed = self.__set_conn_params(convert_conn_params(connparams),
check_mode=check_mode)
if publications:
if sorted(self.attrs['publications']) != sorted(publications):
changed = self.__set_publications(publications, check_mode=check_mode)
if subsparams:
params_to_update = []
for (param, value) in iteritems(subsparams):
if param == 'enabled':
if self.attrs['enabled'] and value is False:
changed = self.enable(enabled=False, check_mode=check_mode)
elif not self.attrs['enabled'] and value is True:
changed = self.enable(enabled=True, check_mode=check_mode)
elif param == 'synchronous_commit':
if self.attrs['synccommit'] is True and value is False:
params_to_update.append("%s = false" % param)
elif self.attrs['synccommit'] is False and value is True:
params_to_update.append("%s = true" % param)
elif param == 'slot_name':
if self.attrs['slotname'] and self.attrs['slotname'] != value:
params_to_update.append("%s = %s" % (param, value))
else:
self.module.warn("Parameter '%s' is not in params supported "
"for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
if params_to_update:
changed = self.__set_params(params_to_update, check_mode=check_mode)
return changed
def drop(self, cascade=False, check_mode=True):
"""Drop the subscription.
Kwargs:
cascade (bool): Flag indicates that the subscription needs to be deleted
with its dependencies.
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
changed (bool): True if the subscription has been removed, otherwise False.
"""
if self.exists:
query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
if cascade:
query_fragments.append("CASCADE")
return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
def set_owner(self, role, check_mode=True):
"""Set a subscription owner.
Args:
role (str): Role (user) name that needs to be set as a subscription owner.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
return self.__exec_sql(query, check_mode=check_mode)
def refresh(self, check_mode=True):
"""Refresh publication.
Fetches missing table info from publisher.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
return self.__exec_sql(query, check_mode=check_mode)
def __set_params(self, params_to_update, check_mode=True):
"""Update optional subscription parameters.
Args:
params_to_update (list): Parameters with values to update.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
return self.__exec_sql(query, check_mode=check_mode)
def __set_conn_params(self, connparams, check_mode=True):
"""Update connection parameters.
Args:
connparams (str): Connection string in libpq style.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
return self.__exec_sql(query, check_mode=check_mode)
def __set_publications(self, publications, check_mode=True):
"""Update publications.
Args:
publications (list): Publications on the master to use.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
return self.__exec_sql(query, check_mode=check_mode)
def enable(self, enabled=True, check_mode=True):
"""Enable or disable the subscription.
Kwargs:
enabled (bool): Flag indicating whether the subscription needs
to be enabled or disabled.
check_mode (bool): If True, don't actually change anything,
just make SQL, add it to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
if enabled:
query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
else:
query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
return self.__exec_sql(query, check_mode=check_mode)
def __get_general_subscr_info(self):
"""Get and return general subscription information.
Returns:
Dict with subscription information if successful, False otherwise.
"""
query = ("SELECT d.datname, r.rolname, s.subenabled, "
"s.subconninfo, s.subslotname, s.subsynccommit, "
"s.subpublications FROM pg_catalog.pg_subscription s "
"JOIN pg_catalog.pg_database d "
"ON s.subdbid = d.oid "
"JOIN pg_catalog.pg_roles AS r "
"ON s.subowner = r.oid "
"WHERE s.subname = %(name)s AND d.datname = %(db)s")
result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
if result:
return result[0]
else:
return False
def __exec_sql(self, query, check_mode=False):
"""Execute SQL query.
Note: If we need just to get information from the database,
we use ``exec_sql`` function directly.
Args:
query (str): Query that needs to be executed.
Kwargs:
check_mode (bool): If True, don't actually change anything,
just add ``query`` to ``self.executed_queries`` and return True.
Returns:
True if successful, False otherwise.
"""
if check_mode:
self.executed_queries.append(query)
return True
else:
return exec_sql(self, query, ddl=True)
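# Check-mode sketch (hypothetical query): with check_mode=True the query is only
# appended to self.executed_queries and True is returned, so Ansible can report
# the would-be change without sending anything to the server.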
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
name=dict(type='str', required=True),
db=dict(type='str', required=True, aliases=['login_db']),
state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
publications=dict(type='list', elements='str'),
connparams=dict(type='dict'),
cascade=dict(type='bool', default=False),
owner=dict(type='str'),
subsparams=dict(type='dict'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
# Parameters handling:
db = module.params['db']
name = module.params['name']
state = module.params['state']
publications = module.params['publications']
cascade = module.params['cascade']
owner = module.params['owner']
subsparams = module.params['subsparams']
connparams = module.params['connparams']
if state == 'present' and cascade:
module.warn('parameter "cascade" is ignored when state is not absent')
if state != 'present':
if owner:
module.warn("parameter 'owner' is ignored when state is not 'present'")
if publications:
module.warn("parameter 'publications' is ignored when state is not 'present'")
if connparams:
module.warn("parameter 'connparams' is ignored when state is not 'present'")
if subsparams:
module.warn("parameter 'subsparams' is ignored when state is not 'present'")
# Connect to DB and make cursor object:
pg_conn_params = get_conn_params(module, module.params)
# We check subscription state without DML queries execution, so set autocommit:
db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Check version:
if cursor.connection.server_version < SUPPORTED_PG_VERSION:
module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
# Set defaults:
changed = False
initial_state = {}
final_state = {}
###################################
# Create object and do rock'n'roll:
subscription = PgSubscription(module, cursor, name, db)
if subscription.exists:
initial_state = deepcopy(subscription.attrs)
final_state = deepcopy(initial_state)
if state == 'present':
if not subscription.exists:
if subsparams:
subsparams = convert_subscr_params(subsparams)
if connparams:
connparams = convert_conn_params(connparams)
changed = subscription.create(connparams,
publications,
subsparams,
check_mode=module.check_mode)
else:
changed = subscription.update(connparams,
publications,
subsparams,
check_mode=module.check_mode)
if owner and subscription.attrs['owner'] != owner:
changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
elif state == 'absent':
changed = subscription.drop(cascade, check_mode=module.check_mode)
elif state == 'refresh':
if not subscription.exists:
module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
# Always returns True:
changed = subscription.refresh(check_mode=module.check_mode)
# Get final subscription info:
final_state = subscription.get_info()
# Connection is not needed any more:
cursor.close()
db_connection.close()
# Return ret values and exit:
module.exit_json(changed=changed,
name=name,
exists=subscription.exists,
queries=subscription.executed_queries,
initial_state=initial_state,
final_state=final_state)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,601 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_table
short_description: Create, drop, or modify a PostgreSQL table
description:
- Allows you to create, drop, rename, or truncate a table, or to change some table attributes.
options:
table:
description:
- Table name.
required: true
aliases:
- name
type: str
state:
description:
- The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
type: str
default: present
choices: [ absent, present ]
tablespace:
description:
- Set a tablespace for the table.
required: false
type: str
owner:
description:
- Set a table owner.
type: str
unlogged:
description:
- Create an unlogged table.
type: bool
default: no
like:
description:
- Create a table like another table (with similar DDL).
Mutually exclusive with I(columns), I(rename), and I(truncate).
type: str
including:
description:
- Keywords that are used with the I(like) parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
Needs I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
type: str
columns:
description:
- Columns that the new table should contain.
type: list
elements: str
rename:
description:
- New table name. Mutually exclusive with I(tablespace), I(owner),
I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
type: str
truncate:
description:
- Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
I(like), I(including), I(columns), I(rename), and I(storage_params).
type: bool
default: no
storage_params:
description:
- Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
Mutually exclusive with I(rename) and I(truncate).
type: list
elements: str
db:
description:
- Name of database to connect and where the table will be created.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
cascade:
description:
- Automatically drop objects that depend on the table (such as views).
Used with I(state=absent) only.
type: bool
default: no
notes:
- If you do not pass the I(db) parameter, tables will be created in the database
named postgres.
- PostgreSQL allows you to create a columnless table, so the I(columns) parameter is optional.
- Unlogged tables are available from PostgreSQL server version 9.1.
seealso:
- module: postgresql_sequence
- module: postgresql_idx
- module: postgresql_info
- module: postgresql_tablespace
- module: postgresql_owner
- module: postgresql_privs
- module: postgresql_copy
- name: CREATE TABLE reference
description: Complete reference of the CREATE TABLE command documentation.
link: https://www.postgresql.org/docs/current/sql-createtable.html
- name: ALTER TABLE reference
description: Complete reference of the ALTER TABLE command documentation.
link: https://www.postgresql.org/docs/current/sql-altertable.html
- name: DROP TABLE reference
description: Complete reference of the DROP TABLE command documentation.
link: https://www.postgresql.org/docs/current/sql-droptable.html
- name: PostgreSQL data types
description: Complete reference of the PostgreSQL data types documentation.
link: https://www.postgresql.org/docs/current/datatype.html
author:
- Andrei Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
postgresql_table:
db: acme
name: tbl2
like: tbl1
owner: testuser
- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
postgresql_table:
db: acme
table: tbl2
like: tbl1
including: comments, indexes
tablespace: ssd
- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
postgresql_table:
name: test_table
columns:
- id bigserial primary key
- num bigint
- stories text
tablespace: ssd
storage_params:
- fillfactor=10
- autovacuum_analyze_threshold=1
- name: Create an unlogged table in schema acme
postgresql_table:
name: acme.useless_data
columns: waste_id int
unlogged: true
- name: Rename table foo to bar
postgresql_table:
table: foo
rename: bar
- name: Rename table foo from schema acme to bar
postgresql_table:
name: acme.foo
rename: bar
- name: Set owner to someuser
postgresql_table:
name: foo
owner: someuser
- name: Change tablespace of foo table to new_tablespace and set owner to new_user
postgresql_table:
name: foo
tablespace: new_tablespace
owner: new_user
- name: Truncate table foo
postgresql_table:
name: foo
truncate: yes
- name: Drop table foo from schema acme
postgresql_table:
name: acme.foo
state: absent
- name: Drop table bar cascade
postgresql_table:
name: bar
state: absent
cascade: yes
'''
RETURN = r'''
table:
description: Name of a table.
returned: always
type: str
sample: 'foo'
state:
description: Table state.
returned: always
type: str
sample: 'present'
owner:
description: Table owner.
returned: always
type: str
sample: 'postgres'
tablespace:
description: Tablespace.
returned: always
type: str
sample: 'ssd_tablespace'
queries:
description: List of executed queries.
returned: always
type: list
sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
storage_params:
description: Storage parameters.
returned: always
type: list
sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class Table(object):
def __init__(self, name, module, cursor):
self.name = name
self.module = module
self.cursor = cursor
self.info = {
'owner': '',
'tblspace': '',
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_queries = []
def get_info(self):
"""Getter to refresh and get table info"""
self.__exists_in_db()
def __exists_in_db(self):
"""Check table exists and refresh info"""
if "." in self.name:
schema = self.name.split('.')[-2]
tblname = self.name.split('.')[-1]
else:
schema = 'public'
tblname = self.name
query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
"FROM pg_tables AS t "
"INNER JOIN pg_class AS c ON c.relname = t.tablename "
"INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
"WHERE t.tablename = %(tblname)s "
"AND n.nspname = %(schema)s")
res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
add_to_executed=False)
if res:
self.exists = True
self.info = dict(
owner=res[0][0],
tblspace=res[0][1] if res[0][1] else '',
storage_params=res[0][2] if res[0][2] else [],
)
return True
else:
self.exists = False
return False
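# Name-resolution sketch (hypothetical values): 'acme.foo' is split into
# schema 'acme' and table 'foo'; a bare name like 'foo' falls back to the
# 'public' schema. Only the last two dot-separated parts are considered.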
def create(self, columns='', params='', tblspace='',
unlogged=False, owner=''):
"""
Create table.
If table exists, check passed args (params, tblspace, owner) and,
if they're different from current, change them.
Arguments:
params - storage params (passed by "WITH (...)" in SQL),
comma separated.
tblspace - tablespace.
owner - table owner.
unlogged - create unlogged table.
columns - column string (comma separated).
"""
name = pg_quote_identifier(self.name, 'table')
changed = False
if self.exists:
if tblspace == 'pg_default' and not self.info['tblspace']:
pass # Because they have the same meaning
elif tblspace and self.info['tblspace'] != tblspace:
self.set_tblspace(tblspace)
changed = True
if owner and self.info['owner'] != owner:
self.set_owner(owner)
changed = True
if params:
param_list = [p.strip(' ') for p in params.split(',')]
new_param = False
for p in param_list:
if p not in self.info['storage_params']:
new_param = True
if new_param:
self.set_stor_params(params)
changed = True
if changed:
return True
return False
query = "CREATE"
if unlogged:
query += " UNLOGGED TABLE %s" % name
else:
query += " TABLE %s" % name
if columns:
query += " (%s)" % columns
else:
query += " ()"
if params:
query += " WITH (%s)" % params
if tblspace:
query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
if exec_sql(self, query, ddl=True):
changed = True
if owner:
changed = self.set_owner(owner)
return changed
def create_like(self, src_table, including='', tblspace='',
unlogged=False, params='', owner=''):
"""
Create table like another table (with similar DDL).
Arguments:
src_table - source table.
including - corresponds to optional INCLUDING expression
in CREATE TABLE ... LIKE statement.
params - storage params (passed by "WITH (...)" in SQL),
comma separated.
tblspace - tablespace.
owner - table owner.
unlogged - create unlogged table.
"""
changed = False
name = pg_quote_identifier(self.name, 'table')
query = "CREATE"
if unlogged:
query += " UNLOGGED TABLE %s" % name
else:
query += " TABLE %s" % name
query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
if including:
including = including.split(',')
for i in including:
query += " INCLUDING %s" % i
query += ')'
if params:
query += " WITH (%s)" % params
if tblspace:
query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
if exec_sql(self, query, ddl=True):
changed = True
if owner:
changed = self.set_owner(owner)
return changed
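# Assembly sketch (hypothetical values): for a Table named 'tbl2',
#   create_like('tbl1', including='comments,indexes')
# builds roughly:
#   CREATE TABLE "tbl2" (LIKE "tbl1" INCLUDING comments INCLUDING indexes)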
def truncate(self):
query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
return exec_sql(self, query, ddl=True)
def rename(self, newname):
query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
pg_quote_identifier(newname, 'table'))
return exec_sql(self, query, ddl=True)
def set_owner(self, username):
query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'),
pg_quote_identifier(username, 'role'))
return exec_sql(self, query, ddl=True)
def drop(self, cascade=False):
if not self.exists:
return False
query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
if cascade:
query += " CASCADE"
return exec_sql(self, query, ddl=True)
def set_tblspace(self, tblspace):
query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'),
pg_quote_identifier(tblspace, 'database'))
return exec_sql(self, query, ddl=True)
def set_stor_params(self, params):
query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
return exec_sql(self, query, ddl=True)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
table=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
db=dict(type='str', default='', aliases=['login_db']),
tablespace=dict(type='str'),
owner=dict(type='str'),
unlogged=dict(type='bool', default=False),
like=dict(type='str'),
including=dict(type='str'),
rename=dict(type='str'),
truncate=dict(type='bool', default=False),
columns=dict(type='list', elements='str'),
storage_params=dict(type='list', elements='str'),
session_role=dict(type='str'),
cascade=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
table = module.params["table"]
state = module.params["state"]
tablespace = module.params["tablespace"]
owner = module.params["owner"]
unlogged = module.params["unlogged"]
like = module.params["like"]
including = module.params["including"]
newname = module.params["rename"]
storage_params = module.params["storage_params"]
truncate = module.params["truncate"]
columns = module.params["columns"]
cascade = module.params["cascade"]
if state == 'present' and cascade:
module.warn("cascade=true is ignored when state=present")
# Check mutual exclusive parameters:
if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
module.fail_json(msg="%s: state=absent is mutually exclusive with: "
"truncate, rename, columns, tablespace, "
"including, like, storage_params, unlogged, owner" % table)
if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
module.fail_json(msg="%s: truncate is mutually exclusive with: "
"rename, columns, like, unlogged, including, "
"storage_params, owner, tablespace" % table)
if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
module.fail_json(msg="%s: rename is mutually exclusive with: "
"columns, like, unlogged, including, "
"storage_params, owner, tablespace" % table)
if like and columns:
module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
if including and not like:
module.fail_json(msg="%s: including param needs like param specified" % table)
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
if storage_params:
storage_params = ','.join(storage_params)
if columns:
columns = ','.join(columns)
##############
# Do main job:
table_obj = Table(table, module, cursor)
# Set default returned values:
changed = False
kw = {}
kw['table'] = table
kw['state'] = ''
if table_obj.exists:
kw = dict(
table=table,
state='present',
owner=table_obj.info['owner'],
tablespace=table_obj.info['tblspace'],
storage_params=table_obj.info['storage_params'],
)
if state == 'absent':
changed = table_obj.drop(cascade=cascade)
elif truncate:
changed = table_obj.truncate()
elif newname:
changed = table_obj.rename(newname)
q = table_obj.executed_queries
table_obj = Table(newname, module, cursor)
table_obj.executed_queries = q
elif state == 'present' and not like:
changed = table_obj.create(columns, storage_params,
tablespace, unlogged, owner)
elif state == 'present' and like:
changed = table_obj.create_like(like, including, tablespace,
unlogged, storage_params)
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
# Refresh table info for RETURN.
# Note, if table has been renamed, it gets info by newname:
table_obj.get_info()
db_connection.commit()
if table_obj.exists:
kw = dict(
table=table,
state='present',
owner=table_obj.info['owner'],
tablespace=table_obj.info['tblspace'],
storage_params=table_obj.info['storage_params'],
)
else:
# We just change the table state here
# to keep other information about the dropped table:
kw['state'] = 'absent'
kw['queries'] = table_obj.executed_queries
kw['changed'] = changed
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,520 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_tablespace
short_description: Add or remove PostgreSQL tablespaces from remote hosts
description:
- Adds or removes PostgreSQL tablespaces from remote hosts.
options:
tablespace:
description:
- Name of the tablespace to add or remove.
required: true
type: str
aliases:
- name
location:
description:
- Path to the tablespace directory in the file system.
- Ensure that the location exists and has the right privileges.
type: path
aliases:
- path
state:
description:
- Tablespace state.
- I(state=present) implies the tablespace must be created if it doesn't exist.
- I(state=absent) implies the tablespace must be removed if present.
I(state=absent) is mutually exclusive with I(location), I(owner), and I(set).
- See the Notes section for information about check mode restrictions.
type: str
default: present
choices: [ absent, present ]
owner:
description:
- Name of the role to set as an owner of the tablespace.
- If this option is not specified, the tablespace owner is a role that creates the tablespace.
type: str
set:
description:
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
- When C(reset) is passed as an option's value, the option will be removed if it was set previously.
type: dict
rename_to:
description:
- New name of the tablespace.
- The new name cannot begin with pg_, as such names are reserved for system tablespaces.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
notes:
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
can not be run inside the transaction block.
seealso:
- name: PostgreSQL tablespaces
description: General information about PostgreSQL tablespaces.
link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
- name: CREATE TABLESPACE reference
description: Complete reference of the CREATE TABLESPACE command documentation.
link: https://www.postgresql.org/docs/current/sql-createtablespace.html
- name: ALTER TABLESPACE reference
description: Complete reference of the ALTER TABLESPACE command documentation.
link: https://www.postgresql.org/docs/current/sql-altertablespace.html
- name: DROP TABLESPACE reference
description: Complete reference of the DROP TABLESPACE command documentation.
link: https://www.postgresql.org/docs/current/sql-droptablespace.html
author:
- Flavien Chantelot (@Dorn-)
- Antoine Levy-Lambert (@antoinell)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Create a new tablespace called acme and set bob as its owner
postgresql_tablespace:
name: acme
owner: bob
location: /data/foo
- name: Create a new tablespace called bar with tablespace options
postgresql_tablespace:
name: bar
set:
random_page_cost: 1
seq_page_cost: 1
- name: Reset random_page_cost option
postgresql_tablespace:
name: bar
set:
random_page_cost: reset
- name: Rename the tablespace from bar to pcie_ssd
postgresql_tablespace:
name: bar
rename_to: pcie_ssd
- name: Drop tablespace called bloat
postgresql_tablespace:
name: bloat
state: absent
'''
RETURN = r'''
queries:
description: List of queries that the module tried to execute.
returned: always
type: list
sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
tablespace:
description: Tablespace name.
returned: always
type: str
sample: 'ssd'
owner:
description: Tablespace owner.
returned: always
type: str
sample: 'Bob'
options:
description: Tablespace options.
returned: always
type: dict
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
location:
description: Path to the tablespace in the file system.
returned: always
type: str
sample: '/incredible/fast/ssd'
newname:
description: New tablespace name
returned: if existent
type: str
sample: new_ssd
state:
description: Tablespace state at the end of execution.
returned: always
type: str
sample: 'present'
'''
try:
from psycopg2 import __version__ as PSYCOPG2_VERSION
from psycopg2.extras import DictCursor
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
class PgTablespace(object):
"""Class for working with PostgreSQL tablespaces.
Args:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
name (str) -- name of the tablespace
Attrs:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
name (str) -- name of the tablespace
exists (bool) -- flag indicating whether the tablespace exists in the DB
owner (str) -- tablespace owner
location (str) -- path to the tablespace directory in the file system
executed_queries (list) -- list of executed queries
new_name (str) -- new name for the tablespace
opt_not_supported (bool) -- flag indicating that tablespace options are not supported by the server
"""
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.exists = False
self.owner = ''
self.settings = {}
self.location = ''
self.executed_queries = []
self.new_name = ''
self.opt_not_supported = False
# Collect info:
self.get_info()
def get_info(self):
"""Get tablespace information."""
# Check that spcoptions exists:
opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spcoptions'", add_to_executed=False)
# For 9.1 version and earlier:
location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spclocation'", add_to_executed=False)
if location:
location = 'spclocation'
else:
location = 'pg_tablespace_location(t.oid)'
if not opt:
self.opt_not_supported = True
query = ("SELECT r.rolname, (SELECT Null), %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid " % location)
else:
query = ("SELECT r.rolname, t.spcoptions, %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid " % location)
res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
query_params={'name': self.name}, add_to_executed=False)
if not res:
self.exists = False
return False
if res[0][0]:
self.exists = True
self.owner = res[0][0]
if res[0][1]:
# Options exist:
for i in res[0][1]:
i = i.split('=')
self.settings[i[0]] = i[1]
if res[0][2]:
# Location exists:
self.location = res[0][2]
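# Parsing sketch (hypothetical values): spcoptions such as
#   ['random_page_cost=1', 'seq_page_cost=1']
# end up in self.settings as {'random_page_cost': '1', 'seq_page_cost': '1'};
# values stay strings, which is why set_settings() compares via str().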
def create(self, location):
"""Create tablespace.
Return True on success, otherwise return False.
args:
location (str) -- tablespace directory path in the FS
"""
query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location))
return exec_sql(self, query, ddl=True)
def drop(self):
"""Drop tablespace.
Return True on success, otherwise return False.
"""
return exec_sql(self, "DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True)
def set_owner(self, new_owner):
"""Set tablespace owner.
Return True on success, otherwise return False.
args:
new_owner (str) -- name of a new owner for the tablespace
"""
if new_owner == self.owner:
return False
query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner)
return exec_sql(self, query, ddl=True)
def rename(self, newname):
"""Rename tablespace.
Return True on success, otherwise return False.
args:
newname (str) -- new name for the tablespace
"""
query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname)
self.new_name = newname
return exec_sql(self, query, ddl=True)
def set_settings(self, new_settings):
"""Set tablespace settings (options).
If some setting has been changed, set changed = True.
After the whole settings list has been handled, return changed.
args:
new_settings (list) -- list of new settings
"""
# settings must be a dict {'key': 'value'}
if self.opt_not_supported:
return False
changed = False
# Apply new settings:
for i in new_settings:
if new_settings[i] == 'reset':
if i in self.settings:
changed = self.__reset_setting(i)
self.settings[i] = None
elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
return changed
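# Behaviour sketch (hypothetical values): with self.settings == {'random_page_cost': '1'},
#   set_settings({'random_page_cost': 'reset'})  issues ALTER TABLESPACE ... RESET (random_page_cost)
#   set_settings({'seq_page_cost': 2})           issues ALTER TABLESPACE ... SET (seq_page_cost = '2')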
def __reset_setting(self, setting):
"""Reset tablespace setting.
Return True on success, otherwise return False.
args:
setting (str) -- string in format "setting_name = 'setting_value'"
"""
query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return exec_sql(self, query, ddl=True)
def __set_setting(self, setting):
"""Set tablespace setting.
Return True on success, otherwise return False.
args:
setting (str) -- string in format "setting_name = 'setting_value'"
"""
query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return exec_sql(self, query, ddl=True)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
tablespace=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
location=dict(type='path', aliases=['path']),
owner=dict(type='str'),
set=dict(type='dict'),
rename_to=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
tablespace = module.params["tablespace"]
state = module.params["state"]
location = module.params["location"]
owner = module.params["owner"]
rename_to = module.params["rename_to"]
settings = module.params["set"]
if state == 'absent' and (location or owner or rename_to or settings):
module.fail_json(msg="state=absent is mutually exclusive location, "
"owner, rename_to, and set")
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Change autocommit to False if check_mode:
if module.check_mode:
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=False)
else:
db_connection.set_isolation_level(READ_COMMITTED)
# Set defaults:
autocommit = False
changed = False
##############
# Create PgTablespace object and do main job:
tblspace = PgTablespace(module, cursor, tablespace)
# If tablespace exists with different location, exit:
if tblspace.exists and location and location != tblspace.location:
module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location))
# Create new tablespace:
if not tblspace.exists and state == 'present':
if rename_to:
module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
if not location:
module.fail_json(msg="'location' parameter must be passed with "
"state=present if the tablespace doesn't exist")
# Because CREATE TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.create(location)
# Drop non-existing tablespace:
elif not tblspace.exists and state == 'absent':
# Nothing to do:
module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
# Drop existing tablespace:
elif tblspace.exists and state == 'absent':
# Because DROP TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.drop()
# Rename tablespace:
elif tblspace.exists and rename_to:
if tblspace.name != rename_to:
changed = tblspace.rename(rename_to)
if state == 'present':
# Refresh information:
tblspace.get_info()
# Change owner and settings:
if state == 'present' and tblspace.exists:
if owner:
changed = tblspace.set_owner(owner)
if settings:
changed = tblspace.set_settings(settings)
tblspace.get_info()
# Rollback if it's possible and check_mode:
if not autocommit:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
tablespace=tblspace.name,
owner=tblspace.owner,
queries=tblspace.executed_queries,
options=tblspace.settings,
location=tblspace.location,
)
if state == 'present':
kw['state'] = 'present'
if tblspace.new_name:
kw['newname'] = tblspace.new_name
elif state == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()

View file

@@ -0,0 +1,919 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_user
short_description: Add or remove a user (role) from a PostgreSQL server instance
description:
- Adds or removes a user (role) from a PostgreSQL server instance
("cluster" in PostgreSQL terminology) and, optionally,
grants the user access to an existing database or tables.
- A user is a role with login privilege.
- The fundamental function of the module is to create, or delete, users from
a PostgreSQL instance. Privilege assignment, or removal, is an optional
step, which works on one database at a time. This allows the module to
be called several times in the same playbook to modify the permissions on
different databases, or to grant permissions to already existing users.
- A user cannot be removed until all the privileges have been stripped from
the user. In such a situation, if the module tries to remove the user it
will fail. To prevent this, the fail_on_user option signals
the module to try to remove the user, but to keep going if that is not possible; the
module will report whether changes happened and, separately, whether the user was
removed or not.
options:
name:
description:
- Name of the user (role) to add or remove.
type: str
required: true
aliases:
- user
password:
description:
- Set the user's password; before Ansible 1.4 this was required.
- Password can be passed unhashed or hashed (MD5-hashed).
- An unhashed password will automatically be hashed when saved into the
database if the C(encrypted) parameter is set, otherwise it will be saved in
plain text format.
- When passing a hashed password it must be generated with the format
C('str["md5"] + md5[ password + username ]'), resulting in a total of
35 characters. An easy way to do this is C(echo "md5$(echo -n
'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
- Note that if the provided password string is already in MD5-hashed
format, then it is used as-is, regardless of C(encrypted) parameter.
type: str
db:
description:
- Name of database to connect to and where user's permissions will be granted.
type: str
aliases:
- login_db
fail_on_user:
description:
- If C(yes), fail when user (role) can't be removed. Otherwise just log and continue.
default: 'yes'
type: bool
aliases:
- fail_on_role
priv:
description:
- "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
privileges can be defined for database ( allowed options - 'CREATE',
'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
C(table:SELECT) ). Mixed example of this string:
C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
type: str
role_attr_flags:
description:
- "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
- Note that '[NO]CREATEUSER' is deprecated.
- To create a simple role for using it like a group, use C(NOLOGIN) flag.
type: str
choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
'[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
session_role:
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
state:
description:
- The user (role) state.
type: str
default: present
choices: [ absent, present ]
encrypted:
description:
- Whether the password is stored hashed in the database.
- Passwords can be passed already hashed or unhashed, and postgresql
ensures the stored password is hashed when C(encrypted) is set.
- "Note: Postgresql 10 and newer doesn't support unhashed passwords."
- Previous to Ansible 2.6, this was C(no) by default.
default: 'yes'
type: bool
expires:
description:
- The date at which the user's password is to expire.
- If set to C('infinity'), the user's password never expires.
- Note that this value should be a valid SQL date and time type.
type: str
no_password_changes:
description:
- If C(yes), don't inspect database for password changes. Effective when
C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make
password changes as necessary.
default: 'no'
type: bool
conn_limit:
description:
- Specifies the user (role) connection limit.
type: int
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
groups:
description:
- The list of groups (roles) that need to be granted to the user.
type: list
elements: str
comment:
description:
- Add a comment on the user (equal to the COMMENT ON ROLE statement result).
type: str
notes:
- The module creates a user (role) with login privilege by default.
Use NOLOGIN role_attr_flags to change this behaviour.
- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
You may not specify password or role_attr_flags when the PUBLIC user is specified.
seealso:
- module: postgresql_privs
- module: postgresql_membership
- module: postgresql_owner
- name: PostgreSQL database roles
description: Complete reference of the PostgreSQL database roles documentation.
link: https://www.postgresql.org/docs/current/user-manag.html
author:
- Ansible Core Team
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Connect to acme database, create django user, and grant access to database and products table
postgresql_user:
db: acme
name: django
password: ceec4eif7ya
priv: "CONNECT/products:ALL"
expires: "Jan 31 2020"
- name: Add a comment on django user
postgresql_user:
db: acme
name: django
comment: This is a test user
# Connect to default database, create rails user, set its password (MD5-hashed),
# and grant privilege to create other databases and demote rails from super user status if user exists
- name: Create rails user, set MD5-hashed password, grant privs
postgresql_user:
name: rails
password: md59543f1d82624df2b31672ec0f7050460
role_attr_flags: CREATEDB,NOSUPERUSER
- name: Connect to acme database and remove test user privileges from there
postgresql_user:
db: acme
name: test
priv: "ALL/products:ALL"
state: absent
fail_on_user: no
- name: Connect to test database, remove test user from cluster
postgresql_user:
db: test
name: test
priv: ALL
state: absent
- name: Connect to acme database and set user's password with no expire date
postgresql_user:
db: acme
name: django
password: mysupersecretword
priv: "CONNECT/products:ALL"
expires: infinity
# Example privileges string format
# INSERT,UPDATE/table:SELECT/anothertable:ALL
- name: Connect to test database and remove an existing user's password
postgresql_user:
db: test
user: test
password: ""
- name: Create user test and grant group user_ro and user_rw to it
postgresql_user:
name: test
groups:
- user_ro
- user_rw
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
version_added: '2.8'
'''
import itertools
import re
import traceback
from hashlib import md5
try:
import psycopg2
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.database import pg_quote_identifier, SQLParseError
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
get_conn_params,
PgMembership,
postgres_common_argument_spec,
)
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six import iteritems
FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
database=frozenset(
('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
)
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
executed_queries = []
class InvalidFlagsError(Exception):
pass
class InvalidPrivsError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def user_exists(cursor, user):
# The PUBLIC user is a special case that is always there
if user == 'PUBLIC':
return True
query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
cursor.execute(query, {'user': user})
return cursor.rowcount > 0
def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
"""Create a new database user (role)."""
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
# literal
query_password_data = dict(password=password, expires=expires)
query = ['CREATE USER "%(user)s"' %
{"user": user}]
if password is not None and password != '':
query.append("WITH %(crypt)s" % {"crypt": encrypted})
query.append("PASSWORD %(password)s")
if expires is not None:
query.append("VALID UNTIL %(expires)s")
if conn_limit is not None:
query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query.append(role_attr_flags)
query = ' '.join(query)
executed_queries.append(query)
cursor.execute(query, query_password_data)
return True
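# Illustrative only: for user='rails' with a password, encrypted='ENCRYPTED',
# an expiry and role_attr_flags='CREATEDB', the statement assembled above is
#   CREATE USER "rails" WITH ENCRYPTED PASSWORD %(password)s VALID UNTIL %(expires)s CREATEDB
# with password and expires passed separately as query parameters.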
def user_should_we_change_password(current_role_attrs, user, password, encrypted):
"""Check if we should change the user's password.
Compare the proposed password with the existing one, comparing
hashes if encrypted. If we can't access it, assume yes.
"""
if current_role_attrs is None:
# on some databases, E.g. AWS RDS instances, there is no access to
# the pg_authid relation to check the pre-existing password, so we
# just assume password is different
return True
# Do we actually need to do anything?
pwchanging = False
if password is not None:
# Empty password means that the role shouldn't have a password, which
# means we need to check if the current password is None.
if password == '':
if current_role_attrs['rolpassword'] is not None:
pwchanging = True
# 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
# 3: The size of the 'md5' prefix
# When the provided password looks like an MD5 hash, the value of
# 'encrypted' is ignored.
elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
if password != current_role_attrs['rolpassword']:
pwchanging = True
elif encrypted == 'ENCRYPTED':
hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
if hashed_password != current_role_attrs['rolpassword']:
pwchanging = True
return pwchanging
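# A minimal, illustrative helper (not used by the module) showing how the
# MD5 role password compared above is derived: PostgreSQL stores
# md5(password + username), prefixed with the literal string 'md5'.
def _demo_md5_role_password(password, user):
    """E.g. _demo_md5_role_password('secret', 'rails') -> 'md5' + 32 hex digits."""
    return 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())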
def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
"""Change user password and/or attributes. Return True if changed, False otherwise."""
changed = False
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
# literal
if user == 'PUBLIC':
if password is not None:
module.fail_json(msg="cannot change the password for PUBLIC user")
elif role_attr_flags != '':
module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
else:
return False
# Handle passwords.
if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
# Select password and all flag-like columns in order to verify changes.
try:
select = "SELECT * FROM pg_authid where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
except psycopg2.ProgrammingError:
current_role_attrs = None
db_connection.rollback()
pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
if current_role_attrs is None:
try:
# AWS RDS instances do not allow users to access pg_authid,
# so try to get current_role_attrs from the pg_roles table
select = "SELECT * FROM pg_roles where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes from pg_roles
current_role_attrs = cursor.fetchone()
except psycopg2.ProgrammingError as e:
db_connection.rollback()
module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
if expires is not None:
cursor.execute("SELECT %s::timestamptz;", (expires,))
expires_with_tz = cursor.fetchone()[0]
expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
else:
expires_changing = False
conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
return False
alter = ['ALTER USER "%(user)s"' % {"user": user}]
if pwchanging:
if password != '':
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
else:
alter.append("WITH PASSWORD NULL")
alter.append(role_attr_flags)
elif role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
if expires is not None:
alter.append("VALID UNTIL %(expires)s")
if conn_limit is not None:
alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query_password_data = dict(password=password, expires=expires)
try:
cursor.execute(' '.join(alter), query_password_data)
changed = True
except psycopg2.InternalError as e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
return changed
else:
raise psycopg2.InternalError(e)
except psycopg2.NotSupportedError as e:
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
elif no_password_changes and role_attr_flags != '':
# Grab role information from pg_roles instead of pg_authid
select = "SELECT * FROM pg_roles where rolname=%(user)s"
cursor.execute(select, {"user": user})
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
if not role_attr_flags_changing:
return False
alter = ['ALTER USER "%(user)s"' %
{"user": user}]
if role_attr_flags:
alter.append('WITH %s' % role_attr_flags)
try:
cursor.execute(' '.join(alter))
except psycopg2.InternalError as e:
if e.pgcode == '25006':
# Handle errors due to read-only transactions indicated by pgcode 25006
# ERROR: cannot execute ALTER ROLE in a read-only transaction
changed = False
module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
return changed
else:
raise psycopg2.InternalError(e)
# Grab new role attributes.
cursor.execute(select, {"user": user})
new_role_attrs = cursor.fetchone()
# Detect any differences between current_ and new_role_attrs.
changed = current_role_attrs != new_role_attrs
return changed
def user_delete(cursor, user):
"""Try to remove a user. Returns True if successful otherwise False"""
cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
try:
query = 'DROP USER "%s"' % user
executed_queries.append(query)
cursor.execute(query)
except Exception:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return True
def has_table_privileges(cursor, user, table, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_table_privileges(cursor, user, table)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def get_table_privileges(cursor, user, table):
if '.' in table:
schema, table = table.split('.', 1)
else:
schema = 'public'
query = ("SELECT privilege_type FROM information_schema.role_table_grants "
"WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
return frozenset([x[0] for x in cursor.fetchall()])
def grant_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'GRANT %s ON TABLE %s TO "%s"' % (
privs, pg_quote_identifier(table, 'table'), user)
executed_queries.append(query)
cursor.execute(query)
def revoke_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
privs, pg_quote_identifier(table, 'table'), user)
executed_queries.append(query)
cursor.execute(query)
def get_database_privileges(cursor, user, db):
priv_map = {
'C': 'CREATE',
'T': 'TEMPORARY',
'c': 'CONNECT',
}
query = 'SELECT datacl FROM pg_database WHERE datname = %s'
cursor.execute(query, (db,))
datacl = cursor.fetchone()[0]
if datacl is None:
return set()
r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
if r is None:
return set()
o = set()
for v in r.group(1):
o.add(priv_map[v])
return normalize_privileges(o, 'database')
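# A small, illustrative sketch (not called by the module) of the datacl
# parsing above: pg_database.datacl holds aclitem entries such as
# '{=Tc/postgres,django=CTc/postgres}', and the regular expression pulls out
# the single-letter grants for one role.
def _demo_parse_datacl(user='django', datacl='{=Tc/postgres,django=CTc/postgres}'):
    priv_map = {'C': 'CREATE', 'T': 'TEMPORARY', 'c': 'CONNECT'}
    match = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
    if match is None:
        return set()
    # With the defaults above this returns set(['CREATE', 'TEMPORARY', 'CONNECT'])
    return set(priv_map[flag] for flag in match.group(1))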
def has_database_privileges(cursor, user, db, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_database_privileges(cursor, user, db)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def grant_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE %s TO "%s"' % (
privs, pg_quote_identifier(db, 'database'), user)
executed_queries.append(query)
cursor.execute(query)
def revoke_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
privs, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
privs, pg_quote_identifier(db, 'database'), user)
executed_queries.append(query)
cursor.execute(query)
def revoke_privileges(cursor, user, privs):
if privs is None:
return False
revoke_funcs = dict(table=revoke_table_privileges,
database=revoke_database_privileges)
check_funcs = dict(table=has_table_privileges,
database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[0]:
revoke_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def grant_privileges(cursor, user, privs):
if privs is None:
return False
grant_funcs = dict(table=grant_table_privileges,
database=grant_database_privileges)
check_funcs = dict(table=has_table_privileges,
database=has_database_privileges)
changed = False
for type_ in privs:
for name, privileges in iteritems(privs[type_]):
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[2]:
grant_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def parse_role_attrs(cursor, role_attr_flags):
"""
Parse role attributes string for user creation.
Format:
attributes[,attributes,...]
Where:
attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
[ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
"[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
"[NO]BYPASSRLS" ]
Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
Note: "[NO]CREATEUSER" role attribute is deprecated.
"""
flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
if not flags.issubset(valid_flags):
raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
' '.join(flags.difference(valid_flags)))
return ' '.join(flags)
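# Example (illustrative): parse_role_attrs(cursor, 'createdb,nosuperuser')
# returns the upper-cased flags joined by spaces (order unspecified), e.g.
# 'CREATEDB NOSUPERUSER', while an unknown flag raises InvalidFlagsError.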
def normalize_privileges(privs, type_):
new_privs = set(privs)
if 'ALL' in new_privs:
new_privs.update(VALID_PRIVS[type_])
new_privs.remove('ALL')
if 'TEMP' in new_privs:
new_privs.add('TEMPORARY')
new_privs.remove('TEMP')
return new_privs
def parse_privs(privs, db):
"""
Parse privilege string to determine permissions for database db.
Format:
privileges[/privileges/...]
Where:
privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
"""
if privs is None:
return privs
o_privs = {
'database': {},
'table': {}
}
for token in privs.split('/'):
if ':' not in token:
type_ = 'database'
name = db
priv_set = frozenset(x.strip().upper()
for x in token.split(',') if x.strip())
else:
type_ = 'table'
name, privileges = token.split(':', 1)
priv_set = frozenset(x.strip().upper()
for x in privileges.split(',') if x.strip())
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
priv_set = normalize_privileges(priv_set, type_)
o_privs[type_][name] = priv_set
return o_privs
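# Example (illustrative): with db='acme',
# parse_privs('CONNECT/products:SELECT,INSERT', 'acme') yields
#   {'database': {'acme': set(['CONNECT'])},
#    'table': {'products': set(['SELECT', 'INSERT'])}}
# and 'ALL' is expanded by normalize_privileges() into the full privilege set
# for the object type.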
def get_valid_flags_by_version(cursor):
"""
Some role attributes were introduced after certain versions. We want to
compile a list of valid flags against the current Postgres version.
"""
current_version = cursor.connection.server_version
return [
flag
for flag, version_introduced in FLAGS_BY_VERSION.items()
if current_version >= version_introduced
]
def get_comment(cursor, user):
"""Get user's comment."""
query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
"FROM pg_catalog.pg_roles r "
"WHERE r.rolname = %(user)s")
cursor.execute(query, {'user': user})
return cursor.fetchone()[0]
def add_comment(cursor, user, comment):
"""Add comment on user."""
if comment != get_comment(cursor, user):
query = 'COMMENT ON ROLE "%s" IS ' % user
cursor.execute(query + '%(comment)s', {'comment': comment})
executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
return True
else:
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
user=dict(type='str', required=True, aliases=['name']),
password=dict(type='str', default=None, no_log=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
priv=dict(type='str', default=None),
db=dict(type='str', default='', aliases=['login_db']),
fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']),
role_attr_flags=dict(type='str', default=''),
encrypted=dict(type='bool', default='yes'),
no_password_changes=dict(type='bool', default='no'),
expires=dict(type='str', default=None),
conn_limit=dict(type='int', default=None),
session_role=dict(type='str'),
groups=dict(type='list', elements='str'),
comment=dict(type='str', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
user = module.params["user"]
password = module.params["password"]
state = module.params["state"]
fail_on_user = module.params["fail_on_user"]
if module.params['db'] == '' and module.params["priv"] is not None:
module.fail_json(msg="privileges require a database to be specified")
privs = parse_privs(module.params["priv"], module.params["db"])
no_password_changes = module.params["no_password_changes"]
if module.params["encrypted"]:
encrypted = "ENCRYPTED"
else:
encrypted = "UNENCRYPTED"
expires = module.params["expires"]
conn_limit = module.params["conn_limit"]
role_attr_flags = module.params["role_attr_flags"]
groups = module.params["groups"]
if groups:
groups = [e.strip() for e in groups]
comment = module.params["comment"]
conn_params = get_conn_params(module, module.params, warn_db_default=False)
db_connection = connect_to_db(module, conn_params)
cursor = db_connection.cursor(cursor_factory=DictCursor)
try:
role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
except InvalidFlagsError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
kw = dict(user=user)
changed = False
user_removed = False
if state == "present":
if user_exists(cursor, user):
try:
changed = user_alter(db_connection, module, user, password,
role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
else:
try:
changed = user_add(cursor, user, password,
role_attr_flags, encrypted, expires, conn_limit)
except psycopg2.ProgrammingError as e:
module.fail_json(msg="Unable to add user with given requirement "
"due to : %s" % to_native(e),
exception=traceback.format_exc())
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
try:
changed = grant_privileges(cursor, user, privs) or changed
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if groups:
target_roles = []
target_roles.append(user)
pg_membership = PgMembership(module, cursor, groups, target_roles)
changed = pg_membership.grant() or changed
executed_queries.extend(pg_membership.executed_queries)
if comment is not None:
try:
changed = add_comment(cursor, user, comment) or changed
except Exception as e:
module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
exception=traceback.format_exc())
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:
try:
changed = revoke_privileges(cursor, user, privs)
user_removed = user_delete(cursor, user)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
changed = changed or user_removed
if fail_on_user and not user_removed:
msg = "Unable to remove user"
module.fail_json(msg=msg)
kw['user_removed'] = user_removed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
kw['queries'] = executed_queries
module.exit_json(**kw)
if __name__ == '__main__':
main()


@ -0,0 +1,335 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_user_obj_stat_info
short_description: Gather statistics about PostgreSQL user objects
description:
- Gathers statistics about PostgreSQL user objects.
options:
filter:
description:
- Limit the collected information by a comma-separated string or YAML list.
- Allowable values are C(functions), C(indexes), C(tables).
- By default, collects all subsets.
- Unsupported values are ignored.
type: list
elements: str
schema:
description:
- Restrict the output to a certain schema.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
notes:
- C(size) and C(total_size) returned values are presented in bytes.
- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
seealso:
- module: postgresql_info
- module: postgresql_ping
- name: PostgreSQL statistics collector reference
description: Complete reference of the PostgreSQL statistics collector documentation.
link: https://www.postgresql.org/docs/current/monitoring-stats.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment:
- community.general.postgres
'''
EXAMPLES = r'''
- name: Collect information about all supported user objects of the acme database
postgresql_user_obj_stat_info:
db: acme
- name: Collect information about all supported user objects in the custom schema of the acme database
postgresql_user_obj_stat_info:
db: acme
schema: custom
- name: Collect information about user tables and indexes in the acme database
postgresql_user_obj_stat_info:
db: acme
filter: tables, indexes
'''
RETURN = r'''
indexes:
description: User index statistics
returned: always
type: dict
sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
tables:
description: User table statistics.
returned: always
type: dict
sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
functions:
description: User function statistics.
returned: always
type: dict
sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgUserObjStatInfo():
"""Class to collect information about PostgreSQL user objects.
Args:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
Attributes:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
executed_queries (list): List of executed queries.
info (dict): Statistics dictionary.
obj_func_mapping (dict): Mapping of object types to corresponding functions.
schema (str): Name of a schema to restrict stat collecting.
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.info = {
'functions': {},
'indexes': {},
'tables': {},
}
self.obj_func_mapping = {
'functions': self.get_func_stat,
'indexes': self.get_idx_stat,
'tables': self.get_tbl_stat,
}
self.schema = None
def collect(self, filter_=None, schema=None):
"""Collect statistics information of user objects.
Kwargs:
filter_ (list): List of subsets which need to be collected.
schema (str): Restrict stat collecting by certain schema.
Returns:
``self.info``.
"""
if schema:
self.set_schema(schema)
if filter_:
for obj_type in filter_:
obj_type = obj_type.strip()
obj_func = self.obj_func_mapping.get(obj_type)
if obj_func is not None:
obj_func()
else:
self.module.warn("Unknown filter option '%s'" % obj_type)
else:
for obj_func in self.obj_func_mapping.values():
obj_func()
return self.info
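# Example (illustrative): collect(filter_=['tables', 'indexes'], schema='public')
# runs only get_tbl_stat() and get_idx_stat(), restricted to the 'public'
# schema; unknown filter values are warned about and skipped.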
def get_func_stat(self):
"""Get function statistics and fill out self.info dictionary."""
if not self.schema:
query = "SELECT * FROM pg_stat_user_functions"
result = exec_sql(self, query, add_to_executed=False)
else:
query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='functions',
schema_key='schemaname',
name_key='funcname')
def get_idx_stat(self):
"""Get index statistics and fill out self.info dictionary."""
if not self.schema:
query = "SELECT * FROM pg_stat_user_indexes"
result = exec_sql(self, query, add_to_executed=False)
else:
query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='indexes',
schema_key='schemaname',
name_key='indexrelname')
def get_tbl_stat(self):
"""Get table statistics and fill out self.info dictionary."""
if not self.schema:
query = "SELECT * FROM pg_stat_user_tables"
result = exec_sql(self, query, add_to_executed=False)
else:
query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='tables',
schema_key='schemaname',
name_key='relname')
def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
# Convert result to a list of dicts for easier handling:
result = [dict(row) for row in result]
for elem in result:
# Add schema name as a key if not present:
if not self.info[info_key].get(elem[schema_key]):
self.info[info_key][elem[schema_key]] = {}
# Add object name key as a subkey
# (they must be unique within a schema, so no additional checks are needed):
self.info[info_key][elem[schema_key]][elem[name_key]] = {}
# Add the remaining attributes to the object's entry:
for key, val in iteritems(elem):
if key not in (schema_key, name_key):
self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
if info_key in ('tables', 'indexes'):
relname = elem[name_key]
schemaname = elem[schema_key]
if not self.schema:
result = exec_sql(self, "SELECT pg_relation_size ('%s.%s')" % (schemaname, relname),
add_to_executed=False)
else:
relname = '%s.%s' % (self.schema, relname)
result = exec_sql(self, "SELECT pg_relation_size (%s)",
query_params=(relname,),
add_to_executed=False)
self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
if info_key == 'tables':
relname = elem[name_key]
schemaname = elem[schema_key]
if not self.schema:
result = exec_sql(self, "SELECT pg_total_relation_size ('%s.%s')" % (schemaname, relname),
add_to_executed=False)
else:
relname = '%s.%s' % (self.schema, relname)
result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
query_params=(relname,),
add_to_executed=False)
self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
def set_schema(self, schema):
"""If schema exists, sets self.schema, otherwise fails."""
query = ("SELECT 1 FROM information_schema.schemata "
"WHERE schema_name = %s")
result = exec_sql(self, query, query_params=(schema,),
add_to_executed=False)
if result and result[0][0]:
self.schema = schema
else:
self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
filter=dict(type='list', elements='str'),
session_role=dict(type='str'),
schema=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
filter_ = module.params["filter"]
schema = module.params["schema"]
# Connect to DB and make cursor object:
pg_conn_params = get_conn_params(module, module.params)
# We don't need to commit anything, so, set it to False:
db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
############################
# Create object and do work:
pg_obj_info = PgUserObjStatInfo(module, cursor)
info_dict = pg_obj_info.collect(filter_, schema)
# Clean up:
cursor.close()
db_connection.close()
# Return information:
module.exit_json(**info_dict)
if __name__ == '__main__':
main()


@ -0,0 +1,509 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_backend_servers
author: "Ben Mildren (@bmildren)"
short_description: Adds or removes mysql hosts from proxysql admin interface.
description:
- The M(proxysql_backend_servers) module adds or removes mysql hosts using
the proxysql admin interface.
options:
hostgroup_id:
description:
- The hostgroup in which this mysqld instance is included. An instance
can be part of one or more hostgroups.
default: 0
hostname:
description:
- The ip address at which the mysqld instance can be contacted.
required: True
port:
description:
- The port at which the mysqld instance can be contacted.
default: 3306
status:
description:
- ONLINE - Backend server is fully operational.
OFFLINE_SOFT - When a server is put into C(OFFLINE_SOFT) mode,
connections are kept in use until the current
transaction is completed. This allows a backend to be
detached gracefully.
OFFLINE_HARD - When a server is put into C(OFFLINE_HARD) mode, the
existing connections are dropped, while new incoming
connections aren't accepted either.
If omitted the proxysql database default for I(status) is C(ONLINE).
choices: [ "ONLINE", "OFFLINE_SOFT", "OFFLINE_HARD"]
weight:
description:
- The bigger the weight of a server relative to other weights, the higher
the probability of the server being chosen from the hostgroup. If
omitted the proxysql database default for I(weight) is 1.
compression:
description:
- If the value of I(compression) is greater than 0, new connections to
that server will use compression. If omitted the proxysql database
default for I(compression) is 0.
max_connections:
description:
- The maximum number of connections ProxySQL will open to this backend
server. If omitted the proxysql database default for I(max_connections)
is 1000.
max_replication_lag:
description:
- If greater than 0, ProxySQL will regularly monitor replication lag. If
replication lag goes above I(max_replication_lag), proxysql will
temporarily shun the server until replication catches up. If omitted
the proxysql database default for I(max_replication_lag) is 0.
use_ssl:
description:
- If I(use_ssl) is set to C(True), connections to this server will be
made using SSL connections. If omitted the proxysql database default
for I(use_ssl) is C(False).
type: bool
max_latency_ms:
description:
- Ping time is monitored regularly. If a host has a ping time greater
than I(max_latency_ms) it is excluded from the connection pool
(although the server stays ONLINE). If omitted the proxysql database
default for I(max_latency_ms) is 0.
comment:
description:
- Text field that can be used for any purpose defined by the user.
Could be a description of what the host stores, a reminder of when the
host was added or disabled, or a JSON processed by some checker script.
default: ''
state:
description:
- When C(present) - adds the host, when C(absent) - removes the host.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- community.general.proxysql.managing_config
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a server, it saves the mysql server config to disk, but
# avoids loading the mysql server config to runtime (this might be because
# several servers are being added and the user wants to push the config to
# runtime in a single batch using the M(proxysql_manage_config) module). It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_backend_servers:
login_user: 'admin'
login_password: 'admin'
hostname: 'mysql01'
state: present
load_to_runtime: False
# This example removes a server, saves the mysql server config to disk, and
# dynamically loads the mysql server config to runtime. It uses credentials
# in a supplied config file to connect to the proxysql admin interface.
- proxysql_backend_servers:
config_file: '~/proxysql.cnf'
hostname: 'mysql02'
state: absent
'''
RETURN = '''
stdout:
description: The mysql host modified or removed from proxysql
returned: On create/update will return the newly modified host, on delete
it will return the deleted record.
type: dict
"sample": {
"changed": true,
"hostname": "192.168.52.1",
"msg": "Added server to mysql_hosts",
"server": {
"comment": "",
"compression": "0",
"hostgroup_id": "1",
"hostname": "192.168.52.1",
"max_connections": "1000",
"max_latency_ms": "0",
"max_replication_lag": "0",
"port": "3306",
"status": "ONLINE",
"use_ssl": "0",
"weight": "1"
},
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if module.params["port"] < 0 \
or module.params["port"] > 65535:
module.fail_json(
msg="port must be a valid unix port number (0-65535)"
)
if module.params["compression"]:
if module.params["compression"] < 0 \
or module.params["compression"] > 102400:
module.fail_json(
msg="compression must be set between 0 and 102400"
)
if module.params["max_replication_lag"]:
if module.params["max_replication_lag"] < 0 \
or module.params["max_replication_lag"] > 126144000:
module.fail_json(
msg="max_replication_lag must be set between 0 and 102400"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL SERVERS TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
return True
class ProxySQLServer(object):
def __init__(self, module):
self.state = module.params["state"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.hostgroup_id = module.params["hostgroup_id"]
self.hostname = module.params["hostname"]
self.port = module.params["port"]
config_data_keys = ["status",
"weight",
"compression",
"max_connections",
"max_replication_lag",
"use_ssl",
"max_latency_ms",
"comment"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
def check_server_config_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `host_count`
FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['host_count']) > 0)
def check_server_config(self, cursor):
query_string = \
"""SELECT count(*) AS `host_count`
FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
if isinstance(check_count, tuple):
return int(check_count[0]) > 0
return (int(check_count['host_count']) > 0)
def get_server_config(self, cursor):
query_string = \
"""SELECT *
FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
cursor.execute(query_string, query_data)
server = cursor.fetchone()
return server
def create_server_config(self, cursor):
query_string = \
"""INSERT INTO mysql_servers (
hostgroup_id,
hostname,
port"""
cols = 3
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += ",\n" + col
query_string += \
(")\n" +
"VALUES (" +
"%s ," * cols)
query_string = query_string[:-2]
query_string += ")"
cursor.execute(query_string, query_data)
return True
def update_server_config(self, cursor):
query_string = """UPDATE mysql_servers"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\nSET " + col + "= %s,"
else:
query_string += "\n " + col + " = %s,"
query_string = query_string[:-1]
query_string += ("\nWHERE hostgroup_id = %s\n AND hostname = %s" +
"\n AND port = %s")
query_data.append(self.hostgroup_id)
query_data.append(self.hostname)
query_data.append(self.port)
cursor.execute(query_string, query_data)
return True
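# Illustrative only: if just 'status' and 'weight' are supplied, the
# statement built above looks like (column order follows dict iteration):
#   UPDATE mysql_servers
#   SET status = %s,
#       weight = %s
#   WHERE hostgroup_id = %s
#     AND hostname = %s
#     AND port = %s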
def delete_server_config(self, cursor):
query_string = \
"""DELETE FROM mysql_servers
WHERE hostgroup_id = %s
AND hostname = %s
AND port = %s"""
query_data = \
[self.hostgroup_id,
self.hostname,
self.port]
cursor.execute(query_string, query_data)
return True
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_server(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_server_config(cursor)
result['msg'] = "Added server to mysql_hosts"
result['server'] = \
self.get_server_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Server would have been added to" +
" mysql_hosts, however check_mode" +
" is enabled.")
def update_server(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_server_config(cursor)
result['msg'] = "Updated server in mysql_hosts"
result['server'] = \
self.get_server_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Server would have been updated in" +
" mysql_hosts, however check_mode" +
" is enabled.")
def delete_server(self, check_mode, result, cursor):
if not check_mode:
result['server'] = \
self.get_server_config(cursor)
result['changed'] = \
self.delete_server_config(cursor)
result['msg'] = "Deleted server from mysql_hosts"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Server would have been deleted from" +
" mysql_hosts, however check_mode is" +
" enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default='127.0.0.1'),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default='', type='path'),
hostgroup_id=dict(default=0, type='int'),
hostname=dict(required=True, type='str'),
port=dict(default=3306, type='int'),
status=dict(choices=['ONLINE',
'OFFLINE_SOFT',
'OFFLINE_HARD']),
weight=dict(type='int'),
compression=dict(type='int'),
max_connections=dict(type='int'),
max_replication_lag=dict(type='int'),
use_ssl=dict(type='bool'),
max_latency_ms=dict(type='int'),
comment=dict(default='', type='str'),
state=dict(default='present', choices=['present',
'absent']),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class='DictCursor')
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_server = ProxySQLServer(module)
result = {}
result['state'] = proxysql_server.state
if proxysql_server.hostname:
result['hostname'] = proxysql_server.hostname
if proxysql_server.state == "present":
try:
if not proxysql_server.check_server_config(cursor):
if not proxysql_server.check_server_config_exists(cursor):
proxysql_server.create_server(module.check_mode,
result,
cursor)
else:
proxysql_server.update_server(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The server already exists in mysql_hosts" +
" and doesn't need to be updated.")
result['server'] = \
proxysql_server.get_server_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify server.. %s" % to_native(e)
)
elif proxysql_server.state == "absent":
try:
if proxysql_server.check_server_config_exists(cursor):
proxysql_server.delete_server(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The server is already absent from the" +
" mysql_hosts memory configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to remove server.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,270 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_global_variables
author: "Ben Mildren (@bmildren)"
short_description: Gets or sets the proxysql global variables.
description:
- The M(proxysql_global_variables) module gets or sets the proxysql global
variables.
options:
variable:
description:
- Defines which variable should be returned, or if I(value) is specified
which variable should be updated.
required: True
value:
description:
- Defines a value the variable specified using I(variable) should be set
to.
extends_documentation_fragment:
- community.general.proxysql.managing_config
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example sets the value of a variable, saves the mysql admin variables
# config to disk, and dynamically loads the mysql admin variables config to
# runtime. It uses supplied credentials to connect to the proxysql admin
# interface.
- proxysql_global_variables:
login_user: 'admin'
login_password: 'admin'
variable: 'mysql-max_connections'
value: 4096
# This example gets the value of a variable. It uses credentials in a
# supplied config file to connect to the proxysql admin interface.
- proxysql_global_variables:
config_file: '~/proxysql.cnf'
variable: 'mysql-default_query_delay'
'''
RETURN = '''
stdout:
description: Returns the mysql variable supplied with its associated value.
returned: Returns the current variable and value, or the newly set value
for the variable supplied.
type: dict
"sample": {
"changed": false,
"msg": "The variable is already been set to the supplied value",
"var": {
"variable_name": "mysql-poll_timeout",
"variable_value": "3000"
}
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def save_config_to_disk(variable, cursor):
if variable.startswith("admin"):
cursor.execute("SAVE ADMIN VARIABLES TO DISK")
else:
cursor.execute("SAVE MYSQL VARIABLES TO DISK")
return True
def load_config_to_runtime(variable, cursor):
if variable.startswith("admin"):
cursor.execute("LOAD ADMIN VARIABLES TO RUNTIME")
else:
cursor.execute("LOAD MYSQL VARIABLES TO RUNTIME")
return True
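# Example (illustrative): a variable whose name starts with 'admin'
# (e.g. 'admin-refresh_interval') is saved/loaded via the ADMIN VARIABLES
# commands above, while one such as 'mysql-max_connections' goes through
# the MYSQL VARIABLES commands.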
def check_config(variable, value, cursor):
query_string = \
"""SELECT count(*) AS `variable_count`
FROM global_variables
WHERE variable_name = %s and variable_value = %s"""
query_data = \
[variable, value]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
if isinstance(check_count, tuple):
return int(check_count[0]) > 0
return (int(check_count['variable_count']) > 0)
def get_config(variable, cursor):
query_string = \
"""SELECT *
FROM global_variables
WHERE variable_name = %s"""
query_data = \
[variable, ]
cursor.execute(query_string, query_data)
row_count = cursor.rowcount
resultset = cursor.fetchone()
if row_count > 0:
return resultset
else:
return False
def set_config(variable, value, cursor):
query_string = \
"""UPDATE global_variables
SET variable_value = %s
WHERE variable_name = %s"""
query_data = \
[value, variable]
cursor.execute(query_string, query_data)
return True
def manage_config(variable, save_to_disk, load_to_runtime, cursor, state):
if state:
if save_to_disk:
save_config_to_disk(variable, cursor)
if load_to_runtime:
load_config_to_runtime(variable, cursor)
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
variable=dict(required=True, type='str'),
value=dict(),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
variable = module.params["variable"]
value = module.params["value"]
save_to_disk = module.params["save_to_disk"]
load_to_runtime = module.params["load_to_runtime"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class='DictCursor')
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
result = {}
if not value:
try:
if get_config(variable, cursor):
result['changed'] = False
result['msg'] = \
"Returned the variable and it's current value"
result['var'] = get_config(variable, cursor)
else:
module.fail_json(
msg="The variable \"%s\" was not found" % variable
)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to get config.. %s" % to_native(e)
)
else:
try:
if get_config(variable, cursor):
if not check_config(variable, value, cursor):
if not module.check_mode:
result['changed'] = set_config(variable, value, cursor)
result['msg'] = \
"Set the variable to the supplied value"
result['var'] = get_config(variable, cursor)
manage_config(variable,
save_to_disk,
load_to_runtime,
cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Variable would have been set to" +
" the supplied value, however" +
" check_mode is enabled.")
else:
result['changed'] = False
result['msg'] = ("The variable is already been set to" +
" the supplied value")
result['var'] = get_config(variable, cursor)
else:
module.fail_json(
msg="The variable \"%s\" was not found" % variable
)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to set config.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,217 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_manage_config
author: "Ben Mildren (@bmildren)"
short_description: Writes the proxysql configuration settings between layers.
description:
- The M(proxysql_manage_config) module writes the proxysql configuration
settings between layers. Currently this module will always report a
changed state, so it should typically be used with C(when); however, this will
change in a future version when the CHECKSUM table commands are available
for all tables in proxysql.
options:
action:
description:
- The supplied I(action) combines with the supplied I(direction) to
provide the semantics of how we want to move the I(config_settings)
between the I(config_layers).
choices: [ "LOAD", "SAVE" ]
required: True
config_settings:
description:
- The I(config_settings) specifies which configuration we're writing.
choices: [ "MYSQL USERS", "MYSQL SERVERS", "MYSQL QUERY RULES",
"MYSQL VARIABLES", "ADMIN VARIABLES", "SCHEDULER" ]
required: True
direction:
description:
- FROM - denotes we're reading values FROM the supplied I(config_layer)
and writing to the next layer.
TO - denotes we're reading from the previous layer and writing TO the
supplied I(config_layer)."
choices: [ "FROM", "TO" ]
required: True
config_layer:
description:
- RUNTIME - represents the in-memory data structures of ProxySQL used by
the threads that are handling the requests.
MEMORY - (sometimes also referred as main) represents the in-memory
SQLite3 database.
DISK - represents the on-disk SQLite3 database.
CONFIG - is the classical config file. You can only LOAD FROM the
config file.
choices: [ "MEMORY", "DISK", "RUNTIME", "CONFIG" ]
required: True
extends_documentation_fragment:
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example saves the mysql users config from memory to disk. It uses
# supplied credentials to connect to the proxysql admin interface.
- proxysql_manage_config:
login_user: 'admin'
login_password: 'admin'
action: "SAVE"
config_settings: "MYSQL USERS"
direction: "FROM"
config_layer: "MEMORY"
# This example loads the mysql query rules config from memory to runtime. It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_manage_config:
config_file: '~/proxysql.cnf'
action: "LOAD"
config_settings: "MYSQL QUERY RULES"
direction: "TO"
config_layer: "RUNTIME"
'''
RETURN = '''
stdout:
description: Simply reports whether the action reported a change.
returned: Currently the returned value will always be changed=True.
type: dict
"sample": {
"changed": true
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if module.params["config_layer"] == 'CONFIG' and \
(module.params["action"] != 'LOAD' or
module.params["direction"] != 'FROM'):
if (module.params["action"] != 'LOAD' and
module.params["direction"] != 'FROM'):
msg_string = ("Neither the action \"%s\" nor the direction" +
" \"%s\" are valid combination with the CONFIG" +
" config_layer")
module.fail_json(msg=msg_string % (module.params["action"],
module.params["direction"]))
elif module.params["action"] != 'LOAD':
msg_string = ("The action \"%s\" is not a valid combination" +
" with the CONFIG config_layer")
module.fail_json(msg=msg_string % module.params["action"])
else:
msg_string = ("The direction \"%s\" is not a valid combination" +
" with the CONFIG config_layer")
module.fail_json(msg=msg_string % module.params["direction"])
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def manage_config(manage_config_settings, cursor):
query_string = "%s" % ' '.join(manage_config_settings)
cursor.execute(query_string)
return True
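# Example (illustrative): action='LOAD', config_settings='MYSQL QUERY RULES',
# direction='TO', config_layer='RUNTIME' joins into the admin command
# "LOAD MYSQL QUERY RULES TO RUNTIME", which ProxySQL executes verbatim.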
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
action=dict(required=True, choices=['LOAD',
'SAVE']),
config_settings=dict(required=True, choices=['MYSQL USERS',
'MYSQL SERVERS',
'MYSQL QUERY RULES',
'MYSQL VARIABLES',
'ADMIN VARIABLES',
'SCHEDULER']),
direction=dict(required=True, choices=['FROM',
'TO']),
config_layer=dict(required=True, choices=['MEMORY',
'DISK',
'RUNTIME',
'CONFIG'])
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
action = module.params["action"]
config_settings = module.params["config_settings"]
direction = module.params["direction"]
config_layer = module.params["config_layer"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
result = {}
manage_config_settings = \
[action, config_settings, direction, config_layer]
try:
result['changed'] = manage_config(manage_config_settings,
cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to manage config.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()


@ -0,0 +1,477 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_mysql_users
author: "Ben Mildren (@bmildren)"
short_description: Adds or removes mysql users from proxysql admin interface.
description:
- The M(proxysql_mysql_users) module adds or removes mysql users using the
proxysql admin interface.
options:
username:
description:
- Name of the user connecting to the mysqld or ProxySQL instance.
required: True
password:
description:
- Password of the user connecting to the mysqld or ProxySQL instance.
active:
description:
- A user with I(active) set to C(False) will be tracked in the database,
but will never be loaded into the in-memory data structures. If omitted
the proxysql database default for I(active) is C(True).
type: bool
use_ssl:
description:
- If I(use_ssl) is set to C(True), connections by this user will be made
using SSL connections. If omitted the proxysql database default for
I(use_ssl) is C(False).
type: bool
default_hostgroup:
description:
- If there is no matching rule for the queries sent by this user, the
traffic it generates is sent to the specified hostgroup.
If omitted the proxysql database default for I(default_hostgroup) is 0.
default_schema:
description:
- The schema to which the connection should change to by default.
transaction_persistent:
description:
- If this is set for the user with which the MySQL client is connecting
to ProxySQL (thus a "frontend" user), transactions started within a
hostgroup will remain within that hostgroup regardless of any other
rules.
If omitted the proxysql database default for I(transaction_persistent)
is C(False).
type: bool
fast_forward:
description:
- If I(fast_forward) is set to C(True), I(fast_forward) will bypass the
query processing layer (rewriting, caching) and pass through the query
directly as is to the backend server. If omitted the proxysql database
default for I(fast_forward) is C(False).
type: bool
backend:
description:
- If I(backend) is set to C(True), this (username, password) pair is
used for authenticating to the ProxySQL instance.
default: True
type: bool
frontend:
description:
- If I(frontend) is set to C(True), this (username, password) pair is
used for authenticating to the mysqld servers against any hostgroup.
default: True
type: bool
max_connections:
description:
- The maximum number of connections ProxySQL will open to the backend for
this user. If omitted the proxysql database default for
I(max_connections) is 10000.
state:
description:
- When C(present) - adds the user, when C(absent) - removes the user.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- community.general.proxysql.managing_config
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a user, it saves the mysql user config to disk, but
# avoids loading the mysql user config to runtime (this might be because
# several users are being added and the user wants to push the config to
# runtime in a single batch using the M(proxysql_manage_config) module). It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_mysql_users:
login_user: 'admin'
login_password: 'admin'
username: 'productiondba'
state: present
load_to_runtime: False
# This example removes a user, saves the mysql user config to disk, and
# dynamically loads the mysql user config to runtime. It uses credentials
# in a supplied config file to connect to the proxysql admin interface.
- proxysql_mysql_users:
config_file: '~/proxysql.cnf'
username: 'mysqlboy'
state: absent
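
# A hedged sketch, not from the original examples - the user name, password
# and hostgroup id below are illustrative. It adds a frontend-only
# application user that routes to a hypothetical hostgroup 10.
- proxysql_mysql_users:
    login_user: 'admin'
    login_password: 'admin'
    username: 'app_rw'
    password: 'secret'
    default_hostgroup: 10
    frontend: True
    backend: False
    state: present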
'''
RETURN = '''
stdout:
description: The mysql user modified or removed from proxysql
returned: On create/update will return the newly modified user, on delete
it will return the deleted record.
type: dict
sample:
changed: true
msg: Added user to mysql_users
state: present
user:
active: 1
backend: 1
default_hostgroup: 1
default_schema: null
fast_forward: 0
frontend: 1
max_connections: 10000
password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
schema_locked: 0
transaction_persistent: 0
use_ssl: 0
username: guest_ro
username: guest_ro
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
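# ProxySQL keeps its configuration in three layers (memory, disk and runtime);
# the two helpers below persist the in-memory mysql_users table to disk and
# promote it to the runtime layer respectively.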
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL USERS TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL USERS TO RUNTIME")
return True
class ProxySQLUser(object):
def __init__(self, module):
self.state = module.params["state"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.username = module.params["username"]
self.backend = module.params["backend"]
self.frontend = module.params["frontend"]
config_data_keys = ["password",
"active",
"use_ssl",
"default_hostgroup",
"default_schema",
"transaction_persistent",
"fast_forward",
"max_connections"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
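    # check_user_config_exists matches only the (username, backend, frontend)
    # key; check_user_privs additionally matches every supplied option, so a
    # key hit without a privs hit means the user exists but needs an update.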
def check_user_config_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `user_count`
FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['user_count']) > 0)
def check_user_privs(self, cursor):
query_string = \
"""SELECT count(*) AS `user_count`
FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['user_count']) > 0)
def get_user_config(self, cursor):
query_string = \
"""SELECT *
FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
cursor.execute(query_string, query_data)
user = cursor.fetchone()
return user
def create_user_config(self, cursor):
query_string = \
"""INSERT INTO mysql_users (
username,
backend,
frontend"""
cols = 3
query_data = \
[self.username,
self.backend,
self.frontend]
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += ",\n" + col
query_string += \
(")\n" +
"VALUES (" +
"%s ," * cols)
query_string = query_string[:-2]
query_string += ")"
cursor.execute(query_string, query_data)
return True
def update_user_config(self, cursor):
query_string = """UPDATE mysql_users"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\nSET " + col + "= %s,"
else:
query_string += "\n " + col + " = %s,"
query_string = query_string[:-1]
query_string += ("\nWHERE username = %s\n AND backend = %s" +
"\n AND frontend = %s")
query_data.append(self.username)
query_data.append(self.backend)
query_data.append(self.frontend)
cursor.execute(query_string, query_data)
return True
def delete_user_config(self, cursor):
query_string = \
"""DELETE FROM mysql_users
WHERE username = %s
AND backend = %s
AND frontend = %s"""
query_data = \
[self.username,
self.backend,
self.frontend]
cursor.execute(query_string, query_data)
return True
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_user(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_user_config(cursor)
result['msg'] = "Added user to mysql_users"
result['user'] = \
self.get_user_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("User would have been added to" +
" mysql_users, however check_mode" +
" is enabled.")
def update_user(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_user_config(cursor)
result['msg'] = "Updated user in mysql_users"
result['user'] = \
self.get_user_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("User would have been updated in" +
" mysql_users, however check_mode" +
" is enabled.")
def delete_user(self, check_mode, result, cursor):
if not check_mode:
result['user'] = \
self.get_user_config(cursor)
result['changed'] = \
self.delete_user_config(cursor)
result['msg'] = "Deleted user from mysql_users"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("User would have been deleted from" +
" mysql_users, however check_mode is" +
" enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default='', type='path'),
username=dict(required=True, type='str'),
password=dict(no_log=True, type='str'),
active=dict(type='bool'),
use_ssl=dict(type='bool'),
default_hostgroup=dict(type='int'),
default_schema=dict(type='str'),
transaction_persistent=dict(type='bool'),
fast_forward=dict(type='bool'),
backend=dict(default=True, type='bool'),
frontend=dict(default=True, type='bool'),
max_connections=dict(type='int'),
state=dict(default='present', choices=['present',
'absent']),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class='DictCursor')
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_user = ProxySQLUser(module)
result = {}
result['state'] = proxysql_user.state
if proxysql_user.username:
result['username'] = proxysql_user.username
if proxysql_user.state == "present":
try:
if not proxysql_user.check_user_privs(cursor):
if not proxysql_user.check_user_config_exists(cursor):
proxysql_user.create_user(module.check_mode,
result,
cursor)
else:
proxysql_user.update_user(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The user already exists in mysql_users" +
" and doesn't need to be updated.")
result['user'] = \
proxysql_user.get_user_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify user.. %s" % to_native(e)
)
elif proxysql_user.state == "absent":
try:
if proxysql_user.check_user_config_exists(cursor):
proxysql_user.delete_user(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The user is already absent from the" +
" mysql_users memory configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to remove user.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,613 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_query_rules
author: "Ben Mildren (@bmildren)"
short_description: Modifies query rules using the proxysql admin interface.
description:
- The M(proxysql_query_rules) module modifies query rules using the
proxysql admin interface.
options:
rule_id:
description:
- The unique id of the rule. Rules are processed in rule_id order.
active:
description:
- A rule with I(active) set to C(False) will be tracked in the database,
        but will never be loaded in the in-memory data structures.
type: bool
username:
description:
- Filtering criteria matching username. If I(username) is non-NULL, a
query will match only if the connection is made with the correct
username.
schemaname:
description:
- Filtering criteria matching schemaname. If I(schemaname) is non-NULL, a
query will match only if the connection uses schemaname as its default
schema.
flagIN:
description:
- Used in combination with I(flagOUT) and I(apply) to create chains of
rules.
client_addr:
description:
- Match traffic from a specific source.
proxy_addr:
description:
- Match incoming traffic on a specific local IP.
proxy_port:
description:
- Match incoming traffic on a specific local port.
digest:
description:
- Match queries with a specific digest, as returned by
stats_mysql_query_digest.digest.
match_digest:
description:
- Regular expression that matches the query digest. The dialect of
regular expressions used is that of re2 - https://github.com/google/re2
match_pattern:
description:
- Regular expression that matches the query text. The dialect of regular
expressions used is that of re2 - https://github.com/google/re2
negate_match_pattern:
description:
- If I(negate_match_pattern) is set to C(True), only queries not matching
the query text will be considered as a match. This acts as a NOT
operator in front of the regular expression matching against
match_pattern.
type: bool
flagOUT:
description:
      - Used in combination with I(flagIN) and I(apply) to create chains of rules.
When set, I(flagOUT) signifies the I(flagIN) to be used in the next
chain of rules.
replace_pattern:
description:
- This is the pattern with which to replace the matched pattern. Note
that this is optional, and when omitted, the query processor will only
cache, route, or set other parameters without rewriting.
destination_hostgroup:
description:
- Route matched queries to this hostgroup. This happens unless there is a
started transaction and the logged in user has
I(transaction_persistent) set to C(True) (see M(proxysql_mysql_users)).
cache_ttl:
description:
- The number of milliseconds for which to cache the result of the query.
Note in ProxySQL 1.1 I(cache_ttl) was in seconds.
timeout:
description:
- The maximum timeout in milliseconds with which the matched or rewritten
        query should be executed. If a query runs for longer than the specified
threshold, the query is automatically killed. If timeout is not
specified, the global variable mysql-default_query_timeout applies.
retries:
description:
- The maximum number of times a query needs to be re-executed in case of
detected failure during the execution of the query. If retries is not
specified, the global variable mysql-query_retries_on_failure applies.
delay:
description:
- Number of milliseconds to delay the execution of the query. This is
essentially a throttling mechanism and QoS, and allows a way to give
priority to queries over others. This value is added to the
mysql-default_query_delay global variable that applies to all queries.
mirror_flagOUT:
description:
- Enables query mirroring. If set I(mirror_flagOUT) can be used to
        evaluate the mirrored query against the specified chain of rules.
mirror_hostgroup:
description:
- Enables query mirroring. If set I(mirror_hostgroup) can be used to
mirror queries to the same or different hostgroup.
error_msg:
description:
- Query will be blocked, and the specified error_msg will be returned to
the client.
log:
description:
- Query will be logged.
type: bool
apply:
description:
- Used in combination with I(flagIN) and I(flagOUT) to create chains of
rules. Setting apply to True signifies the last rule to be applied.
type: bool
comment:
description:
- Free form text field, usable for a descriptive comment of the query
rule.
state:
description:
- When C(present) - adds the rule, when C(absent) - removes the rule.
choices: [ "present", "absent" ]
default: present
force_delete:
description:
      - By default we avoid deleting more than one rule in a single batch,
        however if you need this behaviour and you're not concerned about the
        rules deleted, you can set I(force_delete) to C(True).
default: False
type: bool
extends_documentation_fragment:
- community.general.proxysql.managing_config
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a rule to redirect queries from a specific user to another
# hostgroup, it saves the mysql query rule config to disk, but avoids loading
# the mysql query rules config to runtime (this might be because several
# rules are being added and the user wants to push the config to runtime in a
# single batch using the M(proxysql_manage_config) module). It uses supplied
# credentials to connect to the proxysql admin interface.
- proxysql_query_rules:
login_user: admin
login_password: admin
username: 'guest_ro'
match_pattern: "^SELECT.*"
destination_hostgroup: 1
active: 1
retries: 3
state: present
load_to_runtime: False
# This example removes all rules that use the username 'guest_ro', saves the
# mysql query rule config to disk, and dynamically loads the mysql query rule
# config to runtime. It uses credentials in a supplied config file to connect
# to the proxysql admin interface.
- proxysql_query_rules:
config_file: '~/proxysql.cnf'
username: 'guest_ro'
state: absent
force_delete: true
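
# A hedged sketch, not from the original examples - the rule_id, digest
# pattern and hostgroup are illustrative. It caches matching SELECTs for
# 60 seconds (cache_ttl is in milliseconds) and marks the rule as final.
- proxysql_query_rules:
    login_user: 'admin'
    login_password: 'admin'
    rule_id: 20
    active: 1
    match_digest: '^SELECT .* FROM sbtest'
    destination_hostgroup: 2
    cache_ttl: 60000
    apply: 1
    state: present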
'''
RETURN = '''
stdout:
    description: The mysql query rule modified or removed from proxysql
returned: On create/update will return the newly modified rule, in all
other cases will return a list of rules that match the supplied
criteria.
type: dict
"sample": {
"changed": true,
"msg": "Added rule to mysql_query_rules",
"rules": [
{
"active": "0",
"apply": "0",
"cache_ttl": null,
"client_addr": null,
"comment": null,
"delay": null,
"destination_hostgroup": 1,
"digest": null,
"error_msg": null,
"flagIN": "0",
"flagOUT": null,
"log": null,
"match_digest": null,
"match_pattern": null,
"mirror_flagOUT": null,
"mirror_hostgroup": null,
"negate_match_pattern": "0",
"proxy_addr": null,
"proxy_port": null,
"reconnect": null,
"replace_pattern": null,
"retries": null,
"rule_id": "1",
"schemaname": null,
"timeout": null,
"username": "guest_ro"
}
],
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL QUERY RULES TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL QUERY RULES TO RUNTIME")
return True
class ProxyQueryRule(object):
def __init__(self, module):
self.state = module.params["state"]
self.force_delete = module.params["force_delete"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
config_data_keys = ["rule_id",
"active",
"username",
"schemaname",
"flagIN",
"client_addr",
"proxy_addr",
"proxy_port",
"digest",
"match_digest",
"match_pattern",
"negate_match_pattern",
"flagOUT",
"replace_pattern",
"destination_hostgroup",
"cache_ttl",
"timeout",
"retries",
"delay",
"mirror_flagOUT",
"mirror_hostgroup",
"error_msg",
"log",
"apply",
"comment"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
def check_rule_pk_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `rule_count`
FROM mysql_query_rules
WHERE rule_id = %s"""
query_data = \
[self.config_data["rule_id"]]
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['rule_count']) > 0)
def check_rule_cfg_exists(self, cursor):
query_string = \
"""SELECT count(*) AS `rule_count`
FROM mysql_query_rules"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\n WHERE " + col + " = %s"
else:
query_string += "\n AND " + col + " = %s"
if cols > 0:
cursor.execute(query_string, query_data)
else:
cursor.execute(query_string)
check_count = cursor.fetchone()
return int(check_count['rule_count'])
def get_rule_config(self, cursor, created_rule_id=None):
query_string = \
"""SELECT *
FROM mysql_query_rules"""
if created_rule_id:
query_data = [created_rule_id, ]
query_string += "\nWHERE rule_id = %s"
cursor.execute(query_string, query_data)
rule = cursor.fetchone()
else:
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\n WHERE " + col + " = %s"
else:
query_string += "\n AND " + col + " = %s"
if cols > 0:
cursor.execute(query_string, query_data)
else:
cursor.execute(query_string)
rule = cursor.fetchall()
return rule
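    # The statements below are assembled dynamically: only the options the
    # task actually set become columns, and every value is handed to the
    # driver as a bind parameter (%s) rather than interpolated into the SQL.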
def create_rule_config(self, cursor):
query_string = \
"""INSERT INTO mysql_query_rules ("""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += "\n" + col + ","
query_string = query_string[:-1]
query_string += \
(")\n" +
"VALUES (" +
"%s ," * cols)
query_string = query_string[:-2]
query_string += ")"
cursor.execute(query_string, query_data)
new_rule_id = cursor.lastrowid
return True, new_rule_id
def update_rule_config(self, cursor):
query_string = """UPDATE mysql_query_rules"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None and col != "rule_id":
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\nSET " + col + "= %s,"
else:
query_string += "\n " + col + " = %s,"
query_string = query_string[:-1]
query_string += "\nWHERE rule_id = %s"
query_data.append(self.config_data["rule_id"])
cursor.execute(query_string, query_data)
return True
def delete_rule_config(self, cursor):
query_string = \
"""DELETE FROM mysql_query_rules"""
cols = 0
query_data = []
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
if cols == 1:
query_string += "\n WHERE " + col + " = %s"
else:
query_string += "\n AND " + col + " = %s"
if cols > 0:
cursor.execute(query_string, query_data)
else:
cursor.execute(query_string)
check_count = cursor.rowcount
return True, int(check_count)
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_rule(self, check_mode, result, cursor):
if not check_mode:
result['changed'], new_rule_id = \
self.create_rule_config(cursor)
result['msg'] = "Added rule to mysql_query_rules"
self.manage_config(cursor,
result['changed'])
result['rules'] = \
self.get_rule_config(cursor, new_rule_id)
else:
result['changed'] = True
result['msg'] = ("Rule would have been added to" +
" mysql_query_rules, however" +
" check_mode is enabled.")
def update_rule(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_rule_config(cursor)
result['msg'] = "Updated rule in mysql_query_rules"
self.manage_config(cursor,
result['changed'])
result['rules'] = \
self.get_rule_config(cursor)
else:
result['changed'] = True
result['msg'] = ("Rule would have been updated in" +
" mysql_query_rules, however" +
" check_mode is enabled.")
def delete_rule(self, check_mode, result, cursor):
if not check_mode:
result['rules'] = \
self.get_rule_config(cursor)
result['changed'], result['rows_affected'] = \
self.delete_rule_config(cursor)
result['msg'] = "Deleted rule from mysql_query_rules"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Rule would have been deleted from" +
" mysql_query_rules, however" +
" check_mode is enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
rule_id=dict(type='int'),
active=dict(type='bool'),
username=dict(type='str'),
schemaname=dict(type='str'),
flagIN=dict(type='int'),
client_addr=dict(type='str'),
proxy_addr=dict(type='str'),
proxy_port=dict(type='int'),
digest=dict(type='str'),
match_digest=dict(type='str'),
match_pattern=dict(type='str'),
negate_match_pattern=dict(type='bool'),
flagOUT=dict(type='int'),
replace_pattern=dict(type='str'),
destination_hostgroup=dict(type='int'),
cache_ttl=dict(type='int'),
timeout=dict(type='int'),
retries=dict(type='int'),
delay=dict(type='int'),
mirror_flagOUT=dict(type='int'),
mirror_hostgroup=dict(type='int'),
error_msg=dict(type='str'),
log=dict(type='bool'),
apply=dict(type='bool'),
comment=dict(type='str'),
state=dict(default='present', choices=['present',
'absent']),
force_delete=dict(default=False, type='bool'),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class='DictCursor')
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_query_rule = ProxyQueryRule(module)
result = {}
result['state'] = proxysql_query_rule.state
if proxysql_query_rule.state == "present":
try:
if not proxysql_query_rule.check_rule_cfg_exists(cursor):
if proxysql_query_rule.config_data["rule_id"] and \
proxysql_query_rule.check_rule_pk_exists(cursor):
proxysql_query_rule.update_rule(module.check_mode,
result,
cursor)
else:
proxysql_query_rule.create_rule(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The rule already exists in" +
" mysql_query_rules and doesn't need to be" +
" updated.")
result['rules'] = \
proxysql_query_rule.get_rule_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify rule.. %s" % to_native(e)
)
elif proxysql_query_rule.state == "absent":
try:
existing_rules = proxysql_query_rule.check_rule_cfg_exists(cursor)
if existing_rules > 0:
if existing_rules == 1 or \
proxysql_query_rule.force_delete:
proxysql_query_rule.delete_rule(module.check_mode,
result,
cursor)
else:
module.fail_json(
msg=("Operation would delete multiple rules" +
" use force_delete to override this")
)
else:
result['changed'] = False
result['msg'] = ("The rule is already absent from the" +
" mysql_query_rules memory configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to remove rule.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,380 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_replication_hostgroups
author: "Ben Mildren (@bmildren)"
short_description: Manages replication hostgroups using the proxysql admin
interface.
description:
   - Each row in mysql_replication_hostgroups represents a pair of
writer_hostgroup and reader_hostgroup. ProxySQL will monitor the value of
read_only for all the servers in specified hostgroups, and based on the
value of read_only will assign the server to the writer or reader
hostgroups.
options:
writer_hostgroup:
description:
- Id of the writer hostgroup.
required: True
reader_hostgroup:
description:
- Id of the reader hostgroup.
required: True
comment:
description:
      - Text field that can be used for any purpose defined by the user.
state:
description:
- When C(present) - adds the replication hostgroup, when C(absent) -
removes the replication hostgroup.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- community.general.proxysql.managing_config
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a replication hostgroup, it saves the mysql server config
# to disk, but avoids loading the mysql server config to runtime (this might be
# because several replication hostgroups are being added and the user wants to
# push the config to runtime in a single batch using the
# M(proxysql_manage_config) module). It uses supplied credentials to connect
# to the proxysql admin interface.
- proxysql_replication_hostgroups:
login_user: 'admin'
login_password: 'admin'
writer_hostgroup: 1
reader_hostgroup: 2
state: present
load_to_runtime: False
# This example removes a replication hostgroup, saves the mysql server config
# to disk, and dynamically loads the mysql server config to runtime. It uses
# credentials in a supplied config file to connect to the proxysql admin
# interface.
- proxysql_replication_hostgroups:
config_file: '~/proxysql.cnf'
writer_hostgroup: 3
reader_hostgroup: 4
state: absent
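
# A hedged sketch, not from the original examples - the hostgroup ids and
# comment are illustrative. It defines a second writer/reader pair and loads
# it straight to runtime (the module default).
- proxysql_replication_hostgroups:
    login_user: 'admin'
    login_password: 'admin'
    writer_hostgroup: 10
    reader_hostgroup: 20
    comment: 'reporting cluster'
    state: present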
'''
RETURN = '''
stdout:
description: The replication hostgroup modified or removed from proxysql
returned: On create/update will return the newly modified group, on delete
it will return the deleted record.
type: dict
"sample": {
"changed": true,
"msg": "Added server to mysql_hosts",
"repl_group": {
"comment": "",
"reader_hostgroup": "1",
"writer_hostgroup": "2"
},
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if not module.params["writer_hostgroup"] >= 0:
module.fail_json(
msg="writer_hostgroup must be a integer greater than or equal to 0"
)
if not module.params["reader_hostgroup"] == \
module.params["writer_hostgroup"]:
if not module.params["reader_hostgroup"] > 0:
module.fail_json(
msg=("writer_hostgroup must be a integer greater than" +
" or equal to 0")
)
else:
module.fail_json(
msg="reader_hostgroup cannot equal writer_hostgroup"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL SERVERS TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
return True
class ProxySQLReplicationHostgroup(object):
def __init__(self, module):
self.state = module.params["state"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.writer_hostgroup = module.params["writer_hostgroup"]
self.reader_hostgroup = module.params["reader_hostgroup"]
self.comment = module.params["comment"]
def check_repl_group_config(self, cursor, keys):
query_string = \
"""SELECT count(*) AS `repl_groups`
FROM mysql_replication_hostgroups
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup]
if self.comment and not keys:
query_string += "\n AND comment = %s"
query_data.append(self.comment)
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['repl_groups']) > 0)
def get_repl_group_config(self, cursor):
query_string = \
"""SELECT *
FROM mysql_replication_hostgroups
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup]
cursor.execute(query_string, query_data)
repl_group = cursor.fetchone()
return repl_group
def create_repl_group_config(self, cursor):
query_string = \
"""INSERT INTO mysql_replication_hostgroups (
writer_hostgroup,
reader_hostgroup,
comment)
VALUES (%s, %s, %s)"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup,
self.comment or '']
cursor.execute(query_string, query_data)
return True
def update_repl_group_config(self, cursor):
query_string = \
"""UPDATE mysql_replication_hostgroups
SET comment = %s
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.comment,
self.writer_hostgroup,
self.reader_hostgroup]
cursor.execute(query_string, query_data)
return True
def delete_repl_group_config(self, cursor):
query_string = \
"""DELETE FROM mysql_replication_hostgroups
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup]
cursor.execute(query_string, query_data)
return True
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_repl_group(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_repl_group_config(cursor)
result['msg'] = "Added server to mysql_hosts"
result['repl_group'] = \
self.get_repl_group_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Repl group would have been added to" +
" mysql_replication_hostgroups, however" +
" check_mode is enabled.")
def update_repl_group(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_repl_group_config(cursor)
result['msg'] = "Updated server in mysql_hosts"
result['repl_group'] = \
self.get_repl_group_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Repl group would have been updated in" +
" mysql_replication_hostgroups, however" +
" check_mode is enabled.")
def delete_repl_group(self, check_mode, result, cursor):
if not check_mode:
result['repl_group'] = \
self.get_repl_group_config(cursor)
result['changed'] = \
self.delete_repl_group_config(cursor)
result['msg'] = "Deleted server from mysql_hosts"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Repl group would have been deleted from" +
" mysql_replication_hostgroups, however" +
" check_mode is enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
writer_hostgroup=dict(required=True, type='int'),
reader_hostgroup=dict(required=True, type='int'),
comment=dict(type='str'),
state=dict(default='present', choices=['present',
'absent']),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class='DictCursor')
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_repl_group = ProxySQLReplicationHostgroup(module)
result = {}
result['state'] = proxysql_repl_group.state
if proxysql_repl_group.state == "present":
try:
if not proxysql_repl_group.check_repl_group_config(cursor,
keys=True):
proxysql_repl_group.create_repl_group(module.check_mode,
result,
cursor)
else:
if not proxysql_repl_group.check_repl_group_config(cursor,
keys=False):
proxysql_repl_group.update_repl_group(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The repl group already exists in" +
" mysql_replication_hostgroups and" +
" doesn't need to be updated.")
result['repl_group'] = \
proxysql_repl_group.get_repl_group_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify replication hostgroup.. %s" % to_native(e)
)
elif proxysql_repl_group.state == "absent":
try:
if proxysql_repl_group.check_repl_group_config(cursor,
keys=True):
proxysql_repl_group.delete_repl_group(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The repl group is already absent from the" +
" mysql_replication_hostgroups memory" +
" configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to delete replication hostgroup.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,417 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_scheduler
author: "Ben Mildren (@bmildren)"
short_description: Adds or removes schedules from proxysql admin interface.
description:
- The M(proxysql_scheduler) module adds or removes schedules using the
proxysql admin interface.
options:
active:
description:
- A schedule with I(active) set to C(False) will be tracked in the
database, but will be never loaded in the in-memory data structures.
default: True
type: bool
interval_ms:
description:
      - How often (in milliseconds) the job will be started. The minimum value
for I(interval_ms) is 100 milliseconds.
default: 10000
filename:
description:
- Full path of the executable to be executed.
required: True
arg1:
description:
- Argument that can be passed to the job.
arg2:
description:
- Argument that can be passed to the job.
arg3:
description:
- Argument that can be passed to the job.
arg4:
description:
- Argument that can be passed to the job.
arg5:
description:
- Argument that can be passed to the job.
comment:
description:
      - Text field that can be used for any purpose defined by the user.
state:
description:
- When C(present) - adds the schedule, when C(absent) - removes the
schedule.
choices: [ "present", "absent" ]
default: present
force_delete:
description:
- By default we avoid deleting more than one schedule in a single batch,
however if you need this behaviour and you're not concerned about the
schedules deleted, you can set I(force_delete) to C(True).
default: False
type: bool
extends_documentation_fragment:
- community.general.proxysql.managing_config
- community.general.proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a schedule, it saves the scheduler config to disk, but
# avoids loading the scheduler config to runtime (this might be because
# several servers are being added and the user wants to push the config to
# runtime in a single batch using the M(proxysql_manage_config) module). It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_scheduler:
login_user: 'admin'
login_password: 'admin'
interval_ms: 1000
filename: "/opt/maintenance.py"
state: present
load_to_runtime: False
# This example removes a schedule, saves the scheduler config to disk, and
# dynamically loads the scheduler config to runtime. It uses credentials
# in a supplied config file to connect to the proxysql admin interface.
- proxysql_scheduler:
config_file: '~/proxysql.cnf'
filename: "/opt/old_script.py"
state: absent
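
# A hedged sketch, not from the original examples - the script path and
# argument are illustrative. It runs a health-check script every 5 seconds,
# passing one argument to the job.
- proxysql_scheduler:
    login_user: 'admin'
    login_password: 'admin'
    interval_ms: 5000
    filename: '/opt/checks/galera_check.sh'
    arg1: 'writer'
    state: present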
'''
RETURN = '''
stdout:
description: The schedule modified or removed from proxysql
returned: On create/update will return the newly modified schedule, on
delete it will return the deleted record.
type: dict
"sample": {
"changed": true,
"filename": "/opt/test.py",
"msg": "Added schedule to scheduler",
"schedules": [
{
"active": "1",
"arg1": null,
"arg2": null,
"arg3": null,
"arg4": null,
"arg5": null,
"comment": "",
"filename": "/opt/test.py",
"id": "1",
"interval_ms": "10000"
}
],
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if module.params["interval_ms"] < 100 \
or module.params["interval_ms"] > 100000000:
module.fail_json(
msg="interval_ms must between 100ms & 100000000ms"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
def save_config_to_disk(cursor):
cursor.execute("SAVE SCHEDULER TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD SCHEDULER TO RUNTIME")
return True
class ProxySQLSchedule(object):
def __init__(self, module):
self.state = module.params["state"]
self.force_delete = module.params["force_delete"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.active = module.params["active"]
self.interval_ms = module.params["interval_ms"]
self.filename = module.params["filename"]
config_data_keys = ["arg1",
"arg2",
"arg3",
"arg4",
"arg5",
"comment"]
self.config_data = dict((k, module.params[k])
for k in config_data_keys)
def check_schedule_config(self, cursor):
query_string = \
"""SELECT count(*) AS `schedule_count`
FROM scheduler
WHERE active = %s
AND interval_ms = %s
AND filename = %s"""
query_data = \
[self.active,
self.interval_ms,
self.filename]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return int(check_count['schedule_count'])
def get_schedule_config(self, cursor):
query_string = \
"""SELECT *
FROM scheduler
WHERE active = %s
AND interval_ms = %s
AND filename = %s"""
query_data = \
[self.active,
self.interval_ms,
self.filename]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
schedule = cursor.fetchall()
return schedule
def create_schedule_config(self, cursor):
query_string = \
"""INSERT INTO scheduler (
active,
interval_ms,
filename"""
cols = 0
query_data = \
[self.active,
self.interval_ms,
self.filename]
for col, val in iteritems(self.config_data):
if val is not None:
cols += 1
query_data.append(val)
query_string += ",\n" + col
query_string += \
(")\n" +
"VALUES (%s, %s, %s" +
", %s" * cols +
")")
cursor.execute(query_string, query_data)
return True
def delete_schedule_config(self, cursor):
query_string = \
"""DELETE FROM scheduler
WHERE active = %s
AND interval_ms = %s
AND filename = %s"""
query_data = \
[self.active,
self.interval_ms,
self.filename]
for col, val in iteritems(self.config_data):
if val is not None:
query_data.append(val)
query_string += "\n AND " + col + " = %s"
cursor.execute(query_string, query_data)
check_count = cursor.rowcount
return True, int(check_count)
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_schedule(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_schedule_config(cursor)
result['msg'] = "Added schedule to scheduler"
result['schedules'] = \
self.get_schedule_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Schedule would have been added to" +
" scheduler, however check_mode" +
" is enabled.")
def delete_schedule(self, check_mode, result, cursor):
if not check_mode:
result['schedules'] = \
self.get_schedule_config(cursor)
            result['changed'], result['rows_affected'] = \
                self.delete_schedule_config(cursor)
result['msg'] = "Deleted schedule from scheduler"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Schedule would have been deleted from" +
" scheduler, however check_mode is" +
" enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
active=dict(default=True, type='bool'),
interval_ms=dict(default=10000, type='int'),
filename=dict(required=True, type='str'),
arg1=dict(type='str'),
arg2=dict(type='str'),
arg3=dict(type='str'),
arg4=dict(type='str'),
arg5=dict(type='str'),
comment=dict(type='str'),
state=dict(default='present', choices=['present',
'absent']),
force_delete=dict(default=False, type='bool'),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor, db_conn = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class='DictCursor')
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_schedule = ProxySQLSchedule(module)
result = {}
result['state'] = proxysql_schedule.state
result['filename'] = proxysql_schedule.filename
if proxysql_schedule.state == "present":
try:
if proxysql_schedule.check_schedule_config(cursor) <= 0:
proxysql_schedule.create_schedule(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The schedule already exists and doesn't" +
" need to be updated.")
result['schedules'] = \
proxysql_schedule.get_schedule_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify schedule.. %s" % to_native(e)
)
elif proxysql_schedule.state == "absent":
try:
existing_schedules = \
proxysql_schedule.check_schedule_config(cursor)
if existing_schedules > 0:
if existing_schedules == 1 or proxysql_schedule.force_delete:
proxysql_schedule.delete_schedule(module.check_mode,
result,
cursor)
else:
module.fail_json(
msg=("Operation would delete multiple records" +
" use force_delete to override this")
)
else:
result['changed'] = False
result['msg'] = ("The schedule is already absent from the" +
" memory configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to remove schedule.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,195 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vertica_configuration
short_description: Updates Vertica configuration parameters.
description:
- Updates Vertica configuration parameters.
options:
name:
description:
- Name of the parameter to update.
required: true
aliases: [parameter]
value:
description:
- Value of the parameter to be set.
required: true
db:
description:
- Name of the Vertica database.
cluster:
description:
- Name of the Vertica cluster.
default: localhost
port:
description:
- Vertica cluster port to connect to.
default: 5433
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
'''
EXAMPLES = """
- name: updating failovertostandbyafter
vertica_configuration: name=failovertostandbyafter value='8 hours'
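
# A hedged sketch, not from the original examples - the cluster, database and
# credentials are illustrative. The same update against a remote cluster,
# spelling out the connection parameters.
- name: updating failovertostandbyafter on a remote cluster
  vertica_configuration:
    name: failovertostandbyafter
    value: '8 hours'
    db: mydb
    cluster: vertica01.example.com
    port: 5433
    login_user: dbadmin
    login_password: secret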
"""
import traceback
PYODBC_IMP_ERR = None
try:
import pyodbc
except ImportError:
PYODBC_IMP_ERR = traceback.format_exc()
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_configuration_facts(cursor, parameter_name=''):
facts = {}
cursor.execute("""
select c.parameter_name, c.current_value, c.default_value
from configuration_parameters c
where c.node_name = 'ALL'
and (? = '' or c.parameter_name ilike ?)
""", parameter_name, parameter_name)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.parameter_name.lower()] = {
'parameter_name': row.parameter_name,
'current_value': row.current_value,
'default_value': row.default_value}
return facts
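# check() reports whether the parameter already holds the requested value
# (used to answer check_mode); present() applies the change only when the
# current value differs, then refreshes the cached facts.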
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
return False
return True
def present(configuration_facts, cursor, parameter_name, current_value):
parameter_key = parameter_name.lower()
changed = False
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
changed = True
if changed:
configuration_facts.update(get_configuration_facts(cursor, parameter_name))
return changed
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
parameter=dict(required=True, aliases=['name']),
value=dict(default=None),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
parameter_name = module.params['parameter']
current_value = module.params['value']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
exception=traceback.format_exc())
try:
configuration_facts = get_configuration_facts(cursor)
if module.check_mode:
changed = not check(configuration_facts, parameter_name, current_value)
else:
try:
changed = present(configuration_facts, cursor, parameter_name, current_value)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
except CannotDropError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
if __name__ == '__main__':
main()

View file

@ -0,0 +1 @@
vertica_info.py

View file

@ -0,0 +1,295 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vertica_info
short_description: Gathers Vertica database facts.
description:
- Gathers Vertica database information.
- This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(vertica_info) module no longer returns C(ansible_facts)!
options:
cluster:
description:
      - Name of the Vertica cluster.
default: localhost
port:
description:
      - Database port to connect to.
default: 5433
db:
description:
      - Name of the Vertica database.
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
'''
EXAMPLES = """
- name: gathering vertica facts
vertica_info: db=db_name
register: result
- name: Print schemas
debug:
msg: "{{ result.vertica_schemas }}"
"""
import traceback
PYODBC_IMP_ERR = None
try:
import pyodbc
except ImportError:
PYODBC_IMP_ERR = traceback.format_exc()
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
# module specific functions
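# Each query below binds the same optional filter twice: an empty string
# matches every row, otherwise the value is applied as an ILIKE pattern.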
def get_schema_facts(cursor, schema=''):
facts = {}
cursor.execute("""
select schema_name, schema_owner, create_time
from schemata
where not is_system_schema and schema_name not in ('public')
and (? = '' or schema_name ilike ?)
""", schema, schema)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.schema_name.lower()] = {
'name': row.schema_name,
'owner': row.schema_owner,
'create_time': str(row.create_time),
'usage_roles': [],
'create_roles': []}
cursor.execute("""
select g.object_name as schema_name, r.name as role_name,
lower(g.privileges_description) privileges_description
from roles r join grants g
on g.grantee = r.name and g.object_type='SCHEMA'
and g.privileges_description like '%USAGE%'
and g.grantee not in ('public', 'dbadmin')
and (? = '' or g.object_name ilike ?)
""", schema, schema)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
schema_key = row.schema_name.lower()
if 'create' in row.privileges_description:
facts[schema_key]['create_roles'].append(row.role_name)
else:
facts[schema_key]['usage_roles'].append(row.role_name)
return facts
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
select r.name, r.assigned_roles
from roles r
where (? = '' or r.name ilike ?)
""", role, role)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
role_key = row.name.lower()
facts[role_key] = {
'name': row.name,
'assigned_roles': []}
if row.assigned_roles:
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
def get_configuration_facts(cursor, parameter=''):
facts = {}
cursor.execute("""
select c.parameter_name, c.current_value, c.default_value
from configuration_parameters c
where c.node_name = 'ALL'
and (? = '' or c.parameter_name ilike ?)
""", parameter, parameter)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.parameter_name.lower()] = {
'parameter_name': row.parameter_name,
'current_value': row.current_value,
'default_value': row.default_value}
return facts
def get_node_facts(cursor, schema=''):
facts = {}
cursor.execute("""
select node_name, node_address, export_address, node_state, node_type,
catalog_path
from nodes
""")
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.node_address] = {
'node_name': row.node_name,
'export_address': row.export_address,
'node_state': row.node_state,
'node_type': row.node_type,
'catalog_path': row.catalog_path}
return facts
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
cluster=dict(default='localhost'),
port=dict(default='5433'),
db=dict(default=None),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode=True)
is_old_facts = module._name == 'vertica_facts'
if is_old_facts:
module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
if not pyodbc_found:
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
db = ''
if module.params['db']:
db = module.params['db']
try:
dsn = (
"Driver=Vertica;"
"Server=%s;"
"Port=%s;"
"Database=%s;"
"User=%s;"
"Password=%s;"
"ConnectionLoadBalance=%s"
) % (module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
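        # ConnectionLoadBalance=true enables Vertica native connection load
        # balancing, so the configured cluster host only needs to be a
        # reachable seed node rather than a fixed endpoint.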
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
try:
schema_facts = get_schema_facts(cursor)
user_facts = get_user_facts(cursor)
role_facts = get_role_facts(cursor)
configuration_facts = get_configuration_facts(cursor)
node_facts = get_node_facts(cursor)
if is_old_facts:
module.exit_json(changed=False,
ansible_facts={'vertica_schemas': schema_facts,
'vertica_users': user_facts,
'vertica_roles': role_facts,
'vertica_configuration': configuration_facts,
'vertica_nodes': node_facts})
else:
module.exit_json(changed=False,
vertica_schemas=schema_facts,
vertica_users=user_facts,
vertica_roles=role_facts,
vertica_configuration=configuration_facts,
vertica_nodes=node_facts)
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()

View file

@ -0,0 +1,242 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vertica_role
short_description: Adds or removes Vertica database roles and assigns roles to them
description:
  - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
options:
name:
description:
- Name of the role to add or remove.
required: true
assigned_roles:
description:
- Comma separated list of roles to assign to the role.
aliases: ['assigned_role']
state:
description:
      - Whether to create C(present) or drop C(absent) a role.
choices: ['present', 'absent']
default: present
db:
description:
- Name of the Vertica database.
cluster:
description:
- Name of the Vertica cluster.
default: localhost
port:
description:
- Vertica cluster port to connect to.
default: 5433
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
'''
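# For reference, a minimal sketch of the ODBC configuration described in the
# notes above, assuming the default Vertica install paths; adjust the paths
# to your environment:
#
#   /etc/odbcinst.ini (or $HOME/.odbcinst.ini):
#       [Vertica]
#       Driver = /opt/vertica/lib64/libverticaodbc.so
#
#   /etc/vertica.ini (or $HOME/.vertica.ini):
#       [Driver]
#       ErrorMessagesPath = /opt/vertica/lib64
#       DriverManagerEncoding = UTF-16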
EXAMPLES = """
- name: creating a new vertica role
vertica_role: name=role_name db=db_name state=present
- name: creating a new vertica role with other role assigned
vertica_role: name=role_name assigned_role=other_role_name state=present
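# the same task expressed in YAML dictionary form (equivalent to the
# key=value form above):
- name: creating a new vertica role with other role assigned
  vertica_role:
    name: role_name
    assigned_roles: other_role_name
    state: present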
"""
import traceback
PYODBC_IMP_ERR = None
try:
import pyodbc
except ImportError:
PYODBC_IMP_ERR = traceback.format_exc()
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
select r.name, r.assigned_roles
from roles r
where (? = '' or r.name ilike ?)
""", role, role)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
role_key = row.name.lower()
facts[role_key] = {
'name': row.name,
'assigned_roles': []}
if row.assigned_roles:
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
def update_roles(role_facts, cursor, role,
                 existing, required):
    # Revoke roles that are currently assigned but no longer required.
    for assigned_role in set(existing) - set(required):
        cursor.execute("revoke {0} from {1}".format(assigned_role, role))
    # Grant roles that are required but not yet assigned.
    for assigned_role in set(required) - set(existing):
        cursor.execute("grant {0} to {1}".format(assigned_role, role))
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
return False
if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
return False
return True
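# check() returns True when the current facts already match the requested
# state; check mode reports changed = not check(...) without touching the
# database.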
def present(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
cursor.execute("create role {0}".format(role))
update_roles(role_facts, cursor, role, [], assigned_roles)
role_facts.update(get_role_facts(cursor, role))
return True
else:
changed = False
if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], assigned_roles)
changed = True
if changed:
role_facts.update(get_role_facts(cursor, role))
return changed
def absent(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key in role_facts:
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], [])
cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
del role_facts[role_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
role=dict(required=True, aliases=['name']),
assigned_roles=dict(default=None, aliases=['assigned_role']),
state=dict(default='present', choices=['absent', 'present']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
role = module.params['role']
assigned_roles = []
if module.params['assigned_roles']:
assigned_roles = module.params['assigned_roles'].split(',')
        assigned_roles = list(filter(None, assigned_roles))  # list() so it survives repeated iteration on Python 3
state = module.params['state']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
try:
role_facts = get_role_facts(cursor)
if module.check_mode:
changed = not check(role_facts, role, assigned_roles)
elif state == 'absent':
try:
changed = absent(role_facts, cursor, role, assigned_roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == 'present':
try:
changed = present(role_facts, cursor, role, assigned_roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
except CannotDropError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
if __name__ == '__main__':
main()

View file

@ -0,0 +1,313 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vertica_schema
short_description: Adds or removes Vertica database schema and roles
description:
  - Adds or removes a Vertica database schema and, optionally, roles
    with schema access privileges.
  - A schema will not be removed until all of its objects have been dropped.
  - If the module tries to remove such a schema, it will fail and only
    remove the roles created for the schema, provided they have no
    dependencies of their own.
options:
name:
description:
- Name of the schema to add or remove.
required: true
usage_roles:
description:
- Comma separated list of roles to create and grant usage access to the schema.
aliases: ['usage_role']
create_roles:
description:
- Comma separated list of roles to create and grant usage and create access to the schema.
aliases: ['create_role']
owner:
description:
- Name of the user to set as owner of the schema.
state:
description:
- Whether to create C(present), or drop C(absent) a schema.
default: present
choices: ['present', 'absent']
db:
description:
- Name of the Vertica database.
cluster:
description:
- Name of the Vertica cluster.
default: localhost
port:
description:
- Vertica cluster port to connect to.
default: 5433
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
'''
EXAMPLES = """
- name: creating a new vertica schema
vertica_schema: name=schema_name db=db_name state=present
- name: creating a new schema with specific schema owner
vertica_schema: name=schema_name owner=dbowner db=db_name state=present
- name: creating a new schema with roles
vertica_schema:
name=schema_name
create_roles=schema_name_all
usage_roles=schema_name_ro,schema_name_rw
db=db_name
state=present
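# the same task expressed in YAML dictionary form (equivalent to the
# key=value form above):
- name: creating a new schema with roles
  vertica_schema:
    name: schema_name
    create_roles: schema_name_all
    usage_roles: schema_name_ro,schema_name_rw
    db: db_name
    state: present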
"""
import traceback
PYODBC_IMP_ERR = None
try:
import pyodbc
except ImportError:
PYODBC_IMP_ERR = traceback.format_exc()
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
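# get_schema_facts() below builds its result in two passes: the first query
# seeds one facts entry per schema, the second classifies each granted role
# as a create role (USAGE and CREATE) or a usage role (USAGE only).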
def get_schema_facts(cursor, schema=''):
facts = {}
cursor.execute("""
select schema_name, schema_owner, create_time
from schemata
where not is_system_schema and schema_name not in ('public', 'TxtIndex')
and (? = '' or schema_name ilike ?)
""", schema, schema)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.schema_name.lower()] = {
'name': row.schema_name,
'owner': row.schema_owner,
'create_time': str(row.create_time),
'usage_roles': [],
'create_roles': []}
cursor.execute("""
select g.object_name as schema_name, r.name as role_name,
lower(g.privileges_description) privileges_description
from roles r join grants g
on g.grantee_id = r.role_id and g.object_type='SCHEMA'
and g.privileges_description like '%USAGE%'
and g.grantee not in ('public', 'dbadmin')
and (? = '' or g.object_name ilike ?)
""", schema, schema)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
schema_key = row.schema_name.lower()
if 'create' in row.privileges_description:
facts[schema_key]['create_roles'].append(row.role_name)
else:
facts[schema_key]['usage_roles'].append(row.role_name)
return facts
def update_roles(schema_facts, cursor, schema,
                 existing, required,
                 create_existing, create_required):
    # Drop roles that are no longer wanted for either usage or create access.
    for role in set(existing + create_existing) - set(required + create_required):
        cursor.execute("drop role {0} cascade".format(role))
    # Downgrade roles that keep usage access but lose create access.
    for role in set(create_existing) - set(create_required):
        cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
    # Create newly required roles and grant them usage on the schema.
    for role in set(required + create_required) - set(existing + create_existing):
        cursor.execute("create role {0}".format(role))
        cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
    # Grant create access to roles that need it but do not have it yet.
    for role in set(create_required) - set(create_existing):
        cursor.execute("grant create on schema {0} to {1}".format(schema, role))
def check(schema_facts, schema, usage_roles, create_roles, owner):
schema_key = schema.lower()
if schema_key not in schema_facts:
return False
    if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
return False
if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
return False
if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
return False
return True
def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
schema_key = schema.lower()
if schema_key not in schema_facts:
query_fragments = ["create schema {0}".format(schema)]
if owner:
query_fragments.append("authorization {0}".format(owner))
cursor.execute(' '.join(query_fragments))
update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
schema_facts.update(get_schema_facts(cursor, schema))
return True
else:
changed = False
if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
raise NotSupportedError((
"Changing schema owner is not supported. "
"Current owner: {0}."
).format(schema_facts[schema_key]['owner']))
if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
update_roles(schema_facts, cursor, schema,
schema_facts[schema_key]['usage_roles'], usage_roles,
schema_facts[schema_key]['create_roles'], create_roles)
changed = True
if changed:
schema_facts.update(get_schema_facts(cursor, schema))
return changed
def absent(schema_facts, cursor, schema, usage_roles, create_roles):
schema_key = schema.lower()
if schema_key in schema_facts:
update_roles(schema_facts, cursor, schema,
schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
try:
cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
except pyodbc.Error:
raise CannotDropError("Dropping schema failed due to dependencies.")
del schema_facts[schema_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
schema=dict(required=True, aliases=['name']),
usage_roles=dict(default=None, aliases=['usage_role']),
create_roles=dict(default=None, aliases=['create_role']),
owner=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
schema = module.params['schema']
usage_roles = []
if module.params['usage_roles']:
usage_roles = module.params['usage_roles'].split(',')
        usage_roles = list(filter(None, usage_roles))  # list() so it survives repeated iteration on Python 3
create_roles = []
if module.params['create_roles']:
create_roles = module.params['create_roles'].split(',')
        create_roles = list(filter(None, create_roles))  # list() so it survives repeated iteration on Python 3
owner = module.params['owner']
state = module.params['state']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
try:
schema_facts = get_schema_facts(cursor)
if module.check_mode:
changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
elif state == 'absent':
try:
changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == 'present':
try:
changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
except CannotDropError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
if __name__ == '__main__':
main()

View file

@ -0,0 +1,378 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vertica_user
short_description: Adds or removes Vertica database users and assigns roles
description:
  - Adds or removes a Vertica database user and, optionally, assigns roles.
  - A user will not be removed until all of its dependencies have been dropped.
  - If the module tries to remove such a user, it will fail and only
    revoke the roles granted to the user.
options:
name:
description:
- Name of the user to add or remove.
required: true
profile:
description:
- Sets the user's profile.
resource_pool:
description:
- Sets the user's resource pool.
password:
description:
- The user's password encrypted by the MD5 algorithm.
      - The password must be generated with the format C("md5" + md5[password + username]),
        resulting in a total of 35 characters. An easy way to do this is by querying
        the Vertica database with C(select 'md5'||md5('<user_password><user_name>')).
expired:
description:
- Sets the user's password expiration.
type: bool
ldap:
description:
- Set to true if users are authenticated via LDAP.
- The user will be created with password expired and set to I($ldap$).
type: bool
roles:
description:
- Comma separated list of roles to assign to the user.
aliases: ['role']
state:
description:
- Whether to create C(present), drop C(absent) or lock C(locked) a user.
choices: ['present', 'absent', 'locked']
default: present
db:
description:
- Name of the Vertica database.
cluster:
description:
- Name of the Vertica cluster.
default: localhost
port:
description:
- Vertica cluster port to connect to.
default: 5433
login_user:
description:
- The username used to authenticate with.
default: dbadmin
login_password:
description:
- The password used to authenticate with.
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
  - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
'''
EXAMPLES = """
- name: creating a new vertica user with password
vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
- name: creating a new vertica user authenticated via ldap with roles assigned
vertica_user:
name=user_name
ldap=true
db=db_name
roles=schema_name_ro
state=present
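# the same task expressed in YAML dictionary form (equivalent to the
# key=value form above):
- name: creating a new vertica user authenticated via ldap with roles assigned
  vertica_user:
    name: user_name
    ldap: true
    db: db_name
    roles: schema_name_ro
    state: present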
"""
import traceback
PYODBC_IMP_ERR = None
try:
import pyodbc
except ImportError:
PYODBC_IMP_ERR = traceback.format_exc()
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def update_roles(user_facts, cursor, user,
                 existing_all, existing_default, required):
    # Revoke roles the user holds but no longer needs.
    del_roles = list(set(existing_all) - set(required))
    if del_roles:
        cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
    # Grant newly required roles.
    new_roles = list(set(required) - set(existing_all))
    if new_roles:
        cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
    # Make the full required set the user's default roles.
    if required:
        cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
def check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
return False
if profile and profile != user_facts[user_key]['profile']:
return False
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
return False
if locked != (user_facts[user_key]['locked'] == 'True'):
return False
if password and password != user_facts[user_key]['password']:
return False
if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
return False
if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
return False
return True
def present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
query_fragments = ["create user {0}".format(user)]
if locked:
query_fragments.append("account lock")
if password or ldap:
if password:
query_fragments.append("identified by '{0}'".format(password))
else:
query_fragments.append("identified by '$ldap$'")
if expired or ldap:
query_fragments.append("password expire")
if profile:
query_fragments.append("profile {0}".format(profile))
if resource_pool:
query_fragments.append("resource pool {0}".format(resource_pool))
cursor.execute(' '.join(query_fragments))
if resource_pool and resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
update_roles(user_facts, cursor, user, [], [], roles)
user_facts.update(get_user_facts(cursor, user))
return True
else:
changed = False
query_fragments = ["alter user {0}".format(user)]
if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
if locked:
state = 'lock'
else:
state = 'unlock'
query_fragments.append("account {0}".format(state))
changed = True
if password and password != user_facts[user_key]['password']:
query_fragments.append("identified by '{0}'".format(password))
changed = True
if ldap:
if ldap != (user_facts[user_key]['expired'] == 'True'):
query_fragments.append("password expire")
changed = True
elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
if expired:
query_fragments.append("password expire")
changed = True
else:
raise NotSupportedError("Unexpiring user password is not supported.")
if profile and profile != user_facts[user_key]['profile']:
query_fragments.append("profile {0}".format(profile))
changed = True
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
query_fragments.append("resource pool {0}".format(resource_pool))
if user_facts[user_key]['resource_pool'] != 'general':
cursor.execute("revoke usage on resource pool {0} from {1}".format(
user_facts[user_key]['resource_pool'], user))
if resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
changed = True
if changed:
cursor.execute(' '.join(query_fragments))
if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
changed = True
if changed:
user_facts.update(get_user_facts(cursor, user))
return changed
def absent(user_facts, cursor, user, roles):
user_key = user.lower()
if user_key in user_facts:
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
try:
cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
except pyodbc.Error:
raise CannotDropError("Dropping user failed due to dependencies.")
del user_facts[user_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True, aliases=['name']),
profile=dict(default=None),
resource_pool=dict(default=None),
password=dict(default=None, no_log=True),
expired=dict(type='bool', default=None),
ldap=dict(type='bool', default=None),
roles=dict(default=None, aliases=['role']),
state=dict(default='present', choices=['absent', 'present', 'locked']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
user = module.params['user']
profile = module.params['profile']
if profile:
profile = profile.lower()
resource_pool = module.params['resource_pool']
if resource_pool:
resource_pool = resource_pool.lower()
password = module.params['password']
expired = module.params['expired']
ldap = module.params['ldap']
roles = []
if module.params['roles']:
roles = module.params['roles'].split(',')
        roles = list(filter(None, roles))  # list() so it survives repeated iteration on Python 3
state = module.params['state']
if state == 'locked':
locked = True
else:
locked = False
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception as e:
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
user_facts = get_user_facts(cursor)
if module.check_mode:
changed = not check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles)
elif state == 'absent':
try:
changed = absent(user_facts, cursor, user, roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state in ['present', 'locked']:
try:
changed = present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles)
except pyodbc.Error as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
except CannotDropError as e:
module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
if __name__ == '__main__':
main()