Mirror of https://github.com/ansible-collections/community.general.git (synced 2025-07-22 21:00:22 -07:00)

minor spelling changes

This commit is contained in: parent 054a3fccf8, commit 0b8011436d

114 changed files with 152 additions and 152 deletions
@@ -521,7 +521,7 @@ class GalaxyCLI(CLI):

def execute_login(self):
"""
- Verify user's identify via Github and retreive an auth token from Galaxy.
+ Verify user's identify via Github and retrieve an auth token from Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
@@ -540,7 +540,7 @@ class GalaxyCLI(CLI):
token = GalaxyToken()
token.set(galaxy_response['token'])

- display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
+ display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0

def execute_import(self):
@@ -72,7 +72,7 @@ def get_config(p, section, key, env_var, default, value_type=None, expand_relati
and return it as a python list.
:none: Sets the value to None
:path: Expands any environment variables and tilde's in the value.
- :tmp_path: Create a unique temporary directory inside of the dirctory
+ :tmp_path: Create a unique temporary directory inside of the directory
specified by value and return its path.
:pathlist: Treat the value as a typical PATH string. (On POSIX, this
means colon separated strings.) Split the value and then expand
@@ -413,7 +413,7 @@ class TaskExecutor:
# loop error takes precedence
if self._loop_eval_error is not None:
raise self._loop_eval_error
- # skip conditional exception in the case of includes as the vars needed might not be avaiable except in the included tasks or due to tags
+ # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
if self._task.action not in ['include', 'include_role']:
raise

@@ -23,7 +23,7 @@ from ansible.parsing.dataloader import DataLoader

class TaskResult:
'''
- This class is responsible for interpretting the resulting data
+ This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
'''
@@ -152,7 +152,7 @@ class InventoryScript:
try:
got = self.host_vars_from_top.get(host.name, {})
except AttributeError as e:
- raise AnsibleError("Improperly formated host information for %s: %s" % (host.name,to_native(e)))
+ raise AnsibleError("Improperly formatted host information for %s: %s" % (host.name,to_native(e)))
return got

cmd = [self.filename, "--host", host.name]
@@ -600,7 +600,7 @@ class Distribution(object):
"""
This subclass of Facts fills the distribution, distribution_version and distribution_release variables

- To do so it checks the existance and content of typical files in /etc containing distribution information
+ To do so it checks the existence and content of typical files in /etc containing distribution information

This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
@@ -77,7 +77,7 @@ notes:
pause to delay further playbook execution until the instance is reachable,
if necessary.
- This module returns multiple changed statuses on disassociation or release.
- It returns an overall status based on any changes occuring. It also returns
+ It returns an overall status based on any changes occurring. It also returns
individual changed statuses for disassociation and release.
'''

@@ -122,7 +122,7 @@ def create_scaling_policy(connection, module):
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
changed = True

- # set the min adjustment step incase the user decided to change their
+ # set the min adjustment step in case the user decided to change their
# adjustment type to percentage
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))

@@ -582,7 +582,7 @@ def main():
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)

- # Add device, volume_id and volume_type parameters separately to maintain backward compatability
+ # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
@@ -960,7 +960,7 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
changed = True
success = True
err_msg = (
- 'NAT gateway {0} is in a deleting state. Delete was successfull'
+ 'NAT gateway {0} is in a deleting state. Delete was successful'
.format(nat_gateway_id)
)

@@ -284,7 +284,7 @@ class AzureRMSubnet(AzureRMModuleBase):
subnet)
new_subnet = self.get_poller_result(poller)
except Exception as exc:
- self.fail("Error creating or updateing subnet {0} - {1}".format(self.name, str(exc)))
+ self.fail("Error creating or updating subnet {0} - {1}".format(self.name, str(exc)))
self.check_provisioning_state(new_subnet)
return subnet_to_dict(new_subnet)

@@ -287,7 +287,7 @@ class AnsibleCloudStackCluster(AnsibleCloudStack):
clusters = self.cs.listClusters(**args)
if clusters:
self.cluster = clusters['cluster'][0]
- # fix differnt return from API then request argument given
+ # fix different return from API then request argument given
self.cluster['hypervisor'] = self.cluster['hypervisortype']
self.cluster['clustername'] = self.cluster['name']
return self.cluster
@@ -175,7 +175,7 @@ options:
default: null
zone:
description:
- - Name of the zone in which the instance shoud be deployed.
+ - Name of the zone in which the instance should be deployed.
- If not set, default zone is used.
required: false
default: null
@@ -621,7 +621,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
instance = self.recover_instance(instance=instance)
instance = self.update_instance(instance=instance, start_vm=start_vm)

- # In check mode, we do not necessarely have an instance
+ # In check mode, we do not necessarily have an instance
if instance:
instance = self.ensure_tags(resource=instance, resource_type='UserVm')
# refresh instance data
@@ -108,7 +108,7 @@ options:
default: null
zone:
description:
- - Name of the zone in which the rule shoud be created.
+ - Name of the zone in which the rule should be created.
- If not set, default zone is used.
required: false
default: null
@@ -38,7 +38,7 @@ options:
required: true
architecture:
description:
- - The archiecture for the container (e.g. "x86_64" or "i686").
+ - The architecture for the container (e.g. "x86_64" or "i686").
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
required: false
config:
@@ -419,7 +419,7 @@ options:
state:
description:
- Indicates desired state of the instance.
- - If C(current), the current state of the VM will be fecthed. You can acces it with C(results.status)
+ - If C(current), the current state of the VM will be fecthed. You can access it with C(results.status)
choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
required: false
default: present
@@ -667,7 +667,7 @@ class RHEVConn(object):
setChanged()
try:
NIC.update()
- setMsg('iface has succesfully been updated.')
+ setMsg('iface has successfully been updated.')
except Exception as e:
setMsg("Failed to update the iface.")
setMsg(str(e))
@@ -17,7 +17,7 @@ Naming
detail. A good example of this are floating IPs, which can come from either
Nova or Neutron, but which one they come from is immaterial to an end user.
* If the module is one that a cloud admin would expect to use, it should be
- be named with the service and the resouce, such as os\_keystone\_domain.
+ be named with the service and the resource, such as os\_keystone\_domain.
* If the module is one that a cloud admin and a cloud consumer could both use,
the cloud consumer rules apply.

@@ -53,7 +53,7 @@ Libraries
OpenStack Client libraries. The OpenStack Client libraries do no have end
users as a primary audience, they are for intra-server communication. The
python-openstacksdk is the future there, and shade will migrate to it when
- its ready in a manner that is not noticable to ansible users.
+ its ready in a manner that is not noticeable to ansible users.

Testing
-------
@@ -284,7 +284,7 @@ def ensure_user_role_exists(keystone, user_name, tenant_name, role_name,

Return (True, id) if a new role was created or if the role was newly
assigned to the user for the tenant. (False, id) if the role already
- exists and was already assigned to the user ofr the tenant.
+ exists and was already assigned to the user for the tenant.

"""
# Check if the user has the role in the tenant
@@ -153,7 +153,7 @@ def main():
if server:
cloud.inspect_machine(server['uuid'], module.params['wait'])
# TODO(TheJulia): diff properties, ?and ports? and determine
- # if a change occured. In theory, the node is always changed
+ # if a change occurred. In theory, the node is always changed
# if introspection is able to update the record.
module.exit_json(changed=True,
ansible_facts=server['properties'])
@@ -58,7 +58,7 @@ options:
default: null
project:
description:
- - Name or ID of the project to scope the role assocation to.
+ - Name or ID of the project to scope the role association to.
If you are using keystone version 2, then this value is required.
required: false
default: null
@@ -45,7 +45,7 @@ options:
default: present
choices: ['present', 'absent']
description:
- - Determines wether the backend is to be created/modified
+ - Determines whether the backend is to be created/modified
or deleted
probe:
required: false
@@ -401,7 +401,7 @@ class VmsModule(BaseModule):
"""
oVirt in version 4.1 doesn't support search by template+version_number,
so we need to list all templates with specific name and then iterate
- throught it's version until we find the version we look for.
+ through it's version until we find the version we look for.
"""
template = None
if self._module.params['template']:
@@ -109,12 +109,12 @@ options:
default: 1
subscription_user:
description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
default: null
subscription_password:
description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
default: null
wait:
@@ -42,11 +42,11 @@ options:
choices: [ "us/las", "de/fra", "de/fkb" ]
subscription_user:
description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
@@ -44,11 +44,11 @@ options:
required: true
subscription_user:
description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
@@ -87,11 +87,11 @@ options:
required: false
subscription_user:
description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
@@ -40,11 +40,11 @@ options:
required: true
subscription_user:
description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
- - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
@@ -118,7 +118,7 @@ options:
default: null
post_uri:
description:
- - URL of a post provisioning script ot be loaded and exectued on virtual instance
+ - URL of a post provisioning script to be loaded and executed on virtual instance
required: false
default: null
state:
@@ -794,7 +794,7 @@ class PyVmomiHelper(object):
clonespec_kwargs['config'].memoryMB = \
int(self.params['hardware']['memory_mb'])

- # lets try and assign a static ip addresss
+ # lets try and assign a static ip address
if self.params['customize'] is True:
ip_settings = list()
if self.params['ips']:
@@ -84,7 +84,7 @@ options:
default: None
vm_shell_env:
description:
- - Comma seperated list of envirnoment variable, specified in the guest OS notation
+ - Comma separated list of envirnoment variable, specified in the guest OS notation
required: False
default: None
vm_shell_cwd:
@@ -56,7 +56,7 @@ options:
service_name:
description:
- Unique name for the service on a node, must be unique per node,
- required if registering a service. May be ommitted if registering
+ required if registering a service. May be omitted if registering
a node level check
required: false
service_id:
@@ -101,7 +101,7 @@ EXAMPLES = '''
- key: 'private/foo'
policy: deny

- - name: create an acl with specific token with both key and serivce rules
+ - name: create an acl with specific token with both key and service rules
consul_acl:
mgmt_token: 'some_management_acl'
name: 'Foo access'
@@ -186,7 +186,7 @@ def update_acl(module):
changed = True
except Exception as e:
module.fail_json(
- msg="No token returned, check your managment key and that \
+ msg="No token returned, check your management key and that \
the host is in the acl datacenter %s" % e)
except Exception as e:
module.fail_json(msg="Could not create/update acl %s" % e)
@@ -171,7 +171,7 @@ EXAMPLES = '''
roles: readWriteAnyDatabase
state: present

- # add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is usefull for oplog access (MONGO_OPLOG_URL).
+ # add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# please notice the credentials must be added to the 'admin' database because the 'local' database is not syncronized and can't receive user credentials
# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
@@ -107,7 +107,7 @@ options:
"""

EXAMPLES = r"""
- - name: insert/update "Match User" configuation block in /etc/ssh/sshd_config
+ - name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
blockinfile:
dest: /etc/ssh/sshd_config
block: |
@@ -37,7 +37,7 @@ options:
basedir:
description:
- Path of a base directory in which the patch file will be applied.
- May be ommitted when C(dest) option is specified, otherwise required.
+ May be omitted when C(dest) option is specified, otherwise required.
required: false
dest:
description:
@@ -71,7 +71,7 @@ options:
user_certificate:
description:
- List of Base-64 encoded server certificates.
- - If option is ommitted certificates will not be checked or changed.
+ - If option is omitted certificates will not be checked or changed.
- If an emtpy list is passed all assigned certificates will be removed.
- Certificates already assigned but not passed will be removed.
required: false
@@ -108,7 +108,7 @@ def unfollow_log(module, le_path, logs):

removed_count = 0

- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for log in logs:
# Query the log first, to see if we even need to remove.
if not query_log_status(module, le_path, log):
@@ -1526,7 +1526,7 @@ class Host(LogicMonitor):
groups,
properties,
alertenable):
- """Return a property formated hash for the
+ """Return a property formatted hash for the
creation of a host using the rpc function"""
self.module.debug("Running Host._build_host_hash...")

@@ -2017,7 +2017,7 @@ class Hostgroup(LogicMonitor):
description,
properties,
alertenable):
- """Return a property formated hash for the
+ """Return a property formatted hash for the
creation of a hostgroup using the rpc function"""
self.module.debug("Running Hostgroup._build_host_hash")

@@ -120,7 +120,7 @@ notes:
so if Zabbix server's time and host's time are not synchronized,
you will get strange results.
- Install required module with 'pip install zabbix-api' command.
- - Checks existance only by maintenance name.
+ - Checks existence only by maintenance name.
'''

EXAMPLES = '''
@@ -349,7 +349,7 @@ def main():

(rc, maintenance, error) = get_maintenance_id(zbx, name)
if rc != 0:
- module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+ module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

if not maintenance:
if not host_names and not host_groups:
@@ -368,7 +368,7 @@ def main():

(rc, maintenance, error) = get_maintenance_id(zbx, name)
if rc != 0:
- module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+ module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

if maintenance:
if module.check_mode:
@@ -94,7 +94,7 @@ options:
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuruation to use as the base
- config for comparision.
+ config for comparison.
required: false
default: null
"""
@@ -153,7 +153,7 @@ class BigIpDeviceNtp(object):
r = self.api.tm.sys.ntp.load()

if hasattr(r, 'servers'):
- # Deliberately using sets to supress duplicates
+ # Deliberately using sets to suppress duplicates
p['servers'] = set([str(x) for x in r.servers])
if hasattr(r, 'timezone'):
p['timezone'] = str(r.timezone)
@@ -284,7 +284,7 @@ class BigIpDeviceSshd(object):
r = self.api.tm.sys.sshd.load()

if hasattr(r, 'allow'):
- # Deliberately using sets to supress duplicates
+ # Deliberately using sets to suppress duplicates
p['allow'] = set([str(x) for x in r.allow])
if hasattr(r, 'banner'):
p['banner'] = str(r.banner)
@@ -193,7 +193,7 @@ class BigIpGtmDatacenter(object):
)

if hasattr(r, 'servers'):
- # Deliberately using sets to supress duplicates
+ # Deliberately using sets to suppress duplicates
p['servers'] = set([str(x) for x in r.servers])
if hasattr(r, 'contact'):
p['contact'] = str(r.contact)
@@ -105,7 +105,7 @@ options:
ratio:
description:
- Pool member ratio weight. Valid values range from 1 through 100.
- New pool members -- unless overriden with this value -- default
+ New pool members -- unless overridden with this value -- default
to 1.
required: false
default: null
@@ -536,7 +536,7 @@ class BigIpSelfIp(object):
BIG-IP, we need to massage the values that are provided by the
user so that they include the partition.

- :return: List of vlans formatted with preceeding partition
+ :return: List of vlans formatted with preceding partition
"""
partition = self.params['partition']
vlans = self.api.tm.net.vlans.get_collection()
@@ -111,10 +111,10 @@ options:
- Specify if the configuration receives mirrored traffic.
pn_unknown_ucast_level:
description:
- - Specify an unkown unicast level in percent. The default value is 100%.
+ - Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- - Specify an unkown multicast level in percent. The default value is 100%.
+ - Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
@@ -56,7 +56,7 @@ options:
required: false
integrity:
description:
- - Hashing algoritm, required if version is v3
+ - Hashing algorithm, required if version is v3
choices: [ 'md5', 'sha' ]
required: false
authkey:
@@ -65,7 +65,7 @@ options:
required: false
privacy:
description:
- - Encryption algoritm, required if level is authPriv
+ - Encryption algorithm, required if level is authPriv
choices: [ 'des', 'aes' ]
required: false
privkey:
@@ -43,7 +43,7 @@ options:
remote_max_checkpoints:
description:
- The I(remote_max_checkpoints) argument configures the maximum
- number of rollback files that can be transfered and saved to
+ number of rollback files that can be transferred and saved to
a remote location. Valid values for this argument are in the
range of 1 to 50
required: false
@@ -84,7 +84,7 @@ def wakeonlan(module, mac, broadcast, port):

mac_orig = mac

- # Remove possible seperator from MAC address
+ # Remove possible separator from MAC address
if len(mac) == 12 + 5:
mac = mac.replace(mac[2], '')

@@ -182,7 +182,7 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
if color == "normal" and text is not None:
payload = dict(text=html_escape(text))
elif text is not None:
- # With a custom color we have to set the message as attachment, and explicitely turn markdown parsing on for it.
+ # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
payload = dict(attachments=[dict(text=html_escape(text), color=color, mrkdwn_in=["text"])])
if channel is not None:
if (channel[0] == '#') or (channel[0] == '@'):
@@ -149,7 +149,7 @@ def has_changed(string):
return "Nothing to install or update" not in string

def get_available_options(module, command='install'):
- # get all availabe options from a composer command using composer help to json
+ # get all available options from a composer command using composer help to json
rc, out, err = composer_command(module, "help %s --format=json" % command)
if rc != 0:
output = parse_out(err)
@@ -120,7 +120,7 @@ def query_package(module, name, state="present"):

def remove_packages(module, packages):
remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated = query_package(module, package)
@@ -112,7 +112,7 @@ def update_package_db(module):
def remove_packages(module, packages):

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
@@ -108,7 +108,7 @@ def remove_packages(module, port_path, packages):
""" Uninstalls one or more packages if installed. """

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, port_path, package):
@@ -448,7 +448,7 @@ def upgrade_packages(module):
# Attempt to upgrade all packages.
rc, stdout, stderr = execute_command("%s" % upgrade_cmd, module)

- # Try to find any occurance of a package changing version like:
+ # Try to find any occurrence of a package changing version like:
# "bzip2-1.0.6->1.0.6p0: ok".
match = re.search("\W\w.+->.+: ok\W", stdout)
if match:
@@ -111,7 +111,7 @@ def remove_packages(module, opkg_path, packages):
force = "--force-%s" % force

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, opkg_path, package):
@@ -216,7 +216,7 @@ def remove_packages(module, pacman_path, packages):
args = "R"

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated, unknown = query_package(module, pacman_path, package)
@@ -249,7 +249,7 @@ def remove_packages(module, packages):

remove_c = 0

- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
@@ -141,7 +141,7 @@ def pkgng_older_than(module, pkgng_path, compare_version):
def remove_packages(module, pkgng_path, packages, dir_arg):

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, dir_arg):
@@ -109,7 +109,7 @@ def matching_packages(module, name):

ports_glob_path = module.get_bin_path('ports_glob', True)
rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
- #counts the numer of packages found
+ # counts the number of packages found
occurrences = out.count('\n')
if occurrences == 0:
name_without_digits = re.sub('[0-9]', '', name)
@@ -130,7 +130,7 @@ def remove_packages(module, packages):
pkg_delete_path = module.get_bin_path('pkg', True)
pkg_delete_path = pkg_delete_path + " delete -y"

- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
@@ -94,7 +94,7 @@ def query_package(module, slackpkg_path, name):
def remove_packages(module, slackpkg_path, packages):

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, slackpkg_path, package):
@@ -82,7 +82,7 @@ def compare_package(version1, version2):
Return values:
-1 first minor
0 equal
- 1 fisrt greater """
+ 1 first greater """

def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
@@ -178,7 +178,7 @@ def main():
rc, output = install_package(module, depot, name)

if not rc:
- msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot
+ msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
changed = True

else:
@@ -124,7 +124,7 @@ def update_package_db(module):
def remove_packages(module, packages):

remove_c = 0
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
@@ -161,7 +161,7 @@ def upgrade(module, xbps_path):
def remove_packages(module, xbps_path, packages):
"""Returns true if package removal succeeds"""
changed_packages = []
- # Using a for loop incase of error, we can report the package that failed
+ # Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated = query_package(module, xbps_path, package)
@@ -80,7 +80,7 @@ options:
- whether the list of target nodes on the portal should be
(re)discovered and added to the persistent iscsi database.
Keep in mind that iscsiadm discovery resets configurtion, like node.startup
- to manual, hence combined with auto_node_startup=yes will allways return
+ to manual, hence combined with auto_node_startup=yes will always return
a changed state.
show_nodes:
required: false
@@ -127,7 +127,7 @@ def _load_dist_subclass(cls, *args, **kwargs):

class Svc(object):
"""
- Main class that handles daemontools, can be subclassed and overriden in case
+ Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc
"""

@@ -81,7 +81,7 @@ from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *

class EjabberdUserException(Exception):
- """ Base exeption for EjabberdUser class object """
+ """ Base exception for EjabberdUser class object """
pass

class EjabberdUser(object):
@@ -128,7 +128,7 @@ options:
- Defines whether to install plugin dependencies.

notes:
- - Plugin installation shoud be run under root or the same user which owns
+ - Plugin installation should be run under root or the same user which owns
the plugin files on the disk. Only if the plugin is not installed yet and
no version is specified, the API installation is performed which requires
only the Web UI credentials.
@@ -42,14 +42,14 @@ description:
- "To use this module, it has to be executed at least twice. Either as two
different tasks in the same run or during multiple runs."
- "Between these two tasks you have to fulfill the required steps for the
- choosen challenge by whatever means necessary. For http-01 that means
+ chosen challenge by whatever means necessary. For http-01 that means
creating the necessary challenge file on the destination webserver. For
dns-01 the necessary dns record has to be created. tls-sni-02 requires
you to create a SSL certificate with the appropriate subjectAlternativeNames.
It is I(not) the responsibility of this module to perform these steps."
- "For details on how to fulfill these challenges, you might have to read through
U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)"
- - "Although the defaults are choosen so that the module can be used with
+ - "Although the defaults are chosen so that the module can be used with
the Let's Encrypt CA, the module can be used with any service using the ACME
protocol."
requirements:
@@ -293,7 +293,7 @@ class ACMEDirectory(object):
class ACMEAccount(object):
'''
ACME account object. Handles the authorized communication with the
- ACME server. Provides access to accound bound information like
+ ACME server. Provides access to account bound information like
the currently active authorizations and valid certificates
'''
def __init__(self,module):
@@ -607,7 +607,7 @@ class ACMEClient(object):
keyauthorization = self.account.get_keyauthorization(token)

# NOTE: tls-sni-01 is not supported by choice
- # too complex to be usefull and tls-sni-02 is an alternative
+ # too complex to be useful and tls-sni-02 is an alternative
# as soon as it is implemented server side
if type == 'http-01':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2
@@ -637,7 +637,7 @@ class ACMEClient(object):
def _validate_challenges(self,auth):
'''
Validate the authorization provided in the auth dict. Returns True
- when the validation was successfull and False when it was not.
+ when the validation was successful and False when it was not.
'''
for challenge in auth['challenges']:
if self.challenge != challenge['type']:
@@ -716,7 +716,7 @@ class ACMEClient(object):
def do_challenges(self):
'''
Create new authorizations for all domains of the CSR and return
- the challenge details for the choosen challenge type.
+ the challenge details for the chosen challenge type.
'''
data = {}
for domain in self.domains:
@@ -175,7 +175,7 @@ Try {
Set-Attr $result "changed" $true;
}
Catch {
- Fail-Json $result "an exception occured when adding the specified rule"
+ Fail-Json $result "an exception occurred when adding the specified rule"
}
}
ElseIf ($state -eq "absent" -And $match -eq $true) {
@@ -185,7 +185,7 @@ Try {
Set-Attr $result "changed" $true;
}
Catch {
- Fail-Json $result "an exception occured when removing the specified rule"
+ Fail-Json $result "an exception occurred when removing the specified rule"
}
}
Else {
@@ -200,7 +200,7 @@ Try {
}
}
Catch {
- Fail-Json $result "an error occured when attempting to $state $rights permission(s) on $path for $user"
+ Fail-Json $result "an error occurred when attempting to $state $rights permission(s) on $path for $user"
}

Exit-Json $result
@@ -80,7 +80,7 @@ Try {
}
}
Catch {
- Fail-Json $result "an error occured when attempting to disable inheritance"
+ Fail-Json $result "an error occurred when attempting to disable inheritance"
}

Exit-Json $result
@@ -48,7 +48,7 @@ options:
required: true
direction:
description:
- - is this rule for inbound or outbound trafic
+ - is this rule for inbound or outbound traffic
default: null
required: true
choices: ['in', 'out']
@@ -50,7 +50,7 @@ options:
aliases: []
attributes:
description:
- - Application Pool attributes from string where attributes are seperated by a pipe and attribute name/values by colon Ex. "foo:1|bar:2"
+ - Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by colon Ex. "foo:1|bar:2"
required: false
default: null
aliases: []
@@ -88,7 +88,7 @@ try {
if (-not $curent_bindings -and $state -eq 'present') {
New-WebBinding @binding_parameters -Force

- # Select certificat
+ # Select certificate
if($certificateHash -ne $FALSE) {

$ip = $binding_parameters["IPAddress"]
@@ -49,7 +49,7 @@ $bind_hostname = Get-Attr $params "hostname" $FALSE;
$bind_ssl = Get-Attr $params "ssl" $FALSE;

# Custom site Parameters from string where properties
- # are seperated by a pipe and property name/values by colon.
+ # are separated by a pipe and property name/values by colon.
# Ex. "foo:1|bar:2"
$parameters = Get-Attr $params "parameters" $null;
if($parameters -ne $null) {
@@ -91,7 +91,7 @@ options:
aliases: []
parameters:
description:
- - Custom site Parameters from string where properties are seperated by a pipe and property name/values by colon Ex. "foo:1|bar:2"
+ - Custom site Parameters from string where properties are separated by a pipe and property name/values by colon Ex. "foo:1|bar:2"
required: false
default: null
aliases: []
@@ -116,7 +116,7 @@ function BackupFile($path) {
function Present($dest, $regexp, $line, $insertafter, $insertbefore, $create, $backup, $backrefs, $validate, $encodingobj, $linesep) {

# Note that we have to clean up the dest path because ansible wants to treat / and \ as
- # interchangable in windows pathnames, but .NET framework internals do not support that.
+ # interchangeable in windows pathnames, but .NET framework internals do not support that.
$cleandest = $dest.Replace("/", "\");

# Check if destination exists. If it does not exist, either create it if create == "yes"
@@ -130,7 +130,7 @@ Try {
}
}
Catch {
- Fail-Json $result "an error occured when attempting to change owner on $path for $user"
+ Fail-Json $result "an error occurred when attempting to change owner on $path for $user"
}

Exit-Json $result
@@ -144,9 +144,9 @@ Function Get-RegistryValueIgnoreError
}
}
catch
- {
+ {
$exceptionText = ($_ | Out-String).Trim()
- Write-Verbose "Exception occured in Get-RegistryValueIgnoreError: $exceptionText"
+ Write-Verbose "Exception occurred in Get-RegistryValueIgnoreError: $exceptionText"
}
return $null
}
@@ -70,7 +70,7 @@ notes:
"""

EXAMPLES = """
- # Syncs the contents of one diretory to another.
+ # Syncs the contents of one directory to another.
$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo"

# Sync the contents of one directory to another, including subdirectories.
@@ -245,7 +245,7 @@ Try {
}
}
Catch {
- Fail-Json $result "an error occured when attempting to create share $name"
+ Fail-Json $result "an error occurred when attempting to create share $name"
}

- Exit-Json $result
+ Exit-Json $result
@@ -31,7 +31,7 @@ $timezone = Get-Attr -obj $params -name timezone -failifempty $true -resultobj $
Try {
# Get the current timezone set
$currentTZ = $(tzutil.exe /g)
- If ($LASTEXITCODE -ne 0) { Throw "An error occured when getting the current machine's timezone setting." }
+ If ($LASTEXITCODE -ne 0) { Throw "An error occurred when getting the current machine's timezone setting." }

If ( $currentTZ -eq $timezone ) {
Exit-Json $result "$timezone is already set on this machine"
@@ -40,7 +40,7 @@ Try {
$tzExists = $false
#Check that timezone can even be set (if it is listed from tzutil as an available timezone to the machine)
$tzList = $(tzutil.exe /l)
- If ($LASTEXITCODE -ne 0) { Throw "An error occured when listing the available timezones." }
+ If ($LASTEXITCODE -ne 0) { Throw "An error occurred when listing the available timezones." }
ForEach ($tz in $tzList) {
If ( $tz -eq $timezone ) {
$tzExists = $true
@@ -50,9 +50,9 @@ Try {

If ( $tzExists ) {
tzutil.exe /s "$timezone"
- If ($LASTEXITCODE -ne 0) { Throw "An error occured when setting the specified timezone with tzutil." }
+ If ($LASTEXITCODE -ne 0) { Throw "An error occurred when setting the specified timezone with tzutil." }
$newTZ = $(tzutil.exe /g)
- If ($LASTEXITCODE -ne 0) { Throw "An error occured when getting the current machine's timezone setting." }
+ If ($LASTEXITCODE -ne 0) { Throw "An error occurred when getting the current machine's timezone setting." }

If ( $timezone -eq $newTZ ) {
$result.changed = $true
@@ -68,4 +68,4 @@ Catch {
}


- Exit-Json $result;
+ Exit-Json $result;
@@ -369,7 +369,7 @@ class VaultEditor:
try:
r = call(['shred', tmp_path])
except (OSError, ValueError):
- # shred is not available on this system, or some other error occured.
+ # shred is not available on this system, or some other error occurred.
# ValueError caught because OS X El Capitan is raising an
# exception big enough to hit a limit in python2-2.7.11 and below.
# Symptom is ValueError: insecure pickle when shred is not
@@ -557,7 +557,7 @@ class PlayContext(Base):
if self.become_user:
flags += ' -u %s ' % self.become_user

- #FIXME: make shell independant
+ #FIXME: make shell independent
becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)

elif self.become_method == 'dzdo':
@@ -427,7 +427,7 @@ class Task(Base, Conditional, Taggable, Become):
path_stack = []

dep_chain = self.get_dep_chain()
- # inside role: add the dependency chain from current to dependant
+ # inside role: add the dependency chain from current to dependent
if dep_chain:
path_stack.extend(reversed([x._role_path for x in dep_chain]))

@@ -466,7 +466,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
3 = its a directory, not a file
4 = stat module failed, likely due to not finding python
'''
- x = "0" # unknown error has occured
+ x = "0" # unknown error has occurred
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
if remote_stat['exists'] and remote_stat['isdir']:
@@ -45,7 +45,7 @@ class CallbackModule(CallbackBase):
FOREMAN_SSL_CERT: X509 certificate to authenticate to Foreman if
https is used
FOREMAN_SSL_KEY: the corresponding private key
- FOREMAN_SSL_VERIFY: wether to verify the Foreman certificate
+ FOREMAN_SSL_VERIFY: whether to verify the Foreman certificate
It can be set to '1' to verify SSL certificates using the
installed CAs or to a path pointing to a CA bundle. Set to '0'
to disable certificate checking.
@@ -479,7 +479,7 @@ class StrategyBase:

if original_task.action == 'include_vars':
for (var_name, var_value) in iteritems(result_item['ansible_facts']):
- # find the host we're actually refering too here, which may
+ # find the host we're actually referring too here, which may
# be a host that is not really in inventory at all

if original_task.run_once:
@@ -30,7 +30,7 @@ options:
description:
- "If I(True) the module will fetch additional data from the API."
- "It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
- attributes of the nested entities by specifing C(nested_attributes)."
+ attributes of the nested entities by specifying C(nested_attributes)."
version_added: "2.3"
nested_attributes:
description:
@@ -20,14 +20,14 @@

class ModuleDocFragment(object):

- # facts standart oVirt documentation fragment
+ # facts standard oVirt documentation fragment
DOCUMENTATION = '''
options:
fetch_nested:
description:
- "If I(True) the module will fetch additional data from the API."
- "It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
- attributes of the nested entities by specifing C(nested_attributes)."
+ attributes of the nested entities by specifying C(nested_attributes)."
version_added: "2.3"
nested_attributes:
description: