mirror of https://github.com/ansible-collections/community.general.git (synced 2025-06-03 14:59:09 -07:00)
[wip] Remove network content (#84)
* rebase
* remove broken symlinks
* more deletes
* restore cs_* integration tests
* More deletes - from Felix
* cs_common
* Remove some more ignores
parent 8d203225d3
commit c313c825f4
2215 changed files with 0 additions and 333978 deletions
@@ -1,383 +0,0 @@
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


# Legacy

try:
    import bigsuds
    bigsuds_found = True
except ImportError:
    bigsuds_found = False


from ansible.module_utils.basic import env_fallback


def f5_argument_spec():
    return dict(
        server=dict(
            type='str',
            required=True,
            fallback=(env_fallback, ['F5_SERVER'])
        ),
        user=dict(
            type='str',
            required=True,
            fallback=(env_fallback, ['F5_USER'])
        ),
        password=dict(
            type='str',
            aliases=['pass', 'pwd'],
            required=True,
            no_log=True,
            fallback=(env_fallback, ['F5_PASSWORD'])
        ),
        validate_certs=dict(
            default='yes',
            type='bool',
            fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
        ),
        server_port=dict(
            type='int',
            default=443,
            fallback=(env_fallback, ['F5_SERVER_PORT'])
        ),
        state=dict(
            type='str',
            default='present',
            choices=['present', 'absent']
        ),
        partition=dict(
            type='str',
            default='Common',
            fallback=(env_fallback, ['F5_PARTITION'])
        )
    )


def f5_parse_arguments(module):
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")

    if module.params['validate_certs']:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(
                msg="bigsuds does not support verifying certificates with python < 2.7.9. "
                    "Either update python or set validate_certs=False on the task")

    return (
        module.params['server'],
        module.params['user'],
        module.params['password'],
        module.params['state'],
        module.params['partition'],
        module.params['validate_certs'],
        module.params['server_port']
    )


def bigip_api(bigip, user, password, validate_certs, port=443):
    try:
        if bigsuds.__version__ >= '1.0.4':
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
        elif bigsuds.__version__ == '1.0.3':
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
        else:
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
    except TypeError:
        # bigsuds < 1.0.3, no verify param
        if validate_certs:
            # Note: verified we have SSLContext when we parsed params
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
        else:
            import ssl
            if hasattr(ssl, 'SSLContext'):
                # Really, you should never do this. It disables certificate
                # verification *globally*. But since older bigip libraries
                # don't give us a way to toggle verification we need to
                # disable it at the global level.
                # From https://www.python.org/dev/peps/pep-0476/#id29
                ssl._create_default_https_context = ssl._create_unverified_context
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)

    return api


# Fully Qualified name (with the partition)
def fq_name(partition, name):
    if name is not None and not name.startswith('/'):
        return '/%s/%s' % (partition, name)
    return name


# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
    if list_names is None:
        return None
    return map(lambda x: fq_name(partition, x), list_names)
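As a quick sanity check of the two helpers above (a sketch, assuming this file is importable): fq_name qualifies bare names with the partition and passes through anything already qualified, and fq_list_names maps it over a list (returning a map object on Python 3):

    fq_name('Common', 'my-pool')        # -> '/Common/my-pool'
    fq_name('Common', '/Dev/my-pool')   # -> '/Dev/my-pool' (already qualified)
    fq_name('Common', None)             # -> None
    list(fq_list_names('Common', ['a', '/Dev/b']))  # -> ['/Common/a', '/Dev/b']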

def to_commands(module, commands):
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)


def run_commands(module, commands, check_rc=True):
    responses = list()
    commands = to_commands(module, to_list(commands))
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(to_text(out, errors='surrogate_then_replace'))
    return responses


# New style

from abc import ABCMeta, abstractproperty
from collections import defaultdict

try:
    from f5.bigip import ManagementRoot as BigIpMgmt
    from f5.bigip.contexts import TransactionContextManager as BigIpTxContext

    from f5.bigiq import ManagementRoot as BigIqMgmt

    from f5.iworkflow import ManagementRoot as iWorkflowMgmt
    from icontrol.exceptions import iControlUnexpectedHTTPError
    HAS_F5SDK = True
except ImportError:
    HAS_F5SDK = False


from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems, with_metaclass
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils._text import to_text


F5_COMMON_ARGS = dict(
    server=dict(
        type='str',
        required=True,
        fallback=(env_fallback, ['F5_SERVER'])
    ),
    user=dict(
        type='str',
        required=True,
        fallback=(env_fallback, ['F5_USER'])
    ),
    password=dict(
        type='str',
        aliases=['pass', 'pwd'],
        required=True,
        no_log=True,
        fallback=(env_fallback, ['F5_PASSWORD'])
    ),
    validate_certs=dict(
        default='yes',
        type='bool',
        fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
    ),
    server_port=dict(
        type='int',
        default=443,
        fallback=(env_fallback, ['F5_SERVER_PORT'])
    ),
    state=dict(
        type='str',
        default='present',
        choices=['present', 'absent']
    ),
    partition=dict(
        type='str',
        default='Common',
        fallback=(env_fallback, ['F5_PARTITION'])
    )
)


class AnsibleF5Client(object):
    def __init__(self, argument_spec=None, supports_check_mode=False,
                 mutually_exclusive=None, required_together=None,
                 required_if=None, required_one_of=None, add_file_common_args=False,
                 f5_product_name='bigip', sans_state=False, sans_partition=False):

        self.f5_product_name = f5_product_name

        merged_arg_spec = dict()
        merged_arg_spec.update(F5_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        if sans_state:
            del merged_arg_spec['state']
        if sans_partition:
            del merged_arg_spec['partition']
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            add_file_common_args=add_file_common_args
        )

        self.check_mode = self.module.check_mode
        self._connect_params = self._get_connect_params()

        if 'transport' not in self.module.params or self.module.params['transport'] != 'cli':
            try:
                self.api = self._get_mgmt_root(
                    f5_product_name, **self._connect_params
                )
            except iControlUnexpectedHTTPError as exc:
                self.fail(str(exc))

    def fail(self, msg):
        self.module.fail_json(msg=msg)

    def _get_connect_params(self):
        params = dict(
            user=self.module.params['user'],
            password=self.module.params['password'],
            server=self.module.params['server'],
            server_port=self.module.params['server_port'],
            validate_certs=self.module.params['validate_certs']
        )
        return params

    def _get_mgmt_root(self, type, **kwargs):
        if type == 'bigip':
            return BigIpMgmt(
                kwargs['server'],
                kwargs['user'],
                kwargs['password'],
                port=kwargs['server_port'],
                token='tmos'
            )
        elif type == 'iworkflow':
            return iWorkflowMgmt(
                kwargs['server'],
                kwargs['user'],
                kwargs['password'],
                port=kwargs['server_port'],
                token='local'
            )
        elif type == 'bigiq':
            return BigIqMgmt(
                kwargs['server'],
                kwargs['user'],
                kwargs['password'],
                port=kwargs['server_port'],
                auth_provider='local'
            )

    def reconnect(self):
        """Attempts to reconnect to a device

        The existing token from a ManagementRoot can become invalid if you,
        for example, upgrade the device (such as is done in the *_software
        module).

        This method can be used to reconnect to a remote device without
        having to re-instantiate the ArgumentSpec and AnsibleF5Client
        classes. It will use the same values that were initially provided
        to those classes.

        :return:
        :raises iControlUnexpectedHTTPError
        """
        self.api = self._get_mgmt_root(
            self.f5_product_name, **self._connect_params
        )


class AnsibleF5Parameters(object):
    def __init__(self, params=None):
        self._values = defaultdict(lambda: None)
        self._values['__warnings'] = []
        if params:
            self.update(params=params)

    def update(self, params=None):
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    dict_to_use = self.api_map
                    map_key = self.api_map[k]
                else:
                    dict_to_use = self._values
                    map_key = k

                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    def __getattr__(self, item):
        # Ensures that properties that weren't defined, and therefore stashed
        # in the `_values` dict, will be retrievable.
        return self._values[item]

    @property
    def partition(self):
        if self._values['partition'] is None:
            return 'Common'
        return self._values['partition'].strip('/')

    @partition.setter
    def partition(self, value):
        self._values['partition'] = value

    def _filter_params(self, params):
        return dict((k, v) for k, v in iteritems(params) if v is not None)


class F5ModuleError(Exception):
    pass
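A minimal sketch of how a module would build on AnsibleF5Parameters; the subclass and its fields are hypothetical, but they exercise the api_map and property paths in update() shown above:

    class PoolParameters(AnsibleF5Parameters):
        api_map = {'allowNat': 'allow_nat'}  # API key -> module-side attribute

        @property
        def allow_nat(self):
            # read-only property: update() stashes the value in _values
            return self._values['allow_nat']

    p = PoolParameters(params={'allowNat': 'yes', 'partition': '/Common/'})
    p.allow_nat   # -> 'yes'
    p.partition   # -> 'Common' (the partition setter/getter strips slashes)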
@@ -1,153 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json

from ansible.module_utils.urls import fetch_url


AXAPI_PORT_PROTOCOLS = {
    'tcp': 2,
    'udp': 3,
}

AXAPI_VPORT_PROTOCOLS = {
    'tcp': 2,
    'udp': 3,
    'fast-http': 9,
    'http': 11,
    'https': 12,
}


def a10_argument_spec():
    return dict(
        host=dict(type='str', required=True),
        username=dict(type='str', aliases=['user', 'admin'], required=True),
        password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
        write_config=dict(type='bool', default=False)
    )


def axapi_failure(result):
    if 'response' in result and result['response'].get('status') == 'fail':
        return True
    return False


def axapi_call(module, url, post=None):
    '''
    Returns a datastructure based on the result of the API call
    '''
    rsp, info = fetch_url(module, url, data=post)
    if not rsp or info['status'] >= 400:
        module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
    try:
        raw_data = rsp.read()
        data = json.loads(raw_data)
    except ValueError:
        # at least one API call (system.action.write_config) returns
        # XML even when JSON is requested, so do some minimal handling
        # here to prevent failing even when the call succeeded
        if 'status="ok"' in raw_data.lower():
            data = {"response": {"status": "OK"}}
        else:
            data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
    except Exception:
        module.fail_json(msg="could not read the result from the host")
    finally:
        rsp.close()
    return data


def axapi_authenticate(module, base_url, username, password):
    url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
    result = axapi_call(module, url)
    if axapi_failure(result):
        return module.fail_json(msg=result['response']['err']['msg'])
    sessid = result['session_id']
    return base_url + '&session_id=' + sessid


def axapi_authenticate_v3(module, base_url, username, password):
    url = base_url
    auth_payload = {"credentials": {"username": username, "password": password}}
    result = axapi_call_v3(module, url, method='POST', body=json.dumps(auth_payload))
    if axapi_failure(result):
        return module.fail_json(msg=result['response']['err']['msg'])
    signature = result['authresponse']['signature']
    return signature
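The v3 flow returns a bare signature that later calls must carry in an Authorization header. A rough sketch (the host, URLs, and module are placeholders):

    base_url = 'https://10.0.0.5/axapi/v3/auth'  # hypothetical controller
    signature = axapi_authenticate_v3(module, base_url, 'admin', 'secret')
    # axapi_call_v3 then sends 'Authorization: A10 <signature>' on each request
    result = axapi_call_v3(module, 'https://10.0.0.5/axapi/v3/slb/virtual-server',
                           method='GET', signature=signature)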

def axapi_call_v3(module, url, method=None, body=None, signature=None):
    '''
    Returns a datastructure based on the result of the API call
    '''
    if signature:
        headers = {'content-type': 'application/json', 'Authorization': 'A10 %s' % signature}
    else:
        headers = {'content-type': 'application/json'}
    rsp, info = fetch_url(module, url, method=method, data=body, headers=headers)
    if not rsp or info['status'] >= 400:
        module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
    try:
        raw_data = rsp.read()
        data = json.loads(raw_data)
    except ValueError:
        # at least one API call (system.action.write_config) returns
        # XML even when JSON is requested, so do some minimal handling
        # here to prevent failing even when the call succeeded
        if 'status="ok"' in raw_data.lower():
            data = {"response": {"status": "OK"}}
        else:
            data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
    except Exception:
        module.fail_json(msg="could not read the result from the host")
    finally:
        rsp.close()
    return data


def axapi_enabled_disabled(flag):
    '''
    The axapi uses 0/1 integer values for flags, rather than strings
    or booleans, so convert the given flag to a 0 or 1. For now, params
    are specified as strings only so that's what we check.
    '''
    if flag == 'enabled':
        return 1
    else:
        return 0


def axapi_get_port_protocol(protocol):
    return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)


def axapi_get_vport_protocol(protocol):
    return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
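The flag and protocol helpers are simple table lookups; for illustration:

    axapi_enabled_disabled('enabled')      # -> 1
    axapi_enabled_disabled('disabled')     # -> 0 (anything but 'enabled' maps to 0)
    axapi_get_port_protocol('TCP')         # -> 2 (lookup is case-insensitive)
    axapi_get_vport_protocol('fast-http')  # -> 9
    axapi_get_vport_protocol('sctp')       # -> None (unknown protocol)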
@@ -1,129 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command

_DEVICE_CONFIGS = {}

aireos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
}
aireos_argument_spec = {
    'provider': dict(type='dict', options=aireos_provider_spec)
}

aireos_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
}
aireos_argument_spec.update(aireos_top_spec)


def sanitize(resp):
    # Takes the response from the device and strips whitespace from all lines.
    # AireOS adds extra leading whitespace, which netcfg would parse as a
    # parent/child hierarchy that AireOS does not actually use.
    # AireOS also adds trailing whitespace that is unused.
    cleaned = []
    for line in resp.splitlines():
        cleaned.append(line.strip())
    return '\n'.join(cleaned).strip()


def get_provider_argspec():
    return aireos_provider_spec


def check_args(module, warnings):
    pass


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    cmd = 'show run-config commands '
    cmd += ' '.join(flags)
    cmd = cmd.strip()

    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
        cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg


def to_commands(module, commands):
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)


def run_commands(module, commands, check_rc=True):
    responses = list()
    commands = to_commands(module, to_list(commands))
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(sanitize(to_text(out, errors='surrogate_then_replace')))
    return responses


def load_config(module, commands):

    rc, out, err = exec_command(module, 'config')
    if rc != 0:
        module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))

    for command in to_list(commands):
        if command == 'end':
            continue
        rc, out, err = exec_command(module, command)
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)

    exec_command(module, 'end')
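Commands may be passed to run_commands as plain strings or as prompt/answer dicts; to_commands normalizes both through ComplexList. A sketch, assuming module is an AnsibleModule attached to a network_cli connection (the commands and prompt text are illustrative):

    responses = run_commands(module, [
        'show sysinfo',                # plain command
        {'command': 'save config',     # command that needs confirmation
         'prompt': 'Are you sure',
         'answer': 'y'},
    ])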
@@ -1,180 +0,0 @@
#
# Copyright (c) 2017 Apstra Inc, <community@apstra.com>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

"""
This module adds shared support for Apstra AOS modules

In order to use this module, include it as part of your module

from ansible.module_utils.network.aos.aos import (check_aos_version, get_aos_session, find_collection_item,
                                                  content_to_dict, do_load_resource)

"""
import json

from distutils.version import LooseVersion

try:
    import yaml
    HAS_YAML = True
except ImportError:
    HAS_YAML = False

try:
    from apstra.aosom.session import Session

    HAS_AOS_PYEZ = True
except ImportError:
    HAS_AOS_PYEZ = False

from ansible.module_utils._text import to_native


def check_aos_version(module, min=False):
    """
    Check if the library aos-pyez is present.
    If provided, also check if the minimum version requirement is met
    """
    if not HAS_AOS_PYEZ:
        module.fail_json(msg='aos-pyez is not installed. Please see details '
                             'here: https://github.com/Apstra/aos-pyez')

    elif min:
        import apstra.aosom
        AOS_PYEZ_VERSION = apstra.aosom.__version__

        if LooseVersion(AOS_PYEZ_VERSION) < LooseVersion(min):
            module.fail_json(msg='aos-pyez >= %s is required for this module' % min)

    return True


def get_aos_session(module, auth):
    """
    Resume an existing session and return an AOS object.

    Args:
        auth (dict): An AOS session as obtained by aos_login module blocks::

            dict( token=<token>,
                  server=<ip>,
                  port=<port>
                )

    Return:
        Aos object
    """

    check_aos_version(module)

    aos = Session()
    aos.session = auth

    return aos


def find_collection_item(collection, item_name=False, item_id=False):
    """
    Find collection_item based on name or id from a collection object
    Both Collection_item and Collection Objects are provided by aos-pyez library

    Return
        collection_item: object corresponding to the collection type
    """
    my_dict = None

    if item_name:
        my_dict = collection.find(label=item_name)
    elif item_id:
        my_dict = collection.find(uid=item_id)

    if my_dict is None:
        return collection['']
    else:
        return my_dict


def content_to_dict(module, content):
    """
    Convert 'content' into a Python dict based on 'content_format'
    """

    # if not HAS_YAML:
    #     module.fail_json(msg="Python Library Yaml is not present, mandatory to use 'content'")

    content_dict = None

    # try:
    #     content_dict = json.loads(content.replace("\'", '"'))
    # except:
    #     module.fail_json(msg="Unable to convert 'content' from JSON, please check if valid")
    #
    # elif format in ['yaml', 'var']:

    try:
        content_dict = yaml.safe_load(content)

        if not isinstance(content_dict, dict):
            raise Exception()

        # Check if the dict is empty and return an error if it is
        if not content_dict:
            raise Exception()

    except Exception:
        module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")

    # replace the string with the dict
    module.params['content'] = content_dict

    return content_dict


def do_load_resource(module, collection, name):
    """
    Create a new object (collection.item) by loading a datastructure directly
    """

    try:
        item = find_collection_item(collection, name, '')
    except Exception:
        module.fail_json(msg="An error occurred while running 'find_collection_item'")

    if item.exists:
        module.exit_json(changed=False, name=item.name, id=item.id, value=item.value)

    # If not in check mode, apply the changes
    if not module.check_mode:
        try:
            item.datum = module.params['content']
            item.write()
        except Exception as e:
            module.fail_json(msg="Unable to write item content : %r" % to_native(e))

    module.exit_json(changed=True, name=item.name, id=item.id, value=item.value)
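content_to_dict is essentially a guarded yaml.safe_load (a JSON string parses too, since JSON is a subset of YAML) that fails the module unless the result is a non-empty dict. The happy path in isolation, with made-up content:

    import yaml

    content = "display_name: my-ip-pool\nsubnets:\n  - network: 192.168.59.0/24\n"
    data = yaml.safe_load(content)
    assert isinstance(data, dict) and data  # the two checks content_to_dict enforces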
@@ -1,113 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2019 APCON, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains utility methods
# APCON Networking

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import EntityCollection
from ansible.module_utils.connection import Connection, exec_command
from ansible.module_utils.connection import ConnectionError

_DEVICE_CONFIGS = {}
_CONNECTION = None


command_spec = {
    'command': dict(key=True),
}


def check_args(module, warnings):
    pass


def get_connection(module):
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module._socket_path)

    return _CONNECTION


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    cmd = ' '.join(flags).strip()

    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        conn = get_connection(module)
        out = conn.get(cmd)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    transform = EntityCollection(module, command_spec)
    commands = transform(commands)

    responses = list()

    for cmd in commands:
        out = connection.get(**cmd)
        responses.append(to_text(out, errors='surrogate_then_replace'))

    return responses


def load_config(module, config):
    try:
        conn = get_connection(module)
        conn.edit_config(config)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def get_defaults_flag(module):
    rc, out, err = exec_command(module, 'display running-config ?')
    out = to_text(out, errors='surrogate_then_replace')

    commands = set()
    for line in out.splitlines():
        if line:
            commands.add(line.strip().split()[0])

    if 'all' in commands:
        return 'all'
    else:
        return 'full'
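The defaults-flag probe above reduces to set membership over the first word of each help line; the same logic in isolation, with imagined CLI output:

    out = "all\nfull\ninterfaces\n"  # imagined output of 'display running-config ?'
    commands = set()
    for line in out.splitlines():
        if line:
            commands.add(line.strip().split()[0])
    flag = 'all' if 'all' in commands else 'full'  # -> 'all'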
@@ -1,131 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re

from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command

_DEVICE_CONFIGS = {}

aruba_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
}
aruba_argument_spec = {
    'provider': dict(type='dict', options=aruba_provider_spec)
}

aruba_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
}

aruba_argument_spec.update(aruba_top_spec)


def get_provider_argspec():
    return aruba_provider_spec


def check_args(module, warnings):
    pass


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    cmd = 'show running-config '
    cmd += ' '.join(flags)
    cmd = cmd.strip()

    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
        cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg


def sanitize(resp):
    # Takes response from device and adjusts leading whitespace to just 1 space
    cleaned = []
    for line in resp.splitlines():
        cleaned.append(re.sub(r"^\s+", " ", line))
    return '\n'.join(cleaned).strip()


def to_commands(module, commands):
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)


def run_commands(module, commands, check_rc=True):
    responses = list()
    commands = to_commands(module, to_list(commands))
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(to_text(out, errors='surrogate_then_replace'))
    return responses


def load_config(module, commands):

    rc, out, err = exec_command(module, 'configure terminal')
    if rc != 0:
        module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))

    for command in to_list(commands):
        if command == 'end':
            continue
        rc, out, err = exec_command(module, command)
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)

    exec_command(module, 'end')
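Unlike the AireOS sanitize, which strips leading whitespace entirely, this variant collapses it to a single space so the parent/child structure survives. For example:

    import re

    resp = "interface gigabitethernet0/0/1\n        description uplink\n"
    cleaned = '\n'.join(re.sub(r"^\s+", " ", line) for line in resp.splitlines()).strip()
    # -> 'interface gigabitethernet0/0/1\n description uplink'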
@@ -1,572 +0,0 @@
from __future__ import absolute_import

"""
Created on Aug 16, 2016

@author: Gaurav Rastogi (grastogi@avinetworks.com)
"""
import os
import re
import logging
import sys
from copy import deepcopy
from ansible.module_utils.basic import env_fallback

try:
    from ansible_collections.community.general.plugins.module_utils.network.avi.avi_api import (
        ApiSession, ObjectNotFound, avi_sdk_syslog_logger, AviCredentials, HAS_AVI)
except ImportError:
    HAS_AVI = False


if os.environ.get('AVI_LOG_HANDLER', '') != 'syslog':
    log = logging.getLogger(__name__)
else:
    # Ansible does not allow logging from the modules.
    log = avi_sdk_syslog_logger()


def _check_type_string(x):
    """
    :param x:
    :return: True if it is of type string
    """
    if isinstance(x, str):
        return True
    if sys.version_info[0] < 3:
        try:
            return isinstance(x, unicode)
        except NameError:
            return False
    return False


class AviCheckModeResponse(object):
    """
    Class to support ansible check mode.
    """

    def __init__(self, obj, status_code=200):
        self.obj = obj
        self.status_code = status_code

    def json(self):
        return self.obj


def ansible_return(module, rsp, changed, req=None, existing_obj=None,
                   api_context=None):
    """
    :param module: AnsibleModule
    :param rsp: ApiResponse from avi_api
    :param changed: boolean
    :param req: ApiRequest to avi_api
    :param existing_obj: object to be passed for debug output
    :param api_context: api login context

    Helper function to return the right ansible module exit based on the
    response status code and the changed flag.
    Returns: specific ansible module exit function
    """

    if rsp is not None and rsp.status_code > 299:
        return module.fail_json(
            msg='Error %d Msg %s req: %s api_context:%s ' % (
                rsp.status_code, rsp.text, req, api_context))
    api_creds = AviCredentials()
    api_creds.update_from_ansible_module(module)
    key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
                        api_creds.port)
    disable_fact = module.params.get('avi_disable_session_cache_as_fact')

    fact_context = None
    if not disable_fact:
        fact_context = module.params.get('api_context', {})
        if fact_context:
            fact_context.update({key: api_context})
        else:
            fact_context = {key: api_context}

    obj_val = rsp.json() if rsp else existing_obj

    if (obj_val and module.params.get("obj_username", None) and
            "username" in obj_val):
        obj_val["obj_username"] = obj_val["username"]
    if (obj_val and module.params.get("obj_password", None) and
            "password" in obj_val):
        obj_val["obj_password"] = obj_val["password"]
    old_obj_val = existing_obj if changed and existing_obj else None
    api_context_val = api_context if disable_fact else None
    ansible_facts_val = dict(
        avi_api_context=fact_context) if not disable_fact else {}

    return module.exit_json(
        changed=changed, obj=obj_val, old_obj=old_obj_val,
        ansible_facts=ansible_facts_val, api_context=api_context_val)


def purge_optional_fields(obj, module):
    """
    It purges the optional arguments to be sent to the controller.
    :param obj: dictionary of the ansible object passed as argument.
    :param module: AnsibleModule
    return modified obj
    """
    purge_fields = []
    for param, spec in module.argument_spec.items():
        if not spec.get('required', False):
            if param not in obj:
                # these are ansible common items
                continue
            if obj[param] is None:
                purge_fields.append(param)
    log.debug('purging fields %s', purge_fields)
    for param in purge_fields:
        obj.pop(param, None)
    return obj


def cleanup_absent_fields(obj):
    """
    Cleans up any field that is marked as state: absent. It needs to be
    removed from the object if it is present.
    :param obj:
    :return: Purged object
    """
    if type(obj) != dict:
        return obj
    cleanup_keys = []
    for k, v in obj.items():
        if type(v) == dict:
            if (('state' in v and v['state'] == 'absent') or
                    (v == "{'state': 'absent'}")):
                cleanup_keys.append(k)
            else:
                cleanup_absent_fields(v)
                if not v:
                    cleanup_keys.append(k)
        elif type(v) == list:
            new_list = []
            for elem in v:
                elem = cleanup_absent_fields(elem)
                if elem:
                    # keep only the non-empty items in the list
                    new_list.append(elem)
            if new_list:
                obj[k] = new_list
            else:
                cleanup_keys.append(k)
        elif isinstance(v, str):
            if v == "{'state': 'absent'}":
                cleanup_keys.append(k)
    for k in cleanup_keys:
        del obj[k]
    return obj
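Its effect is easiest to see on a nested object: keys marked state: absent, and keys the recursion leaves empty, are dropped. A sketch with hypothetical values:

    obj = {
        'name': 'vs1',
        'analytics_policy': {'state': 'absent'},
        'vip': [{'ip_address': {'addr': '10.0.0.10', 'type': 'V4'}}],
    }
    cleanup_absent_fields(obj)
    # -> {'name': 'vs1', 'vip': [{'ip_address': {'addr': '10.0.0.10', 'type': 'V4'}}]}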

RE_REF_MATCH = re.compile(r'^/api/[\w/]+\?name\=[\w]+[^#<>]*$')
# if HTTP ref match then strip out the #name
HTTP_REF_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.+')
HTTP_REF_W_NAME_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.*#.+')


def ref_n_str_cmp(x, y):
    """
    Compares two references
    1. check for exact reference
    2. check for obj_type/uuid
    3. check for name

    if x is ref=name then extract uuid and name from y and use it.
    if x is http_ref then
        strip x and y
        compare them.

    if x and y are urls then match with split on #
    if x is a RE_REF_MATCH then extract name
    if y is a REF_MATCH then extract name
    :param x: first string
    :param y: second string from controller's object

    Returns
        True if they are equivalent else False
    """
    if type(y) in (int, float, bool, complex):
        y = str(y)
        x = str(x)
    if not (_check_type_string(x) and _check_type_string(y)):
        return False
    y_uuid = y_name = str(y)
    x = str(x)
    if RE_REF_MATCH.match(x):
        x = x.split('name=')[1]
    elif HTTP_REF_MATCH.match(x):
        x = x.rsplit('#', 1)[0]
        y = y.rsplit('#', 1)[0]
    elif RE_REF_MATCH.match(y):
        y = y.split('name=')[1]

    if HTTP_REF_W_NAME_MATCH.match(y):
        path = y.split('api/', 1)[1]
        # Fetching name or uuid from path /xxxx_xx/xx/xx_x/uuid_or_name
        uuid_or_name = path.split('/')[-1]
        parts = uuid_or_name.rsplit('#', 1)
        y_uuid = parts[0]
        y_name = parts[1] if len(parts) > 1 else ''
    # x is just a string but y is a url, so match either uuid or name
    result = (x in (y, y_name, y_uuid))
    if not result:
        log.debug('x: %s y: %s y_name %s y_uuid %s',
                  x, y, y_name, y_uuid)
    return result


def avi_obj_cmp(x, y, sensitive_fields=None):
    """
    Compares whether x is fully contained in y. The comparison is different
    from a simple dictionary compare for the following reasons:
    1. Some fields could be references. The object in the controller returns
       the full URL for those references. However, the ansible script would
       have it specified as /api/pool?name=blah. So, the reference fields need
       to match uuid, relative reference based on name and actual reference.

    2. Optional fields with defaults: In case there are optional fields with
       defaults then the controller automatically fills them in. This would
       cause the comparison with the Ansible object specification to always
       return changed.

    3. Optional fields without defaults: This is the most tricky case. The
       issue is how to specify deletion of such objects from the ansible
       script. If the ansible playbook has the object specified as Null then
       the Avi controller will reject it for non Message(dict) type fields.
       In addition, to deal with the defaults=null issue all the fields that
       are set to None are purged out before comparing with the Avi
       controller's version.

    So, the solution is to pass state: absent if any optional field needs
    to be deleted from the configuration. The script would return
    changed=true if it finds a key in the controller version that is marked
    with state: absent in the ansible playbook. Alternatively, it would
    return false if the key is not present in the controller object. Before
    doing a put or post it would purge the fields that are marked
    state: absent.

    :param x: first string
    :param y: second string from controller's object
    :param sensitive_fields: sensitive fields to ignore for diff

    Returns:
        True if x is subset of y else False
    """
    if not sensitive_fields:
        sensitive_fields = set()
    if isinstance(x, str):
        # Special handling for strings as they can be references.
        return ref_n_str_cmp(x, y)
    if type(x) not in [list, dict]:
        # if it is not list or dict or string then simply compare the values
        return x == y
    if type(x) == list:
        # should compare each item in the list and that should match
        if len(x) != len(y):
            log.debug('x has %d items y has %d', len(x), len(y))
            return False
        for i in zip(x, y):
            if not avi_obj_cmp(i[0], i[1], sensitive_fields=sensitive_fields):
                # no need to continue
                return False

    if type(x) == dict:
        x.pop('_last_modified', None)
        x.pop('tenant', None)
        y.pop('_last_modified', None)
        x.pop('api_version', None)
        y.pop('api_version', None)
        d_xks = [k for k in x.keys() if k in sensitive_fields]

        if d_xks:
            # if there is sensitive field then always return changed
            return False
        # pop the keys that are marked deleted but not present in y
        # return false if item is marked absent and is present in y
        d_x_absent_ks = []
        for k, v in x.items():
            if v is None:
                d_x_absent_ks.append(k)
                continue
            if isinstance(v, dict):
                if ('state' in v) and (v['state'] == 'absent'):
                    if type(y) == dict and k not in y:
                        d_x_absent_ks.append(k)
                    else:
                        return False
                elif not v:
                    d_x_absent_ks.append(k)
            elif isinstance(v, list) and not v:
                d_x_absent_ks.append(k)
            # Added condition to check key in dict.
            elif isinstance(v, str) or (k in y and isinstance(y[k], str)):
                # this is the case when ansible converts the dictionary into a
                # string.
                if v == "{'state': 'absent'}" and k not in y:
                    d_x_absent_ks.append(k)
                elif not v and k not in y:
                    # this is the case when x has set the value that qualifies
                    # as not but y does not have that value
                    d_x_absent_ks.append(k)
        for k in d_x_absent_ks:
            x.pop(k)
        x_keys = set(x.keys())
        y_keys = set(y.keys())
        if not x_keys.issubset(y_keys):
            # log.debug('x has %s and y has %s keys', len(x_keys), len(y_keys))
            return False
        for k, v in x.items():
            if k not in y:
                # log.debug('k %s is not in y %s', k, y)
                return False
            if not avi_obj_cmp(v, y[k], sensitive_fields=sensitive_fields):
                # log.debug('k %s v %s did not match in y %s', k, v, y[k])
                return False
    return True


POP_FIELDS = ['state', 'controller', 'username', 'password', 'api_version',
              'avi_credentials', 'avi_api_update_method', 'avi_api_patch_op',
              'api_context', 'tenant', 'tenant_uuid', 'avi_disable_session_cache_as_fact']


def get_api_context(module, api_creds):
    api_context = module.params.get('api_context')
    if api_context and module.params.get('avi_disable_session_cache_as_fact'):
        return api_context
    elif api_context and not module.params.get(
            'avi_disable_session_cache_as_fact'):
        key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
                            api_creds.port)
        return api_context.get(key)
    else:
        return None


def avi_ansible_api(module, obj_type, sensitive_fields):
    """
    This converts the Ansible module into an Avi object and invokes APIs
    :param module: Ansible module
    :param obj_type: string representing Avi object type
    :param sensitive_fields: sensitive fields to be excluded for comparison
        purposes.
    Returns:
        success: module.exit_json with obj=avi object
        failure: module.fail_json
    """

    api_creds = AviCredentials()
    api_creds.update_from_ansible_module(module)
    api_context = get_api_context(module, api_creds)
    if api_context:
        api = ApiSession.get_session(
            api_creds.controller,
            api_creds.username,
            password=api_creds.password,
            timeout=api_creds.timeout,
            tenant=api_creds.tenant,
            tenant_uuid=api_creds.tenant_uuid,
            token=api_context['csrftoken'],
            port=api_creds.port,
            session_id=api_context['session_id'],
            csrftoken=api_context['csrftoken'])
    else:
        api = ApiSession.get_session(
            api_creds.controller,
            api_creds.username,
            password=api_creds.password,
            timeout=api_creds.timeout,
            tenant=api_creds.tenant,
            tenant_uuid=api_creds.tenant_uuid,
            token=api_creds.token,
            port=api_creds.port)
    state = module.params['state']
    # Get the api version.
    avi_update_method = module.params.get('avi_api_update_method', 'put')
    avi_patch_op = module.params.get('avi_api_patch_op', 'add')

    api_version = api_creds.api_version
    name = module.params.get('name', None)
    # Added support to get uuid
    uuid = module.params.get('uuid', None)
    check_mode = module.check_mode
    if uuid and obj_type != 'cluster':
        obj_path = '%s/%s' % (obj_type, uuid)
    else:
        obj_path = '%s/' % obj_type
    obj = deepcopy(module.params)
    tenant = obj.pop('tenant', '')
    tenant_uuid = obj.pop('tenant_uuid', '')
    # obj.pop('cloud_ref', None)
    for k in POP_FIELDS:
        obj.pop(k, None)
    purge_optional_fields(obj, module)

    # Special code to handle situation where object has a field
    # named username. This is used in case of api/user
    # The following code copies the username and password
    # from the obj_username and obj_password fields.
    if 'obj_username' in obj:
        obj['username'] = obj['obj_username']
        obj.pop('obj_username')
    if 'obj_password' in obj:
        obj['password'] = obj['obj_password']
        obj.pop('obj_password')
    if 'full_name' not in obj and 'name' in obj and obj_type == "user":
        obj['full_name'] = obj['name']
        # Special case as name represents full_name in the user module.
        # As per the API response, name is always the same as username
        # regardless of full_name.
        obj['name'] = obj['username']

    log.info('passed object %s ', obj)

    if uuid:
        # Get the object based on uuid.
        try:
            existing_obj = api.get(
                obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
                params={'include_refs': '', 'include_name': ''},
                api_version=api_version)
            existing_obj = existing_obj.json()
        except ObjectNotFound:
            existing_obj = None
    elif name:
        params = {'include_refs': '', 'include_name': ''}
        if obj.get('cloud_ref', None):
            # this is the case when gets have to be scoped with cloud
            cloud = obj['cloud_ref'].split('name=')[1]
            params['cloud_ref.name'] = cloud
        existing_obj = api.get_object_by_name(
            obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
            params=params, api_version=api_version)

        # Need to check if tenant_ref was provided and the object returned
        # is actually in admin tenant.
        if existing_obj and 'tenant_ref' in obj and 'tenant_ref' in existing_obj:
            # https://10.10.25.42/api/tenant/admin#admin
            existing_obj_tenant = existing_obj['tenant_ref'].split('#')[1]
            obj_tenant = obj['tenant_ref'].split('name=')[1]
            if obj_tenant != existing_obj_tenant:
                existing_obj = None
    else:
        # added api version to avi api call.
        existing_obj = api.get(obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
                               params={'include_refs': '', 'include_name': ''},
                               api_version=api_version).json()

    if state == 'absent':
        rsp = None
        changed = False
        err = False
        if not check_mode and existing_obj:
            try:
                if name is not None:
                    # added api version to avi api call.
                    rsp = api.delete_by_name(
                        obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
                        api_version=api_version)
                else:
                    # added api version to avi api call.
                    rsp = api.delete(
                        obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
                        api_version=api_version)
            except ObjectNotFound:
                pass
        if check_mode and existing_obj:
            changed = True

        if rsp:
            if rsp.status_code == 204:
                changed = True
            else:
                err = True
        if not err:
            return ansible_return(
                module, rsp, changed, existing_obj=existing_obj,
                api_context=api.get_context())
        elif rsp:
            return module.fail_json(msg=rsp.text)

    rsp = None
    req = None
    if existing_obj:
        # this is case of modify as object exists. should find out
        # if changed is true or not
        if name is not None and obj_type != 'cluster':
            obj_uuid = existing_obj['uuid']
            obj_path = '%s/%s' % (obj_type, obj_uuid)
        if avi_update_method == 'put':
            changed = not avi_obj_cmp(obj, existing_obj, sensitive_fields)
            obj = cleanup_absent_fields(obj)
            if changed:
                req = obj
                if check_mode:
                    # No need to process any further.
                    rsp = AviCheckModeResponse(obj=existing_obj)
                else:
                    rsp = api.put(
                        obj_path, data=req, tenant=tenant,
                        tenant_uuid=tenant_uuid, api_version=api_version)
            elif check_mode:
                rsp = AviCheckModeResponse(obj=existing_obj)
        else:
            if check_mode:
                # No need to process any further.
                rsp = AviCheckModeResponse(obj=existing_obj)
                changed = True
            else:
                obj.pop('name', None)
                patch_data = {avi_patch_op: obj}
                rsp = api.patch(
                    obj_path, data=patch_data, tenant=tenant,
                    tenant_uuid=tenant_uuid, api_version=api_version)
                obj = rsp.json()
                changed = not avi_obj_cmp(obj, existing_obj)
        if changed:
            log.debug('EXISTING OBJ %s', existing_obj)
            log.debug('NEW OBJ %s', obj)
    else:
        changed = True
        req = obj
        if check_mode:
            rsp = AviCheckModeResponse(obj=None)
        else:
            rsp = api.post(obj_type, data=obj, tenant=tenant,
                           tenant_uuid=tenant_uuid, api_version=api_version)
    return ansible_return(module, rsp, changed, req, existing_obj=existing_obj,
                          api_context=api.get_context())


def avi_common_argument_spec():
    """
    Returns common arguments for all Avi modules
    :return: dict
    """
    credentials_spec = dict(
        controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
|
||||
username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
|
||||
password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
|
||||
api_version=dict(default='16.4.4', type='str'),
|
||||
tenant=dict(default='admin'),
|
||||
tenant_uuid=dict(default='', type='str'),
|
||||
port=dict(type='int'),
|
||||
timeout=dict(default=300, type='int'),
|
||||
token=dict(default='', type='str', no_log=True),
|
||||
session_id=dict(default='', type='str', no_log=True),
|
||||
csrftoken=dict(default='', type='str', no_log=True)
|
||||
)
|
||||
|
||||
return dict(
|
||||
controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
|
||||
username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
|
||||
password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
|
||||
tenant=dict(default='admin'),
|
||||
tenant_uuid=dict(default=''),
|
||||
api_version=dict(default='16.4.4', type='str'),
|
||||
avi_credentials=dict(default=None, type='dict',
|
||||
options=credentials_spec),
|
||||
api_context=dict(type='dict'),
|
||||
avi_disable_session_cache_as_fact=dict(default=False, type='bool'))
|
|
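
# Example (illustrative, not part of the original file): a minimal sketch of
# how an Avi module consumes the helpers above. The object type 'pool' and
# the extra options are hypothetical; real modules define their full spec.
def _example_avi_pool_module():
    from ansible.module_utils.basic import AnsibleModule

    argument_specs = avi_common_argument_spec()
    argument_specs.update(dict(
        name=dict(type='str', required=True),
        state=dict(default='present', choices=['absent', 'present']),
    ))
    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)
    # avi_ansible_api() performs the whole present/absent reconciliation
    # for the given object type and exits the module with the result.
    return avi_ansible_api(module, 'pool', set())
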
@ -1,38 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Gaurav Rastogi <grastogi@avinetworks.com>, 2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# This module initially matched the namespace of network module avi. However,
# that causes namespace import error when other modules from avi namespaces
# are imported. Added import of absolute_import to avoid import collisions for
# avi.sdk.

from __future__ import absolute_import

from ansible_collections.community.general.plugins.module_utils.network.avi.ansible_utils import (
    avi_ansible_api, avi_common_argument_spec, ansible_return,
    avi_obj_cmp, cleanup_absent_fields, AviCheckModeResponse, HAS_AVI)
@ -1,972 +0,0 @@
from __future__ import absolute_import
import os
import sys
import copy
import json
import logging
import time
from datetime import datetime, timedelta
from ssl import SSLError


class MockResponse(object):
    def __init__(self, *args, **kwargs):
        raise Exception("Requests library Response object not found. Using fake one.")


class MockRequestsConnectionError(Exception):
    pass


class MockSession(object):
    def __init__(self, *args, **kwargs):
        raise Exception("Requests library Session object not found. Using fake one.")


HAS_AVI = True
try:
    from requests import ConnectionError as RequestsConnectionError
    from requests import Response
    from requests.sessions import Session
except ImportError:
    HAS_AVI = False
    Response = MockResponse
    RequestsConnectionError = MockRequestsConnectionError
    Session = MockSession


logger = logging.getLogger(__name__)

sessionDict = {}


def avi_timedelta(td):
    '''
    This is a wrapper function to work around the Python 2.6 builtin
    datetime.timedelta, which does not have a total_seconds method.
    :param td: timedelta object
    '''
    if type(td) != timedelta:
        raise TypeError()
    if sys.version_info >= (2, 7):
        ts = td.total_seconds()
    else:
        ts = td.seconds + (24 * 3600 * td.days)
    return ts
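
# Example (illustrative, not part of the original file): avi_timedelta()
# matches timedelta.total_seconds() on every supported interpreter.
def _example_avi_timedelta():
    assert avi_timedelta(timedelta(minutes=5)) == 300
    assert avi_timedelta(timedelta(days=1)) == 86400
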
def avi_sdk_syslog_logger(logger_name='avi.sdk'):
    # The following sets up syslog module to log underlying avi SDK messages
    # based on the environment variables:
    #  AVI_LOG_HANDLER: names the logging handler to use. Only syslog is
    #     supported.
    #  AVI_LOG_LEVEL: Logging level used for the avi SDK. Default is DEBUG
    #  AVI_SYSLOG_ADDRESS: Destination address for the syslog handler.
    #     Default is /dev/log
    from logging.handlers import SysLogHandler
    lf = '[%(asctime)s] %(levelname)s [%(module)s.%(funcName)s:%(lineno)d] %(message)s'
    log = logging.getLogger(logger_name)
    log_level = os.environ.get('AVI_LOG_LEVEL', 'DEBUG')
    if log_level:
        log.setLevel(getattr(logging, log_level))
    formatter = logging.Formatter(lf)
    sh = SysLogHandler(address=os.environ.get('AVI_SYSLOG_ADDRESS', '/dev/log'))
    sh.setFormatter(formatter)
    log.addHandler(sh)
    return log


class ObjectNotFound(Exception):
    pass


class APIError(Exception):
    def __init__(self, arg, rsp=None):
        self.args = [arg, rsp]
        self.rsp = rsp


class AviServerError(APIError):
    def __init__(self, arg, rsp=None):
        super(AviServerError, self).__init__(arg, rsp)


class APINotImplemented(Exception):
    pass


class ApiResponse(Response):
    """
    Returns a copy of the requests.Response object and provides additional
    helper routines
        1. obj: returns dictionary of Avi Object
    """
    def __init__(self, rsp):
        super(ApiResponse, self).__init__()
        for k, v in list(rsp.__dict__.items()):
            setattr(self, k, v)

    def json(self):
        """
        Extends the session default json interface to handle special errors
        and raise Exceptions
        returns the Avi object as a dictionary from rsp.text
        """
        if self.status_code in (200, 201):
            if not self.text:
                # In cases like status_code == 201 the response text could be
                # an empty string.
                return None
            return super(ApiResponse, self).json()
        elif self.status_code == 204:
            # No response needed; e.g., delete operation
            return None
        elif self.status_code == 404:
            raise ObjectNotFound('HTTP Error: %s Error Msg %s' % (
                self.status_code, self.text), self)
        elif self.status_code >= 500:
            raise AviServerError('HTTP Error: %s Error Msg %s' % (
                self.status_code, self.text), self)
        else:
            raise APIError('HTTP Error: %s Error Msg %s' % (
                self.status_code, self.text), self)

    def count(self):
        """
        Returns the number of objects in the collection response. If it is
        not a collection response then it simply returns 1.
        """
        obj = self.json()
        if 'count' in obj:
            # this was a response to a collection
            return obj['count']
        return 1

    @staticmethod
    def to_avi_response(resp):
        if type(resp) == Response:
            return ApiResponse(resp)
        return resp
class AviCredentials(object):
    controller = ''
    username = ''
    password = ''
    api_version = '16.4.4'
    tenant = None
    tenant_uuid = None
    token = None
    port = None
    timeout = 300
    session_id = None
    csrftoken = None

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

    def update_from_ansible_module(self, m):
        """
        :param m: ansible module
        :return:
        """
        if m.params.get('avi_credentials'):
            for k, v in m.params['avi_credentials'].items():
                if hasattr(self, k):
                    setattr(self, k, v)
        if m.params['controller']:
            self.controller = m.params['controller']
        if m.params['username']:
            self.username = m.params['username']
        if m.params['password']:
            self.password = m.params['password']
        if (m.params['api_version'] and
                (m.params['api_version'] != '16.4.4')):
            self.api_version = m.params['api_version']
        if m.params['tenant']:
            self.tenant = m.params['tenant']
        if m.params['tenant_uuid']:
            self.tenant_uuid = m.params['tenant_uuid']
        if m.params.get('session_id'):
            self.session_id = m.params['session_id']
        if m.params.get('csrftoken'):
            self.csrftoken = m.params['csrftoken']

    def __str__(self):
        return 'controller %s user %s api %s tenant %s' % (
            self.controller, self.username, self.api_version, self.tenant)
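
# Example (illustrative, not part of the original file): AviCredentials is a
# plain keyword-initialized bag of settings; update_from_ansible_module()
# applies individual task parameters after the avi_credentials dict, so the
# individual parameters win. Values below are hypothetical.
def _example_avi_credentials():
    creds = AviCredentials(controller='10.10.1.1', username='admin',
                           password='secret', tenant='admin')
    assert str(creds) == 'controller 10.10.1.1 user admin api 16.4.4 tenant admin'
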
class ApiSession(Session):
    """
    Extends the Request library's session object to provide helper
    utilities to work with Avi Controller like authentication, api massaging
    etc.
    """

    # This keeps track of the process which created the cache.
    # Any time the pid of the process changes, a new cache is created
    # for that process.
    AVI_SLUG = 'Slug'
    SESSION_CACHE_EXPIRY = 20 * 60
    SHARED_USER_HDRS = ['X-CSRFToken', 'Session-Id', 'Referer', 'Content-Type']
    MAX_API_RETRIES = 3

    def __init__(self, controller_ip=None, username=None, password=None,
                 token=None, tenant=None, tenant_uuid=None, verify=False,
                 port=None, timeout=60, api_version=None,
                 retry_conxn_errors=True, data_log=False,
                 avi_credentials=None, session_id=None, csrftoken=None,
                 lazy_authentication=False, max_api_retries=None):
        """
        ApiSession takes ownership of avi_credentials and may update the
        information inside it.

        Initialize new session object with authenticated token from login api.
        It also keeps a cache of user sessions that are cleaned up if inactive
        for more than 20 mins.

        Notes:
        01. If mode is https and port is none or 443, we don't embed the
            port in the prefix. The prefix would be 'https://ip'. If port
            is a non-default value then we concatenate https://ip:port
            in the prefix.
        02. If mode is http and the port is none or 80, we don't embed the
            port in the prefix. The prefix would be 'http://ip'. If port is
            a non-default value, then we concatenate http://ip:port in
            the prefix.
        """
        super(ApiSession, self).__init__()
        if not avi_credentials:
            tenant = tenant if tenant else "admin"
            self.avi_credentials = AviCredentials(
                controller=controller_ip, username=username, password=password,
                api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
                token=token, port=port, timeout=timeout,
                session_id=session_id, csrftoken=csrftoken)
        else:
            self.avi_credentials = avi_credentials
        self.headers = {}
        self.verify = verify
        self.retry_conxn_errors = retry_conxn_errors
        self.remote_api_version = {}
        self.session_cookie_name = ''
        self.user_hdrs = {}
        self.data_log = data_log
        self.num_session_retries = 0
        self.retry_wait_time = 0
        self.max_session_retries = (
            self.MAX_API_RETRIES if max_api_retries is None
            else int(max_api_retries))
        # Refer Notes 01 and 02
        k_port = port if port else 443
        if self.avi_credentials.controller.startswith('http'):
            k_port = 80 if not self.avi_credentials.port else k_port
            if self.avi_credentials.port is None or self.avi_credentials.port\
                    == 80:
                self.prefix = self.avi_credentials.controller
            else:
                self.prefix = '{x}:{y}'.format(
                    x=self.avi_credentials.controller,
                    y=self.avi_credentials.port)
        else:
            if port is None or port == 443:
                self.prefix = 'https://{x}'.format(
                    x=self.avi_credentials.controller)
            else:
                self.prefix = 'https://{x}:{y}'.format(
                    x=self.avi_credentials.controller,
                    y=self.avi_credentials.port)
        self.timeout = timeout
        self.key = '%s:%s:%s' % (self.avi_credentials.controller,
                                 self.avi_credentials.username, k_port)
        # Added api token and session id to sessionDict for handling a
        # single session
        if self.avi_credentials.csrftoken:
            sessionDict[self.key] = {
                'api': self,
                "csrftoken": self.avi_credentials.csrftoken,
                "session_id": self.avi_credentials.session_id,
                "last_used": datetime.utcnow()
            }
        elif lazy_authentication:
            sessionDict.get(self.key, {}).update(
                {'api': self, "last_used": datetime.utcnow()})
        else:
            self.authenticate_session()

        self.num_session_retries = 0
        self.pid = os.getpid()
        ApiSession._clean_inactive_sessions()
        return
    @property
    def controller_ip(self):
        return self.avi_credentials.controller

    @controller_ip.setter
    def controller_ip(self, controller_ip):
        self.avi_credentials.controller = controller_ip

    @property
    def username(self):
        return self.avi_credentials.username

    @property
    def connected(self):
        return sessionDict.get(self.key, {}).get('connected', False)

    @username.setter
    def username(self, username):
        self.avi_credentials.username = username

    @property
    def password(self):
        return self.avi_credentials.password

    @password.setter
    def password(self, password):
        self.avi_credentials.password = password

    @property
    def keystone_token(self):
        return sessionDict.get(self.key, {}).get('csrftoken', None)

    @keystone_token.setter
    def keystone_token(self, token):
        sessionDict[self.key]['csrftoken'] = token

    @property
    def tenant_uuid(self):
        return self.avi_credentials.tenant_uuid

    @tenant_uuid.setter
    def tenant_uuid(self, tenant_uuid):
        self.avi_credentials.tenant_uuid = tenant_uuid

    @property
    def tenant(self):
        return self.avi_credentials.tenant

    @tenant.setter
    def tenant(self, tenant):
        if tenant:
            self.avi_credentials.tenant = tenant
        else:
            self.avi_credentials.tenant = 'admin'

    @property
    def port(self):
        return self.avi_credentials.port

    @port.setter
    def port(self, port):
        self.avi_credentials.port = port

    @property
    def api_version(self):
        return self.avi_credentials.api_version

    @api_version.setter
    def api_version(self, api_version):
        self.avi_credentials.api_version = api_version

    @property
    def session_id(self):
        return sessionDict[self.key]['session_id']

    def get_context(self):
        return {
            'session_id': sessionDict[self.key]['session_id'],
            'csrftoken': sessionDict[self.key]['csrftoken']
        }

    @staticmethod
    def clear_cached_sessions():
        global sessionDict
        sessionDict = {}
    @staticmethod
    def get_session(
            controller_ip=None, username=None, password=None, token=None, tenant=None,
            tenant_uuid=None, verify=False, port=None, timeout=60,
            retry_conxn_errors=True, api_version=None, data_log=False,
            avi_credentials=None, session_id=None, csrftoken=None,
            lazy_authentication=False, max_api_retries=None):
        """
        Returns the session object for the same user and tenant.
        Calls init if the session does not exist and adds it to the
        session cache.
        :param controller_ip: controller IP address
        :param username:
        :param password:
        :param token: Token to use; example, a valid keystone token
        :param tenant: Name of the tenant on Avi Controller
        :param tenant_uuid: Don't specify tenant when using tenant_id
        :param port: Rest-API may use a different port other than 443
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param retry_conxn_errors: retry on connection errors
        :param api_version: Controller API version
        """
        if not avi_credentials:
            tenant = tenant if tenant else "admin"
            avi_credentials = AviCredentials(
                controller=controller_ip, username=username, password=password,
                api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
                token=token, port=port, timeout=timeout,
                session_id=session_id, csrftoken=csrftoken)

        k_port = avi_credentials.port if avi_credentials.port else 443
        if avi_credentials.controller.startswith('http'):
            k_port = 80 if not avi_credentials.port else k_port
        key = '%s:%s:%s' % (avi_credentials.controller,
                            avi_credentials.username, k_port)
        cached_session = sessionDict.get(key)
        if cached_session:
            user_session = cached_session['api']
            if not (user_session.avi_credentials.csrftoken or
                    lazy_authentication):
                user_session.authenticate_session()
        else:
            user_session = ApiSession(
                controller_ip, username, password, token=token, tenant=tenant,
                tenant_uuid=tenant_uuid, verify=verify, port=port,
                timeout=timeout, retry_conxn_errors=retry_conxn_errors,
                api_version=api_version, data_log=data_log,
                avi_credentials=avi_credentials,
                lazy_authentication=lazy_authentication,
                max_api_retries=max_api_retries)
        ApiSession._clean_inactive_sessions()
        return user_session
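
    # Example (illustrative, not part of the original file): sessions are
    # cached per controller/user/port key, so a repeated get_session() call
    # with the same identity and a pre-established csrftoken/session_id
    # returns the cached object instead of logging in again:
    #
    #     s1 = ApiSession.get_session('10.10.1.1', 'admin', password='secret',
    #                                 csrftoken='token-1', session_id='sess-1')
    #     s2 = ApiSession.get_session('10.10.1.1', 'admin', password='secret',
    #                                 csrftoken='token-1', session_id='sess-1')
    #     assert s1 is s2
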
    def reset_session(self):
        """
        Resets and re-authenticates the current session.
        """
        sessionDict[self.key]['connected'] = False
        logger.info('resetting session for %s', self.key)
        self.user_hdrs = {}
        for k, v in self.headers.items():
            if k not in self.SHARED_USER_HDRS:
                self.user_hdrs[k] = v
        self.headers = {}
        self.authenticate_session()

    def authenticate_session(self):
        """
        Performs session authentication with Avi controller and stores
        session cookies and sets header options like tenant.
        """
        body = {"username": self.avi_credentials.username}
        if self.avi_credentials.password:
            body["password"] = self.avi_credentials.password
        elif self.avi_credentials.token:
            body["token"] = self.avi_credentials.token
        else:
            raise APIError("Neither user password nor token provided")
        logger.debug('authenticating user %s prefix %s',
                     self.avi_credentials.username, self.prefix)
        self.cookies.clear()
        err = None
        try:
            rsp = super(ApiSession, self).post(
                self.prefix + "/login", body, timeout=self.timeout, verify=self.verify)

            if rsp.status_code == 200:
                self.num_session_retries = 0
                self.remote_api_version = rsp.json().get('version', {})
                self.session_cookie_name = rsp.json().get('session_cookie_name', 'sessionid')
                self.headers.update(self.user_hdrs)
                if rsp.cookies and 'csrftoken' in rsp.cookies:
                    csrftoken = rsp.cookies['csrftoken']
                    sessionDict[self.key] = {
                        'csrftoken': csrftoken,
                        'session_id': rsp.cookies[self.session_cookie_name],
                        'last_used': datetime.utcnow(),
                        'api': self,
                        'connected': True
                    }
                logger.debug("authentication success for user %s",
                             self.avi_credentials.username)
                return
            # Check for bad request and invalid credentials response code
            elif rsp.status_code in [401, 403]:
                logger.error('Status Code %s msg %s', rsp.status_code, rsp.text)
                err = APIError('Status Code %s msg %s' % (
                    rsp.status_code, rsp.text), rsp)
                raise err
            else:
                logger.error("Error status code %s msg %s", rsp.status_code,
                             rsp.text)
                err = APIError('Status Code %s msg %s' % (
                    rsp.status_code, rsp.text), rsp)
        except (RequestsConnectionError, SSLError) as e:
            if not self.retry_conxn_errors:
                raise
            logger.warning('Connection error retrying %s', e)
            err = e
        # comes here only if there was either an exception or login was not
        # successful
        if self.retry_wait_time:
            time.sleep(self.retry_wait_time)
        self.num_session_retries += 1
        if self.num_session_retries > self.max_session_retries:
            self.num_session_retries = 0
            logger.error("giving up after %d retries connection failure %s",
                         self.max_session_retries, True)
            ret_err = (
                err if err else APIError("giving up after %d retries connection failure %s" %
                                         (self.max_session_retries, True)))
            raise ret_err
        self.authenticate_session()
        return
    def _get_api_headers(self, tenant, tenant_uuid, timeout, headers,
                         api_version):
        """
        Returns the headers that are passed to the requests.Session api calls.
        """
        api_hdrs = copy.deepcopy(self.headers)
        api_hdrs.update({
            "Referer": self.prefix,
            "Content-Type": "application/json"
        })
        api_hdrs['timeout'] = str(timeout)
        if self.key in sessionDict and 'csrftoken' in sessionDict.get(self.key):
            api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken']
        else:
            self.authenticate_session()
            api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken']
        if api_version:
            api_hdrs['X-Avi-Version'] = api_version
        elif self.avi_credentials.api_version:
            api_hdrs['X-Avi-Version'] = self.avi_credentials.api_version
        if tenant:
            tenant_uuid = None
        elif tenant_uuid:
            tenant = None
        else:
            tenant = self.avi_credentials.tenant
            tenant_uuid = self.avi_credentials.tenant_uuid
        if tenant_uuid:
            api_hdrs.update({"X-Avi-Tenant-UUID": "%s" % tenant_uuid})
            api_hdrs.pop("X-Avi-Tenant", None)
        elif tenant:
            api_hdrs.update({"X-Avi-Tenant": "%s" % tenant})
            api_hdrs.pop("X-Avi-Tenant-UUID", None)
        # Override any user headers that were passed by users. We don't know
        # when the user had updated the user_hdrs
        if self.user_hdrs:
            api_hdrs.update(self.user_hdrs)
        if headers:
            # overwrite the headers passed via the API calls.
            api_hdrs.update(headers)
        return api_hdrs
    def _api(self, api_name, path, tenant, tenant_uuid, data=None,
             headers=None, timeout=None, api_version=None, **kwargs):
        """
        It calls the requests.Session APIs and handles session expiry
        and other situations where the session needs to be reset.
        Returns ApiResponse object
        :param path: takes relative path to the AVI api.
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param headers: dictionary of headers that override the session
            headers.
        """
        if self.pid != os.getpid():
            logger.info('pid %d change detected new %d. Closing session',
                        self.pid, os.getpid())
            self.close()
            self.pid = os.getpid()
        if timeout is None:
            timeout = self.timeout
        fullpath = self._get_api_path(path)
        fn = getattr(super(ApiSession, self), api_name)
        api_hdrs = self._get_api_headers(tenant, tenant_uuid, timeout, headers,
                                         api_version)
        connection_error = False
        err = None
        cookies = {
            'csrftoken': api_hdrs['X-CSRFToken'],
        }
        try:
            if self.session_cookie_name:
                cookies[self.session_cookie_name] = sessionDict[self.key]['session_id']
        except KeyError:
            pass
        try:
            if (data is not None) and (type(data) == dict):
                resp = fn(fullpath, data=json.dumps(data), headers=api_hdrs,
                          timeout=timeout, cookies=cookies, **kwargs)
            else:
                resp = fn(fullpath, data=data, headers=api_hdrs,
                          timeout=timeout, cookies=cookies, **kwargs)
        except (RequestsConnectionError, SSLError) as e:
            logger.warning('Connection error retrying %s', e)
            if not self.retry_conxn_errors:
                raise
            connection_error = True
            err = e
        except Exception as e:
            logger.error('Error in Requests library %s', e)
            raise
        if not connection_error:
            logger.debug('path: %s http_method: %s hdrs: %s params: '
                         '%s data: %s rsp: %s', fullpath, api_name.upper(),
                         api_hdrs, kwargs, data,
                         (resp.text if self.data_log else 'None'))
        if connection_error or resp.status_code in (401, 419):
            if connection_error:
                try:
                    self.close()
                except Exception:
                    # ignoring exception in cleanup path
                    pass
                logger.warning('Connection failed, retrying.')
                # Adding sleep before retrying
                if self.retry_wait_time:
                    time.sleep(self.retry_wait_time)
            else:
                logger.info('received error %d %s so resetting connection',
                            resp.status_code, resp.text)
                ApiSession.reset_session(self)
            self.num_session_retries += 1
            if self.num_session_retries > self.max_session_retries:
                # Added this such that any code which re-tries can succeed
                # eventually.
                self.num_session_retries = 0
                if not connection_error:
                    err = APIError('Status Code %s msg %s' % (
                        resp.status_code, resp.text), resp)
                logger.error(
                    "giving up after %d retries conn failure %s err %s",
                    self.max_session_retries, connection_error, err)
                ret_err = (
                    err if err else APIError("giving up after %d retries connection failure %s" %
                                             (self.max_session_retries, True)))
                raise ret_err
            # should restore the updated_hdrs to one passed down
            resp = self._api(api_name, path, tenant, tenant_uuid, data,
                             headers=headers, api_version=api_version,
                             timeout=timeout, **kwargs)
            self.num_session_retries = 0

        if resp.cookies and 'csrftoken' in resp.cookies:
            csrftoken = resp.cookies['csrftoken']
            self.headers.update({"X-CSRFToken": csrftoken})
        self._update_session_last_used()
        return ApiResponse.to_avi_response(resp)
    def get_controller_details(self):
        result = {
            "controller_ip": self.controller_ip,
            "controller_api_version": self.remote_api_version
        }
        return result

    def get(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
            api_version=None, **kwargs):
        """
        It extends the Session Library interface to add AVI API prefixes,
        handle session exceptions related to authentication and update
        the global user session cache.
        :param path: takes relative path to the AVI api.
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        get method takes relative path to service and kwargs as per Session
        class get method
        returns session's response object
        """
        return self._api('get', path, tenant, tenant_uuid, timeout=timeout,
                         params=params, api_version=api_version, **kwargs)

    def get_object_by_name(self, path, name, tenant='', tenant_uuid='',
                           timeout=None, params=None, api_version=None,
                           **kwargs):
        """
        Helper function to access Avi REST Objects using object
        type and name. It behaves like a python dictionary interface where it
        returns None when the object is not present in the AviController.
        Internally, it transforms the request to api/path?name=<name>...
        :param path: relative path to service
        :param name: name of the object
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns dictionary object if successful else None
        """
        obj = None
        if not params:
            params = {}
        params['name'] = name
        resp = self.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
                        timeout=timeout,
                        params=params, api_version=api_version, **kwargs)
        if resp.status_code in (401, 419):
            ApiSession.reset_session(self)
            resp = self.get_object_by_name(
                path, name, tenant, tenant_uuid, timeout=timeout,
                params=params, **kwargs)
        if resp.status_code > 499 or 'Invalid version' in resp.text:
            logger.error('Error in get object by name for %s named %s. '
                         'Error: %s', path, name, resp.text)
            raise AviServerError(resp.text, rsp=resp)
        elif resp.status_code > 299:
            return obj
        try:
            if 'results' in resp.json():
                obj = resp.json()['results'][0]
            else:
                # For apis returning a single object, e.g. api/cluster
                obj = resp.json()
        except IndexError:
            logger.warning('Warning: Object Not found for %s named %s',
                           path, name)
            obj = None
        self._update_session_last_used()
        return obj
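
    # Example (illustrative, not part of the original file): typical read
    # path against a live controller; the address, credentials, and pool
    # name are hypothetical. get_object_by_name() returns the object dict,
    # or None when nothing matches:
    #
    #     api = ApiSession.get_session('controller.example.com', 'admin',
    #                                  password='secret', tenant='admin')
    #     pool = api.get_object_by_name('pool', 'web-pool')
    #     pool_uuid = pool['uuid'] if pool else None
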
    def post(self, path, data=None, tenant='', tenant_uuid='', timeout=None,
             force_uuid=None, params=None, api_version=None, **kwargs):
        """
        It extends the Session Library interface to add AVI API prefixes,
        handle session exceptions related to authentication and update
        the global user session cache.
        :param path: takes relative path to the AVI api. It is modified by
            the library to conform to AVI Controller's REST API interface
        :param data: dictionary of the data. Support for json string
            is deprecated
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns session's response object
        """
        if force_uuid is not None:
            headers = kwargs.get('headers', {})
            headers[self.AVI_SLUG] = force_uuid
            kwargs['headers'] = headers
        return self._api('post', path, tenant, tenant_uuid, data=data,
                         timeout=timeout, params=params,
                         api_version=api_version, **kwargs)

    def put(self, path, data=None, tenant='', tenant_uuid='',
            timeout=None, params=None, api_version=None, **kwargs):
        """
        It extends the Session Library interface to add AVI API prefixes,
        handle session exceptions related to authentication and update
        the global user session cache.
        :param path: takes relative path to the AVI api. It is modified by
            the library to conform to AVI Controller's REST API interface
        :param data: dictionary of the data. Support for json string
            is deprecated
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns session's response object
        """
        return self._api('put', path, tenant, tenant_uuid, data=data,
                         timeout=timeout, params=params,
                         api_version=api_version, **kwargs)

    def patch(self, path, data=None, tenant='', tenant_uuid='',
              timeout=None, params=None, api_version=None, **kwargs):
        """
        It extends the Session Library interface to add AVI API prefixes,
        handle session exceptions related to authentication and update
        the global user session cache.
        :param path: takes relative path to the AVI api. It is modified by
            the library to conform to AVI Controller's REST API interface
        :param data: dictionary of the data. Support for json string
            is deprecated
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns session's response object
        """
        return self._api('patch', path, tenant, tenant_uuid, data=data,
                         timeout=timeout, params=params,
                         api_version=api_version, **kwargs)
    def put_by_name(self, path, name, data=None, tenant='',
                    tenant_uuid='', timeout=None, params=None,
                    api_version=None, **kwargs):
        """
        Helper function to perform HTTP PUT on Avi REST Objects using object
        type and name.
        Internally, it transforms the request to api/path?name=<name>...
        :param path: relative path to service
        :param name: name of the object
        :param data: dictionary of the data. Support for json string
            is deprecated
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns session's response object
        """
        uuid = self._get_uuid_by_name(
            path, name, tenant, tenant_uuid, api_version=api_version)
        path = '%s/%s' % (path, uuid)
        return self.put(path, data, tenant, tenant_uuid, timeout=timeout,
                        params=params, api_version=api_version, **kwargs)

    def delete(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
               data=None, api_version=None, **kwargs):
        """
        It extends the Session Library interface to add AVI API prefixes,
        handle session exceptions related to authentication and update
        the global user session cache.
        :param path: takes relative path to the AVI api. It is modified by
            the library to conform to AVI Controller's REST API interface
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param data: dictionary of the data. Support for json string
            is deprecated
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns session's response object
        """
        return self._api('delete', path, tenant, tenant_uuid, data=data,
                         timeout=timeout, params=params,
                         api_version=api_version, **kwargs)

    def delete_by_name(self, path, name, tenant='', tenant_uuid='',
                       timeout=None, params=None, api_version=None, **kwargs):
        """
        Helper function to perform HTTP DELETE on Avi REST Objects using
        object type and name. Internally, it transforms the request to
        api/path?name=<name>...
        :param path: relative path to service
        :param name: name of the object
        :param tenant: overrides the tenant used during session creation
        :param tenant_uuid: overrides the tenant or tenant_uuid during session
            creation
        :param timeout: timeout for API calls; Default value is 60 seconds
        :param params: dictionary of key value pairs to be sent as query
            parameters
        :param api_version: overrides x-avi-header in request header during
            session creation
        returns session's response object
        """
        uuid = self._get_uuid_by_name(path, name, tenant, tenant_uuid,
                                      api_version=api_version)
        if not uuid:
            raise ObjectNotFound("%s/?name=%s" % (path, name))
        path = '%s/%s' % (path, uuid)
        return self.delete(path, tenant, tenant_uuid, timeout=timeout,
                           params=params, api_version=api_version, **kwargs)
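
    # Example (illustrative, not part of the original file): the *_by_name
    # helpers resolve the name to a uuid first, then issue the verb against
    # api/<path>/<uuid>. Object names and fields are hypothetical:
    #
    #     api.put_by_name('pool', 'web-pool',
    #                     data={'name': 'web-pool', 'enabled': False})
    #     api.delete_by_name('pool', 'web-pool')  # raises ObjectNotFound
    #                                             # if no such name exists
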
    def get_obj_ref(self, obj):
        """returns reference url from dict object"""
        if not obj:
            return None
        if isinstance(obj, Response):
            obj = json.loads(obj.text)
        if obj.get(0, None):
            return obj[0]['url']
        elif obj.get('url', None):
            return obj['url']
        elif obj.get('results', None):
            return obj['results'][0]['url']
        else:
            return None

    def get_obj_uuid(self, obj):
        """returns uuid from dict object"""
        if not obj:
            raise ObjectNotFound('Object %s Not found' % (obj))
        if isinstance(obj, Response):
            obj = json.loads(obj.text)
        if obj.get(0, None):
            return obj[0]['uuid']
        elif obj.get('uuid', None):
            return obj['uuid']
        elif obj.get('results', None):
            return obj['results'][0]['uuid']
        else:
            return None

    def _get_api_path(self, path, uuid=None):
        """
        This function returns the full url from relative path and uuid.
        """
        if path == 'logout':
            return self.prefix + '/' + path
        elif uuid:
            return self.prefix + '/api/' + path + '/' + uuid
        else:
            return self.prefix + '/api/' + path

    def _get_uuid_by_name(self, path, name, tenant='admin',
                          tenant_uuid='', api_version=None):
        """gets object by name and service path and returns uuid"""
        resp = self.get_object_by_name(
            path, name, tenant, tenant_uuid, api_version=api_version)
        if not resp:
            raise ObjectNotFound("%s/%s" % (path, name))
        return self.get_obj_uuid(resp)

    def _update_session_last_used(self):
        if self.key in sessionDict:
            sessionDict[self.key]["last_used"] = datetime.utcnow()

    @staticmethod
    def _clean_inactive_sessions():
        """Removes sessions which are inactive more than 20 min"""
        session_cache = sessionDict
        logger.debug("cleaning inactive sessions in pid %d num elem %d",
                     os.getpid(), len(session_cache))
        keys_to_delete = []
        for key, session in list(session_cache.items()):
            tdiff = avi_timedelta(datetime.utcnow() - session["last_used"])
            if tdiff < ApiSession.SESSION_CACHE_EXPIRY:
                continue
            keys_to_delete.append(key)
        for key in keys_to_delete:
            del session_cache[key]
            logger.debug("Removed session for : %s", key)

    def delete_session(self):
        """ Removes the session for cleanup"""
        logger.debug("Removed session for : %s", self.key)
        sessionDict.pop(self.key, None)
        return
# End of file
@ -1,91 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016, Ted Elhourani <ted@bigswitch.com>
#
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json

from ansible.module_utils.urls import fetch_url


class Response(object):

    def __init__(self, resp, info):
        self.body = None
        if resp:
            self.body = resp.read()
        self.info = info

    @property
    def json(self):
        if not self.body:
            if "body" in self.info:
                return json.loads(self.info["body"])
            return None
        try:
            return json.loads(self.body)
        except ValueError:
            return None

    @property
    def status_code(self):
        return self.info["status"]


class Rest(object):

    def __init__(self, module, headers, baseurl):
        self.module = module
        self.headers = headers
        self.baseurl = baseurl

    def _url_builder(self, path):
        if path[0] == '/':
            path = path[1:]
        return '%s/%s' % (self.baseurl, path)

    def send(self, method, path, data=None, headers=None):
        url = self._url_builder(path)
        data = self.module.jsonify(data)

        resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)

        return Response(resp, info)

    def get(self, path, data=None, headers=None):
        return self.send('GET', path, data, headers)

    def put(self, path, data=None, headers=None):
        return self.send('PUT', path, data, headers)

    def post(self, path, data=None, headers=None):
        return self.send('POST', path, data, headers)

    def patch(self, path, data=None, headers=None):
        return self.send('PATCH', path, data, headers)

    def delete(self, path, data=None, headers=None):
        return self.send('DELETE', path, data, headers)
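
# Example (illustrative, not part of the original file): how a module might
# drive this thin wrapper. The controller URL, cookie header, and endpoint
# are hypothetical; fetch_url() honors standard module options such as
# validate_certs.
def _example_rest_usage(module):
    rest = Rest(module,
                {'Content-type': 'application/json',
                 'Cookie': 'session_cookie=%s' % module.params['access_token']},
                'https://%s:8443/api/v1' % module.params['controller'])
    resp = rest.get('data/controller/applications')
    if resp.status_code != 200:
        module.fail_json(msg='failed to query applications: %s' % resp.info)
    return resp.json
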
@ -1,421 +0,0 @@
#
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re
import socket
import sys
import traceback

from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command, ConnectionError
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import NetconfConnection


try:
    from ncclient.xml_ import to_xml, new_ele_ns
    HAS_NCCLIENT = True
except ImportError:
    HAS_NCCLIENT = False


try:
    from lxml import etree
except ImportError:
    from xml.etree import ElementTree as etree

_DEVICE_CLI_CONNECTION = None
_DEVICE_NC_CONNECTION = None

ce_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'use_ssl': dict(type='bool'),
    'validate_certs': dict(type='bool'),
    'timeout': dict(type='int'),
    'transport': dict(default='cli', choices=['cli', 'netconf']),
}
ce_argument_spec = {
    'provider': dict(type='dict', options=ce_provider_spec),
}
ce_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'use_ssl': dict(removed_in_version=2.9, type='bool'),
    'validate_certs': dict(removed_in_version=2.9, type='bool'),
    'timeout': dict(removed_in_version=2.9, type='int'),
    'transport': dict(removed_in_version=2.9, choices=['cli', 'netconf']),
}
ce_argument_spec.update(ce_top_spec)
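
# Example (illustrative, not part of the original file): a CloudEngine module
# merges its own options into ce_argument_spec; load_params() below then
# back-fills the deprecated top-level options from the provider dict. The
# vlan_id option is hypothetical.
def _example_ce_module():
    from ansible.module_utils.basic import AnsibleModule

    argument_spec = dict(
        vlan_id=dict(type='int', required=True),
        state=dict(default='present', choices=['present', 'absent']),
    )
    argument_spec.update(ce_argument_spec)
    return AnsibleModule(argument_spec=argument_spec,
                         supports_check_mode=True)
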
def to_string(data):
    return re.sub(r'<data\s+.+?(/>|>)', r'<data\1', data)


def check_args(module, warnings):
    pass


def load_params(module):
    """load_params"""
    provider = module.params.get('provider') or dict()
    for key, value in iteritems(provider):
        if key in ce_argument_spec:
            if module.params.get(key) is None and value is not None:
                module.params[key] = value


def get_connection(module):
    """get_connection"""
    global _DEVICE_CLI_CONNECTION
    if not _DEVICE_CLI_CONNECTION:
        load_params(module)
        conn = Cli(module)
        _DEVICE_CLI_CONNECTION = conn
    return _DEVICE_CLI_CONNECTION


def rm_config_prefix(cfg):
    if not cfg:
        return cfg

    cmds = cfg.split("\n")
    for i in range(len(cmds)):
        if not cmds[i]:
            continue
        if '~' in cmds[i]:
            index = cmds[i].index('~')
            if cmds[i][:index] == ' ' * index:
                cmds[i] = cmds[i].replace("~", "", 1)
    return '\n'.join(cmds)
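
# Example (illustrative, not part of the original file): rm_config_prefix()
# only strips a '~' when it is the first non-space character of a line, which
# is how default configuration lines are marked in include-default output.
def _example_rm_config_prefix():
    cfg = "interface GE1/0/1\n ~description default-line\n ip addr~ess kept"
    assert rm_config_prefix(cfg) == (
        "interface GE1/0/1\n description default-line\n ip addr~ess kept")
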
class Cli:

    def __init__(self, module):
        self._module = module
        self._device_configs = {}

    def exec_command(self, command):
        if isinstance(command, dict):
            command = self._module.jsonify(command)

        return exec_command(self._module, command)

    def get_config(self, flags=None):
        """Retrieves the current config from the device or cache
        """
        flags = [] if flags is None else flags

        cmd = 'display current-configuration '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

        try:
            return self._device_configs[cmd]
        except KeyError:
            rc, out, err = self.exec_command(cmd)
            if rc != 0:
                self._module.fail_json(msg=err)
            cfg = str(out).strip()
            # remove default configuration prefix '~'
            for flag in flags:
                if "include-default" in flag:
                    cfg = rm_config_prefix(cfg)
                    break

            self._device_configs[cmd] = cfg
            return cfg

    def run_commands(self, commands, check_rc=True):
        """Run list of commands on remote device and return results
        """
        responses = list()

        for item in to_list(commands):

            rc, out, err = self.exec_command(item)

            if check_rc and rc != 0:
                self._module.fail_json(msg=cli_err_msg(item['command'].strip(), err))

            try:
                out = self._module.from_json(out)
            except ValueError:
                out = str(out).strip()

            responses.append(out)
        return responses

    def load_config(self, config):
        """Sends configuration commands to the remote device
        """
        rc, out, err = self.exec_command('mmi-mode enable')
        if rc != 0:
            self._module.fail_json(msg='unable to set mmi-mode enable', output=err)
        rc, out, err = self.exec_command('system-view immediately')
        if rc != 0:
            self._module.fail_json(msg='unable to enter system-view', output=err)

        for cmd in config:
            rc, out, err = self.exec_command(cmd)
            if rc != 0:
                self._module.fail_json(msg=cli_err_msg(cmd.strip(), err))

        self.exec_command('return')
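
# Example (illustrative, not part of the original file): modules normally go
# through the module-level wrappers defined below rather than instantiating
# Cli directly; get_connection() caches one Cli object per process. The
# device commands are hypothetical.
def _example_cli_usage(module):
    running = get_config(module, flags=['interface'])
    outputs = run_commands(module, ['display vlan summary'])
    load_config(module, ['vlan batch 100'])
    return running, outputs
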
def cli_err_msg(cmd, err):
    """ get cli exception message"""

    if not err:
        return "Error: Fail to get cli exception message."

    msg = list()
    err_list = str(err).split("\r\n")
    for err in err_list:
        err = err.strip('.,\r\n\t ')
        if not err:
            continue
        if cmd and cmd == err:
            continue
        if " at '^' position" in err:
            err = err.replace(" at '^' position", "").strip()
        err = err.strip('.,\r\n\t ')
        if err == "^":
            continue
        if len(err) > 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]:
            continue
        err = err.strip('.,\r\n\t ')
        if err:
            msg.append(err)

    if cmd:
        msg.insert(0, "Command: %s" % cmd)

    return ", ".join(msg).capitalize() + "."


def to_command(module, commands):
    default_output = 'text'
    transform = ComplexList(dict(
        command=dict(key=True),
        output=dict(default=default_output),
        prompt=dict(),
        answer=dict()
    ), module)

    commands = transform(to_list(commands))

    return commands


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    conn = get_connection(module)
    return conn.get_config(flags)


def run_commands(module, commands, check_rc=True):
    conn = get_connection(module)
    return conn.run_commands(to_command(module, commands), check_rc)


def load_config(module, config):
    """load_config"""
    conn = get_connection(module)
    return conn.load_config(config)


def ce_unknown_host_cb(host, fingerprint):
    """ ce_unknown_host_cb """

    return True


def get_nc_set_id(xml_str):
    """get netconf set-id value"""

    result = re.findall(r'<rpc-reply.+?set-id=\"(\d+)\"', xml_str)
    if not result:
        return None
    return result[0]


def get_xml_line(xml_list, index):
    """get xml specified line valid string data"""

    ele = None
    while xml_list and not ele:
        if index >= 0 and index >= len(xml_list):
            return None
        if index < 0 and abs(index) > len(xml_list):
            return None

        ele = xml_list[index]
        if not ele.replace(" ", ""):
            xml_list.pop(index)
            ele = None
    return ele


def merge_nc_xml(xml1, xml2):
    """merge xml1 and xml2"""

    xml1_list = xml1.split("</data>")[0].split("\n")
    xml2_list = xml2.split("<data>")[1].split("\n")

    while True:
        xml1_ele1 = get_xml_line(xml1_list, -1)
        xml1_ele2 = get_xml_line(xml1_list, -2)
        xml2_ele1 = get_xml_line(xml2_list, 0)
        xml2_ele2 = get_xml_line(xml2_list, 1)
        if not xml1_ele1 or not xml1_ele2 or not xml2_ele1 or not xml2_ele2:
            return xml1

        if "xmlns" in xml2_ele1:
            xml2_ele1 = xml2_ele1.lstrip().split(" ")[0] + ">"
        if "xmlns" in xml2_ele2:
            xml2_ele2 = xml2_ele2.lstrip().split(" ")[0] + ">"
        if xml1_ele1.replace(" ", "").replace("/", "") == xml2_ele1.replace(" ", "").replace("/", ""):
            if xml1_ele2.replace(" ", "").replace("/", "") == xml2_ele2.replace(" ", "").replace("/", ""):
                xml1_list.pop()
                xml2_list.pop(0)
            else:
                break
        else:
            break

    return "\n".join(xml1_list + xml2_list)


def get_nc_connection(module):
    global _DEVICE_NC_CONNECTION
    if not _DEVICE_NC_CONNECTION:
        load_params(module)
        conn = NetconfConnection(module._socket_path)
        _DEVICE_NC_CONNECTION = conn
    return _DEVICE_NC_CONNECTION


def set_nc_config(module, xml_str):
    """ set_config """

    conn = get_nc_connection(module)
    try:
        out = conn.edit_config(target='running', config=xml_str, default_operation='merge',
                               error_option='rollback-on-error')
    finally:
        # conn.unlock(target = 'candidate')
        pass
    return to_string(to_xml(out))


def get_nc_next(module, xml_str):
    """ get_nc_next for exchange capability """

    conn = get_nc_connection(module)
    result = None
    if xml_str is not None:
        response = conn.get(xml_str, if_rpc_reply=True)
        result = response.find('./*')
        set_id = response.get('set-id')
        while True and set_id is not None:
try:
|
||||
fetch_node = new_ele_ns('get-next', 'http://www.huawei.com/netconf/capability/base/1.0', {'set-id': set_id})
|
||||
next_xml = conn.dispatch_rpc(etree.tostring(fetch_node))
|
||||
if next_xml is not None:
|
||||
result.extend(next_xml.find('./*'))
|
||||
set_id = next_xml.get('set-id')
|
||||
except ConnectionError:
|
||||
break
|
||||
if result is not None:
|
||||
return etree.tostring(result)
|
||||
return result
|
||||
|
||||
|
||||
def get_nc_config(module, xml_str):
|
||||
""" get_config """
|
||||
|
||||
conn = get_nc_connection(module)
|
||||
if xml_str is not None:
|
||||
response = conn.get(xml_str)
|
||||
else:
|
||||
return None
|
||||
|
||||
return to_string(to_xml(response))
|
||||
|
||||
|
||||
def execute_nc_action(module, xml_str):
|
||||
""" huawei execute-action """
|
||||
|
||||
conn = get_nc_connection(module)
|
||||
response = conn.execute_action(xml_str)
|
||||
return to_string(to_xml(response))
|
||||
|
||||
|
||||
def execute_nc_cli(module, xml_str):
|
||||
""" huawei execute-cli """
|
||||
|
||||
if xml_str is not None:
|
||||
try:
|
||||
conn = get_nc_connection(module)
|
||||
out = conn.execute_nc_cli(command=xml_str)
|
||||
return to_string(to_xml(out))
|
||||
except Exception as exc:
|
||||
raise Exception(exc)
|
||||
|
||||
|
||||
def check_ip_addr(ipaddr):
|
||||
""" check ip address, Supports IPv4 and IPv6 """
|
||||
|
||||
if not ipaddr or '\x00' in ipaddr:
|
||||
return False
|
||||
|
||||
try:
|
||||
res = socket.getaddrinfo(ipaddr, 0, socket.AF_UNSPEC,
|
||||
socket.SOCK_STREAM,
|
||||
0, socket.AI_NUMERICHOST)
|
||||
return bool(res)
|
||||
except socket.gaierror:
|
||||
err = sys.exc_info()[1]
|
||||
if err.args[0] == socket.EAI_NONAME:
|
||||
return False
|
||||
raise
|
|
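
A minimal usage sketch, not part of the deleted file: the import path and the sample strings are illustrative only, but the behavior follows the helpers above.

# Hypothetical usage sketch; assumes this module is importable as `ce`.
from ce import rm_config_prefix, check_ip_addr

raw = "~ sysname HUAWEI\n~ info-center enable"
print(rm_config_prefix(raw))           # the leading '~' default-config marker is stripped once per line
print(check_ip_addr("192.168.1.1"))    # True (numeric IPv4)
print(check_ip_addr("2001:db8::1"))    # True (numeric IPv6)
print(check_ip_addr("not-an-ip"))      # False (AI_NUMERICHOST rejects host names)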
@ -1,660 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2017 Lenovo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains utility methods
# Lenovo Networking

import time
import socket
import re
import json
try:
    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos_errorcodes
    from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos_devicerules
    HAS_LIB = True
except Exception:
    HAS_LIB = False
from distutils.cmd import Command
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
from ansible.module_utils.connection import Connection, exec_command
from ansible.module_utils.connection import ConnectionError

_DEVICE_CONFIGS = {}
_CONNECTION = None
_VALID_USER_ROLES = ['network-admin', 'network-operator']

cnos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']),
                     no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']),
                        type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']),
                      type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']),
                      no_log=True),
    'timeout': dict(type='int'),
    'context': dict(),
    'passwords': dict()
}

cnos_argument_spec = {
    'provider': dict(type='dict', options=cnos_provider_spec),
}

command_spec = {
    'command': dict(key=True),
    'prompt': dict(),
    'answer': dict(),
    'check_all': dict()
}


def get_provider_argspec():
    return cnos_provider_spec


def check_args(module, warnings):
    pass


def get_user_roles():
    return _VALID_USER_ROLES


def get_connection(module):
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module._socket_path)

    context = None
    try:
        context = module.params['context']
    except KeyError:
        context = None

    if context:
        if context == 'system':
            command = 'changeto system'
        else:
            command = 'changeto context %s' % context
        _CONNECTION.get(command)

    return _CONNECTION


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    passwords = None
    try:
        passwords = module.params['passwords']
    except KeyError:
        passwords = None
    if passwords:
        cmd = 'more system:running-config'
    else:
        cmd = 'display running-config '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        conn = get_connection(module)
        out = conn.get(cmd)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg


def to_commands(module, commands):
    if not isinstance(commands, list):
        raise AssertionError('argument must be of type <list>')

    transform = EntityCollection(module, command_spec)
    commands = transform(commands)

    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])

    return commands


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    connection.get('enable')
    commands = to_commands(module, to_list(commands))

    responses = list()

    for cmd in commands:
        out = connection.get(**cmd)
        responses.append(to_text(out, errors='surrogate_then_replace'))

    return responses


def run_cnos_commands(module, commands, check_rc=True):
    retVal = ''
    enter_config = {'command': 'configure terminal', 'prompt': None,
                    'answer': None}
    exit_config = {'command': 'end', 'prompt': None, 'answer': None}
    commands.insert(0, enter_config)
    commands.append(exit_config)
    for cmd in commands:
        retVal = retVal + '>> ' + cmd['command'] + '\n'
    try:
        responses = run_commands(module, commands, check_rc)
        for response in responses:
            retVal = retVal + '<< ' + response + '\n'
    except Exception as e:
        errMsg = ''
        if hasattr(e, 'message'):
            errMsg = e.message
        else:
            errMsg = str(e)
        # Exceptions that are expected and passed through verbatim
        if 'VLAN_ACCESS_MAP' in errMsg:
            return retVal + '<<' + errMsg + '\n'
        if 'confederation identifier' in errMsg:
            return retVal + '<<' + errMsg + '\n'
        # Add more here if required
        retVal = retVal + '<< ' + 'Error-101 ' + errMsg + '\n'
    return str(retVal)


def get_capabilities(module):
    if hasattr(module, '_cnos_capabilities'):
        return module._cnos_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._cnos_capabilities = json.loads(capabilities)
    return module._cnos_capabilities


def load_config(module, config):
    try:
        conn = get_connection(module)
        conn.get('enable')
        resp = conn.edit_config(config)
        return resp.get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def get_defaults_flag(module):
    rc, out, err = exec_command(module, 'display running-config ?')
    out = to_text(out, errors='surrogate_then_replace')

    commands = set()
    for line in out.splitlines():
        if line:
            commands.add(line.strip().split()[0])

    if 'all' in commands:
        return 'all'
    else:
        return 'full'


def enterEnableModeForDevice(enablePassword, timeout, obj):
    command = "enable\n"
    pwdPrompt = "password:"
    # debugOutput(enablePassword)
    # debugOutput('\n')
    obj.settimeout(int(timeout))
    # Executing enable
    obj.send(command)
    flag = False
    retVal = ""
    count = 5
    while not flag:
        # If wait time is exceeded.
        if(count == 0):
            flag = True
        else:
            count = count - 1
        # A delay of one second
        time.sleep(1)
        try:
            buffByte = obj.recv(9999)
            buff = buffByte.decode()
            retVal = retVal + buff
            # debugOutput(buff)
            gotit = buff.find(pwdPrompt)
            if(gotit != -1):
                time.sleep(1)
                if(enablePassword is None or enablePassword == ""):
                    return "\n Error-106"
                obj.send(enablePassword)
                obj.send("\r")
                obj.send("\n")
                time.sleep(1)
                innerBuffByte = obj.recv(9999)
                innerBuff = innerBuffByte.decode()
                retVal = retVal + innerBuff
                # debugOutput(innerBuff)
                innerGotit = innerBuff.find("#")
                if(innerGotit != -1):
                    return retVal
            else:
                gotit = buff.find("#")
                if(gotit != -1):
                    return retVal
        except Exception:
            retVal = retVal + "\n Error-101"
            flag = True
    if(retVal == ""):
        retVal = "\n Error-101"
    return retVal
# EOM


def waitForDeviceResponse(command, prompt, timeout, obj):
    obj.settimeout(int(timeout))
    obj.send(command)
    flag = False
    retVal = ""
    while not flag:
        time.sleep(1)
        try:
            buffByte = obj.recv(9999)
            buff = buffByte.decode()
            retVal = retVal + buff
            # debugOutput(retVal)
            gotit = buff.find(prompt)
            if(gotit != -1):
                flag = True
        except Exception:
            # debugOutput(prompt)
            if prompt == "(yes/no)?":
                pass
            elif prompt == "Password:":
                pass
            else:
                retVal = retVal + "\n Error-101"
            flag = True
    return retVal
# EOM


def checkOutputForError(output):
    retVal = ""
    index = output.lower().find('error')
    startIndex = index + 6
    if(index == -1):
        index = output.lower().find('invalid')
        startIndex = index + 8
        if(index == -1):
            index = output.lower().find('cannot be enabled in l2 interface')
            startIndex = index + 34
            if(index == -1):
                index = output.lower().find('incorrect')
                startIndex = index + 10
                if(index == -1):
                    index = output.lower().find('failure')
                    startIndex = index + 8
                    if(index == -1):
                        return None

    endIndex = startIndex + 3
    errorCode = output[startIndex:endIndex]
    result = errorCode.isdigit()
    if(result is not True):
        return "Device returned an Error. Please check Results for more \
information"

    errorFile = "dictionary/ErrorCodes.lvo"
    try:
        with open(errorFile, 'r') as f:
            for line in f:
                if('=' in line):
                    data = line.split('=')
                    if(data[0].strip() == errorCode):
                        errorString = data[1].strip()
                        return errorString
    except Exception:
        errorString = cnos_errorcodes.getErrorString(errorCode)
        errorString = errorString.strip()
        return errorString
    return "Error Code Not Found"
# EOM


def checkSanityofVariable(deviceType, variableId, variableValue):
    retVal = ""
    ruleFile = "dictionary/" + deviceType + "_rules.lvo"
    ruleString = getRuleStringForVariable(deviceType, ruleFile, variableId)
    retVal = validateValueAgainstRule(ruleString, variableValue)
    return retVal
# EOM


def getRuleStringForVariable(deviceType, ruleFile, variableId):
    retVal = ""
    try:
        with open(ruleFile, 'r') as f:
            for line in f:
                # debugOutput(line)
                if(':' in line):
                    data = line.split(':')
                    # debugOutput(data[0])
                    if(data[0].strip() == variableId):
                        retVal = line
    except Exception:
        ruleString = cnos_devicerules.getRuleString(deviceType, variableId)
        retVal = ruleString.strip()
    return retVal
# EOM


def validateValueAgainstRule(ruleString, variableValue):

    retVal = ""
    if(ruleString == ""):
        return 1
    rules = ruleString.split(':')
    variableType = rules[1].strip()
    varRange = rules[2].strip()
    if(variableType == "INTEGER"):
        result = checkInteger(variableValue)
        if(result is True):
            return "ok"
        else:
            return "Error-111"
    elif(variableType == "FLOAT"):
        result = checkFloat(variableValue)
        if(result is True):
            return "ok"
        else:
            return "Error-112"

    elif(variableType == "INTEGER_VALUE"):
        int_range = varRange.split('-')
        r = range(int(int_range[0].strip()), int(int_range[1].strip()))
        if(checkInteger(variableValue) is not True):
            return "Error-111"
        result = int(variableValue) in r
        if(result is True):
            return "ok"
        else:
            return "Error-113"

    elif(variableType == "INTEGER_VALUE_RANGE"):
        int_range = varRange.split('-')
        varLower = int_range[0].strip()
        varHigher = int_range[1].strip()
        r = range(int(varLower), int(varHigher))
        val_range = variableValue.split('-')
        try:
            valLower = val_range[0].strip()
            valHigher = val_range[1].strip()
        except Exception:
            return "Error-113"
        if((checkInteger(valLower) is not True) or
                (checkInteger(valHigher) is not True)):
            # debugOutput("Error-114")
            return "Error-114"
        result = (int(valLower) in r) and (int(valHigher) in r) \
            and (int(valLower) < int(valHigher))
        if(result is True):
            return "ok"
        else:
            # debugOutput("Error-113")
            return "Error-113"

    elif(variableType == "INTEGER_OPTIONS"):
        int_options = varRange.split(',')
        if(checkInteger(variableValue) is not True):
            return "Error-111"
        result = False
        for opt in int_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "LONG"):
        result = checkLong(variableValue)
        if(result is True):
            return "ok"
        else:
            return "Error-116"

    elif(variableType == "LONG_VALUE"):
        long_range = varRange.split('-')
        r = range(int(long_range[0].strip()), int(long_range[1].strip()))
        if(checkLong(variableValue) is not True):
            # debugOutput(variableValue)
            return "Error-116"
        result = int(variableValue) in r
        if(result is True):
            return "ok"
        else:
            return "Error-113"

    elif(variableType == "LONG_VALUE_RANGE"):
        long_range = varRange.split('-')
        r = range(int(long_range[0].strip()), int(long_range[1].strip()))
        val_range = variableValue.split('-')
        if((checkLong(val_range[0]) is not True) or
                (checkLong(val_range[1]) is not True)):
            return "Error-117"
        result = (int(val_range[0]) in r) and (
            int(val_range[1]) in r) and (int(val_range[0]) < int(val_range[1]))
        if(result is True):
            return "ok"
        else:
            return "Error-113"
    elif(variableType == "LONG_OPTIONS"):
        long_options = varRange.split(',')
        if(checkLong(variableValue) is not True):
            return "Error-116"
        result = False
        for opt in long_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "TEXT"):
        if(variableValue == ""):
            return "Error-118"
        if(isinstance(variableValue, str)):
            return "ok"
        else:
            return "Error-119"

    elif(variableType == "NO_VALIDATION"):
        if(variableValue == ""):
            return "Error-118"
        else:
            return "ok"

    elif(variableType == "TEXT_OR_EMPTY"):
        if(variableValue is None or variableValue == ""):
            return "ok"
        if(isinstance(variableValue, str)):
            return "ok"
        else:
            return "Error-119"

    elif(variableType == "MATCH_TEXT"):
        if(variableValue == ""):
            return "Error-118"
        if(isinstance(variableValue, str)):
            if(varRange == variableValue):
                return "ok"
            else:
                return "Error-120"
        else:
            return "Error-119"

    elif(variableType == "MATCH_TEXT_OR_EMPTY"):
        if(variableValue is None or variableValue == ""):
            return "ok"
        if(isinstance(variableValue, str)):
            if(varRange == variableValue):
                return "ok"
            else:
                return "Error-120"
        else:
            return "Error-119"

    elif(variableType == "TEXT_OPTIONS"):
        str_options = varRange.split(',')
        if(isinstance(variableValue, str) is not True):
            return "Error-119"
        result = False
        for opt in str_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "TEXT_OPTIONS_OR_EMPTY"):
        if(variableValue is None or variableValue == ""):
            return "ok"
        str_options = varRange.split(',')
        if(isinstance(variableValue, str) is not True):
            return "Error-119"
        result = False
        for opt in str_options:
            if(opt.strip() == variableValue):
                result = True
                break
        if(result is True):
            return "ok"
        else:
            return "Error-115"

    elif(variableType == "IPV4Address"):
        try:
            socket.inet_pton(socket.AF_INET, variableValue)
            result = True
        except socket.error:
            result = False
        if(result is True):
            return "ok"
        else:
            return "Error-121"
    elif(variableType == "IPV4AddressWithMask"):
        if(variableValue is None or variableValue == ""):
            return "Error-119"
        str_options = variableValue.split('/')
        ipaddr = str_options[0]
        mask = str_options[1]
        try:
            socket.inet_pton(socket.AF_INET, ipaddr)
            if(checkInteger(mask) is True):
                result = True
            else:
                result = False
        except socket.error:
            result = False
        if(result is True):
            return "ok"
        else:
            return "Error-121"

    elif(variableType == "IPV6Address"):
        try:
            socket.inet_pton(socket.AF_INET6, variableValue)
            result = True
        except socket.error:
            result = False
        if(result is True):
            return "ok"
        else:
            return "Error-122"

    return retVal
# EOM


def disablePaging(remote_conn):
    remote_conn.send("terminal length 0\n")
    time.sleep(1)
    # Clear the buffer on the screen
    outputByte = remote_conn.recv(1000)
    output = outputByte.decode()
    return output
# EOM


def checkInteger(s):
    try:
        int(s)
        return True
    except ValueError:
        return False
# EOM


def checkFloat(s):
    try:
        float(s)
        return True
    except ValueError:
        return False
# EOM


def checkLong(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


def debugOutput(command):
    f = open('debugOutput.txt', 'a')
    f.write(str(command))  # python will convert \n to os.linesep
    f.close()  # you can omit this in most cases as the destructor will call it
# EOM
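
A hedged sketch of how the rule-driven validator above is typically exercised. The rule strings follow the '<id>:<TYPE>:<range>' layout it parses, but these particular rules and values are invented for illustration (real rules come from cnos_devicerules or the *_rules.lvo dictionaries).

# Illustrative only; rule strings and values are not from a real device.
print(validateValueAgainstRule("vlan_id:INTEGER_VALUE:1-3999", "100"))    # "ok"
print(validateValueAgainstRule("vlan_id:INTEGER_VALUE:1-3999", "5000"))   # "Error-113" (out of range)
print(validateValueAgainstRule("vlan_name:TEXT:", ""))                    # "Error-118" (empty value)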
File diff suppressed because it is too large
@ -1,256 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2017 Lenovo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains error codes and methods
# Lenovo Networking

errorDict = {0: 'Success',
             1: 'NOK',
             101: 'Device Response Timed out',
             102: 'Command Not supported - Use CLI command',
             103: 'Invalid Context',
             104: 'Command Value Not Supported as of Now. Use vlan Id only',
             105: 'Invalid interface Range',
             106: 'Please provide Enable Password.',
             108: '',
             109: '',
             110: 'Invalid protocol option',
             111: 'The Value is not Integer',
             112: 'The Value is not Float',
             113: 'Value is not in Range',
             114: 'Range value is not Integer',
             115: 'Value is not in Options',
             116: 'The Value is not Long',
             117: 'Range value is not Long',
             118: 'The Value cannot be empty',
             119: 'The Value is not String',
             120: 'The Value is not Matching',
             121: 'The Value is not IPV4 Address',
             122: 'The Value is not IPV6 Address',
             123: '',
             124: '',
             125: '',
             126: '',
             127: '',
             128: '',
             129: '',
             130: 'Invalid Access Map Name',
             131: 'Invalid Vlan Dot1q Tag',
             132: 'Invalid Vlan filter value',
             133: 'Invalid Vlan Range Value',
             134: 'Invalid Vlan Id',
             135: 'Invalid Vlan Access Map Action',
             136: 'Invalid Vlan Access Map Name',
             137: 'Invalid Access List',
             138: 'Invalid Vlan Access Map parameter',
             139: 'Invalid Vlan Name',
             140: 'Invalid Vlan Flood value',
             141: 'Invalid Vlan State Value',
             142: 'Invalid Vlan Last Member query Interval',
             143: 'Invalid Querier IP address',
             144: 'Invalid Querier Time out',
             145: 'Invalid Query Interval',
             146: 'Invalid Vlan query max response time',
             147: 'Invalid vlan robustness variable',
             148: 'Invalid Vlan Startup Query count',
             149: 'Invalid vlan Startup Query Interval',
             150: 'Invalid Vlan snooping version',
             151: 'Invalid Vlan Ethernet Interface',
             152: 'Invalid Vlan Port Tag Number',
             153: 'Invalid mrouter option',
             154: 'Invalid Vlan Option',
             155: '',
             156: '',
             157: '',
             158: '',
             159: '',
             160: 'Invalid Vlag Auto Recovery Value',
             161: 'Invalid Vlag Config Consistency Value',
             162: 'Invalid Vlag Port Aggregation Number',
             163: 'Invalid Vlag Priority Value',
             164: 'Invalid Vlag Startup delay value',
             165: 'Invalid Vlag Trie Id',
             166: 'Invalid Vlag Instance Option',
             167: 'Invalid Vlag Keep Alive Attempts',
             168: 'Invalid Vlag Keep Alive Interval',
             169: 'Invalid Vlag Retry Interval',
             170: 'Invalid Vlag Peer Ip VRF Value',
             171: 'Invalid Vlag Health Check Options',
             172: 'Invalid Vlag Option',
             173: '',
             174: '',
             175: '',
             176: 'Invalid BGP As Number',
             177: 'Invalid Routing protocol option',
             178: 'Invalid BGP Address Family',
             179: 'Invalid AS Path options',
             180: 'Invalid BGP med options',
             181: 'Invalid Best Path option',
             182: 'Invalid BGP Local count number',
             183: 'Cluster Id has to be either IP or AS Number',
             184: 'Invalid confederation identifier',
             185: 'Invalid Confederation Peer AS Value',
             186: 'Invalid Confederation Option',
             187: 'Invalid state path relay value',
             188: 'Invalid Maxas Limit AS Value',
             189: 'Invalid Neighbor IP Address or Neighbor AS Number',
             190: 'Invalid Router Id',
             191: 'Invalid BGP Keep Alive Interval',
             192: 'Invalid BGP Hold time',
             193: 'Invalid BGP Option',
             194: 'Invalid BGP Address Family option',
             195: 'Invalid BGP Address Family Redistribution option.',
             196: 'Invalid BGP Address Family Route Map Name',
             197: 'Invalid Next Hop Critical Delay',
             198: 'Invalid Next Hop Non Critical Delay',
             199: 'Invalid Multipath Number Value',
             200: 'Invalid Aggregation Group Mode',
             201: 'Invalid Aggregation Group No',
             202: 'Invalid BFD Access Vlan',
             203: 'Invalid CFD Bridgeport Mode',
             204: 'Invalid Trunk Option',
             205: 'Invalid BFD Option',
             206: 'Invalid Portchannel description',
             207: 'Invalid Portchannel duplex option',
             208: 'Invalid Flow control option state',
             209: 'Invalid Flow control option',
             210: 'Invalid LACP Port priority',
             211: 'Invalid LACP Time out options',
             212: 'Invalid LACP Command options',
             213: 'Invalid LLDP TLV Option',
             214: 'Invalid LLDP Option',
             215: 'Invalid Load interval delay',
             216: 'Invalid Load interval Counter Number',
             217: 'Invalid Load Interval option',
             218: 'Invalid Mac Access Group Name',
             219: 'Invalid Mac Address',
             220: 'Invalid Microburst threshold value',
             221: 'Invalid MTU Value',
             222: 'Invalid Service instance value',
             223: 'Invalid service policy name',
             224: 'Invalid service policy options',
             225: 'Invalid Interface speed value',
             226: 'Invalid Storm control level value',
             227: 'Invalid Storm control option',
             228: 'Invalid Portchannel dot1q tag',
             229: 'Invalid VRRP Id Value',
             230: 'Invalid VRRP Options',
             231: 'Invalid portchannel source interface option',
             232: 'Invalid portchannel load balance options',
             233: 'Invalid Portchannel configuration attribute',
             234: 'Invalid BFD Interval Value',
             235: 'Invalid BFD minrx Value',
             236: 'Invalid BFD multiplier Value',
             237: 'Invalid Key Chain Value',
             238: 'Invalid key name option',
             239: 'Invalid key id value',
             240: 'Invalid Key Option',
             241: 'Invalid authentication option',
             242: 'Invalid destination Ip',
             243: 'Invalid source Ip',
             244: 'Invalid IP Option',
             245: 'Invalid Access group option',
             246: 'Invalid Access group name',
             247: 'Invalid ARP MacAddress Value',
             248: 'Invalid ARP timeout value',
             249: 'Invalid ARP Option',
             250: 'Invalid dhcp request option',
             251: 'Invalid dhcp Client option',
             252: 'Invalid relay Ip Address',
             253: 'Invalid dhcp Option',
             254: 'Invalid OSPF Option',
             255: 'Invalid OSPF Id IP Address Value',
             256: 'Invalid Ip Router Option',
             257: 'Invalid Spanning tree bpdufilter Options',
             258: 'Invalid Spanning tree bpduguard Options',
             259: 'Invalid Spanning tree cost Options',
             260: 'Invalid Spanning tree guard Options',
             261: 'Invalid Spanning tree link-type Options',
             262: 'Invalid Spanning tree link-type Options',
             263: 'Invalid Spanning tree options',
             264: 'Port-priority in increments of 32 is required',
             265: 'Invalid Spanning tree vlan options',
             266: 'Invalid IPv6 option',
             267: 'Invalid IPV6 neighbor IP Address',
             268: 'Invalid IPV6 neighbor mac address',
             269: 'Invalid IPV6 dhcp option',
             270: 'Invalid IPV6 relay address option',
             271: 'Invalid IPV6 Ethernet option',
             272: 'Invalid IPV6 Vlan option',
             273: 'Invalid IPV6 Link Local option',
             274: 'Invalid IPV6 dhcp option',
             275: 'Invalid IPV6 Address',
             276: 'Invalid IPV6 Address option',
             277: 'Invalid BFD neighbor options',
             278: 'Invalid Secondary option',
             289: 'Invalid PortChannel IPV4 address',
             290: 'Invalid Max Path Options',
             291: 'Invalid Distance Local Route value',
             292: 'Invalid Distance Internal AS value',
             293: 'Invalid Distance External AS value',
             294: 'Invalid BGP Reachability Half Life',
             295: 'Invalid BGP Dampening parameter',
             296: 'Invalid BGP Aggregate Prefix value',
             297: 'Invalid BGP Aggregate Prefix Option',
             298: 'Invalid BGP Address Family Route Map Name',
             299: 'Invalid BGP Net IP Mask Value',
             300: 'Invalid BGP Net IP Prefix Value',
             301: 'Invalid BGP Neighbor configuration option',
             302: 'Invalid BGP Neighbor Weight Value',
             303: 'Invalid Neighbor update source option',
             304: 'Invalid Ethernet slot/chassis number',
             305: 'Invalid Loopback Interface number',
             306: 'Invalid vlan id',
             307: 'Invalid Number of hops',
             308: 'Invalid Neighbor Keepalive interval',
             309: 'Invalid Neighbor timer hold time',
             310: 'Invalid neighbor password',
             311: 'Invalid Max peer limit',
             312: 'Invalid Local AS Number',
             313: 'Invalid maximum hop count',
             314: 'Invalid neighbor description',
             315: 'Invalid Neighbor connect timer value',
             316: 'Invalid Neighbor address family option',
             317: 'Invalid neighbor address family option',
             318: 'Invalid route-map name',
             319: 'Invalid route-map',
             320: 'Invalid Name of a prefix list',
             321: 'Invalid Filter incoming option',
             322: 'Invalid AS path access-list name',
             323: 'Invalid Filter route option',
             324: 'Invalid route-map name',
             325: 'Invalid Number of occurrences of AS number',
             326: 'Invalid Prefix Limit'}


def getErrorString(errorCode):
    retVal = errorDict[int(errorCode)]
    return retVal
# EOM
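
Note that getErrorString raises KeyError for codes missing from errorDict; a caller that prefers a soft failure can wrap it as in this hedged sketch (the wrapper name is ours, not part of the file):

def describe_error(errorCode):
    # Hypothetical helper: fall back to a generic message for unknown codes.
    try:
        return getErrorString(errorCode)
    except (KeyError, ValueError):
        return 'Unknown error code: %s' % errorCode

print(describe_error(106))   # 'Please provide Enable Password.'
print(describe_error(999))   # 'Unknown error code: 999'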
@ -1,132 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection, ConnectionError

_DEVICE_CONFIGS = None


def get_connection(module):
    if hasattr(module, '_edgeos_connection'):
        return module._edgeos_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._edgeos_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._edgeos_connection


def get_capabilities(module):
    if hasattr(module, '_edgeos_capabilities'):
        return module._edgeos_capabilities

    capabilities = Connection(module._socket_path).get_capabilities()
    module._edgeos_capabilities = json.loads(capabilities)
    return module._edgeos_capabilities


def get_config(module):
    global _DEVICE_CONFIGS

    if _DEVICE_CONFIGS is not None:
        return _DEVICE_CONFIGS
    else:
        connection = get_connection(module)
        out = connection.get_config()
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS = cfg
        return cfg


def run_commands(module, commands, check_rc=True):
    responses = list()
    connection = get_connection(module)

    for cmd in to_list(commands):
        if isinstance(cmd, dict):
            command = cmd['command']
            prompt = cmd['prompt']
            answer = cmd['answer']
        else:
            command = cmd
            prompt = None
            answer = None

        try:
            out = connection.get(command, prompt, answer)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc))

        try:
            out = to_text(out, errors='surrogate_or_strict')
        except UnicodeError:
            module.fail_json(msg=u'Failed to decode output from %s: %s' %
                                 (cmd, to_text(out)))

        responses.append(out)

    return responses


def load_config(module, commands, commit=False, comment=None):
    connection = get_connection(module)

    try:
        out = connection.edit_config(commands)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))

    diff = None
    if module._diff:
        out = connection.get('compare')
        out = to_text(out, errors='surrogate_or_strict')

        if not out.startswith('No changes'):
            out = connection.get('show')
            diff = to_text(out, errors='surrogate_or_strict').strip()

    if commit:
        try:
            out = connection.commit(comment)
        except ConnectionError:
            connection.discard_changes()
            module.fail_json(msg='commit failed: %s' % out)

    if not commit:
        connection.discard_changes()
    else:
        connection.get('exit')

    if diff:
        return diff
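
A hedged sketch of the compare/commit flow load_config implements. Here `module` is assumed to be a connected AnsibleModule, and the set command is illustrative only:

# Illustrative module-side usage; not part of the original file.
commands = ["set system host-name 'edge01'"]
diff = load_config(module, commands, commit=not module.check_mode,
                   comment='configured by ansible')
if diff is not None:
    # diff is only produced in --diff mode and only when 'compare' reports changes
    result = {'changed': True, 'diff': {'prepared': diff}}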
@ -1,168 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import re

from copy import deepcopy

from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec

_DEVICE_CONFIGS = {}


def build_aggregate_spec(element_spec, required, *extra_spec):
    aggregate_spec = deepcopy(element_spec)
    for elt in required:
        aggregate_spec[elt] = dict(required=True)
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec)
    )
    argument_spec.update(element_spec)
    for elt in extra_spec:
        argument_spec.update(elt)
    return argument_spec


def map_params_to_obj(module):
    obj = []
    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]

            d = item.copy()
            obj.append(d)
    else:
        obj.append(module.params)

    return obj


def get_connection(module):
    if hasattr(module, '_edgeswitch_connection'):
        return module._edgeswitch_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._edgeswitch_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._edgeswitch_connection


def get_capabilities(module):
    if hasattr(module, '_edgeswitch_capabilities'):
        return module._edgeswitch_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._edgeswitch_capabilities = json.loads(capabilities)
    return module._edgeswitch_capabilities


def get_defaults_flag(module):
    connection = get_connection(module)
    try:
        out = connection.get_defaults_flag()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    return to_text(out, errors='surrogate_then_replace').strip()


def get_config(module, flags=None):
    flag_str = ' '.join(to_list(flags))

    try:
        return _DEVICE_CONFIGS[flag_str]
    except KeyError:
        connection = get_connection(module)
        try:
            out = connection.get_config(flags=flags)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[flag_str] = cfg
        return cfg


def get_interfaces_config(module):
    config = get_config(module)
    lines = config.split('\n')
    interfaces = {}
    interface = None
    for line in lines:
        if line == 'exit':
            if interface:
                interfaces[interface[0]] = interface
                interface = None
        elif interface:
            interface.append(line)
        else:
            match = re.match(r'^interface (.*)$', line)
            if match:
                interface = list()
                interface.append(line)

    return interfaces


def to_commands(module, commands):
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    try:
        return connection.run_commands(commands=commands, check_rc=check_rc)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def load_config(module, commands):
    connection = get_connection(module)

    try:
        resp = connection.edit_config(commands)
        return resp.get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
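
A hedged sketch of calling run_commands with both a bare string and a prompt/answer dict, the two forms the command spec above allows; the save command and prompt text are invented for illustration and depend on the device firmware:

# Illustrative only; `module` is assumed to be a connected AnsibleModule.
responses = run_commands(module, [
    'show vlan brief',
    {'command': 'copy system:running-config nvram:startup-config',
     'prompt': 'Are you sure you want to save?', 'answer': 'y'},
])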
@ -1,91 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import re


class InterfaceConfiguration:
    def __init__(self):
        self.commands = []
        self.merged = False

    def has_same_commands(self, interface):
        len1 = len(self.commands)
        len2 = len(interface.commands)
        return len1 == len2 and len1 == len(frozenset(self.commands).intersection(interface.commands))


def merge_interfaces(interfaces):
    """To reduce the number of commands generated by an edgeswitch module,
    take interfaces one by one and try to merge them with neighbors that
    have the same commands to run.
    """
    merged = {}

    for i, interface in interfaces.items():
        if interface.merged:
            continue
        interface.merged = True

        match = re.match(r'(\d+)\/(\d+)', i)
        group = int(match.group(1))
        start = int(match.group(2))
        end = start

        while True:
            try:
                start = start - 1
                key = '{0}/{1}'.format(group, start)
                neighbor = interfaces[key]
                if not neighbor.merged and interface.has_same_commands(neighbor):
                    neighbor.merged = True
                else:
                    break
            except KeyError:
                break
        start = start + 1

        while True:
            try:
                end = end + 1
                key = '{0}/{1}'.format(group, end)
                neighbor = interfaces[key]
                if not neighbor.merged and interface.has_same_commands(neighbor):
                    neighbor.merged = True
                else:
                    break
            except KeyError:
                break
        end = end - 1

        if end == start:
            key = '{0}/{1}'.format(group, start)
        else:
            key = '{0}/{1}-{2}/{3}'.format(group, start, group, end)

        merged[key] = interface
    return merged
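
A self-contained sketch (data invented for illustration) showing how adjacent ports with identical command lists collapse into a single range key:

interfaces = {}
for port in ('0/1', '0/2', '0/3'):
    ic = InterfaceConfiguration()
    ic.commands = ['vlan pvid 42', 'vlan participation include 42']
    interfaces[port] = ic

merged = merge_interfaces(interfaces)
print(sorted(merged.keys()))   # ['0/1-0/3']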
@ -1,172 +0,0 @@
|
|||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by
|
||||
# Ansible still belong to the author of the module, and may assign their own
|
||||
# license to the complete work.
|
||||
#
|
||||
# Copyright (C) 2017 Lenovo.
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains utility methods
# Lenovo Networking

from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
from ansible.module_utils.connection import Connection, exec_command
from ansible.module_utils.connection import ConnectionError

_DEVICE_CONFIGS = {}
_CONNECTION = None

enos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
    'timeout': dict(type='int'),
    'context': dict(),
    'passwords': dict()
}

enos_argument_spec = {
    'provider': dict(type='dict', options=enos_provider_spec),
}

command_spec = {
    'command': dict(key=True),
    'prompt': dict(),
    'answer': dict()
}


def get_provider_argspec():
    return enos_provider_spec


def check_args(module, warnings):
    pass


def get_connection(module):
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module._socket_path)

    context = None
    try:
        context = module.params['context']
    except KeyError:
        context = None

    if context:
        if context == 'system':
            command = 'changeto system'
        else:
            command = 'changeto context %s' % context
        _CONNECTION.get(command)

    return _CONNECTION


def get_config(module, flags=None):
    flags = [] if flags is None else flags

    passwords = None
    try:
        passwords = module.params['passwords']
    except KeyError:
        passwords = None
    if passwords:
        cmd = 'more system:running-config'
    else:
        cmd = 'show running-config '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        conn = get_connection(module)
        out = conn.get(cmd)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg


def to_commands(module, commands):
    if not isinstance(commands, list):
        raise AssertionError('argument must be of type <list>')

    transform = EntityCollection(module, command_spec)
    commands = transform(commands)

    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])

    return commands


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)

    commands = to_commands(module, to_list(commands))

    responses = list()

    for cmd in commands:
        out = connection.get(**cmd)
        responses.append(to_text(out, errors='surrogate_then_replace'))

    return responses


def load_config(module, config):
    try:
        conn = get_connection(module)
        conn.get('enable')
        conn.edit_config(config)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def get_defaults_flag(module):
    rc, out, err = exec_command(module, 'show running-config ?')
    out = to_text(out, errors='surrogate_then_replace')

    commands = set()
    for line in out.splitlines():
        if line:
            commands.add(line.strip().split()[0])

    if 'all' in commands:
        return 'all'
    else:
        return 'full'
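Editor's note: to_commands() above normalizes whatever run_commands() receives against command_spec, so a bare string and a fully specified dict both come out as uniform dicts. A minimal standalone sketch of that normalization (it does not use EntityCollection itself; the helper name and keys below are illustrative):

COMMAND_KEYS = ('command', 'prompt', 'answer')  # mirrors command_spec above

def normalize(commands):
    # Bare strings become dicts keyed by 'command'; optional keys default to None.
    out = []
    for item in commands:
        if isinstance(item, str):
            item = {'command': item}
        out.append(dict((k, item.get(k)) for k in COMMAND_KEYS))
    return out

print(normalize(['show version',
                 {'command': 'reload', 'prompt': 'confirm', 'answer': 'y'}]))
# [{'command': 'show version', 'prompt': None, 'answer': None},
#  {'command': 'reload', 'prompt': 'confirm', 'answer': 'y'}]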
@ -1,49 +0,0 @@
#
# Copyright (c) 2019 Ericsson AB.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError

_DEVICE_CONFIGS = {}


def get_connection(module):
    if hasattr(module, '_eric_eccli_connection'):
        return module._eric_eccli_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._eric_eccli_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._eric_eccli_connection


def get_capabilities(module):
    if hasattr(module, '_eric_eccli_capabilities'):
        return module._eric_eccli_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._eric_eccli_capabilities = json.loads(capabilities)
    return module._eric_eccli_capabilities


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    try:
        return connection.run_commands(commands=commands, check_rc=check_rc)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
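Editor's note: the connection and capability lookups above memoize their results as attributes on the module object, so only the first call pays for the round trip. A self-contained sketch of that pattern (FakeModule and fetch are stand-ins, not part of the original code):

class FakeModule(object):
    pass

def get_capabilities(module, fetch):
    # First call fetches and caches; later calls return the cached value.
    if hasattr(module, '_eric_eccli_capabilities'):
        return module._eric_eccli_capabilities
    module._eric_eccli_capabilities = fetch()
    return module._eric_eccli_capabilities

m = FakeModule()
print(get_capabilities(m, lambda: {'network_api': 'cliconf'}))  # fetched
print(get_capabilities(m, lambda: {'network_api': 'ignored'}))  # cached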
@ -1,23 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the exos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class FactsArgs(object):  # pylint: disable=R0903
    """ The arg spec for the exos facts module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'gather_subset': dict(default=['!config'], type='list'),
        'gather_network_resources': dict(type='list'),
    }
@ -1,48 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                 WARNING                   #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the exos_l2_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class L2_interfacesArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_l2_interfaces module
    """
    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'access': {'options': {'vlan': {'type': 'int'}},
                           'type': 'dict'},
                'name': {'required': True, 'type': 'str'},
                'trunk': {'options': {'native_vlan': {'type': 'int'}, 'trunk_allowed_vlans': {'type': 'list'}},
                          'type': 'dict'}},
            'type': 'list'},
        'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'], 'default': 'merged', 'type': 'str'}
    }  # pylint: disable=C0301
@ -1,57 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                 WARNING                   #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################

"""
The arg spec for the exos_lldp_global module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Lldp_globalArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_lldp_global module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'options': {
                'interval': {'default': 30, 'type': 'int'},
                'tlv_select': {
                    'options': {
                        'management_address': {'type': 'bool'},
                        'port_description': {'type': 'bool'},
                        'system_capabilities': {'type': 'bool'},
                        'system_description': {
                            'default': True,
                            'type': 'bool'},
                        'system_name': {'default': True, 'type': 'bool'}},
                    'type': 'dict'}},
            'type': 'dict'},
        'state': {
            'choices': ['merged', 'replaced', 'deleted'],
            'default': 'merged',
            'type': 'str'}}  # pylint: disable=C0301
@ -1,49 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                 WARNING                   #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################

"""
The arg spec for the exos_lldp_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class Lldp_interfacesArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_lldp_interfaces module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'enabled': {'type': 'bool'},
                'name': {'required': True, 'type': 'str'}},
            'type': 'list'},
        'state': {
            'choices': ['merged', 'replaced', 'overridden', 'deleted'],
            'default': 'merged',
            'type': 'str'}}  # pylint: disable=C0301
@ -1,53 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

#############################################
#                 WARNING                   #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################

"""
The arg spec for the exos_vlans module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type


class VlansArgs(object):  # pylint: disable=R0903
    """The arg spec for the exos_vlans module
    """

    def __init__(self, **kwargs):
        pass

    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'name': {'type': 'str'},
                'state': {
                    'choices': ['active', 'suspend'],
                    'default': 'active',
                    'type': 'str'},
                'vlan_id': {'required': True, 'type': 'int'}},
            'type': 'list'},
        'state': {
            'choices': ['merged', 'replaced', 'overridden', 'deleted'],
            'default': 'merged',
            'type': 'str'}}  # pylint: disable=C0301
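Editor's note: to illustrate what these generated argspecs accept, a task-level value for exos_vlans that satisfies VlansArgs.argument_spec might look like the following (the VLAN names and IDs are hypothetical):

example_params = {
    'config': [
        {'vlan_id': 10, 'name': 'Sales', 'state': 'active'},
        {'vlan_id': 20, 'name': 'Lab', 'state': 'suspend'},
    ],
    'state': 'merged',  # one of merged/replaced/overridden/deleted
}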
@ -1,294 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos_l2_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, dict_diff
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests


class L2_interfaces(ConfigBase):
    """
    The exos_l2_interfaces class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'l2_interfaces',
    ]

    L2_INTERFACE_NATIVE = {
        "data": {
            "openconfig-vlan:config": {
                "interface-mode": "TRUNK",
                "native-vlan": None,
                "trunk-vlans": []
            }
        },
        "method": "PATCH",
        "path": None
    }

    L2_INTERFACE_TRUNK = {
        "data": {
            "openconfig-vlan:config": {
                "interface-mode": "TRUNK",
                "trunk-vlans": []
            }
        },
        "method": "PATCH",
        "path": None
    }

    L2_INTERFACE_ACCESS = {
        "data": {
            "openconfig-vlan:config": {
                "interface-mode": "ACCESS",
                "access-vlan": None
            }
        },
        "method": "PATCH",
        "path": None
    }

    L2_PATH = "/rest/restconf/data/openconfig-interfaces:interfaces/interface="

    def __init__(self, module):
        super(L2_interfaces, self).__init__(module)

    def get_l2_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        l2_interfaces_facts = facts['ansible_network_resources'].get(
            'l2_interfaces')
        if not l2_interfaces_facts:
            return []
        return l2_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_l2_interfaces_facts = self.get_l2_interfaces_facts()
        requests.extend(self.set_config(existing_l2_interfaces_facts))
        if requests:
            if not self._module.check_mode:
                send_requests(self._module, requests=requests)
            result['changed'] = True
        result['requests'] = requests

        changed_l2_interfaces_facts = self.get_l2_interfaces_facts()

        result['before'] = existing_l2_interfaces_facts
        if result['changed']:
            result['after'] = changed_l2_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_l2_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_l2_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        if state == 'overridden':
            requests = self._state_overridden(want, have)
        elif state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)
        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        for w in want:
            for h in have:
                if w["name"] == h["name"]:
                    if dict_diff(w, h):
                        l2_request = self._update_patch_request(w, h)
                        l2_request["data"] = json.dumps(l2_request["data"])
                        requests.append(l2_request)
                    break

        return requests

    def _state_overridden(self, want, have):
        """ The request generator when state is overridden

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        have_copy = []
        for w in want:
            for h in have:
                if w["name"] == h["name"]:
                    if dict_diff(w, h):
                        l2_request = self._update_patch_request(w, h)
                        l2_request["data"] = json.dumps(l2_request["data"])
                        requests.append(l2_request)
                    have_copy.append(h)
                    break

        for h in have:
            if h not in have_copy:
                l2_delete = self._update_delete_request(h)
                if l2_delete["path"]:
                    l2_delete["data"] = json.dumps(l2_delete["data"])
                    requests.append(l2_delete)

        return requests

    def _state_merged(self, want, have):
        """ The request generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []
        for w in want:
            for h in have:
                if w["name"] == h["name"]:
                    if dict_diff(h, w):
                        l2_request = self._update_patch_request(w, h)
                        l2_request["data"] = json.dumps(l2_request["data"])
                        requests.append(l2_request)
                    break

        return requests

    def _state_deleted(self, want, have):
        """ The request generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []
        if want:
            for w in want:
                for h in have:
                    if w["name"] == h["name"]:
                        l2_delete = self._update_delete_request(h)
                        if l2_delete["path"]:
                            l2_delete["data"] = json.dumps(l2_delete["data"])
                            requests.append(l2_delete)
                        break

        else:
            for h in have:
                l2_delete = self._update_delete_request(h)
                if l2_delete["path"]:
                    l2_delete["data"] = json.dumps(l2_delete["data"])
                    requests.append(l2_delete)

        return requests

    def _update_patch_request(self, want, have):

        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, ['vlans', ])
        vlans_facts = facts['ansible_network_resources'].get('vlans')

        vlan_id = []

        for vlan in vlans_facts:
            vlan_id.append(vlan['vlan_id'])

        if want.get("access"):
            if want["access"]["vlan"] in vlan_id:
                l2_request = deepcopy(self.L2_INTERFACE_ACCESS)
                l2_request["data"]["openconfig-vlan:config"]["access-vlan"] = want["access"]["vlan"]
                l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
            else:
                self._module.fail_json(msg="VLAN %s does not exist" % (want["access"]["vlan"]))

        elif want.get("trunk"):
            if want["trunk"]["native_vlan"]:
                if want["trunk"]["native_vlan"] in vlan_id:
                    l2_request = deepcopy(self.L2_INTERFACE_NATIVE)
                    l2_request["data"]["openconfig-vlan:config"]["native-vlan"] = want["trunk"]["native_vlan"]
                    l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
                    for vlan in want["trunk"]["trunk_allowed_vlans"]:
                        if int(vlan) in vlan_id:
                            l2_request["data"]["openconfig-vlan:config"]["trunk-vlans"].append(int(vlan))
                        else:
                            self._module.fail_json(msg="VLAN %s does not exist" % (vlan))
                else:
                    self._module.fail_json(msg="VLAN %s does not exist" % (want["trunk"]["native_vlan"]))
            else:
                l2_request = deepcopy(self.L2_INTERFACE_TRUNK)
                l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
                for vlan in want["trunk"]["trunk_allowed_vlans"]:
                    if int(vlan) in vlan_id:
                        l2_request["data"]["openconfig-vlan:config"]["trunk-vlans"].append(int(vlan))
                    else:
                        self._module.fail_json(msg="VLAN %s does not exist" % (vlan))
        return l2_request

    def _update_delete_request(self, have):

        l2_request = deepcopy(self.L2_INTERFACE_ACCESS)

        if have["access"] and have["access"]["vlan"] != 1 or have["trunk"] or not have["access"]:
            l2_request["data"]["openconfig-vlan:config"]["access-vlan"] = 1
            l2_request["path"] = self.L2_PATH + str(have["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"

        return l2_request
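Editor's note: the want/have pairing in _state_replaced() and its siblings is the heart of these config classes. A simplified, self-contained sketch of just the matching logic (a plain != stands in for netcommon's dict_diff(), and the request dict is a placeholder):

def state_replaced(want, have):
    requests = []
    for w in want:
        for h in have:
            if w['name'] == h['name']:
                if w != h:  # the real code uses dict_diff(w, h)
                    requests.append({'method': 'PATCH', 'name': w['name']})
                break  # stop at the first matching interface
    return requests

print(state_replaced([{'name': '1', 'access': {'vlan': 10}}],
                     [{'name': '1', 'access': {'vlan': 1}}]))
# [{'method': 'PATCH', 'name': '1'}]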
@ -1,199 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos_lldp_global class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests

import json
from copy import deepcopy


class Lldp_global(ConfigBase):
    """
    The exos_lldp_global class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lldp_global',
    ]

    LLDP_DEFAULT_INTERVAL = 30
    LLDP_DEFAULT_TLV = {
        'system_name': True,
        'system_description': True,
        'system_capabilities': False,
        'port_description': False,
        'management_address': False
    }
    LLDP_REQUEST = {
        "data": {"openconfig-lldp:config": {}},
        "method": "PUT",
        "path": "/rest/restconf/data/openconfig-lldp:lldp/config"
    }

    def __init__(self, module):
        super(Lldp_global, self).__init__(module)

    def get_lldp_global_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        lldp_global_facts = facts['ansible_network_resources'].get('lldp_global')
        if not lldp_global_facts:
            return {}
        return lldp_global_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_lldp_global_facts = self.get_lldp_global_facts()
        requests.extend(self.set_config(existing_lldp_global_facts))
        if requests:
            if not self._module.check_mode:
                send_requests(self._module, requests)
            result['changed'] = True
        result['requests'] = requests

        changed_lldp_global_facts = self.get_lldp_global_facts()

        result['before'] = existing_lldp_global_facts
        if result['changed']:
            result['after'] = changed_lldp_global_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lldp_global_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_lldp_global_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']

        if state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)

        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        requests.extend(self._state_deleted(want, have))
        requests.extend(self._state_merged(want, have))
        return requests

    def _state_merged(self, want, have):
        """ The request generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []

        request = deepcopy(self.LLDP_REQUEST)
        self._update_lldp_config_body_if_diff(want, have, request)

        if len(request["data"]["openconfig-lldp:config"]):
            request["data"] = json.dumps(request["data"])
            requests.append(request)

        return requests

    def _state_deleted(self, want, have):
        """ The request generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []

        request = deepcopy(self.LLDP_REQUEST)
        if want:
            self._update_lldp_config_body_if_diff(want, have, request)
        else:
            if self.LLDP_DEFAULT_INTERVAL != have['interval']:
                request["data"]["openconfig-lldp:config"].update(
                    {"hello-timer": self.LLDP_DEFAULT_INTERVAL})

            if have['tlv_select'] != self.LLDP_DEFAULT_TLV:
                request["data"]["openconfig-lldp:config"].update(
                    {"suppress-tlv-advertisement": [key.upper() for key, value in self.LLDP_DEFAULT_TLV.items() if not value]})
                request["data"]["openconfig-lldp:config"]["suppress-tlv-advertisement"].sort()
        if len(request["data"]["openconfig-lldp:config"]):
            request["data"] = json.dumps(request["data"])
            requests.append(request)

        return requests

    def _update_lldp_config_body_if_diff(self, want, have, request):
        if want.get('interval'):
            if want['interval'] != have['interval']:
                request["data"]["openconfig-lldp:config"].update(
                    {"hello-timer": want['interval']})
        if want.get('tlv_select'):
            # Create list of TLVs to be suppressed which aren't already
            want_suppress = [key.upper() for key, value in want["tlv_select"].items() if have["tlv_select"][key] != value and value is False]
            if want_suppress:
                # Add previously suppressed TLVs to the list as we are doing a PUT op
                want_suppress.extend([key.upper() for key, value in have["tlv_select"].items() if value is False])
                request["data"]["openconfig-lldp:config"].update(
                    {"suppress-tlv-advertisement": want_suppress})
                request["data"]["openconfig-lldp:config"]["suppress-tlv-advertisement"].sort()
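Editor's note: because LLDP_REQUEST is a PUT (a full replacement of the config node), _update_lldp_config_body_if_diff() must fold previously suppressed TLVs into the new suppression list or they would be silently re-enabled. A runnable excerpt of just that computation, with example have/want values:

have_tlv = {'system_name': True, 'system_description': True,
            'system_capabilities': False, 'port_description': False,
            'management_address': False}
want_tlv = {'system_name': False}  # the user newly disables one TLV

want_suppress = [k.upper() for k, v in want_tlv.items()
                 if have_tlv[k] != v and v is False]
if want_suppress:
    # keep TLVs that were already suppressed, since PUT replaces the set
    want_suppress.extend(k.upper() for k, v in have_tlv.items() if v is False)
want_suppress.sort()
print(want_suppress)
# ['MANAGEMENT_ADDRESS', 'PORT_DESCRIPTION', 'SYSTEM_CAPABILITIES', 'SYSTEM_NAME']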
@ -1,243 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos_lldp_interfaces class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, dict_diff
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests


class Lldp_interfaces(ConfigBase):
    """
    The exos_lldp_interfaces class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lldp_interfaces',
    ]

    LLDP_INTERFACE = {
        "data": {
            "openconfig-lldp:config": {
                "name": None,
                "enabled": True
            }
        },
        "method": "PATCH",
        "path": None
    }

    LLDP_PATH = "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface="

    def __init__(self, module):
        super(Lldp_interfaces, self).__init__(module)

    def get_lldp_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        lldp_interfaces_facts = facts['ansible_network_resources'].get(
            'lldp_interfaces')
        if not lldp_interfaces_facts:
            return []
        return lldp_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
        requests.extend(self.set_config(existing_lldp_interfaces_facts))
        if requests:
            if not self._module.check_mode:
                send_requests(self._module, requests=requests)
            result['changed'] = True
        result['requests'] = requests

        changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts()

        result['before'] = existing_lldp_interfaces_facts
        if result['changed']:
            result['after'] = changed_lldp_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lldp_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_lldp_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        if state == 'overridden':
            requests = self._state_overridden(want, have)
        elif state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)
        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []

        for w in want:
            for h in have:
                if w['name'] == h['name']:
                    lldp_request = self._update_patch_request(w, h)
                    if lldp_request["path"]:
                        lldp_request["data"] = json.dumps(lldp_request["data"])
                        requests.append(lldp_request)

        return requests

    def _state_overridden(self, want, have):
        """ The request generator when state is overridden

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        have_copy = []
        for w in want:
            for h in have:
                if w['name'] == h['name']:
                    lldp_request = self._update_patch_request(w, h)
                    if lldp_request["path"]:
                        lldp_request["data"] = json.dumps(lldp_request["data"])
                        requests.append(lldp_request)
                    have_copy.append(h)

        for h in have:
            if h not in have_copy:
                if not h['enabled']:
                    lldp_delete = self._update_delete_request(h)
                    if lldp_delete["path"]:
                        lldp_delete["data"] = json.dumps(lldp_delete["data"])
                        requests.append(lldp_delete)

        return requests

    def _state_merged(self, want, have):
        """ The request generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []
        for w in want:
            for h in have:
                if w['name'] == h['name']:
                    lldp_request = self._update_patch_request(w, h)
                    if lldp_request["path"]:
                        lldp_request["data"] = json.dumps(lldp_request["data"])
                        requests.append(lldp_request)

        return requests

    def _state_deleted(self, want, have):
        """ The request generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []
        if want:
            for w in want:
                for h in have:
                    if w['name'] == h['name']:
                        if not h['enabled']:
                            lldp_delete = self._update_delete_request(h)
                            if lldp_delete["path"]:
                                lldp_delete["data"] = json.dumps(
                                    lldp_delete["data"])
                                requests.append(lldp_delete)
        else:
            for h in have:
                if not h['enabled']:
                    lldp_delete = self._update_delete_request(h)
                    if lldp_delete["path"]:
                        lldp_delete["data"] = json.dumps(lldp_delete["data"])
                        requests.append(lldp_delete)

        return requests

    def _update_patch_request(self, want, have):

        lldp_request = deepcopy(self.LLDP_INTERFACE)

        if have['enabled'] != want['enabled']:
            lldp_request["data"]["openconfig-lldp:config"]["name"] = want[
                'name']
            lldp_request["data"]["openconfig-lldp:config"]["enabled"] = want[
                'enabled']
            lldp_request["path"] = self.LLDP_PATH + str(
                want['name']) + "/config"

        return lldp_request

    def _update_delete_request(self, have):

        lldp_delete = deepcopy(self.LLDP_INTERFACE)

        lldp_delete["data"]["openconfig-lldp:config"]["name"] = have['name']
        lldp_delete["data"]["openconfig-lldp:config"]["enabled"] = True
        lldp_delete["path"] = self.LLDP_PATH + str(have['name']) + "/config"

        return lldp_delete
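Editor's note: there is an asymmetry worth calling out in _update_delete_request() above: "deleting" per-interface LLDP config means restoring the default, so the request body always re-enables LLDP. A condensed, runnable sketch of that request (the json.dumps step applied by the callers is omitted here):

def delete_request(have, lldp_path="/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface="):
    # Deleting means resetting to the default state: enabled=True.
    return {
        "method": "PATCH",
        "path": lldp_path + str(have['name']) + "/config",
        "data": {"openconfig-lldp:config": {"name": have['name'], "enabled": True}},
    }

print(delete_request({'name': '1', 'enabled': False}))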
@ -1,277 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos_vlans class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import ConfigBase
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, dict_diff
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.facts import Facts
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests
from ansible_collections.community.general.plugins.module_utils.network.exos.utils.utils import search_obj_in_list


class Vlans(ConfigBase):
    """
    The exos_vlans class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'vlans',
    ]

    VLAN_POST = {
        "data": {"openconfig-vlan:vlans": []},
        "method": "POST",
        "path": "/rest/restconf/data/openconfig-vlan:vlans/"
    }

    VLAN_PATCH = {
        "data": {"openconfig-vlan:vlans": {"vlan": []}},
        "method": "PATCH",
        "path": "/rest/restconf/data/openconfig-vlan:vlans/"
    }

    VLAN_DELETE = {
        "method": "DELETE",
        "path": None
    }

    DEL_PATH = "/rest/restconf/data/openconfig-vlan:vlans/vlan="

    REQUEST_BODY = {
        "config": {"name": None, "status": "ACTIVE", "tpid": "oc-vlan-types:TPID_0x8100", "vlan-id": None}
    }

    def __init__(self, module):
        super(Vlans, self).__init__(module)

    def get_vlans_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        vlans_facts = facts['ansible_network_resources'].get('vlans')
        if not vlans_facts:
            return []
        return vlans_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_vlans_facts = self.get_vlans_facts()
        requests.extend(self.set_config(existing_vlans_facts))
        if requests:
            if not self._module.check_mode:
                send_requests(self._module, requests=requests)
            result['changed'] = True
        result['requests'] = requests

        changed_vlans_facts = self.get_vlans_facts()

        result['before'] = existing_vlans_facts
        if result['changed']:
            result['after'] = changed_vlans_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_vlans_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_vlans_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        if state == 'overridden':
            requests = self._state_overridden(want, have)
        elif state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)
        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        request_patch = deepcopy(self.VLAN_PATCH)

        for w in want:
            if w.get('vlan_id'):
                h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                if h:
                    if dict_diff(w, h):
                        request_body = self._update_patch_request(w)
                        request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body)
                else:
                    request_post = self._update_post_request(w)
                    requests.append(request_post)

        if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]):
            request_patch["data"] = json.dumps(request_patch["data"])
            requests.append(request_patch)

        return requests

    def _state_overridden(self, want, have):
        """ The request generator when state is overridden

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        request_patch = deepcopy(self.VLAN_PATCH)

        have_copy = []
        for w in want:
            if w.get('vlan_id'):
                h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                if h:
                    if dict_diff(w, h):
                        request_body = self._update_patch_request(w)
                        request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body)
                    have_copy.append(h)
                else:
                    request_post = self._update_post_request(w)
                    requests.append(request_post)

        for h in have:
            if h not in have_copy and h['vlan_id'] != 1:
                request_delete = self._update_delete_request(h)
                requests.append(request_delete)

        if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]):
            request_patch["data"] = json.dumps(request_patch["data"])
            requests.append(request_patch)

        return requests

    def _state_merged(self, want, have):
        """ The requests generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []

        request_patch = deepcopy(self.VLAN_PATCH)

        for w in want:
            if w.get('vlan_id'):
                h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                if h:
                    if dict_diff(w, h):
                        request_body = self._update_patch_request(w)
                        request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body)
                else:
                    request_post = self._update_post_request(w)
                    requests.append(request_post)

        if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]):
            request_patch["data"] = json.dumps(request_patch["data"])
            requests.append(request_patch)
        return requests

    def _state_deleted(self, want, have):
        """ The requests generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []

        if want:
            for w in want:
                if w.get('vlan_id'):
                    h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                    if h:
                        request_delete = self._update_delete_request(h)
                        requests.append(request_delete)

        else:
            if not have:
                return requests
            for h in have:
                if h['vlan_id'] == 1:
                    continue
                else:
                    request_delete = self._update_delete_request(h)
                    requests.append(request_delete)

        return requests

    def _update_vlan_config_body(self, want, request):
        request["config"]["name"] = want["name"]
        request["config"]["status"] = "SUSPENDED" if want["state"] == "suspend" else want["state"].upper()
        request["config"]["vlan-id"] = want["vlan_id"]
        return request

    def _update_patch_request(self, want):
        request_body = deepcopy(self.REQUEST_BODY)
        request_body = self._update_vlan_config_body(want, request_body)
        return request_body

    def _update_post_request(self, want):
        request_post = deepcopy(self.VLAN_POST)
        request_body = deepcopy(self.REQUEST_BODY)
        request_body = self._update_vlan_config_body(want, request_body)
        request_post["data"]["openconfig-vlan:vlans"].append(request_body)
        request_post["data"] = json.dumps(request_post["data"])
        return request_post

    def _update_delete_request(self, have):
        request_delete = deepcopy(self.VLAN_DELETE)
        request_delete["path"] = self.DEL_PATH + str(have['vlan_id'])
        return request_delete
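Editor's note: the state mapping in _update_vlan_config_body() is worth spelling out: the module's 'suspend' becomes the OpenConfig status 'SUSPENDED', while any other value is simply upper-cased ('active' becomes 'ACTIVE'). A runnable excerpt of that body construction, using the same REQUEST_BODY template:

from copy import deepcopy

REQUEST_BODY = {
    "config": {"name": None, "status": "ACTIVE",
               "tpid": "oc-vlan-types:TPID_0x8100", "vlan-id": None}
}

def vlan_config_body(want):
    body = deepcopy(REQUEST_BODY)
    body["config"]["name"] = want["name"]
    body["config"]["status"] = ("SUSPENDED" if want["state"] == "suspend"
                                else want["state"].upper())
    body["config"]["vlan-id"] = want["vlan_id"]
    return body

print(vlan_config_body({'name': 'Lab', 'state': 'suspend', 'vlan_id': 20}))
# {'config': {'name': 'Lab', 'status': 'SUSPENDED',
#             'tpid': 'oc-vlan-types:TPID_0x8100', 'vlan-id': 20}}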
@ -1,219 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.connection import Connection, ConnectionError

_DEVICE_CONNECTION = None


class Cli:
    def __init__(self, module):
        self._module = module
        self._device_configs = {}
        self._connection = None

    def get_capabilities(self):
        """Returns platform info of the remote device
        """
        connection = self._get_connection()
        return json.loads(connection.get_capabilities())

    def _get_connection(self):
        if not self._connection:
            self._connection = Connection(self._module._socket_path)
        return self._connection

    def get_config(self, flags=None):
        """Retrieves the current config from the device or cache
        """
        flags = [] if flags is None else flags
        if self._device_configs == {}:
            connection = self._get_connection()
            try:
                out = connection.get_config(flags=flags)
            except ConnectionError as exc:
                self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
            self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
        return self._device_configs

    def run_commands(self, commands, check_rc=True):
        """Runs list of commands on remote device and returns results
        """
        connection = self._get_connection()
        try:
            response = connection.run_commands(commands=commands, check_rc=check_rc)
        except ConnectionError as exc:
            self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        return response

    def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
        conn = self._get_connection()
        try:
            diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match,
                                 diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
        except ConnectionError as exc:
            self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        return diff


class HttpApi:
    def __init__(self, module):
        self._module = module
        self._device_configs = {}
        self._connection_obj = None

    def get_capabilities(self):
        """Returns platform info of the remote device
        """
        try:
            capabilities = self._connection.get_capabilities()
        except ConnectionError as exc:
            self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))

        return json.loads(capabilities)

    @property
    def _connection(self):
        if not self._connection_obj:
            self._connection_obj = Connection(self._module._socket_path)
        return self._connection_obj

    def get_config(self, flags=None):
        """Retrieves the current config from the device or cache
        """
        flags = [] if flags is None else flags
        if self._device_configs == {}:
            try:
                out = self._connection.get_config(flags=flags)
            except ConnectionError as exc:
                self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
            self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
        return self._device_configs

    def run_commands(self, commands, check_rc=True):
        """Runs list of commands on remote device and returns results
        """
        try:
            response = self._connection.run_commands(commands=commands, check_rc=check_rc)
        except ConnectionError as exc:
            self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        return response

    def send_requests(self, requests):
        """Send a list of http requests to remote device and return results
        """
        if requests is None:
            raise ValueError("'requests' value is required")

        responses = list()
        for req in to_list(requests):
            try:
                response = self._connection.send_request(**req)
            except ConnectionError as exc:
                self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
            responses.append(response)
        return responses

    def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
        try:
            diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match,
                                             diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
        except ConnectionError as exc:
            self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        return diff


def get_capabilities(module):
    conn = get_connection(module)
    return conn.get_capabilities()


def get_connection(module):
    global _DEVICE_CONNECTION
    if not _DEVICE_CONNECTION:
        connection_proxy = Connection(module._socket_path)
        cap = json.loads(connection_proxy.get_capabilities())
        if cap['network_api'] == 'cliconf':
            conn = Cli(module)
        elif cap['network_api'] == 'exosapi':
            conn = HttpApi(module)
        else:
            module.fail_json(msg='Invalid connection type %s' % cap['network_api'])
        _DEVICE_CONNECTION = conn
    return _DEVICE_CONNECTION


def get_config(module, flags=None):
    flags = None if flags is None else flags
    conn = get_connection(module)
    return conn.get_config(flags)


def load_config(module, commands):
    conn = get_connection(module)
    return conn.run_commands(to_command(module, commands))


def run_commands(module, commands, check_rc=True):
    conn = get_connection(module)
    return conn.run_commands(to_command(module, commands), check_rc=check_rc)


def to_command(module, commands):
    transform = ComplexList(dict(
        command=dict(key=True),
        output=dict(default='text'),
        prompt=dict(type='list'),
        answer=dict(type='list'),
        sendonly=dict(type='bool', default=False),
        check_all=dict(type='bool', default=False),
    ), module)
    return transform(to_list(commands))


def send_requests(module, requests):
    conn = get_connection(module)
    return conn.send_requests(to_request(module, requests))


def to_request(module, requests):
    transform = ComplexList(dict(
        path=dict(key=True),
        method=dict(),
        data=dict(type='dict'),
    ), module)
    return transform(to_list(requests))


def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
    conn = get_connection(module)
    return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
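Editor's note: a request passed to send_requests() only needs the keys declared in to_request()'s ComplexList spec: 'path' (the key), optional 'method', and optional 'data' (a dict). A hedged usage sketch; the RESTCONF path is an example, and the send_requests call is shown as it would appear inside a module:

request = [{
    "path": "/rest/restconf/data/openconfig-vlan:vlans",
    "method": "GET",
    # "data" is optional and, when present, must be a dict
}]
# Inside a module:
#     responses = send_requests(module, requests=request)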
@ -1,61 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The facts class for exos
this file validates each subset of facts and selectively
calls the appropriate facts gathering function
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.facts.facts import FactsArgs
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts import FactsBase
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.lldp_global.lldp_global import Lldp_globalFacts
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.vlans.vlans import VlansFacts
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.legacy.base import Default, Hardware, Interfaces, Config
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.lldp_interfaces.lldp_interfaces import Lldp_interfacesFacts
from ansible_collections.community.general.plugins.module_utils.network.exos.facts.l2_interfaces.l2_interfaces import L2_interfacesFacts

FACT_LEGACY_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config)

FACT_RESOURCE_SUBSETS = dict(
    lldp_global=Lldp_globalFacts,
    vlans=VlansFacts,
    lldp_interfaces=Lldp_interfacesFacts,
    l2_interfaces=L2_interfacesFacts,
)


class Facts(FactsBase):
    """ The fact class for exos
    """

    VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())

    VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys())

    def __init__(self, module):
        super(Facts, self).__init__(module)

    def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
        """ Collect the facts for exos

        :param legacy_facts_type: List of legacy facts types
        :param resource_facts_type: List of resource fact types
        :param data: previously collected conf
        :rtype: dict
        :return: the facts gathered
        """
        if self.VALID_RESOURCE_SUBSETS:
            self.get_network_resources_facts(FACT_RESOURCE_SUBSETS, resource_facts_type, data)

        if self.VALID_LEGACY_GATHER_SUBSETS:
            self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type)

        return self.ansible_facts, self._warnings
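The two frozensets above are what an exos facts module validates user-supplied gather subsets against. A hypothetical caller would look roughly like this (a sketch, assuming `module` is an already-constructed AnsibleModule):

    # Sketch: legacy subsets name entries in FACT_LEGACY_SUBSETS, resource
    # subsets name entries in FACT_RESOURCE_SUBSETS.
    facts, warnings = Facts(module).get_facts(
        legacy_facts_type=['default', 'interfaces'],
        resource_facts_type=['vlans', 'lldp_global'])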
@ -1,92 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos l2_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests


class L2_interfacesFacts(object):
    """ The exos l2_interfaces fact class
    """
    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = L2_interfacesArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for l2_interfaces
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """

        if not data:
            request = [{
                "path": "/rest/restconf/data/openconfig-interfaces:interfaces",
                "method": "GET"
            }]
            data = send_requests(self._module, requests=request)

        objs = []
        if data:
            for d in data[0]["openconfig-interfaces:interfaces"]["interface"]:
                obj = self.render_config(self.generated_spec, d)
                if obj:
                    objs.append(obj)

        ansible_facts['ansible_network_resources'].pop('l2_interfaces', None)
        facts = {}
        if objs:
            params = utils.validate_config(self.argument_spec, {'config': objs})
            facts['l2_interfaces'] = params['config']

        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)
        if conf["config"]["type"] == "ethernetCsmacd":
            conf_dict = conf["openconfig-if-ethernet:ethernet"]["openconfig-vlan:switched-vlan"]["config"]
            config["name"] = conf["name"]
            if conf_dict["interface-mode"] == "ACCESS":
                config["access"]["vlan"] = conf_dict.get("access-vlan")
            else:
                if 'native-vlan' in conf_dict:
                    config["trunk"]["native_vlan"] = conf_dict.get("native-vlan")
                config["trunk"]["trunk_allowed_vlans"] = conf_dict.get("trunk-vlans")
        return utils.remove_empties(config)
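render_config() implies the openconfig-interfaces payload shape the RESTCONF endpoint returns. A reconstructed, hypothetical access-port entry and its rendered result (values invented for illustration):

    sample = {
        "name": "1",
        "config": {"type": "ethernetCsmacd"},
        "openconfig-if-ethernet:ethernet": {
            "openconfig-vlan:switched-vlan": {
                "config": {"interface-mode": "ACCESS", "access-vlan": 10}
            }
        }
    }
    # self.render_config(self.generated_spec, sample) would yield, after
    # remove_empties() drops the untouched None leaves:
    # {'name': '1', 'access': {'vlan': 10}}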
@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

"""
The exos legacy fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import re
import json

from ansible_collections.community.general.plugins.module_utils.network.exos.exos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems


class FactsBase(object):

    COMMANDS = list()

    def __init__(self, module):
        self.module = module
        self.facts = dict()
        self.warnings = list()
        self.responses = None

    def populate(self):
        self.responses = run_commands(self.module, self.COMMANDS)

    def run(self, cmd):
        return run_commands(self.module, cmd)


class Default(FactsBase):

    COMMANDS = [
        'show version',
        'show switch'
    ]

    def populate(self):
        super(Default, self).populate()
        data = self.responses[0]
        if data:
            self.facts['version'] = self.parse_version(data)
            self.facts['serialnum'] = self.parse_serialnum(data)

        data = self.responses[1]
        if data:
            self.facts['model'] = self.parse_model(data)
            self.facts['hostname'] = self.parse_hostname(data)

    def parse_version(self, data):
        match = re.search(r'Image\s+: ExtremeXOS version (\S+)', data)
        if match:
            return match.group(1)

    def parse_model(self, data):
        match = re.search(r'System Type:\s+(.*$)', data, re.M)
        if match:
            return match.group(1)

    def parse_hostname(self, data):
        match = re.search(r'SysName:\s+(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_serialnum(self, data):
        match = re.search(r'Switch\s+: \S+ (\S+)', data, re.M)
        if match:
            return match.group(1)
        # For stack, return serial number of the first switch in the stack.
        match = re.search(r'Slot-\d+\s+: \S+ (\S+)', data, re.M)
        if match:
            return match.group(1)
        # Handle unique formatting for VM
        match = re.search(r'Switch\s+: PN:\S+\s+SN:(\S+)', data, re.M)
        if match:
            return match.group(1)


class Hardware(FactsBase):

    COMMANDS = [
        'show memory'
    ]

    def populate(self):
        super(Hardware, self).populate()
        data = self.responses[0]
        if data:
            self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0))
            self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0))

    def parse_memtotal(self, data):
        match = re.search(r' Total DRAM \(KB\): (\d+)', data, re.M)
        if match:
            return match.group(1)
        # Handle unique formatting for VM
        match = re.search(r' Total \s+\(KB\): (\d+)', data, re.M)
        if match:
            return match.group(1)

    def parse_memfree(self, data):
        match = re.search(r' Free\s+\(KB\): (\d+)', data, re.M)
        if match:
            return match.group(1)


class Config(FactsBase):

    COMMANDS = ['show configuration detail']

    def populate(self):
        super(Config, self).populate()
        data = self.responses[0]
        if data:
            self.facts['config'] = data


class Interfaces(FactsBase):

    COMMANDS = [
        'show switch',
        {'command': 'show port config', 'output': 'json'},
        {'command': 'show port description', 'output': 'json'},
        {'command': 'show vlan detail', 'output': 'json'},
        {'command': 'show lldp neighbors', 'output': 'json'}
    ]

    def populate(self):
        super(Interfaces, self).populate()

        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        data = self.responses[0]
        if data:
            sysmac = self.parse_sysmac(data)

        data = self.responses[1]
        if data:
            self.facts['interfaces'] = self.populate_interfaces(data, sysmac)

        data = self.responses[2]
        if data:
            self.populate_interface_descriptions(data)

        data = self.responses[3]
        if data:
            self.populate_vlan_interfaces(data, sysmac)

        data = self.responses[4]
        if data:
            self.facts['neighbors'] = self.parse_neighbors(data)

    def parse_sysmac(self, data):
        match = re.search(r'System MAC:\s+(\S+)', data, re.M)
        if match:
            return match.group(1)

    def populate_interfaces(self, interfaces, sysmac):
        facts = dict()
        for elem in interfaces:
            intf = dict()

            if 'show_ports_config' not in elem:
                continue

            key = str(elem['show_ports_config']['port'])

            if elem['show_ports_config']['linkState'] == 2:
                # Link state is "not present", don't include
                continue

            intf['type'] = 'Ethernet'
            intf['macaddress'] = sysmac
            intf['bandwidth_configured'] = str(elem['show_ports_config']['speedCfg'])
            intf['bandwidth'] = str(elem['show_ports_config']['speedActual'])
            intf['duplex_configured'] = elem['show_ports_config']['duplexCfg']
            intf['duplex'] = elem['show_ports_config']['duplexActual']
            if elem['show_ports_config']['linkState'] == 1:
                intf['lineprotocol'] = 'up'
            else:
                intf['lineprotocol'] = 'down'
            if elem['show_ports_config']['portState'] == 1:
                intf['operstatus'] = 'up'
            else:
                intf['operstatus'] = 'admin down'

            facts[key] = intf
        return facts

    def populate_interface_descriptions(self, data):
        for elem in data:
            if 'show_ports_description' not in elem:
                continue
            key = str(elem['show_ports_description']['port'])

            if 'descriptionString' in elem['show_ports_description']:
                desc = elem['show_ports_description']['descriptionString']
                self.facts['interfaces'][key]['description'] = desc

    def populate_vlan_interfaces(self, data, sysmac):
        for elem in data:
            if 'vlanProc' in elem:
                key = elem['vlanProc']['name1']
                if key not in self.facts['interfaces']:
                    intf = dict()
                    intf['type'] = 'VLAN'
                    intf['macaddress'] = sysmac
                    self.facts['interfaces'][key] = intf

                if elem['vlanProc']['ipAddress'] != '0.0.0.0':
                    self.facts['interfaces'][key]['ipv4'] = list()
                    addr = elem['vlanProc']['ipAddress']
                    subnet = elem['vlanProc']['maskForDisplay']
                    ipv4 = dict(address=addr, subnet=subnet)
                    self.add_ip_address(addr, 'ipv4')
                    self.facts['interfaces'][key]['ipv4'].append(ipv4)

            if 'rtifIpv6Address' in elem:
                key = elem['rtifIpv6Address']['rtifName']
                if key not in self.facts['interfaces']:
                    intf = dict()
                    intf['type'] = 'VLAN'
                    intf['macaddress'] = sysmac
                    self.facts['interfaces'][key] = intf
                self.facts['interfaces'][key]['ipv6'] = list()
                addr, subnet = elem['rtifIpv6Address']['ipv6_address_mask'].split('/')
                ipv6 = dict(address=addr, subnet=subnet)
                self.add_ip_address(addr, 'ipv6')
                self.facts['interfaces'][key]['ipv6'].append(ipv6)

    def add_ip_address(self, address, family):
        if family == 'ipv4':
            if address not in self.facts['all_ipv4_addresses']:
                self.facts['all_ipv4_addresses'].append(address)
        else:
            if address not in self.facts['all_ipv6_addresses']:
                self.facts['all_ipv6_addresses'].append(address)

    def parse_neighbors(self, data):
        facts = dict()
        for elem in data:
            if 'lldpPortNbrInfoShort' not in elem:
                continue
            intf = str(elem['lldpPortNbrInfoShort']['port'])
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            fact['host'] = elem['lldpPortNbrInfoShort']['nbrSysName']
            fact['port'] = str(elem['lldpPortNbrInfoShort']['nbrPortID'])
            facts[intf].append(fact)
        return facts
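The Default parsers operate on raw `show` output. For instance, parse_version() against an invented banner line (sample text, not captured from a real device):

    import re
    banner = "Image     : ExtremeXOS version 30.3.1.6"
    match = re.search(r'Image\s+: ExtremeXOS version (\S+)', banner)
    print(match.group(1))   # -> 30.3.1.6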
@ -1,97 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos lldp_global fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.lldp_global.lldp_global \
    import Lldp_globalArgs
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests


class Lldp_globalFacts(object):
    """ The exos lldp_global fact class
    """

    TLV_SELECT_OPTIONS = [
        "SYSTEM_NAME",
        "SYSTEM_DESCRIPTION",
        "SYSTEM_CAPABILITIES",
        "MANAGEMENT_ADDRESS",
        "PORT_DESCRIPTION"]

    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = Lldp_globalArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lldp_global
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            request = {
                "path": "/rest/restconf/data/openconfig-lldp:lldp/config/",
                "method": "GET",
            }
            data = send_requests(self._module, request)

        obj = {}
        if data:
            lldp_obj = self.render_config(self.generated_spec, data[0])
            if lldp_obj:
                obj = lldp_obj

        ansible_facts['ansible_network_resources'].pop('lldp_global', None)
        facts = {}

        params = utils.validate_config(self.argument_spec, {'config': obj})
        facts['lldp_global'] = params['config']

        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)
        config['interval'] = conf["openconfig-lldp:config"]["hello-timer"]

        for item in self.TLV_SELECT_OPTIONS:
            config["tlv_select"][item.lower()] = (
                False if (item in conf["openconfig-lldp:config"]["suppress-tlv-advertisement"])
                else True)

        return utils.remove_empties(config)
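Note the inversion in the loop above: the device reports suppressed TLVs, while the facts model records enabled ones. With an assumed suppress list of ["SYSTEM_NAME"], the equivalent computation is:

    # Sketch of the inversion, with an invented device response:
    suppressed = ["SYSTEM_NAME"]
    tlv_select = {item.lower(): item not in suppressed
                  for item in Lldp_globalFacts.TLV_SELECT_OPTIONS}
    # -> {'system_name': False, 'system_description': True, 'system_capabilities': True,
    #     'management_address': True, 'port_description': True}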
@ -1,88 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos lldp_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.lldp_interfaces.lldp_interfaces import Lldp_interfacesArgs
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests


class Lldp_interfacesFacts(object):
    """ The exos lldp_interfaces fact class
    """

    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = Lldp_interfacesArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for lldp_interfaces
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """

        if not data:
            request = [{
                "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4",
                "method": "GET"
            }]
            data = send_requests(self._module, requests=request)

        objs = []
        if data:
            for d in data[0]["openconfig-lldp:interfaces"]["interface"]:
                obj = self.render_config(self.generated_spec, d["config"])
                if obj:
                    objs.append(obj)

        ansible_facts['ansible_network_resources'].pop('lldp_interfaces', None)
        facts = {}
        if objs:
            params = utils.validate_config(self.argument_spec, {'config': objs})
            facts['lldp_interfaces'] = params['config']

        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)

        config["name"] = conf["name"]
        config["enabled"] = bool(conf["enabled"])

        return utils.remove_empties(config)
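Each interface entry only needs `name` and `enabled` from the openconfig-lldp config node. A hypothetical element and its rendering:

    conf = {"name": "1", "enabled": True}   # the d["config"] node from the response
    # render_config() copies both fields through, so the resulting fact is
    # {'name': '1', 'enabled': True}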
@ -1,89 +0,0 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos vlans fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type

import re
from copy import deepcopy

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import utils
from ansible_collections.community.general.plugins.module_utils.network.exos.argspec.vlans.vlans import VlansArgs
from ansible_collections.community.general.plugins.module_utils.network.exos.exos import send_requests


class VlansFacts(object):
    """ The exos vlans fact class
    """

    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = VlansArgs.argument_spec
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec

        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for vlans
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected conf
        :rtype: dictionary
        :returns: facts
        """

        if not data:
            request = [{
                "path": "/rest/restconf/data/openconfig-vlan:vlans?depth=5",
                "method": "GET"
            }]
            data = send_requests(self._module, requests=request)

        objs = []
        if data:
            for d in data[0]["openconfig-vlan:vlans"]["vlan"]:
                obj = self.render_config(self.generated_spec, d["config"])
                if obj:
                    objs.append(obj)

        ansible_facts['ansible_network_resources'].pop('vlans', None)
        facts = {}
        if objs:
            params = utils.validate_config(self.argument_spec, {'config': objs})
            facts['vlans'] = params['config']

        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)

        config["name"] = conf["name"]
        config["state"] = "suspend" if conf["status"] == "SUSPENDED" else conf["status"].lower()
        config["vlan_id"] = conf["vlan-id"]

        return utils.remove_empties(config)
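The only transformation here is the status-to-state mapping; for example (status values assumed from the openconfig-vlan model):

    for status in ("ACTIVE", "SUSPENDED"):
        state = "suspend" if status == "SUSPENDED" else status.lower()
        print(status, "->", state)   # ACTIVE -> active, SUSPENDED -> suspend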
@ -1,9 +0,0 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type


def search_obj_in_list(item, lst, key):
    for o in lst:
        if o[key] == item:
            return o
    return None
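A linear lookup by key; it returns the first matching dict or None (example data invented):

    vlans = [{'vlan_id': 10, 'name': 'ten'}, {'vlan_id': 20, 'name': 'twenty'}]
    search_obj_in_list(20, vlans, 'vlan_id')   # -> {'vlan_id': 20, 'name': 'twenty'}
    search_obj_in_list(30, vlans, 'vlan_id')   # -> None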
@ -1,57 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import time

try:
    from f5.iworkflow import ManagementRoot
    from icontrol.exceptions import iControlUnexpectedHTTPError
    HAS_F5SDK = True
except ImportError:
    HAS_F5SDK = False

try:
    from library.module_utils.network.f5.common import F5BaseClient
    from library.module_utils.network.f5.common import F5ModuleError
except ImportError:
    from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import F5BaseClient
    from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import F5ModuleError


class F5Client(F5BaseClient):
    @property
    def api(self):
        exc = None
        if self._client:
            return self._client
        for x in range(0, 3):
            try:
                server = self.params['provider']['server'] or self.params['server']
                user = self.params['provider']['user'] or self.params['user']
                password = self.params['provider']['password'] or self.params['password']
                server_port = self.params['provider']['server_port'] or self.params['server_port'] or 443
                validate_certs = self.params['provider']['validate_certs'] or self.params['validate_certs']

                result = ManagementRoot(
                    server,
                    user,
                    password,
                    port=server_port,
                    verify=validate_certs,
                    token='local'
                )
                self._client = result
                return self._client
            except Exception as ex:
                exc = ex
                time.sleep(3)
        error = 'Unable to connect to {0} on port {1}.'.format(self.params['server'], self.params['server_port'])
        if exc is not None:
            error += ' The reported error was "{0}".'.format(str(exc))
        raise F5ModuleError(error)
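The property retries the connection three times, three seconds apart, before raising. It also implies the params layout the client expects: 'provider' keys win over the top-level legacy keys. A hypothetical layout (values are placeholders, server uses the documentation address range):

    params = {
        'provider': {'server': '192.0.2.10', 'user': 'admin', 'password': 'secret',
                     'server_port': None, 'validate_certs': True},
        # legacy top-level fallbacks, consulted when the provider key is falsy:
        'server': None, 'user': None, 'password': None,
        'server_port': 443, 'validate_certs': None,
    }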
@ -1,121 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

try:
    import bigsuds
    bigsuds_found = True
except ImportError:
    bigsuds_found = False


from ansible.module_utils.basic import env_fallback


def f5_argument_spec():
    return dict(
        server=dict(
            type='str',
            required=True,
            fallback=(env_fallback, ['F5_SERVER'])
        ),
        user=dict(
            type='str',
            required=True,
            fallback=(env_fallback, ['F5_USER'])
        ),
        password=dict(
            type='str',
            aliases=['pass', 'pwd'],
            required=True,
            no_log=True,
            fallback=(env_fallback, ['F5_PASSWORD'])
        ),
        validate_certs=dict(
            default='yes',
            type='bool',
            fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
        ),
        server_port=dict(
            type='int',
            default=443,
            fallback=(env_fallback, ['F5_SERVER_PORT'])
        ),
        state=dict(
            type='str',
            default='present',
            choices=['present', 'absent']
        ),
        partition=dict(
            type='str',
            default='Common',
            fallback=(env_fallback, ['F5_PARTITION'])
        )
    )


def f5_parse_arguments(module):
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")

    if module.params['validate_certs']:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(
                msg="bigsuds does not support verifying certificates with python < 2.7.9. "
                    "Either update python or set validate_certs=False on the task.")

    return (
        module.params['server'],
        module.params['user'],
        module.params['password'],
        module.params['state'],
        module.params['partition'],
        module.params['validate_certs'],
        module.params['server_port']
    )


def bigip_api(bigip, user, password, validate_certs, port=443):
    try:
        if bigsuds.__version__ >= '1.0.4':
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
        elif bigsuds.__version__ == '1.0.3':
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
        else:
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
    except TypeError:
        # bigsuds < 1.0.3, no verify param
        if validate_certs:
            # Note: verified we have SSLContext when we parsed params
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
        else:
            import ssl
            if hasattr(ssl, 'SSLContext'):
                # Really, you should never do this. It disables certificate
                # verification *globally*. But since older bigip libraries
                # don't give us a way to toggle verification we need to
                # disable it at the global level.
                # From https://www.python.org/dev/peps/pep-0476/#id29
                ssl._create_default_https_context = ssl._create_unverified_context
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)

    return api


# Fully Qualified name (with the partition)
def fq_name(partition, name):
    if name is not None and not name.startswith('/'):
        return '/%s/%s' % (partition, name)
    return name


# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
    if list_names is None:
        return None
    return map(lambda x: fq_name(partition, x), list_names)
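Worked examples for the helpers above (inputs are illustrative):

    fq_name('Common', 'my-pool')        # -> '/Common/my-pool'
    fq_name('Common', '/Dev/my-pool')   # already qualified, returned unchanged
    list(fq_list_names('Common', ['a', '/Dev/b']))   # -> ['/Common/a', '/Dev/b']
    # Note fq_list_names() returns a lazy map object on Python 3, hence the list() wrapper.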
@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


import re

try:
    from library.module_utils.network.f5.common import F5ModuleError
except ImportError:
    from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import F5ModuleError

_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters.

    NOTE: This is a slightly modified version of the original function
    taken from the requests library:
    http://docs.python-requests.org/en/master/_modules/requests/utils/

    :param header: string containing ':'.
    """
    try:
        name, value = header.split(':')
    except ValueError:
        raise F5ModuleError('Invalid header format: {0}'.format(header))
    if name == '':
        raise F5ModuleError('Invalid header format: {0}'.format(header))

    if isinstance(value, bytes):
        pat = _CLEAN_HEADER_REGEX_BYTE
    else:
        pat = _CLEAN_HEADER_REGEX_STR
    try:
        if not pat.match(value):
            raise F5ModuleError("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        raise F5ModuleError("Value for header {%s: %s} must be of type str or "
                            "bytes, not %s" % (name, value, type(value)))


def build_service_uri(base_uri, partition, name):
    """Build the proper uri for a service resource.
    This follows the scheme:
    <base_uri>/~<partition>~<<name>.app>~<name>
    :param base_uri: str -- base uri of the REST endpoint
    :param partition: str -- partition for the service
    :param name: str -- name of the service
    :returns: str -- uri to access the service
    """
    name = name.replace('/', '~')
    return '%s~%s~%s.app~%s' % (base_uri, partition, name, name)


def parseStats(entry):
    if 'description' in entry:
        return entry['description']
    elif 'value' in entry:
        return entry['value']
    elif 'entries' in entry or 'nestedStats' in entry and 'entries' in entry['nestedStats']:
        if 'entries' in entry:
            entries = entry['entries']
        else:
            entries = entry['nestedStats']['entries']
        result = None

        for name in entries:
            entry = entries[name]
            if 'https://localhost' in name:
                name = name.split('/')
                name = name[-1]
                if result and isinstance(result, list):
                    result.append(parseStats(entry))
                elif result and isinstance(result, dict):
                    result[name] = parseStats(entry)
                else:
                    try:
                        int(name)
                        result = list()
                        result.append(parseStats(entry))
                    except ValueError:
                        result = dict()
                        result[name] = parseStats(entry)
            else:
                if '.' in name:
                    names = name.split('.')
                    key = names[0]
                    value = names[1]
                    if result is None:
                        # result can be None if this branch is reached first
                        #
                        # For example, the mgmt/tm/net/trunk/NAME/stats API
                        # returns counters.bitsIn before anything else.
                        result = dict()
                        result[key] = dict()
                    elif key not in result:
                        result[key] = dict()
                    elif result[key] is None:
                        result[key] = dict()
                    result[key][value] = parseStats(entry)
                else:
                    if result and isinstance(result, list):
                        result.append(parseStats(entry))
                    elif result and isinstance(result, dict):
                        result[name] = parseStats(entry)
                    else:
                        try:
                            int(name)
                            result = list()
                            result.append(parseStats(entry))
                        except ValueError:
                            result = dict()
                            result[name] = parseStats(entry)
        return result
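parseStats() flattens the iControl REST stats tree. A trimmed, hypothetical entry showing the dotted-key handling:

    entry = {'nestedStats': {'entries': {
        'counters.bitsIn': {'value': 1024},
        'counters.bitsOut': {'value': 2048},
    }}}
    parseStats(entry)   # -> {'counters': {'bitsIn': 1024, 'bitsOut': 2048}}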
@ -1,292 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import, division, print_function

__metaclass__ = type


# BEGIN STATIC DATA AND MESSAGES
class FAZMethods:
    GET = "get"
    SET = "set"
    EXEC = "exec"
    EXECUTE = "exec"
    UPDATE = "update"
    ADD = "add"
    DELETE = "delete"
    REPLACE = "replace"
    CLONE = "clone"
    MOVE = "move"


BASE_HEADERS = {
    'Content-Type': 'application/json',
    'Accept': 'application/json'
}


# FAZ RETURN CODES
FAZ_RC = {
    "faz_return_codes": {
        0: {
            "msg": "OK",
            "changed": True,
            "stop_on_success": True
        },
        -100000: {
            "msg": "Module returned without actually running anything. "
                   "Check parameters, and please contact the authors if needed.",
            "failed": True
        },
        -2: {
            "msg": "Object already exists.",
            "skipped": True,
            "changed": False,
            "good_codes": [0, -2]
        },
        -6: {
            "msg": "Invalid Url. Sometimes this can happen because the path is mapped to a hostname or object that"
                   " doesn't exist. Double check your input object parameters."
        },
        -3: {
            "msg": "Object doesn't exist.",
            "skipped": True,
            "changed": False,
            "good_codes": [0, -3]
        },
        -10131: {
            "msg": "Object dependency failed. Do all named objects in parameters exist?",
            "changed": False,
            "skipped": True
        },
        -9998: {
            "msg": "Duplicate object. Try using mode='set', if using add. STOPPING. Use 'ignore_errors=yes' in playbook "
                   "to override and mark successful.",
        },
        -20042: {
            "msg": "Device Unreachable.",
            "skipped": True
        },
        -10033: {
            "msg": "Duplicate object. Try using mode='set', if using add.",
            "changed": False,
            "skipped": True
        },
        -10000: {
            "msg": "Duplicate object. Try using mode='set', if using add.",
            "changed": False,
            "skipped": True
        },
        -20010: {
            "msg": "Device already added to FortiAnalyzer. Serial number already in use.",
            "good_codes": [0, -20010],
            "changed": False,
            "stop_on_failure": False
        },
        -20002: {
            "msg": "Invalid Argument -- Does this Device exist on FortiAnalyzer?",
            "changed": False,
            "skipped": True,
        }
    }
}

DEFAULT_RESULT_OBJ = (-100000, {"msg": "Nothing Happened. Check that handle_response is being called!"})
FAIL_SOCKET_MSG = {"msg": "Socket Path Empty! The persistent connection manager is messed up. "
                          "Try again in a few moments."}


# BEGIN ERROR EXCEPTIONS
class FAZBaseException(Exception):
    """Wrapper to catch the unexpected"""

    def __init__(self, msg=None, *args, **kwargs):
        if msg is None:
            msg = "An exception occurred within the fortianalyzer.py httpapi connection plugin."
        super(FAZBaseException, self).__init__(msg, *args)

# END ERROR CLASSES


# BEGIN CLASSES
class FAZCommon(object):

    @staticmethod
    def format_request(method, url, *args, **kwargs):
        """
        Formats the payload from the module, into a payload the API handler can use.

        :param url: Connection URL to access
        :type url: string
        :param method: The preferred API Request method (GET, ADD, POST, etc....)
        :type method: basestring
        :param kwargs: The payload dictionary from the module to be converted.

        :return: Properly formatted dictionary payload for API Request via Connection Plugin.
        :rtype: dict
        """

        params = [{"url": url}]
        if args:
            for arg in args:
                params[0].update(arg)
        if kwargs:
            keylist = list(kwargs)
            for k in keylist:
                kwargs[k.replace("__", "-")] = kwargs.pop(k)
            if method == "get" or method == "clone":
                params[0].update(kwargs)
            else:
                if kwargs.get("data", False):
                    params[0]["data"] = kwargs["data"]
                else:
                    params[0]["data"] = kwargs
        return params

    @staticmethod
    def split_comma_strings_into_lists(obj):
        """
        Splits a CSV String into a list. Also takes a dictionary, and converts any CSV strings in any key, to a list.

        :param obj: object in CSV format to be parsed.
        :type obj: str or dict

        :return: A list containing the CSV items.
        :rtype: list
        """
        return_obj = ()
        if isinstance(obj, dict):
            if len(obj) > 0:
                for k, v in obj.items():
                    if isinstance(v, str):
                        new_list = list()
                        if "," in v:
                            new_items = v.split(",")
                            for item in new_items:
                                new_list.append(item.strip())
                            obj[k] = new_list
                return_obj = obj
        elif isinstance(obj, str):
            return_obj = obj.replace(" ", "").split(",")

        return return_obj

    @staticmethod
    def cidr_to_netmask(cidr):
        """
        Converts a CIDR Network string to full blown IP/Subnet format in decimal format.
        Decided not use IP Address module to keep includes to a minimum.

        :param cidr: String object in CIDR format to be processed
        :type cidr: str

        :return: A string object that looks like this "x.x.x.x/y.y.y.y"
        :rtype: str
        """
        if isinstance(cidr, str):
            cidr = int(cidr)
        mask = (0xffffffff >> (32 - cidr)) << (32 - cidr)
        return (str((0xff000000 & mask) >> 24) + '.'
                + str((0x00ff0000 & mask) >> 16) + '.'
                + str((0x0000ff00 & mask) >> 8) + '.'
                + str((0x000000ff & mask)))

    @staticmethod
    def paramgram_child_list_override(list_overrides, paramgram, module):
        """
        If a list of items was provided to a "parent" paramgram attribute, the paramgram needs to be rewritten.
        The child keys of the desired attribute need to be deleted, and then that "parent" key's contents is replaced
        with the list of items that was provided.

        :param list_overrides: A list of parameter names whose module values override the paramgram children.
        :type list_overrides: list
        :param paramgram: Contains the paramgram passed to the modules' local modify function.
        :type paramgram: dict
        :param module: Contains the Ansible Module Object being used by the module.
        :type module: classObject

        :return: A new "paramgram" refactored to allow for multiple entries being added.
        :rtype: dict
        """
        if len(list_overrides) > 0:
            for list_variable in list_overrides:
                try:
                    list_variable = list_variable.replace("-", "_")
                    override_data = module.params[list_variable]
                    if override_data:
                        del paramgram[list_variable]
                        paramgram[list_variable] = override_data
                except BaseException as e:
                    raise FAZBaseException("Error occurred merging custom lists for the paramgram parent: " + str(e))
        return paramgram

    @staticmethod
    def syslog(module, msg):
        try:
            module.log(msg=msg)
        except BaseException:
            pass


# RECURSIVE FUNCTIONS START
def prepare_dict(obj):
    """
    Removes any keys from a dictionary that are only specific to our use in the module. FortiAnalyzer will reject
    requests with these empty/None keys in it.

    :param obj: Dictionary object to be processed.
    :type obj: dict

    :return: Processed dictionary.
    :rtype: dict
    """

    list_of_elems = ["mode", "adom", "host", "username", "password"]

    if isinstance(obj, dict):
        obj = dict((key, prepare_dict(value)) for (key, value) in obj.items() if key not in list_of_elems)
    return obj


def scrub_dict(obj):
    """
    Removes any keys from a dictionary that are EMPTY -- this includes parent keys. FortiAnalyzer doesn't
    like empty keys in dictionaries

    :param obj: Dictionary object to be processed.
    :type obj: dict

    :return: Processed dictionary.
    :rtype: dict
    """

    if isinstance(obj, dict):
        return dict((k, scrub_dict(v)) for k, v in obj.items() if v and scrub_dict(v))
    else:
        return obj
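Worked example of the netmask arithmetic above: for /24 the mask is (0xffffffff >> 8) << 8 = 0xffffff00, whose octets spell 255.255.255.0:

    FAZCommon.cidr_to_netmask(24)     # -> '255.255.255.0'
    FAZCommon.cidr_to_netmask('16')   # strings are coerced to int -> '255.255.0.0'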
@ -1,477 +0,0 @@
|
|||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# (c) 2017 Fortinet, Inc
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZ_RC
|
||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZBaseException
|
||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZCommon
|
||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import scrub_dict
|
||||
from ansible_collections.community.general.plugins.module_utils.network.fortianalyzer.common import FAZMethods
|
||||
|
||||
|
||||
# ACTIVE BUG WITH OUR DEBUG IMPORT CALL - BECAUSE IT'S UNDER MODULE_UTILITIES
|
||||
# WHEN module_common.recursive_finder() runs under the module loader, it looks for this namespace debug import
|
||||
# and because it's not there, it always fails, regardless of it being under a try/catch here.
|
||||
# we're going to move it to a different namespace.
|
||||
# # check for debug lib
|
||||
# try:
|
||||
# from ansible.module_utils.network.fortianalyzer.fortianalyzer_debug import debug_dump
|
||||
# HAS_FAZ_DEBUG = True
|
||||
# except:
|
||||
# HAS_FAZ_DEBUG = False
|
||||
|
||||
|
||||
# BEGIN HANDLER CLASSES
|
||||
class FortiAnalyzerHandler(object):
|
||||
def __init__(self, conn, module):
|
||||
self._conn = conn
|
||||
self._module = module
|
||||
self._tools = FAZCommon
|
||||
self._uses_workspace = None
|
||||
self._uses_adoms = None
|
||||
self._locked_adom_list = list()
|
||||
self._lock_info = None
|
||||
|
||||
self.workspace_check()
|
||||
if self._uses_workspace:
|
||||
self.get_lock_info(adom=self._module.paramgram["adom"])
|
||||
|
||||
def process_request(self, url, datagram, method):
|
||||
"""
|
||||
Formats and Runs the API Request via Connection Plugin. Streamlined for use from Modules.
|
||||
|
||||
:param url: Connection URL to access
|
||||
:type url: string
|
||||
:param datagram: The prepared payload for the API Request in dictionary format
|
||||
:type datagram: dict
|
||||
:param method: The preferred API Request method (GET, ADD, POST, etc....)
|
||||
:type method: basestring
|
||||
|
||||
:return: Dictionary containing results of the API Request via Connection Plugin.
|
||||
:rtype: dict
|
||||
"""
|
||||
try:
|
||||
adom = self._module.paramgram["adom"]
|
||||
if self.uses_workspace and adom not in self._locked_adom_list and method != FAZMethods.GET:
|
||||
self.lock_adom(adom=adom)
|
||||
except BaseException as err:
|
||||
raise FAZBaseException(err)
|
||||
|
||||
data = self._tools.format_request(method, url, **datagram)
|
||||
response = self._conn.send_request(method, data)
|
||||
|
||||
try:
|
||||
adom = self._module.paramgram["adom"]
|
||||
if self.uses_workspace and adom in self._locked_adom_list \
|
||||
and response[0] == 0 and method != FAZMethods.GET:
|
||||
self.commit_changes(adom=adom)
|
||||
except BaseException as err:
|
||||
raise FAZBaseException(err)
|
||||
|
||||
# if HAS_FAZ_DEBUG:
|
||||
# try:
|
||||
# debug_dump(response, datagram, self._module.paramgram, url, method)
|
||||
# except BaseException:
|
||||
# pass
|
||||
|
||||
return response
|
||||
|
||||
def workspace_check(self):
|
||||
"""
|
||||
Checks FortiAnalyzer for the use of Workspace mode.
|
||||
"""
|
||||
url = "/cli/global/system/global"
|
||||
data = {"fields": ["workspace-mode", "adom-status"]}
|
||||
resp_obj = self.process_request(url, data, FAZMethods.GET)
|
||||
try:
|
||||
if resp_obj[1]["workspace-mode"] in ["workflow", "normal"]:
|
||||
self.uses_workspace = True
|
||||
elif resp_obj[1]["workspace-mode"] == "disabled":
|
||||
self.uses_workspace = False
|
||||
except KeyError:
|
||||
self.uses_workspace = False
|
||||
except BaseException as err:
|
||||
raise FAZBaseException(msg="Couldn't determine workspace-mode in the plugin. Error: " + str(err))
|
||||
try:
|
||||
if resp_obj[1]["adom-status"] in [1, "enable"]:
|
||||
self.uses_adoms = True
|
||||
else:
|
||||
self.uses_adoms = False
|
||||
except KeyError:
|
||||
self.uses_adoms = False
|
||||
except BaseException as err:
|
||||
raise FAZBaseException(msg="Couldn't determine adom-status in the plugin. Error: " + str(err))
|
||||
|
||||
def run_unlock(self):
|
||||
"""
|
||||
Checks for ADOM status, if locked, it will unlock
|
||||
"""
|
||||
for adom_locked in self._locked_adom_list:
|
||||
self.unlock_adom(adom_locked)
|
||||
|
||||
def lock_adom(self, adom=None):
|
||||
"""
|
||||
Locks an ADOM for changes
|
||||
"""
|
||||
if not adom or adom == "root":
|
||||
url = "/dvmdb/adom/root/workspace/lock"
|
||||
else:
|
||||
if adom.lower() == "global":
|
||||
url = "/dvmdb/global/workspace/lock/"
|
||||
else:
|
||||
url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom)
|
||||
datagram = {}
|
||||
data = self._tools.format_request(FAZMethods.EXEC, url, **datagram)
|
||||
resp_obj = self._conn.send_request(FAZMethods.EXEC, data)
|
||||
code = resp_obj[0]
|
||||
if code == 0 and resp_obj[1]["status"]["message"].lower() == "ok":
|
||||
self.add_adom_to_lock_list(adom)
|
||||
else:
|
||||
lockinfo = self.get_lock_info(adom=adom)
|
||||
self._module.fail_json(msg=("An error occurred trying to lock the adom. Error: "
|
||||
+ str(resp_obj) + ", LOCK INFO: " + str(lockinfo)))
|
||||
return resp_obj
|
||||
|
||||
def unlock_adom(self, adom=None):
|
||||
"""
|
||||
Unlocks an ADOM after changes
|
||||
"""
|
||||
if not adom or adom == "root":
|
||||
url = "/dvmdb/adom/root/workspace/unlock"
|
||||
else:
|
||||
if adom.lower() == "global":
|
||||
url = "/dvmdb/global/workspace/unlock/"
|
||||
else:
|
||||
url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom)
|
||||
datagram = {}
|
||||
data = self._tools.format_request(FAZMethods.EXEC, url, **datagram)
|
||||
resp_obj = self._conn.send_request(FAZMethods.EXEC, data)
|
||||
code = resp_obj[0]
|
||||
if code == 0 and resp_obj[1]["status"]["message"].lower() == "ok":
|
||||
self.remove_adom_from_lock_list(adom)
|
||||
else:
|
||||
self._module.fail_json(msg=("An error occurred trying to unlock the adom. Error: " + str(resp_obj)))
|
||||
return resp_obj
|
||||
|
||||
def get_lock_info(self, adom=None):
|
||||
"""
|
||||
Gets ADOM lock info so it can be displayed with the error messages. Or if determined to be locked by ansible
|
||||
for some reason, then unlock it.
|
||||
"""
|
||||
if not adom or adom == "root":
|
||||
url = "/dvmdb/adom/root/workspace/lockinfo"
|
||||
else:
|
||||
if adom.lower() == "global":
|
||||
url = "/dvmdb/global/workspace/lockinfo/"
|
||||
else:
|
||||
url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom)
|
||||
datagram = {}
|
||||
data = self._tools.format_request(FAZMethods.GET, url, **datagram)
|
||||
resp_obj = self._conn.send_request(FAZMethods.GET, data)
|
||||
code = resp_obj[0]
|
||||
if code != 0:
|
||||
self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. Error: " + str(resp_obj)))
|
||||
elif code == 0:
|
||||
self._lock_info = resp_obj[1]
|
||||
return resp_obj
|
||||
|
||||
def commit_changes(self, adom=None, aux=False):
|
||||
"""
|
||||
Commits changes to an ADOM
|
||||
"""
|
||||
if not adom or adom == "root":
|
||||
url = "/dvmdb/adom/root/workspace/commit"
|
||||
else:
|
||||
if aux:
|
||||
url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom)
|
||||
else:
|
||||
if adom.lower() == "global":
|
||||
url = "/dvmdb/global/workspace/commit/"
|
||||
else:
|
||||
url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom)
|
||||
datagram = {}
|
||||
data = self._tools.format_request(FAZMethods.EXEC, url, **datagram)
|
||||
        resp_obj = self._conn.send_request(FAZMethods.EXEC, data)
        code = resp_obj[0]
        if code != 0:
            self._module.fail_json(msg=("An error occurred trying to commit changes to the adom. Error: "
                                        + str(resp_obj)))

    def govern_response(self, module, results, msg=None, good_codes=None,
                        stop_on_fail=None, stop_on_success=None, skipped=None,
                        changed=None, unreachable=None, failed=None, success=None, changed_if_success=None,
                        ansible_facts=None):
        """
        This function applies default values to the canned responses we know of from FortiAnalyzer.
        This saves time and turns the response handling in the module into a "one-liner", while still giving us
        the flexibility to use return_response directly in modules if we have to. This function saves repeated code.

        :param module: The Ansible Module CLASS object, used to run fail/exit json
        :type module: object
        :param msg: An overridable custom message from the module that called this.
        :type msg: string
        :param results: A dictionary object containing API call results
        :type results: dict
        :param good_codes: A list of exit codes considered successful from FortiAnalyzer
        :type good_codes: list
        :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true)
        :type stop_on_fail: boolean
        :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false)
        :type stop_on_success: boolean
        :param changed: If True, tells Ansible that object was changed (default: false)
        :type changed: boolean
        :param skipped: If True, tells Ansible that object was skipped (default: false)
        :type skipped: boolean
        :param unreachable: If True, tells Ansible that object was unreachable (default: false)
        :type unreachable: boolean
        :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false)
        :type failed: boolean
        :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false)
        :type success: boolean
        :param changed_if_success: If True, the result defaults to changed when the call succeeds,
            whether or not changed was set explicitly.
        :type changed_if_success: boolean
        :param ansible_facts: A prepared dictionary of ansible facts from the execution.
        :type ansible_facts: dict
        """
        if module is None and results is None:
            raise FAZBaseException("govern_response() was called without a module and/or results tuple! Fix!")
        # Get the return code from results
        try:
            rc = results[0]
        except BaseException:
            raise FAZBaseException("govern_response() was called without the return code at results[0]")

        # init a few items
        rc_data = None

        # Get the default values for the said return code.
        try:
            rc_codes = FAZ_RC.get('faz_return_codes')
            rc_data = rc_codes.get(rc)
        except BaseException:
            pass

        if not rc_data:
            rc_data = {}
        # ONLY add to overrides if not None -- it is very important that the keys aren't added at this stage
        # if they are empty. There aren't that many, so a few if statements will do.
        if good_codes is not None:
            rc_data["good_codes"] = good_codes
        if stop_on_fail is not None:
            rc_data["stop_on_fail"] = stop_on_fail
        if stop_on_success is not None:
            rc_data["stop_on_success"] = stop_on_success
        if skipped is not None:
            rc_data["skipped"] = skipped
        if changed is not None:
            rc_data["changed"] = changed
        if unreachable is not None:
            rc_data["unreachable"] = unreachable
        if failed is not None:
            rc_data["failed"] = failed
        if success is not None:
            rc_data["success"] = success
        if changed_if_success is not None:
            rc_data["changed_if_success"] = changed_if_success
        if results is not None:
            rc_data["results"] = results
        if msg is not None:
            rc_data["msg"] = msg
        if ansible_facts is None:
            rc_data["ansible_facts"] = {}
        else:
            rc_data["ansible_facts"] = ansible_facts

        return self.return_response(module=module,
                                    results=results,
                                    msg=rc_data.get("msg", "NULL"),
                                    good_codes=rc_data.get("good_codes", (0,)),
                                    stop_on_fail=rc_data.get("stop_on_fail", True),
                                    stop_on_success=rc_data.get("stop_on_success", False),
                                    skipped=rc_data.get("skipped", False),
                                    changed=rc_data.get("changed", False),
                                    changed_if_success=rc_data.get("changed_if_success", False),
                                    unreachable=rc_data.get("unreachable", False),
                                    failed=rc_data.get("failed", False),
                                    success=rc_data.get("success", False),
                                    ansible_facts=rc_data.get("ansible_facts", dict()))

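The override handling above follows a simple layering rule: start from the canned defaults stored for the return code, then overwrite only the keys the caller explicitly set. A standalone sketch of just that pattern follows; the `FAZ_RC` contents here are invented for illustration and are not the real return-code table.

```python
# Standalone illustration of the override layering used by govern_response().
# The FAZ_RC values below are invented for the demonstration.
FAZ_RC = {'faz_return_codes': {-11: {'msg': 'Not logged in', 'stop_on_fail': True, 'failed': True}}}


def resolve(rc, **overrides):
    # Start from the canned defaults for this return code...
    rc_data = dict(FAZ_RC['faz_return_codes'].get(rc) or {})
    # ...then layer on only the overrides the caller actually supplied.
    for key, value in overrides.items():
        if value is not None:
            rc_data[key] = value
    return rc_data


print(resolve(-11))                      # canned defaults win when nothing is overridden
print(resolve(-11, stop_on_fail=False))  # an explicit override replaces the default
```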
    def return_response(self, module, results, msg="NULL", good_codes=(0,),
                        stop_on_fail=True, stop_on_success=False, skipped=False,
                        changed=False, unreachable=False, failed=False, success=False, changed_if_success=True,
                        ansible_facts=()):
        """
        This function controls the logout and error reporting after a method or function runs. The exit_json for
        ansible comes from logic within this function. If this function returns just the msg, it means to continue
        execution on the playbook. It is called from the ansible module, or from the self.govern_response function.

        :param module: The Ansible Module CLASS object, used to run fail/exit json
        :type module: object
        :param msg: An overridable custom message from the module that called this.
        :type msg: string
        :param results: A dictionary object containing API call results
        :type results: dict
        :param good_codes: A list of exit codes considered successful from FortiAnalyzer
        :type good_codes: list
        :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true)
        :type stop_on_fail: boolean
        :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false)
        :type stop_on_success: boolean
        :param changed: If True, tells Ansible that object was changed (default: false)
        :type changed: boolean
        :param skipped: If True, tells Ansible that object was skipped (default: false)
        :type skipped: boolean
        :param unreachable: If True, tells Ansible that object was unreachable (default: false)
        :type unreachable: boolean
        :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false)
        :type failed: boolean
        :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false)
        :type success: boolean
        :param changed_if_success: If True, the result defaults to changed when the call succeeds,
            whether or not changed was set explicitly.
        :type changed_if_success: boolean
        :param ansible_facts: A prepared dictionary of ansible facts from the execution.
        :type ansible_facts: dict

        :return: A string object that contains an error message
        :rtype: str
        """

        # VALIDATION ERROR
        if (len(results) == 0) or (failed and success) or (changed and unreachable):
            module.exit_json(msg="return_response was called with no results, or conflicting failed/success or "
                                 "changed/unreachable parameters. Fix the exit code on module. "
                                 "Generic Failure", failed=True)

        # IDENTIFY SUCCESS/FAIL IF NOT DEFINED
        if not failed and not success:
            if len(results) > 0:
                if results[0] not in good_codes:
                    failed = True
                elif results[0] in good_codes:
                    success = True

        if len(results) > 0:
            # IF NO MESSAGE WAS SUPPLIED, GET IT FROM THE RESULTS, IF THAT DOESN'T WORK, THEN WRITE AN ERROR MESSAGE
            if msg == "NULL":
                try:
                    msg = results[1]['status']['message']
                except BaseException:
                    msg = "No status message returned at results[1][status][message], " \
                          "and none supplied to msg parameter for return_response."

        if failed:
            # BECAUSE SKIPPED/FAILED WILL OFTEN OCCUR ON CODES THAT DON'T GET INCLUDED, THEY ARE CONSIDERED FAILURES
            # HOWEVER, THEY ARE MUTUALLY EXCLUSIVE, SO IF IT IS MARKED SKIPPED OR UNREACHABLE BY THE MODULE LOGIC
            # THEN REMOVE THE FAILED FLAG SO IT DOESN'T OVERRIDE THE DESIRED STATUS OF SKIPPED OR UNREACHABLE.
            if failed and skipped:
                failed = False
            if failed and unreachable:
                failed = False
            if stop_on_fail:
                if self._uses_workspace:
                    try:
                        self.run_unlock()
                    except BaseException as err:
                        raise FAZBaseException(msg=("Couldn't unlock ADOM! Error: " + str(err)))
                module.exit_json(msg=msg, failed=failed, changed=changed, unreachable=unreachable, skipped=skipped,
                                 results=results[1], ansible_facts=ansible_facts, rc=results[0],
                                 invocation={"module_args": ansible_facts["ansible_params"]})
        elif success:
            if changed_if_success:
                changed = True
                success = False
            if stop_on_success:
                if self._uses_workspace:
                    try:
                        self.run_unlock()
                    except BaseException as err:
                        raise FAZBaseException(msg=("Couldn't unlock ADOM! Error: " + str(err)))
                module.exit_json(msg=msg, success=success, changed=changed, unreachable=unreachable,
                                 skipped=skipped, results=results[1], ansible_facts=ansible_facts, rc=results[0],
                                 invocation={"module_args": ansible_facts["ansible_params"]})
        return msg

    @staticmethod
    def construct_ansible_facts(response, ansible_params, paramgram, *args, **kwargs):
        """
        Constructs a dictionary to return to ansible facts, containing various information about the execution.

        :param response: Contains the response from the FortiAnalyzer.
        :type response: dict
        :param ansible_params: Contains the parameters Ansible was called with.
        :type ansible_params: dict
        :param paramgram: Contains the paramgram passed to the modules' local modify function.
        :type paramgram: dict
        :param args: Free-form arguments that could be added.
        :param kwargs: Free-form keyword arguments that could be added.

        :return: A dictionary containing lots of information to append to Ansible Facts.
        :rtype: dict
        """

        facts = {
            "response": response,
            "ansible_params": scrub_dict(ansible_params),
            "paramgram": scrub_dict(paramgram),
        }

        if args:
            facts["custom_args"] = args
        if kwargs:
            facts.update(kwargs)

        return facts

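A small runnable sketch of the facts structure this produces follows. `scrub_dict` is defined elsewhere in this module and strips empty values; the stand-in below mimics that behavior and the sample data is invented.

```python
# Stand-in for the real scrub_dict helper defined elsewhere in this file:
# it drops keys whose values are empty.
def scrub_dict(d):
    return {k: v for k, v in d.items() if v not in (None, '', {}, [])}


response = (0, {'status': {'code': 0, 'message': 'OK'}})
ansible_params = {'adom': 'root', 'password': None}   # invented module params
paramgram = {'mode': 'get', 'adom': 'root'}           # invented paramgram

facts = {
    "response": response,
    "ansible_params": scrub_dict(ansible_params),
    "paramgram": scrub_dict(paramgram),
}
print(facts["ansible_params"])  # {'adom': 'root'} -- empty values removed
```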
    @property
    def uses_workspace(self):
        return self._uses_workspace

    @uses_workspace.setter
    def uses_workspace(self, val):
        self._uses_workspace = val

    @property
    def uses_adoms(self):
        return self._uses_adoms

    @uses_adoms.setter
    def uses_adoms(self, val):
        self._uses_adoms = val

    def add_adom_to_lock_list(self, adom):
        if adom not in self._locked_adom_list:
            self._locked_adom_list.append(adom)

    def remove_adom_from_lock_list(self, adom):
        if adom in self._locked_adom_list:
            self._locked_adom_list.remove(adom)

@ -1,238 +0,0 @@
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import re

from ansible.module_utils._text import to_text
from ansible.module_utils.common.collections import is_string
from ansible.module_utils.six import iteritems

INVALID_IDENTIFIER_SYMBOLS = r'[^a-zA-Z0-9_]'

IDENTITY_PROPERTIES = ['id', 'version', 'ruleId']
NON_COMPARABLE_PROPERTIES = IDENTITY_PROPERTIES + ['isSystemDefined', 'links', 'token', 'rulePosition']


class HTTPMethod:
    GET = 'get'
    POST = 'post'
    PUT = 'put'
    DELETE = 'delete'


class ResponseParams:
    SUCCESS = 'success'
    STATUS_CODE = 'status_code'
    RESPONSE = 'response'


class FtdConfigurationError(Exception):
    def __init__(self, msg, obj=None):
        super(FtdConfigurationError, self).__init__(msg)
        self.msg = msg
        self.obj = obj


class FtdServerError(Exception):
    def __init__(self, response, code):
        super(FtdServerError, self).__init__(response)
        self.response = response
        self.code = code


class FtdUnexpectedResponse(Exception):
    """The exception to be raised in case of unexpected responses from 3rd parties."""
    pass


def construct_ansible_facts(response, params):
    facts = dict()
    if response:
        response_body = response['items'] if 'items' in response else response
        if params.get('register_as'):
            facts[params['register_as']] = response_body
        elif type(response_body) is dict and response_body.get('name') and response_body.get('type'):
            object_name = re.sub(INVALID_IDENTIFIER_SYMBOLS, '_', response_body['name'].lower())
            fact_name = '%s_%s' % (response_body['type'], object_name)
            facts[fact_name] = response_body
    return facts

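When no `register_as` name is supplied, the fact name is derived from the object's type and a sanitized, lower-cased version of its name. A runnable illustration of just that naming step, with an invented response body:

```python
import re

INVALID_IDENTIFIER_SYMBOLS = r'[^a-zA-Z0-9_]'

# Invented response body; any character outside [a-zA-Z0-9_] is replaced by '_'.
response_body = {'name': 'Any-IPv4', 'type': 'networkobject', 'value': '0.0.0.0/0'}
object_name = re.sub(INVALID_IDENTIFIER_SYMBOLS, '_', response_body['name'].lower())
fact_name = '%s_%s' % (response_body['type'], object_name)
print(fact_name)  # networkobject_any_ipv4
```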
def copy_identity_properties(source_obj, dest_obj):
    for property_name in IDENTITY_PROPERTIES:
        if property_name in source_obj:
            dest_obj[property_name] = source_obj[property_name]
    return dest_obj


def is_object_ref(d):
    """
    Checks if a dictionary is a reference object. The dictionary is considered to be a
    reference object when it contains non-empty 'id' and 'type' fields.

    :type d: dict
    :return: True if passed dictionary is a reference object, otherwise False
    """
    has_id = 'id' in d.keys() and d['id']
    has_type = 'type' in d.keys() and d['type']
    return has_id and has_type


def equal_object_refs(d1, d2):
    """
    Checks whether two references point to the same object.

    :type d1: dict
    :type d2: dict
    :return: True if passed references point to the same object, otherwise False
    """
    have_equal_ids = d1['id'] == d2['id']
    have_equal_types = d1['type'] == d2['type']
    return have_equal_ids and have_equal_types


def equal_lists(l1, l2):
    """
    Checks whether two lists are equal. The order of elements in the arrays is important.

    :type l1: list
    :type l2: list
    :return: True if passed lists, their elements and order of elements are equal. Otherwise, returns False.
    """
    if len(l1) != len(l2):
        return False

    for v1, v2 in zip(l1, l2):
        if not equal_values(v1, v2):
            return False

    return True


def equal_dicts(d1, d2, compare_by_reference=True):
    """
    Checks whether two dictionaries are equal. If `compare_by_reference` is set to True, dictionaries referencing
    objects are compared using `equal_object_refs` method. Otherwise, every key and value is checked.

    :type d1: dict
    :type d2: dict
    :param compare_by_reference: if True, dictionaries referencing objects are compared using `equal_object_refs` method
    :return: True if passed dicts are equal. Otherwise, returns False.
    """
    if compare_by_reference and is_object_ref(d1) and is_object_ref(d2):
        return equal_object_refs(d1, d2)

    if len(d1) != len(d2):
        return False

    for key, v1 in d1.items():
        if key not in d2:
            return False

        v2 = d2[key]
        if not equal_values(v1, v2):
            return False

    return True


def equal_values(v1, v2):
    """
    Checks whether types and content of two values are the same. In case of complex objects, the method might be
    called recursively.

    :param v1: first value
    :param v2: second value
    :return: True if types and content of passed values are equal. Otherwise, returns False.
    :rtype: bool
    """

    # string-like values might have same text but different types, so checking them separately
    if is_string(v1) and is_string(v2):
        return to_text(v1) == to_text(v2)

    if type(v1) != type(v2):
        return False
    value_type = type(v1)

    if value_type == list:
        return equal_lists(v1, v2)
    elif value_type == dict:
        return equal_dicts(v1, v2)
    else:
        return v1 == v2


def equal_objects(d1, d2):
    """
    Checks whether two objects are equal. Ignores special object properties (e.g. 'id', 'version') and
    properties with None and empty values. In case a property contains a reference to another object,
    only object identities (ids and types) are checked. Also, if an array field contains multiple references
    to the same object, duplicates are ignored when comparing objects.

    :type d1: dict
    :type d2: dict
    :return: True if passed objects and their properties are equal. Otherwise, returns False.
    """

    def prepare_data_for_comparison(d):
        d = dict((k, d[k]) for k in d.keys() if k not in NON_COMPARABLE_PROPERTIES and d[k])
        d = delete_ref_duplicates(d)
        return d

    d1 = prepare_data_for_comparison(d1)
    d2 = prepare_data_for_comparison(d2)
    return equal_dicts(d1, d2, compare_by_reference=False)


def delete_ref_duplicates(d):
    """
    Removes reference duplicates from array fields: if an array contains multiple items and some of
    them refer to the same object, only unique references are preserved (duplicates are removed).

    :param d: dict with data
    :type d: dict
    :return: dict without reference duplicates
    """

    def delete_ref_duplicates_from_list(refs):
        if all(type(i) == dict and is_object_ref(i) for i in refs):
            unique_refs = set()
            unique_list = list()
            for i in refs:
                key = (i['id'], i['type'])
                if key not in unique_refs:
                    unique_refs.add(key)
                    unique_list.append(i)
            return unique_list
        else:
            return refs

    if not d:
        return d

    modified_d = {}
    for k, v in iteritems(d):
        if type(v) == list:
            modified_d[k] = delete_ref_duplicates_from_list(v)
        elif type(v) == dict:
            modified_d[k] = delete_ref_duplicates(v)
        else:
            modified_d[k] = v
    return modified_d
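A short demonstration of the comparison semantics ties these helpers together. It assumes the functions above are importable from this module; the object contents are invented for the example.

```python
# Assumes equal_objects() from this module is importable; sample data is invented.
d1 = {'id': '1', 'version': 'a', 'name': 'obj',
      'members': [{'id': 'x', 'type': 'network'},
                  {'id': 'x', 'type': 'network'}]}       # duplicate reference
d2 = {'id': '2', 'version': 'b', 'name': 'obj',
      'members': [{'id': 'x', 'type': 'network', 'name': 'ignored-in-refs'}]}

# 'id' and 'version' are identity properties and are ignored; the duplicate
# reference in d1 is collapsed; nested refs are compared by (id, type) only.
assert equal_objects(d1, d2)
```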
@ -1,565 +0,0 @@
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import copy
from functools import partial

from ansible_collections.community.general.plugins.module_utils.network.ftd.common import HTTPMethod, equal_objects, FtdConfigurationError, \
    FtdServerError, ResponseParams, copy_identity_properties, FtdUnexpectedResponse
from ansible_collections.community.general.plugins.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError
from ansible.module_utils.six import iteritems

DEFAULT_PAGE_SIZE = 10
DEFAULT_OFFSET = 0

UNPROCESSABLE_ENTITY_STATUS = 422
INVALID_UUID_ERROR_MESSAGE = "Validation failed due to an invalid UUID"
DUPLICATE_NAME_ERROR_MESSAGE = "Validation failed due to a duplicate name"

MULTIPLE_DUPLICATES_FOUND_ERROR = (
    "Multiple objects matching specified filters are found. "
    "Please, define filters more precisely to match one object exactly."
)
DUPLICATE_ERROR = (
    "Cannot add a new object. "
    "An object with the same name but different parameters already exists."
)
ADD_OPERATION_NOT_SUPPORTED_ERROR = (
    "Cannot add a new object while executing an upsert request. "
    "Creation of objects with this type is not supported."
)

PATH_PARAMS_FOR_DEFAULT_OBJ = {'objId': 'default'}


class OperationNamePrefix:
    ADD = 'add'
    EDIT = 'edit'
    GET = 'get'
    DELETE = 'delete'
    UPSERT = 'upsert'


class QueryParams:
    FILTER = 'filter'


class ParamName:
    QUERY_PARAMS = 'query_params'
    PATH_PARAMS = 'path_params'
    DATA = 'data'
    FILTERS = 'filters'


class CheckModeException(Exception):
    pass


class FtdInvalidOperationNameError(Exception):
    def __init__(self, operation_name):
        super(FtdInvalidOperationNameError, self).__init__(operation_name)
        self.operation_name = operation_name


class OperationChecker(object):

    @classmethod
    def is_add_operation(cls, operation_name, operation_spec):
        """
        Check if the operation defined by 'operation_name' is an add-object operation according to 'operation_spec'.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :param operation_spec: specification of the operation being called by the user
        :type operation_spec: dict
        :return: True if the called operation is an add-object operation, otherwise False
        :rtype: bool
        """
        # Some endpoints have non-CRUD operations, so checking the operation name is required in addition to the HTTP method
        return operation_name.startswith(OperationNamePrefix.ADD) and is_post_request(operation_spec)

    @classmethod
    def is_edit_operation(cls, operation_name, operation_spec):
        """
        Check if the operation defined by 'operation_name' is an edit-object operation according to 'operation_spec'.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :param operation_spec: specification of the operation being called by the user
        :type operation_spec: dict
        :return: True if the called operation is an edit-object operation, otherwise False
        :rtype: bool
        """
        # Some endpoints have non-CRUD operations, so checking the operation name is required in addition to the HTTP method
        return operation_name.startswith(OperationNamePrefix.EDIT) and is_put_request(operation_spec)

    @classmethod
    def is_delete_operation(cls, operation_name, operation_spec):
        """
        Check if the operation defined by 'operation_name' is a delete-object operation according to 'operation_spec'.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :param operation_spec: specification of the operation being called by the user
        :type operation_spec: dict
        :return: True if the called operation is a delete-object operation, otherwise False
        :rtype: bool
        """
        # Some endpoints have non-CRUD operations, so checking the operation name is required in addition to the HTTP method
        return operation_name.startswith(OperationNamePrefix.DELETE) \
            and operation_spec[OperationField.METHOD] == HTTPMethod.DELETE

    @classmethod
    def is_get_list_operation(cls, operation_name, operation_spec):
        """
        Check if the operation defined by 'operation_name' is a get-list-of-objects operation according to 'operation_spec'.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :param operation_spec: specification of the operation being called by the user
        :type operation_spec: dict
        :return: True if the called operation is a get-list-of-objects operation, otherwise False
        :rtype: bool
        """
        return operation_spec[OperationField.METHOD] == HTTPMethod.GET \
            and operation_spec[OperationField.RETURN_MULTIPLE_ITEMS]

    @classmethod
    def is_get_operation(cls, operation_name, operation_spec):
        """
        Check if the operation defined by 'operation_name' is a get-object operation according to 'operation_spec'.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :param operation_spec: specification of the operation being called by the user
        :type operation_spec: dict
        :return: True if the called operation is a get-object operation, otherwise False
        :rtype: bool
        """
        return operation_spec[OperationField.METHOD] == HTTPMethod.GET \
            and not operation_spec[OperationField.RETURN_MULTIPLE_ITEMS]

    @classmethod
    def is_upsert_operation(cls, operation_name):
        """
        Check if the operation defined by 'operation_name' is an upsert-object operation according to 'operation_name'.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :return: True if the called operation is an upsert-object operation, otherwise False
        :rtype: bool
        """
        return operation_name.startswith(OperationNamePrefix.UPSERT)

    @classmethod
    def is_find_by_filter_operation(cls, operation_name, params, operation_spec):
        """
        Checks whether the called operation is 'find by filter'. This operation fetches all objects and finds
        the matching ones by the given filter. As filtering is done on the client side, this operation should be used
        only when the selected filters are not implemented on the server side.

        :param operation_name: name of the operation being called by the user
        :type operation_name: str
        :param operation_spec: specification of the operation being called by the user
        :type operation_spec: dict
        :param params: module parameters, which should contain 'filters'
        :type params: dict
        :return: True if the called operation is find by filter, otherwise False
        :rtype: bool
        """
        is_get_list = cls.is_get_list_operation(operation_name, operation_spec)
        return is_get_list and ParamName.FILTERS in params and params[ParamName.FILTERS]

    @classmethod
    def is_upsert_operation_supported(cls, operations):
        """
        Checks if all operations required for an upsert-object operation are defined in 'operations'.

        :param operations: specification of the operations supported by the model
        :type operations: dict
        :return: True if all operations required for upsert are supported, otherwise False
        :rtype: bool
        """
        has_edit_op = next((name for name, spec in iteritems(operations) if cls.is_edit_operation(name, spec)), None)
        has_get_list_op = next((name for name, spec in iteritems(operations)
                                if cls.is_get_list_operation(name, spec)), None)
        return has_edit_op and has_get_list_op


class BaseConfigurationResource(object):

    def __init__(self, conn, check_mode=False):
        self._conn = conn
        self.config_changed = False
        self._operation_spec_cache = {}
        self._models_operations_specs_cache = {}
        self._check_mode = check_mode
        self._operation_checker = OperationChecker
        self._system_info = None

    def execute_operation(self, op_name, params):
        """
        Allow the user to request execution of simple operations (natively supported by the API provider) as well as
        complex operations (implemented as a set of simple operations).

        :param op_name: name of the operation being called by the user
        :type op_name: str
        :param params: definition of the params that the operation should be executed with
        :type params: dict
        :return: Result of the operation being executed
        :rtype: dict
        """
        if self._operation_checker.is_upsert_operation(op_name):
            return self.upsert_object(op_name, params)
        else:
            return self.crud_operation(op_name, params)

    def crud_operation(self, op_name, params):
        """
        Allow the user to request execution of simple operations (natively supported by the API provider) only.

        :param op_name: name of the operation being called by the user
        :type op_name: str
        :param params: definition of the params that the operation should be executed with
        :type params: dict
        :return: Result of the operation being executed
        :rtype: dict
        """
        op_spec = self.get_operation_spec(op_name)
        if op_spec is None:
            raise FtdInvalidOperationNameError(op_name)

        if self._operation_checker.is_add_operation(op_name, op_spec):
            resp = self.add_object(op_name, params)
        elif self._operation_checker.is_edit_operation(op_name, op_spec):
            resp = self.edit_object(op_name, params)
        elif self._operation_checker.is_delete_operation(op_name, op_spec):
            resp = self.delete_object(op_name, params)
        elif self._operation_checker.is_find_by_filter_operation(op_name, params, op_spec):
            resp = list(self.get_objects_by_filter(op_name, params))
        else:
            resp = self.send_general_request(op_name, params)
        return resp

    def get_operation_spec(self, operation_name):
        if operation_name not in self._operation_spec_cache:
            self._operation_spec_cache[operation_name] = self._conn.get_operation_spec(operation_name)
        return self._operation_spec_cache[operation_name]

    def get_operation_specs_by_model_name(self, model_name):
        if model_name not in self._models_operations_specs_cache:
            model_op_specs = self._conn.get_operation_specs_by_model_name(model_name)
            self._models_operations_specs_cache[model_name] = model_op_specs
            for op_name, op_spec in iteritems(model_op_specs):
                self._operation_spec_cache.setdefault(op_name, op_spec)
        return self._models_operations_specs_cache[model_name]

    def get_objects_by_filter(self, operation_name, params):

        def match_filters(filter_params, obj):
            for k, v in iteritems(filter_params):
                if k not in obj or obj[k] != v:
                    return False
            return True

        dummy, query_params, path_params = _get_user_params(params)
        # copy required params to avoid mutation of passed `params` dict
        url_params = {ParamName.QUERY_PARAMS: dict(query_params), ParamName.PATH_PARAMS: dict(path_params)}

        filters = params.get(ParamName.FILTERS) or {}
        if QueryParams.FILTER not in url_params[ParamName.QUERY_PARAMS] and 'name' in filters:
            # most endpoints only support filtering by name, so remaining `filters` are applied on returned objects
            url_params[ParamName.QUERY_PARAMS][QueryParams.FILTER] = self._stringify_name_filter(filters)

        item_generator = iterate_over_pageable_resource(
            partial(self.send_general_request, operation_name=operation_name), url_params
        )
        return (i for i in item_generator if match_filters(filters, i))

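Only the `name` filter is pushed down to the server; every other filter key is applied client-side by `match_filters`, which requires each filter key to be present in the object with an equal value. A standalone restatement of that check, with invented objects:

```python
# Condensed, standalone restatement of the match_filters check above.
def match_filters(filter_params, obj):
    return all(k in obj and obj[k] == v for k, v in filter_params.items())


objects = [
    {'name': 'inside', 'subType': 'NETWORK'},
    {'name': 'inside', 'subType': 'HOST'},
]
filters = {'name': 'inside', 'subType': 'HOST'}

# Only the object matching every filter key survives.
print([o for o in objects if match_filters(filters, o)])  # [{'name': 'inside', 'subType': 'HOST'}]
```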
    def _stringify_name_filter(self, filters):
        build_version = self.get_build_version()
        if build_version >= '6.4.0':
            return "fts~%s" % filters['name']
        return "name:%s" % filters['name']

    def _fetch_system_info(self):
        if not self._system_info:
            params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ}
            self._system_info = self.send_general_request('getSystemInformation', params)

        return self._system_info

    def get_build_version(self):
        system_info = self._fetch_system_info()
        return system_info['databaseInfo']['buildVersion']

    def add_object(self, operation_name, params):
        def is_duplicate_name_error(err):
            return err.code == UNPROCESSABLE_ENTITY_STATUS and DUPLICATE_NAME_ERROR_MESSAGE in str(err)

        try:
            return self.send_general_request(operation_name, params)
        except FtdServerError as e:
            if is_duplicate_name_error(e):
                return self._check_equality_with_existing_object(operation_name, params, e)
            else:
                raise e

    def _check_equality_with_existing_object(self, operation_name, params, e):
        """
        Looks for an existing object that caused the "object duplicate" error and
        checks whether it corresponds to the one specified in `params`.

        In case a single object is found and it is equal to the one we are trying
        to create, the existing object is returned.

        When the existing object is not equal to the object being created or
        several objects are returned, an exception is raised.
        """
        model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
        existing_obj = self._find_object_matching_params(model_name, params)

        if existing_obj is not None:
            if equal_objects(existing_obj, params[ParamName.DATA]):
                return existing_obj
            else:
                raise FtdConfigurationError(DUPLICATE_ERROR, existing_obj)

        raise e

    def _find_object_matching_params(self, model_name, params):
        get_list_operation = self._find_get_list_operation(model_name)
        if not get_list_operation:
            return None

        data = params[ParamName.DATA]
        if not params.get(ParamName.FILTERS):
            params[ParamName.FILTERS] = {'name': data['name']}

        obj = None
        filtered_objs = self.get_objects_by_filter(get_list_operation, params)

        for i, obj in enumerate(filtered_objs):
            if i > 0:
                raise FtdConfigurationError(MULTIPLE_DUPLICATES_FOUND_ERROR)

        return obj

    def _find_get_list_operation(self, model_name):
        operations = self.get_operation_specs_by_model_name(model_name) or {}
        return next((
            op for op, op_spec in operations.items()
            if self._operation_checker.is_get_list_operation(op, op_spec)), None)

    def _find_get_operation(self, model_name):
        operations = self.get_operation_specs_by_model_name(model_name) or {}
        return next((
            op for op, op_spec in operations.items()
            if self._operation_checker.is_get_operation(op, op_spec)), None)

    def delete_object(self, operation_name, params):
        def is_invalid_uuid_error(err):
            return err.code == UNPROCESSABLE_ENTITY_STATUS and INVALID_UUID_ERROR_MESSAGE in str(err)

        try:
            return self.send_general_request(operation_name, params)
        except FtdServerError as e:
            if is_invalid_uuid_error(e):
                return {'status': 'Referenced object does not exist'}
            else:
                raise e

    def edit_object(self, operation_name, params):
        data, dummy, path_params = _get_user_params(params)

        model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
        get_operation = self._find_get_operation(model_name)

        if get_operation:
            existing_object = self.send_general_request(get_operation, {ParamName.PATH_PARAMS: path_params})
            if not existing_object:
                raise FtdConfigurationError('Referenced object does not exist')
            elif equal_objects(existing_object, data):
                return existing_object

        return self.send_general_request(operation_name, params)

    def send_general_request(self, operation_name, params):
        def stop_if_check_mode():
            if self._check_mode:
                raise CheckModeException()

        self.validate_params(operation_name, params)
        stop_if_check_mode()

        data, query_params, path_params = _get_user_params(params)
        op_spec = self.get_operation_spec(operation_name)
        url, method = op_spec[OperationField.URL], op_spec[OperationField.METHOD]

        return self._send_request(url, method, data, path_params, query_params)

    def _send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None):
        def raise_for_failure(resp):
            if not resp[ResponseParams.SUCCESS]:
                raise FtdServerError(resp[ResponseParams.RESPONSE], resp[ResponseParams.STATUS_CODE])

        response = self._conn.send_request(url_path=url_path, http_method=http_method, body_params=body_params,
                                           path_params=path_params, query_params=query_params)
        raise_for_failure(response)
        if http_method != HTTPMethod.GET:
            self.config_changed = True
        return response[ResponseParams.RESPONSE]

    def validate_params(self, operation_name, params):
        report = {}
        op_spec = self.get_operation_spec(operation_name)
        data, query_params, path_params = _get_user_params(params)

        def validate(validation_method, field_name, user_params):
            key = 'Invalid %s provided' % field_name
            try:
                is_valid, validation_report = validation_method(operation_name, user_params)
                if not is_valid:
                    report[key] = validation_report
            except Exception as e:
                report[key] = str(e)
            return report

        validate(self._conn.validate_query_params, ParamName.QUERY_PARAMS, query_params)
        validate(self._conn.validate_path_params, ParamName.PATH_PARAMS, path_params)
        if is_post_request(op_spec) or is_put_request(op_spec):
            validate(self._conn.validate_data, ParamName.DATA, data)

        if report:
            raise ValidationError(report)

    @staticmethod
    def _get_operation_name(checker, operations):
        return next((op_name for op_name, op_spec in iteritems(operations) if checker(op_name, op_spec)), None)

    def _add_upserted_object(self, model_operations, params):
        add_op_name = self._get_operation_name(self._operation_checker.is_add_operation, model_operations)
        if not add_op_name:
            raise FtdConfigurationError(ADD_OPERATION_NOT_SUPPORTED_ERROR)
        return self.add_object(add_op_name, params)

    def _edit_upserted_object(self, model_operations, existing_object, params):
        edit_op_name = self._get_operation_name(self._operation_checker.is_edit_operation, model_operations)
        _set_default(params, 'path_params', {})
        _set_default(params, 'data', {})

        params['path_params']['objId'] = existing_object['id']
        copy_identity_properties(existing_object, params['data'])
        return self.edit_object(edit_op_name, params)

    def upsert_object(self, op_name, params):
        """
        Updates an object if it already exists, or tries to create a new one if there is no
        such object. If multiple objects match the filter criteria, or the add operation is
        not supported, an exception is raised.

        :param op_name: upsert operation name
        :type op_name: str
        :param params: params that the upsert operation should be executed with
        :type params: dict
        :return: upserted object representation
        :rtype: dict
        """

        def extract_and_validate_model():
            model = op_name[len(OperationNamePrefix.UPSERT):]
            if not self._conn.get_model_spec(model):
                raise FtdInvalidOperationNameError(op_name)
            return model

        model_name = extract_and_validate_model()
        model_operations = self.get_operation_specs_by_model_name(model_name)

        if not self._operation_checker.is_upsert_operation_supported(model_operations):
            raise FtdInvalidOperationNameError(op_name)

        existing_obj = self._find_object_matching_params(model_name, params)
        if existing_obj:
            equal_to_existing_obj = equal_objects(existing_obj, params[ParamName.DATA])
            return existing_obj if equal_to_existing_obj \
                else self._edit_upserted_object(model_operations, existing_obj, params)
        else:
            return self._add_upserted_object(model_operations, params)

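The upsert flow reduces to a three-way decision: add when nothing matches, return the existing object unchanged when it already equals the requested state, and edit otherwise. A condensed standalone sketch of that decision tree follows; the stub functions below stand in for the real API lookups and the real `equal_objects` comparison, and all data is invented.

```python
# Standalone sketch of the upsert decision flow; stubs replace the real API calls.
def find_existing(name):
    """Stands in for _find_object_matching_params(); pretends the object exists."""
    return {'id': '42', 'type': 'networkobject', 'name': name, 'value': '10.0.0.0/8'}


def equal_objects(a, b):
    """Stands in for the full comparison in common.py; checks one field only."""
    return a.get('value') == b.get('value')


def upsert(data):
    existing = find_existing(data['name'])
    if existing is None:
        return 'add', data          # no match: create a new object
    if equal_objects(existing, data):
        return 'noop', existing     # identical: return the existing object
    return 'edit', existing         # found but different: edit in place


print(upsert({'name': 'corp-net', 'value': '10.0.0.0/8'})[0])      # noop
print(upsert({'name': 'corp-net', 'value': '172.16.0.0/12'})[0])   # edit
```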
def _set_default(params, field_name, value):
    if field_name not in params or params[field_name] is None:
        params[field_name] = value


def is_post_request(operation_spec):
    return operation_spec[OperationField.METHOD] == HTTPMethod.POST


def is_put_request(operation_spec):
    return operation_spec[OperationField.METHOD] == HTTPMethod.PUT


def _get_user_params(params):
    return params.get(ParamName.DATA) or {}, params.get(ParamName.QUERY_PARAMS) or {}, params.get(
        ParamName.PATH_PARAMS) or {}


def iterate_over_pageable_resource(resource_func, params):
    """
    A generator function that iterates over a resource that supports pagination and lazily returns present items
    one by one.

    :param resource_func: function that receives a `params` argument and returns a page of objects
    :type resource_func: callable
    :param params: initial dictionary of parameters that will be passed to the resource_func.
        Should contain `query_params` inside.
    :type params: dict
    :return: an iterator containing returned items
    :rtype: iterator of dict
    """
    # creating a copy not to mutate the passed dict
    params = copy.deepcopy(params)
    params[ParamName.QUERY_PARAMS].setdefault('limit', DEFAULT_PAGE_SIZE)
    params[ParamName.QUERY_PARAMS].setdefault('offset', DEFAULT_OFFSET)
    limit = int(params[ParamName.QUERY_PARAMS]['limit'])

    def received_less_items_than_requested(items_in_response, items_expected):
        if items_in_response == items_expected:
            return False
        elif items_in_response < items_expected:
            return True

        raise FtdUnexpectedResponse(
            "Get List of Objects Response from the server contains more objects than requested. "
            "There are {0} item(s) in the response while {1} were requested".format(
                items_in_response, items_expected)
        )

    while True:
        result = resource_func(params=params)

        for item in result['items']:
            yield item

        if received_less_items_than_requested(len(result['items']), limit):
            break

        # creating a copy not to mutate the existing dict
        params = copy.deepcopy(params)
        query_params = params[ParamName.QUERY_PARAMS]
        query_params['offset'] = int(query_params['offset']) + limit
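The pagination contract is: request pages of `limit` items, bump `offset` by `limit` after each full page, and stop on the first short page. A self-contained, runnable sketch of that same contract against a fake in-memory "server":

```python
# Standalone sketch of the pagination contract above, with a fake resource.
def fake_resource(params):
    data = list(range(25))                          # pretend server-side collection
    offset = int(params['query_params']['offset'])
    limit = int(params['query_params']['limit'])
    return {'items': data[offset:offset + limit]}


def iterate(resource_func, limit=10):
    offset = 0
    while True:
        page = resource_func({'query_params': {'offset': offset, 'limit': limit}})['items']
        for item in page:
            yield item
        if len(page) < limit:                       # short page: collection exhausted
            return
        offset += limit


print(len(list(iterate(fake_resource))))            # 25 -- three requests: 10 + 10 + 5
```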
@ -1,138 +0,0 @@
# Copyright (c) 2019 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from ansible.module_utils.six.moves.urllib.parse import urlparse

try:
    from kick.device2.ftd5500x.actions.ftd5500x import Ftd5500x
    from kick.device2.kp.actions import Kp

    HAS_KICK = True
except ImportError:
    HAS_KICK = False


def assert_kick_is_installed(module):
    if not HAS_KICK:
        module.fail_json(msg='Firepower-kickstart library is required to run this module. '
                             'Please, install the library with `pip install firepower-kickstart` '
                             'command and run the playbook again.')


class FtdModel:
    FTD_ASA5506_X = 'Cisco ASA5506-X Threat Defense'
    FTD_ASA5508_X = 'Cisco ASA5508-X Threat Defense'
    FTD_ASA5516_X = 'Cisco ASA5516-X Threat Defense'

    FTD_2110 = 'Cisco Firepower 2110 Threat Defense'
    FTD_2120 = 'Cisco Firepower 2120 Threat Defense'
    FTD_2130 = 'Cisco Firepower 2130 Threat Defense'
    FTD_2140 = 'Cisco Firepower 2140 Threat Defense'

    @classmethod
    def supported_models(cls):
        return [getattr(cls, item) for item in dir(cls) if item.startswith('FTD_')]


class FtdPlatformFactory(object):

    @staticmethod
    def create(model, module_params):
        for cls in AbstractFtdPlatform.__subclasses__():
            if cls.supports_ftd_model(model):
                return cls(module_params)
        raise ValueError("FTD model '%s' is not supported by this module." % model)


class AbstractFtdPlatform(object):
    PLATFORM_MODELS = []

    def install_ftd_image(self, params):
        raise NotImplementedError('The method should be overridden in a subclass')

    @classmethod
    def supports_ftd_model(cls, model):
        return model in cls.PLATFORM_MODELS

    @staticmethod
    def parse_rommon_file_location(rommon_file_location):
        rommon_url = urlparse(rommon_file_location)
        if rommon_url.scheme != 'tftp':
            raise ValueError('The ROMMON image must be downloaded from a TFTP server; other protocols are not supported.')
        return rommon_url.netloc, rommon_url.path

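`parse_rommon_file_location` simply splits a `tftp://` URL into the server host and the image path, rejecting any other scheme. A runnable restatement using the standard library directly (the URL below is invented):

```python
from urllib.parse import urlparse  # stdlib equivalent of the six.moves import above


def parse_rommon_file_location(rommon_file_location):
    rommon_url = urlparse(rommon_file_location)
    if rommon_url.scheme != 'tftp':
        raise ValueError('The ROMMON image must be downloaded from a TFTP server; '
                         'other protocols are not supported.')
    return rommon_url.netloc, rommon_url.path


# Invented example URL:
print(parse_rommon_file_location('tftp://10.0.0.1/images/ftd-boot-9.10.2.0.lfbff'))
# ('10.0.0.1', '/images/ftd-boot-9.10.2.0.lfbff')
```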
class Ftd2100Platform(AbstractFtdPlatform):
    PLATFORM_MODELS = [FtdModel.FTD_2110, FtdModel.FTD_2120, FtdModel.FTD_2130, FtdModel.FTD_2140]

    def __init__(self, params):
        self._ftd = Kp(hostname=params["device_hostname"],
                       login_username=params["device_username"],
                       login_password=params["device_password"],
                       sudo_password=params.get("device_sudo_password") or params["device_password"])

    def install_ftd_image(self, params):
        line = self._ftd.ssh_console(ip=params["console_ip"],
                                     port=params["console_port"],
                                     username=params["console_username"],
                                     password=params["console_password"])

        try:
            rommon_server, rommon_path = self.parse_rommon_file_location(params["rommon_file_location"])
            line.baseline_fp2k_ftd(tftp_server=rommon_server,
                                   rommon_file=rommon_path,
                                   uut_hostname=params["device_hostname"],
                                   uut_username=params["device_username"],
                                   uut_password=params.get("device_new_password") or params["device_password"],
                                   uut_ip=params["device_ip"],
                                   uut_netmask=params["device_netmask"],
                                   uut_gateway=params["device_gateway"],
                                   dns_servers=params["dns_server"],
                                   search_domains=params["search_domains"],
                                   fxos_url=params["image_file_location"],
                                   ftd_version=params["image_version"])
        finally:
            line.disconnect()


class FtdAsa5500xPlatform(AbstractFtdPlatform):
    PLATFORM_MODELS = [FtdModel.FTD_ASA5506_X, FtdModel.FTD_ASA5508_X, FtdModel.FTD_ASA5516_X]

    def __init__(self, params):
        self._ftd = Ftd5500x(hostname=params["device_hostname"],
                             login_password=params["device_password"],
                             sudo_password=params.get("device_sudo_password") or params["device_password"])

    def install_ftd_image(self, params):
        line = self._ftd.ssh_console(ip=params["console_ip"],
                                     port=params["console_port"],
                                     username=params["console_username"],
                                     password=params["console_password"])
        try:
            rommon_server, rommon_path = self.parse_rommon_file_location(params["rommon_file_location"])
            line.rommon_to_new_image(rommon_tftp_server=rommon_server,
                                     rommon_image=rommon_path,
                                     pkg_image=params["image_file_location"],
                                     uut_ip=params["device_ip"],
                                     uut_netmask=params["device_netmask"],
                                     uut_gateway=params["device_gateway"],
                                     dns_server=params["dns_server"],
                                     search_domains=params["search_domains"],
                                     hostname=params["device_hostname"])
        finally:
            line.disconnect()
@ -1,638 +0,0 @@
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from ansible_collections.community.general.plugins.module_utils.network.ftd.common import HTTPMethod
from ansible.module_utils.six import integer_types, string_types, iteritems

FILE_MODEL_NAME = '_File'
SUCCESS_RESPONSE_CODE = '200'
DELETE_PREFIX = 'delete'


class OperationField:
    URL = 'url'
    METHOD = 'method'
    PARAMETERS = 'parameters'
    MODEL_NAME = 'modelName'
    DESCRIPTION = 'description'
    RETURN_MULTIPLE_ITEMS = 'returnMultipleItems'
    TAGS = "tags"


class SpecProp:
    DEFINITIONS = 'definitions'
    OPERATIONS = 'operations'
    MODELS = 'models'
    MODEL_OPERATIONS = 'model_operations'


class PropName:
    ENUM = 'enum'
    TYPE = 'type'
    REQUIRED = 'required'
    INVALID_TYPE = 'invalid_type'
    REF = '$ref'
    ALL_OF = 'allOf'
    BASE_PATH = 'basePath'
    PATHS = 'paths'
    OPERATION_ID = 'operationId'
    SCHEMA = 'schema'
    ITEMS = 'items'
    PROPERTIES = 'properties'
    RESPONSES = 'responses'
    NAME = 'name'
    DESCRIPTION = 'description'


class PropType:
    STRING = 'string'
    BOOLEAN = 'boolean'
    INTEGER = 'integer'
    NUMBER = 'number'
    OBJECT = 'object'
    ARRAY = 'array'
    FILE = 'file'


class OperationParams:
    PATH = 'path'
    QUERY = 'query'


class QueryParams:
    FILTER = 'filter'


class PathParams:
    OBJ_ID = 'objId'


def _get_model_name_from_url(schema_ref):
    path = schema_ref.split('/')
    return path[-1]

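The helper just takes the last segment of a swagger `$ref` path, which is the model name. A one-line runnable check:

```python
def _get_model_name_from_url(schema_ref):
    # The model name is the last path segment of the $ref.
    return schema_ref.split('/')[-1]


print(_get_model_name_from_url('#/definitions/NetworkObject'))  # NetworkObject
```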
class IllegalArgumentException(ValueError):
    """
    Exception raised when a function parameter is:
    - not passed at all
    - an empty string
    - of the wrong type
    """
    pass


class ValidationError(ValueError):
    pass


class FdmSwaggerParser:
    _definitions = None
    _base_path = None

    def parse_spec(self, spec, docs=None):
        """
        This method simplifies a swagger format, resolves a model name for each operation, and adds documentation for
        each operation and model if it is provided.

        :param spec: An API specification in the swagger format, see
            <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md>
        :type spec: dict
        :param docs: A documentation map containing descriptions for models, operations and operation parameters.
        :type docs: dict
        :rtype: dict
        :return:
        Ex.
            The models field contains model definitions from swagger, see
            <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#definitions>
            {
                'models': {
                    'model_name': {...},
                    ...
                },
                'operations': {
                    'operation_name': {
                        'method': 'get',  # post, put, delete
                        'url': '/api/fdm/v2/object/networks',  # url already contains a value from `basePath`
                        'modelName': 'NetworkObject',  # a link to the model from 'models';
                                                       # None - for a delete operation or when we have no information;
                                                       # '_File' - if an endpoint works with files
                        'returnMultipleItems': False,  # shows if the operation returns a single item or an item list
                        'parameters': {
                            'path': {
                                'param_name': {
                                    'type': 'string',  # integer, boolean, number
                                    'required': True  # or False
                                },
                                ...
                            },
                            'query': {
                                'param_name': {
                                    'type': 'string',  # integer, boolean, number
                                    'required': True  # or False
                                },
                                ...
                            }
                        }
                    },
                    ...
                },
                'model_operations': {
                    'model_name': {  # a list of operations available for the current model
                        'operation_name': {
                            ...  # the same as in the operations section
                        },
                        ...
                    },
                    ...
                }
            }
        """
        self._definitions = spec[SpecProp.DEFINITIONS]
        self._base_path = spec[PropName.BASE_PATH]
        operations = self._get_operations(spec)

        if docs:
            operations = self._enrich_operations_with_docs(operations, docs)
            self._definitions = self._enrich_definitions_with_docs(self._definitions, docs)

        return {
            SpecProp.MODELS: self._definitions,
            SpecProp.OPERATIONS: operations,
            SpecProp.MODEL_OPERATIONS: self._get_model_operations(operations)
        }

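A minimal worked example of the parse step follows. It assumes `FdmSwaggerParser` above is importable; the swagger spec below is a toy with a single GET endpoint, kept just large enough for the parser to resolve the model name and the multiple-items flag.

```python
# Toy swagger spec, invented for the example; just enough for parse_spec().
spec = {
    'basePath': '/api/fdm/v2',
    'definitions': {'NetworkObject': {'type': 'object'}},
    'paths': {
        '/object/networks': {
            'get': {
                'operationId': 'getNetworkObjectList',
                'responses': {
                    '200': {
                        'schema': {
                            'type': 'object',
                            'properties': {
                                'items': {
                                    'type': 'array',
                                    'items': {'$ref': '#/definitions/NetworkObject'},
                                },
                            },
                        },
                    },
                },
            },
        },
    },
}

result = FdmSwaggerParser().parse_spec(spec)
print(result['operations']['getNetworkObjectList'])
# {'method': 'get', 'url': '/api/fdm/v2/object/networks',
#  'modelName': 'NetworkObject', 'returnMultipleItems': True, 'tags': []}
```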
@property
|
||||
def base_path(self):
|
||||
return self._base_path
|
||||
|
||||
def _get_model_operations(self, operations):
|
||||
model_operations = {}
|
||||
for operations_name, params in iteritems(operations):
|
||||
model_name = params[OperationField.MODEL_NAME]
|
||||
model_operations.setdefault(model_name, {})[operations_name] = params
|
||||
return model_operations
|
||||
|
||||
def _get_operations(self, spec):
|
||||
paths_dict = spec[PropName.PATHS]
|
||||
operations_dict = {}
|
||||
for url, operation_params in iteritems(paths_dict):
|
||||
for method, params in iteritems(operation_params):
|
||||
operation = {
|
||||
OperationField.METHOD: method,
|
||||
OperationField.URL: self._base_path + url,
|
||||
OperationField.MODEL_NAME: self._get_model_name(method, params),
|
||||
OperationField.RETURN_MULTIPLE_ITEMS: self._return_multiple_items(params),
|
||||
OperationField.TAGS: params.get(OperationField.TAGS, [])
|
||||
}
|
||||
if OperationField.PARAMETERS in params:
|
||||
operation[OperationField.PARAMETERS] = self._get_rest_params(params[OperationField.PARAMETERS])
|
||||
|
||||
operation_id = params[PropName.OPERATION_ID]
|
||||
operations_dict[operation_id] = operation
|
||||
return operations_dict
|
||||
|
||||
def _enrich_operations_with_docs(self, operations, docs):
|
||||
def get_operation_docs(op):
|
||||
op_url = op[OperationField.URL][len(self._base_path):]
|
||||
return docs[PropName.PATHS].get(op_url, {}).get(op[OperationField.METHOD], {})
|
||||
|
||||
for operation in operations.values():
|
||||
operation_docs = get_operation_docs(operation)
|
||||
operation[OperationField.DESCRIPTION] = operation_docs.get(PropName.DESCRIPTION, '')
|
||||
|
||||
if OperationField.PARAMETERS in operation:
|
||||
param_descriptions = dict((
|
||||
(p[PropName.NAME], p[PropName.DESCRIPTION])
|
||||
for p in operation_docs.get(OperationField.PARAMETERS, {})
|
||||
))
|
||||
|
||||
for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.PATH].items():
|
||||
params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
|
||||
|
||||
for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.QUERY].items():
|
||||
params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
|
||||
|
||||
return operations
|
||||
|
||||
def _enrich_definitions_with_docs(self, definitions, docs):
|
||||
for model_name, model_def in definitions.items():
|
||||
model_docs = docs[SpecProp.DEFINITIONS].get(model_name, {})
|
||||
model_def[PropName.DESCRIPTION] = model_docs.get(PropName.DESCRIPTION, '')
|
||||
for prop_name, prop_spec in model_def.get(PropName.PROPERTIES, {}).items():
|
||||
prop_spec[PropName.DESCRIPTION] = model_docs.get(PropName.PROPERTIES, {}).get(prop_name, '')
|
||||
prop_spec[PropName.REQUIRED] = prop_name in model_def.get(PropName.REQUIRED, [])
|
||||
return definitions
|
||||
|
||||
def _get_model_name(self, method, params):
|
||||
if method == HTTPMethod.GET:
|
||||
return self._get_model_name_from_responses(params)
|
||||
elif method == HTTPMethod.POST or method == HTTPMethod.PUT:
|
||||
return self._get_model_name_for_post_put_requests(params)
|
||||
elif method == HTTPMethod.DELETE:
|
||||
return self._get_model_name_from_delete_operation(params)
|
||||
else:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _return_multiple_items(op_params):
|
||||
"""
|
||||
Defines if the operation returns one item or a list of items.
|
||||
|
||||
:param op_params: operation specification
|
||||
:return: True if the operation returns a list of items, otherwise False
|
||||
"""
|
||||
try:
|
||||
schema = op_params[PropName.RESPONSES][SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
|
||||
return PropName.ITEMS in schema[PropName.PROPERTIES]
|
||||
except KeyError:
|
||||
return False
    def _get_model_name_from_delete_operation(self, params):
        operation_id = params[PropName.OPERATION_ID]
        if operation_id.startswith(DELETE_PREFIX):
            model_name = operation_id[len(DELETE_PREFIX):]
            if model_name in self._definitions:
                return model_name
        return None

    def _get_model_name_for_post_put_requests(self, params):
        model_name = None
        if OperationField.PARAMETERS in params:
            body_param_dict = self._get_body_param_from_parameters(params[OperationField.PARAMETERS])
            if body_param_dict:
                schema_ref = body_param_dict[PropName.SCHEMA][PropName.REF]
                model_name = self._get_model_name_byschema_ref(schema_ref)
        if model_name is None:
            model_name = self._get_model_name_from_responses(params)
        return model_name

    @staticmethod
    def _get_body_param_from_parameters(params):
        return next((param for param in params if param['in'] == 'body'), None)

    def _get_model_name_from_responses(self, params):
        responses = params[PropName.RESPONSES]
        if SUCCESS_RESPONSE_CODE in responses:
            response = responses[SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
            if PropName.REF in response:
                return self._get_model_name_byschema_ref(response[PropName.REF])
            elif PropName.PROPERTIES in response:
                ref = response[PropName.PROPERTIES][PropName.ITEMS][PropName.ITEMS][PropName.REF]
                return self._get_model_name_byschema_ref(ref)
            elif (PropName.TYPE in response) and response[PropName.TYPE] == PropType.FILE:
                return FILE_MODEL_NAME
        else:
            return None

    def _get_rest_params(self, params):
        path = {}
        query = {}
        operation_param = {
            OperationParams.PATH: path,
            OperationParams.QUERY: query
        }
        for param in params:
            in_param = param['in']
            if in_param == OperationParams.QUERY:
                query[param[PropName.NAME]] = self._simplify_param_def(param)
            elif in_param == OperationParams.PATH:
                path[param[PropName.NAME]] = self._simplify_param_def(param)
        return operation_param

    @staticmethod
    def _simplify_param_def(param):
        return {
            PropName.TYPE: param[PropName.TYPE],
            PropName.REQUIRED: param[PropName.REQUIRED]
        }

    def _get_model_name_byschema_ref(self, schema_ref):
        model_name = _get_model_name_from_url(schema_ref)
        model_def = self._definitions[model_name]
        if PropName.ALL_OF in model_def:
            return self._get_model_name_byschema_ref(model_def[PropName.ALL_OF][0][PropName.REF])
        else:
            return model_name


class FdmSwaggerValidator:
    def __init__(self, spec):
        """
        :param spec: dict
                    data from FdmSwaggerParser().parse_spec()
        """
        self._operations = spec[SpecProp.OPERATIONS]
        self._models = spec[SpecProp.MODELS]

    def validate_data(self, operation_name, data=None):
        """
        Validate data for POST and PUT requests.
        :param operation_name: string
                            The value must be a non-empty string.
                            The operation name is used to get a model specification.
        :param data: dict
                            The value must be in the format that the model (from the operation) expects.
        :rtype: (bool, string|dict)
        :return:
            (True, None) - if data is valid
            Invalid:
            (False, {
                'required': [  # list of fields that are required but were not present in the data
                    'field_name',
                    'parent.field_name',  # when a nested field is omitted
                    'parent.list[2].field_name'  # when data is an array and one of the fields is omitted
                ],
                'invalid_type': [  # list of fields with invalid data
                    {
                        'path': 'objId',  # field name or path to the field, e.g. objects[3].id, parent.name
                        'expected_type': 'string',  # expected type, e.g. 'object', 'array', 'string', 'integer',
                                                    # 'boolean', 'number'
                        'actually_value': 1  # the value that the user passed
                    }
                ]
            })
        :raises IllegalArgumentException
            'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The data parameter must be a dict' if data is neither a dict nor None
            '{operation_name} operation is not supported' if the spec does not contain the operation
        """
        if data is None:
            data = {}

        self._check_validate_data_params(data, operation_name)

        operation = self._operations[operation_name]
        model = self._models[operation[OperationField.MODEL_NAME]]
        status = self._init_report()

        self._validate_object(status, model, data, '')

        if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
            return False, self._delete_empty_field_from_report(status)
        return True, None
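A minimal usage sketch of the validator; the operation name, field names, and the way the spec is obtained are illustrative assumptions, not part of this module:

# Hypothetical usage: validate module input before sending a POST request.
spec = FdmSwaggerParser().parse_spec(swagger_json)  # swagger_json fetched from the device elsewhere
validator = FdmSwaggerValidator(spec)
is_valid, report = validator.validate_data('addNetworkObject',
                                           {'name': 'testObj', 'value': 42})
if not is_valid:
    # report is a dict with optional 'required' and 'invalid_type' keys as described above
    raise ValueError('Invalid input: %s' % report)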
    def _check_validate_data_params(self, data, operation_name):
        if not operation_name or not isinstance(operation_name, string_types):
            raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
        if not isinstance(data, dict):
            raise IllegalArgumentException("The data parameter must be a dict")
        if operation_name not in self._operations:
            raise IllegalArgumentException("{0} operation is not supported".format(operation_name))

    def validate_query_params(self, operation_name, params):
        """
        Validate params for GET requests. Use this method for validating the query part of the URL.
        :param operation_name: string
                            The value must be a non-empty string.
                            The operation name is used to get a params specification.
        :param params: dict
                     should be in the format that the specification (from the operation) expects
                 Ex.
                 {
                     'objId': "string_value",
                     'p_integer': 1,
                     'p_boolean': True,
                     'p_number': 2.3
                 }
        :rtype: (Boolean, msg)
        :return:
            (True, None) - if params are valid
            Invalid:
            (False, {
                'required': [  # list of fields that are required but are not present in the params
                    'field_name'
                ],
                'invalid_type': [  # list of fields with invalid data and the expected type of the params
                    {
                        'path': 'objId',  # field name
                        'expected_type': 'string',  # expected type, e.g. 'string', 'integer', 'boolean', 'number'
                        'actually_value': 1  # the value that the user passed
                    }
                ]
            })
        :raises IllegalArgumentException
            'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The params parameter must be a dict' if params is neither a dict nor None
            '{operation_name} operation is not supported' if the spec does not contain the operation
        """
        return self._validate_url_params(operation_name, params, resource=OperationParams.QUERY)
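A short usage sketch; the operation and parameter names are hypothetical, and `validator` is the instance from the previous example:

# Hypothetical usage: validate the query part of a GET request.
is_valid, report = validator.validate_query_params(
    'getNetworkObjectList', {'limit': 10, 'offset': 0})
assert is_valid, report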
    def validate_path_params(self, operation_name, params):
        """
        Validate params for GET requests. Use this method for validating the path part of the URL.
        :param operation_name: string
                            The value must be a non-empty string.
                            The operation name is used to get a params specification.
        :param params: dict
                     should be in the format that the specification (from the operation) expects

                 Ex.
                 {
                     'objId': "string_value",
                     'p_integer': 1,
                     'p_boolean': True,
                     'p_number': 2.3
                 }
        :rtype: (Boolean, msg)
        :return:
            (True, None) - if params are valid
            Invalid:
            (False, {
                'required': [  # list of fields that are required but are not present in the params
                    'field_name'
                ],
                'invalid_type': [  # list of fields with invalid data and the expected type of the params
                    {
                        'path': 'objId',  # field name
                        'expected_type': 'string',  # expected type, e.g. 'string', 'integer', 'boolean', 'number'
                        'actually_value': 1  # the value that the user passed
                    }
                ]
            })
        :raises IllegalArgumentException
            'The operation_name parameter must be a non-empty string' if operation_name is not valid
            'The params parameter must be a dict' if params is neither a dict nor None
            '{operation_name} operation is not supported' if the spec does not contain the operation
        """
        return self._validate_url_params(operation_name, params, resource=OperationParams.PATH)

    def _validate_url_params(self, operation, params, resource):
        if params is None:
            params = {}

        self._check_validate_url_params(operation, params)

        operation = self._operations[operation]
        if OperationField.PARAMETERS in operation and resource in operation[OperationField.PARAMETERS]:
            spec = operation[OperationField.PARAMETERS][resource]
            status = self._init_report()
            self._check_url_params(status, spec, params)

            if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
                return False, self._delete_empty_field_from_report(status)
            return True, None
        else:
            return True, None

    def _check_validate_url_params(self, operation, params):
        if not operation or not isinstance(operation, string_types):
            raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
        if not isinstance(params, dict):
            raise IllegalArgumentException("The params parameter must be a dict")
        if operation not in self._operations:
            raise IllegalArgumentException("{0} operation is not supported".format(operation))

    def _check_url_params(self, status, spec, params):
        for prop_name in spec.keys():
            prop = spec[prop_name]
            if prop[PropName.REQUIRED] and prop_name not in params:
                status[PropName.REQUIRED].append(prop_name)
                continue
            if prop_name in params:
                expected_type = prop[PropName.TYPE]
                value = params[prop_name]
                if not self._is_correct_simple_types(expected_type, value, allow_null=False):
                    self._add_invalid_type_report(status, '', prop_name, expected_type, value)
    def _validate_object(self, status, model, data, path):
        if self._is_enum(model):
            self._check_enum(status, model, data, path)
        elif self._is_object(model):
            self._check_object(status, model, data, path)

    def _is_enum(self, model):
        return self._is_string_type(model) and PropName.ENUM in model

    def _check_enum(self, status, model, value, path):
        if value is not None and value not in model[PropName.ENUM]:
            self._add_invalid_type_report(status, path, '', PropName.ENUM, value)

    def _add_invalid_type_report(self, status, path, prop_name, expected_type, actually_value):
        status[PropName.INVALID_TYPE].append({
            'path': self._create_path_to_field(path, prop_name),
            'expected_type': expected_type,
            'actually_value': actually_value
        })

    def _check_object(self, status, model, data, path):
        if data is None:
            return

        if not isinstance(data, dict):
            self._add_invalid_type_report(status, path, '', PropType.OBJECT, data)
            return None

        if PropName.REQUIRED in model:
            self._check_required_fields(status, model[PropName.REQUIRED], data, path)

        model_properties = model[PropName.PROPERTIES]
        for prop in model_properties.keys():
            if prop in data:
                model_prop_val = model_properties[prop]
                expected_type = model_prop_val[PropName.TYPE]
                actually_value = data[prop]
                self._check_types(status, actually_value, expected_type, model_prop_val, path, prop)

    def _check_types(self, status, actually_value, expected_type, model, path, prop_name):
        if expected_type == PropType.OBJECT:
            ref_model = self._get_model_by_ref(model)

            self._validate_object(status, ref_model, actually_value,
                                  path=self._create_path_to_field(path, prop_name))
        elif expected_type == PropType.ARRAY:
            self._check_array(status, model, actually_value,
                              path=self._create_path_to_field(path, prop_name))
        elif not self._is_correct_simple_types(expected_type, actually_value):
            self._add_invalid_type_report(status, path, prop_name, expected_type, actually_value)

    def _get_model_by_ref(self, model_prop_val):
        model = _get_model_name_from_url(model_prop_val[PropName.REF])
        return self._models[model]

    def _check_required_fields(self, status, required_fields, data, path):
        missed_required_fields = [self._create_path_to_field(path, field) for field in
                                  required_fields if field not in data.keys() or data[field] is None]
        if len(missed_required_fields) > 0:
            status[PropName.REQUIRED] += missed_required_fields

    def _check_array(self, status, model, data, path):
        if data is None:
            return
        elif not isinstance(data, list):
            self._add_invalid_type_report(status, path, '', PropType.ARRAY, data)
        else:
            item_model = model[PropName.ITEMS]
            for i, item_data in enumerate(data):
                self._check_types(status, item_data, item_model[PropName.TYPE], item_model, "{0}[{1}]".format(path, i),
                                  '')

    @staticmethod
    def _is_correct_simple_types(expected_type, value, allow_null=True):
        def is_numeric_string(s):
            try:
                float(s)
                return True
            except ValueError:
                return False

        if value is None and allow_null:
            return True
        elif expected_type == PropType.STRING:
            return isinstance(value, string_types)
        elif expected_type == PropType.BOOLEAN:
            return isinstance(value, bool)
        elif expected_type == PropType.INTEGER:
            is_integer = isinstance(value, integer_types) and not isinstance(value, bool)
            is_digit_string = isinstance(value, string_types) and value.isdigit()
            return is_integer or is_digit_string
        elif expected_type == PropType.NUMBER:
            # Use a distinct local name so the is_numeric_string() helper above
            # is not shadowed by the result flag.
            is_number = isinstance(value, (integer_types, float)) and not isinstance(value, bool)
            is_numeric_str = isinstance(value, string_types) and is_numeric_string(value)
            return is_number or is_numeric_str
        return False
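Illustrative outcomes of the type check above, assuming the PropType constants are the plain strings 'string', 'boolean', 'integer' and 'number':

check = FdmSwaggerValidator._is_correct_simple_types
assert check('integer', 42) and check('integer', '42')  # digit strings count as integers
assert not check('integer', True)                       # bool is explicitly excluded
assert check('number', 2.5) and check('number', '2.5')  # numeric strings count as numbers
assert check('string', None)                            # None passes unless allow_null=False
assert not check('string', None, allow_null=False)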
    @staticmethod
    def _is_string_type(model):
        return PropName.TYPE in model and model[PropName.TYPE] == PropType.STRING

    @staticmethod
    def _init_report():
        return {
            PropName.REQUIRED: [],
            PropName.INVALID_TYPE: []
        }

    @staticmethod
    def _delete_empty_field_from_report(status):
        if not status[PropName.REQUIRED]:
            del status[PropName.REQUIRED]
        if not status[PropName.INVALID_TYPE]:
            del status[PropName.INVALID_TYPE]
        return status

    @staticmethod
    def _create_path_to_field(path='', field=''):
        separator = ''
        if path and field:
            separator = '.'
        return "{0}{1}{2}".format(path, separator, field)

    @staticmethod
    def _is_object(model):
        return PropName.TYPE in model and model[PropName.TYPE] == PropType.OBJECT
@ -1,41 +0,0 @@
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from ansible_collections.community.general.plugins.module_utils.network.ftd.configuration import ParamName, PATH_PARAMS_FOR_DEFAULT_OBJ


class FtdOperations:
    """
    Utility class for common operation names
    """
    GET_SYSTEM_INFO = 'getSystemInformation'
    GET_MANAGEMENT_IP_LIST = 'getManagementIPList'
    GET_DNS_SETTING_LIST = 'getDeviceDNSSettingsList'
    GET_DNS_SERVER_GROUP = 'getDNSServerGroup'


def get_system_info(resource):
    """
    Executes the `getSystemInformation` operation and returns information about the system.

    :param resource: a BaseConfigurationResource object to connect to the device
    :return: a dictionary with system information about the device and its software
    """
    path_params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ}
    system_info = resource.execute_operation(FtdOperations.GET_SYSTEM_INFO, path_params)
    return system_info
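A usage sketch; how the `BaseConfigurationResource` is constructed and which keys the returned dictionary contains are assumptions here:

# Hypothetical usage inside an FTD module that already holds a connection:
resource = BaseConfigurationResource(connection, module.check_mode)
system_info = get_system_info(resource)
software_version = system_info['softwareVersion']  # key name is illustrative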
@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection, ConnectionError

_DEVICE_CONFIGS = {}


def get_connection(module):
    return Connection(module._socket_path)


def load_config(module, commands):
    connection = get_connection(module)

    try:
        resp = connection.edit_config(candidate=commands)
        return resp.get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)
    try:
        return connection.run_commands(commands=commands, check_rc=check_rc)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))


def exec_scp(module, command):
    connection = Connection(module._socket_path)
    return connection.scp(**command)


def get_config(module, flags=None, compare=None):
    flag_str = ' '.join(to_list(flags))
    try:
        return _DEVICE_CONFIGS[flag_str]
    except KeyError:
        connection = get_connection(module)
        try:
            out = connection.get_config(flags=flags, compare=compare)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[flag_str] = cfg
        return cfg


def check_args(module, warnings):
    pass


def get_defaults_flag(module):
    connection = get_connection(module)
    try:
        out = connection.get_defaults_flag()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    return to_text(out, errors='surrogate_then_replace').strip()
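A sketch of how the `_DEVICE_CONFIGS` cache behaves; the flag value is illustrative:

config = get_config(module)                   # first call fetches from the device
config_again = get_config(module)             # same flags, served from the cache
detailed = get_config(module, flags=['all'])  # different cache key, fetched anew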
@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2018, Ingate Systems AB
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


try:
    from ingate import ingatesdk
    HAS_INGATESDK = True
except ImportError:
    HAS_INGATESDK = False


def ingate_argument_spec(**kwargs):
    client_options = dict(
        version=dict(choices=['v1'], default='v1'),
        scheme=dict(choices=['http', 'https'], required=True),
        address=dict(type='str', required=True),
        username=dict(type='str', required=True),
        password=dict(type='str', required=True, no_log=True),
        port=dict(type='int'),
        timeout=dict(type='int'),
        validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
    )
    argument_spec = dict(
        client=dict(type='dict', required=True,
                    options=client_options),
    )
    argument_spec.update(kwargs)
    return argument_spec


def ingate_create_client(**kwargs):
    api_client = ingate_create_client_noauth(**kwargs)

    # Authenticate and get hold of a security token.
    api_client.authenticate()

    # Return the client.
    return api_client


def ingate_create_client_noauth(**kwargs):
    client_params = kwargs['client']

    # Create API client.
    api_client = ingatesdk.Client(client_params['version'],
                                  client_params['scheme'],
                                  client_params['address'],
                                  client_params['username'],
                                  client_params['password'],
                                  port=client_params['port'],
                                  timeout=client_params['timeout'])

    # Check if we should skip SSL Certificate verification.
    verify_ssl = client_params.get('validate_certs')
    if not verify_ssl:
        api_client.skip_verify_certificate()

    # Return the client.
    return api_client


def is_ingatesdk_installed(module):
    if not HAS_INGATESDK:
        module.fail_json(msg="The Ingate Python SDK module is required for this module.")
@ -1,113 +0,0 @@
#
# Copyright (c) 2017, Paul Baker <paul@paulbaker.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, EntityCollection
from ansible.module_utils.connection import Connection, exec_command

_DEVICE_CONFIG = None
_CONNECTION = None

ironware_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
    'timeout': dict(type='int'),
}

ironware_argument_spec = {
    'provider': dict(type='dict', options=ironware_provider_spec)
}

command_spec = {
    'command': dict(key=True),
    'prompt': dict(),
    'answer': dict()
}


def get_provider_argspec():
    return ironware_provider_spec


def check_args(module):
    pass


def get_connection(module):
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module._socket_path)

    return _CONNECTION


def to_commands(module, commands):
    if not isinstance(commands, list):
        raise AssertionError('argument must be of type <list>')

    transform = EntityCollection(module, command_spec)
    commands = transform(commands)

    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])

    return commands
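Illustrative input and output for `to_commands()`; the normalization into dicts is performed by `EntityCollection`, and the exact values shown are assumptions:

# Both of these are expected to normalize to
# [{'command': 'show version', 'prompt': None, 'answer': None}]:
to_commands(module, ['show version'])
to_commands(module, [{'command': 'show version'}])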
def run_commands(module, commands, check_rc=True):
    connection = get_connection(module)

    commands = to_commands(module, to_list(commands))

    responses = list()

    for cmd in commands:
        out = connection.get(**cmd)
        responses.append(to_text(out, errors='surrogate_then_replace'))

    return responses


def get_config(module, source='running', flags=None):
    global _DEVICE_CONFIG
    if source == 'running' and flags is None and _DEVICE_CONFIG is not None:
        return _DEVICE_CONFIG
    else:
        conn = get_connection(module)
        out = conn.get_config(source=source, flags=flags)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        if source == 'running' and flags is None:
            _DEVICE_CONFIG = cfg
        return cfg


def load_config(module, config):
    conn = get_connection(module)
    conn.edit_config(config)
@ -1,322 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (c) 2017 Citrix Systems
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import json
import re
import sys

from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils._text import to_native


class ConfigProxy(object):

    def __init__(self, actual, client, attribute_values_dict, readwrite_attrs, transforms=None, readonly_attrs=None, immutable_attrs=None, json_encodes=None):
        transforms = {} if transforms is None else transforms
        readonly_attrs = [] if readonly_attrs is None else readonly_attrs
        immutable_attrs = [] if immutable_attrs is None else immutable_attrs
        json_encodes = [] if json_encodes is None else json_encodes

        # Actual config object from nitro sdk
        self.actual = actual

        # nitro client
        self.client = client

        # ansible attribute_values_dict
        self.attribute_values_dict = attribute_values_dict

        self.readwrite_attrs = readwrite_attrs
        self.readonly_attrs = readonly_attrs
        self.immutable_attrs = immutable_attrs
        self.json_encodes = json_encodes
        self.transforms = transforms

        self.attribute_values_processed = {}
        for attribute, value in self.attribute_values_dict.items():
            if value is None:
                continue
            if attribute in transforms:
                for transform in self.transforms[attribute]:
                    if transform == 'bool_yes_no':
                        if value is True:
                            value = 'YES'
                        elif value is False:
                            value = 'NO'
                    elif transform == 'bool_on_off':
                        if value is True:
                            value = 'ON'
                        elif value is False:
                            value = 'OFF'
                    elif callable(transform):
                        value = transform(value)
                    else:
                        raise Exception('Invalid transform %s' % transform)
            self.attribute_values_processed[attribute] = value

        self._copy_attributes_to_actual()
    def _copy_attributes_to_actual(self):
        for attribute in self.readwrite_attrs:
            if attribute in self.attribute_values_processed:
                attribute_value = self.attribute_values_processed[attribute]

                if attribute_value is None:
                    continue

                # Fallthrough
                if attribute in self.json_encodes:
                    attribute_value = json.JSONEncoder().encode(attribute_value).strip('"')
                setattr(self.actual, attribute, attribute_value)

    def __getattr__(self, name):
        if name in self.attribute_values_dict:
            return self.attribute_values_dict[name]
        else:
            raise AttributeError('No attribute %s found' % name)

    def add(self):
        self.actual.__class__.add(self.client, self.actual)

    def update(self):
        return self.actual.__class__.update(self.client, self.actual)

    def delete(self):
        self.actual.__class__.delete(self.client, self.actual)

    def get(self, *args, **kwargs):
        result = self.actual.__class__.get(self.client, *args, **kwargs)

        return result

    def has_equal_attributes(self, other):
        if self.diff_object(other) == {}:
            return True
        else:
            return False

    def diff_object(self, other):
        diff_dict = {}
        for attribute in self.attribute_values_processed:
            # Skip readonly attributes
            if attribute not in self.readwrite_attrs:
                continue

            # Skip attributes not present in module arguments
            if self.attribute_values_processed[attribute] is None:
                continue

            # Check existence
            if hasattr(other, attribute):
                attribute_value = getattr(other, attribute)
            else:
                diff_dict[attribute] = 'missing from other'
                continue

            # Compare values
            param_type = self.attribute_values_processed[attribute].__class__
            if attribute_value is None or param_type(attribute_value) != self.attribute_values_processed[attribute]:
                str_tuple = (
                    type(self.attribute_values_processed[attribute]),
                    self.attribute_values_processed[attribute],
                    type(attribute_value),
                    attribute_value,
                )
                diff_dict[attribute] = 'difference. ours: (%s) %s other: (%s) %s' % str_tuple
        return diff_dict
    def get_actual_rw_attributes(self, filter='name'):
        if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0:
            return {}
        server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter]))
        actual_instance = server_list[0]
        ret_val = {}
        for attribute in self.readwrite_attrs:
            if not hasattr(actual_instance, attribute):
                continue
            ret_val[attribute] = getattr(actual_instance, attribute)
        return ret_val

    def get_actual_ro_attributes(self, filter='name'):
        if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0:
            return {}
        server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter]))
        actual_instance = server_list[0]
        ret_val = {}
        for attribute in self.readonly_attrs:
            if not hasattr(actual_instance, attribute):
                continue
            ret_val[attribute] = getattr(actual_instance, attribute)
        return ret_val

    def get_missing_rw_attributes(self):
        return list(set(self.readwrite_attrs) - set(self.get_actual_rw_attributes().keys()))

    def get_missing_ro_attributes(self):
        return list(set(self.readonly_attrs) - set(self.get_actual_ro_attributes().keys()))


def get_immutables_intersection(config_proxy, keys):
    immutables_set = set(config_proxy.immutable_attrs)
    keys_set = set(keys)
    # Return list of sets' intersection
    return list(immutables_set & keys_set)


def ensure_feature_is_enabled(client, feature_str):
    enabled_features = client.get_enabled_features()

    if enabled_features is None:
        enabled_features = []

    if feature_str not in enabled_features:
        client.enable_features(feature_str)
        client.save_config()


def get_nitro_client(module):
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

    client = nitro_service(module.params['nsip'], module.params['nitro_protocol'])
    client.set_credential(module.params['nitro_user'], module.params['nitro_pass'])
    client.timeout = float(module.params['nitro_timeout'])
    client.certvalidation = module.params['validate_certs']
    return client


netscaler_common_arguments = dict(
    nsip=dict(
        required=True,
        fallback=(env_fallback, ['NETSCALER_NSIP']),
    ),
    nitro_user=dict(
        required=True,
        fallback=(env_fallback, ['NETSCALER_NITRO_USER']),
        no_log=True
    ),
    nitro_pass=dict(
        required=True,
        fallback=(env_fallback, ['NETSCALER_NITRO_PASS']),
        no_log=True
    ),
    nitro_protocol=dict(
        choices=['http', 'https'],
        fallback=(env_fallback, ['NETSCALER_NITRO_PROTOCOL']),
        default='http'
    ),
    validate_certs=dict(
        default=True,
        type='bool'
    ),
    nitro_timeout=dict(default=310, type='float'),
    state=dict(
        choices=[
            'present',
            'absent',
        ],
        default='present',
    ),
    save_config=dict(
        type='bool',
        default=True,
    ),
)


loglines = []


def complete_missing_attributes(actual, attrs_list, fill_value=None):
    for attribute in attrs_list:
        if not hasattr(actual, attribute):
            setattr(actual, attribute, fill_value)


def log(msg):
    loglines.append(msg)


def get_ns_version(client):
    from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsversion import nsversion
    result = nsversion.get(client)
    m = re.match(r'^.*NS(\d+)\.(\d+).*$', result[0].version)
    if m is None:
        return None
    else:
        return int(m.group(1)), int(m.group(2))


def get_ns_hardware(client):
    from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nshardware import nshardware
    result = nshardware.get(client)
    return result


def monkey_patch_nitro_api():

    from nssrc.com.citrix.netscaler.nitro.resource.base.Json import Json

    def new_resource_to_string_convert(self, resrc):
        # Line below is the actual patch
        dict_valid_values = dict((k.replace('_', '', 1), v) for k, v in resrc.__dict__.items() if v)
        return json.dumps(dict_valid_values)
    Json.resource_to_string_convert = new_resource_to_string_convert

    from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util

    @classmethod
    def object_to_string_new(cls, obj):
        output = []
        flds = obj.__dict__
        for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v):
            if isinstance(v, bool):
                output.append('"%s":%s' % (k, v))
            elif isinstance(v, (binary_type, text_type)):
                v = to_native(v, errors='surrogate_or_strict')
                output.append('"%s":"%s"' % (k, v))
            elif isinstance(v, int):
                output.append('"%s":"%s"' % (k, v))
        return ','.join(output)

    @classmethod
    def object_to_string_withoutquotes_new(cls, obj):
        output = []
        flds = obj.__dict__
        for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v):
            if isinstance(v, (int, bool)):
                output.append('%s:%s' % (k, v))
            elif isinstance(v, (binary_type, text_type)):
                v = to_native(v, errors='surrogate_or_strict')
                output.append('%s:%s' % (k, cls.encode(v)))
        return ','.join(output)

    nitro_util.object_to_string = object_to_string_new
    nitro_util.object_to_string_withoutquotes = object_to_string_withoutquotes_new
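A condensed usage sketch of `ConfigProxy` with the nitro SDK's server resource; the attribute lists and the transform are abbreviated and illustrative:

from nssrc.com.citrix.netscaler.nitro.resource.config.basic.server import server

client = get_nitro_client(module)
client.login()
proxy = ConfigProxy(
    actual=server(),
    client=client,
    attribute_values_dict=module.params,
    readwrite_attrs=['name', 'ipaddress', 'comment'],  # abbreviated
    transforms={'disabled': ['bool_yes_no']},          # illustrative transform
)
if module.params['state'] == 'present':
    proxy.add()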
@ -1,59 +0,0 @@
# Copyright: (c) 2018, Pluribus Networks
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
#

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import json
from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.connection import exec_command


def get_connection(module):
    if hasattr(module, '_nvos_connection'):
        return module._nvos_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._nvos_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._nvos_connection


def get_capabilities(module):
    if hasattr(module, '_nvos_capabilities'):
        return module._nvos_capabilities
    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module._nvos_capabilities = json.loads(capabilities)
    return module._nvos_capabilities


def to_commands(module, commands):
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)


def run_commands(module, commands, check_rc=True):
    commands = to_commands(module, to_list(commands))
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)

    # Only the result of the last command is returned.
    return rc, out, err
@ -1,66 +0,0 @@
# Copyright: (c) 2018, Pluribus Networks
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
#

from __future__ import absolute_import, division, print_function
__metaclass__ = type


from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands


def pn_cli(module, switch=None, username=None, password=None, switch_local=None):
    """
    Method to generate the cli portion to launch the Netvisor cli.
    :param module: The Ansible module to fetch username and password.
    :param switch: Name of the target switch, appended as 'switch <name>'.
    :param username: Username used to build the '--user' option.
    :param password: Password used together with username.
    :param switch_local: When true, appends 'switch-local' instead of a switch name.
    :return: The cli string for further processing.
    """

    cli = ''

    if username and password:
        cli += '--user "%s":"%s" ' % (username, password)
    if switch:
        cli += ' switch ' + switch
    if switch_local:
        cli += ' switch-local '

    return cli
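Illustrative return values, following directly from the branches above:

pn_cli(module)                                   # -> ''
pn_cli(module, username='admin', password='pw')  # -> '--user "admin":"pw" '
pn_cli(module, switch='sw01')                    # -> ' switch sw01'
pn_cli(module, switch_local=True)                # -> ' switch-local '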
def booleanArgs(arg, trueString, falseString):
    if arg is True:
        return " %s " % trueString
    elif arg is False:
        return " %s " % falseString
    else:
        return ""


def run_cli(module, cli, state_map):
    """
    Executes the cli command on the target node(s) and returns the output.
    The module then exits based on the output.
    :param cli: the complete cli string to be executed on the target node(s).
    :param state_map: Provides the state of the command.
    :param module: The Ansible module to fetch the command.
    """
    state = module.params['state']
    command = state_map[state]

    result, out, err = run_commands(module, cli)

    results = dict(
        command=cli,
        msg="%s operation completed" % cli,
        changed=True
    )
    # A non-zero return code means the command failed on the switch.
    if result != 0:
        module.exit_json(
            command=cli,
            msg="%s operation failed" % cli,
            changed=False
        )

    module.exit_json(**results)
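A sketch of how a pn_* module would drive `run_cli()`; the state map and CLI verbs are hypothetical:

state_map = dict(present='vrouter-create', absent='vrouter-delete')
cli = pn_cli(module)
cli += ' %s name ansible-vrouter ' % state_map[module.params['state']]
run_cli(module, cli, state_map)  # exits the module with changed/failed status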
@ -1,160 +0,0 @@
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import json
from ansible.module_utils._text import to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection, ConnectionError


def get_connection(module):
    """Get switch connection

    Creates a reusable SSH connection to the switch described in a given module.

    Args:
        module: A valid AnsibleModule instance.

    Returns:
        An instance of `ansible.module_utils.connection.Connection` with a
        connection to the switch described in the provided module.

    Raises:
        AnsibleConnectionFailure: An error occurred connecting to the device
    """
    if hasattr(module, 'nos_connection'):
        return module.nos_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module.nos_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module.nos_connection


def get_capabilities(module):
    """Get switch capabilities

    Collects and returns a python object with the switch capabilities.

    Args:
        module: A valid AnsibleModule instance.

    Returns:
        A dictionary containing the switch capabilities.
    """
    if hasattr(module, 'nos_capabilities'):
        return module.nos_capabilities

    try:
        capabilities = Connection(module._socket_path).get_capabilities()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    module.nos_capabilities = json.loads(capabilities)
    return module.nos_capabilities


def run_commands(module, commands):
    """Run command list against connection.

    Get new or previously used connection and send commands to it one at a time,
    collecting response.

    Args:
        module: A valid AnsibleModule instance.
        commands: Iterable of command strings.

    Returns:
        A list of output strings.
    """
    responses = list()
    connection = get_connection(module)

    for cmd in to_list(commands):
        if isinstance(cmd, dict):
            command = cmd['command']
            prompt = cmd['prompt']
            answer = cmd['answer']
        else:
            command = cmd
            prompt = None
            answer = None

        try:
            out = connection.get(command, prompt, answer)
            out = to_text(out, errors='surrogate_or_strict')
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc))
        except UnicodeError:
            module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))

        responses.append(out)

    return responses
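A usage sketch; plain strings and prompt-answering dicts can be mixed, and the commands shown are illustrative:

output = run_commands(module, [
    'show version',
    {'command': 'copy running-config startup-config',
     'prompt': r'\[y/n\]', 'answer': 'y'},
])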

def get_config(module):
    """Get switch configuration

    Gets the described device's current configuration. If a configuration has
    already been retrieved it will return the previously obtained configuration.

    Args:
        module: A valid AnsibleModule instance.

    Returns:
        A string containing the configuration.
    """
    if not hasattr(module, 'device_configs'):
        module.device_configs = {}
    elif module.device_configs != {}:
        return module.device_configs

    connection = get_connection(module)
    try:
        out = connection.get_config()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    cfg = to_text(out, errors='surrogate_then_replace').strip()
    module.device_configs = cfg
    return cfg


def load_config(module, commands):
    """Apply a list of commands to a device.

    Given a list of commands apply them to the device to modify the
    configuration in bulk.

    Args:
        module: A valid AnsibleModule instance.
        commands: Iterable of command strings.

    Returns:
        None
    """
    connection = get_connection(module)

    try:
        resp = connection.edit_config(commands)
        return resp.get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
@ -1,822 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright: (c) 2017, Cisco and/or its affiliates.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import open_url
from ansible.module_utils._text import to_text

import json
import re
import socket

try:
    unicode
    HAVE_UNICODE = True
except NameError:
    unicode = str
    HAVE_UNICODE = False


nso_argument_spec = dict(
    url=dict(type='str', required=True),
    username=dict(type='str', required=True, fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    password=dict(type='str', required=True, no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD'])),
    timeout=dict(type='int', default=300),
    validate_certs=dict(type='bool', default=False)
)


class State(object):
    SET = 'set'
    PRESENT = 'present'
    ABSENT = 'absent'
    CHECK_SYNC = 'check-sync'
    DEEP_CHECK_SYNC = 'deep-check-sync'
    IN_SYNC = 'in-sync'
    DEEP_IN_SYNC = 'deep-in-sync'

    SYNC_STATES = ('check-sync', 'deep-check-sync', 'in-sync', 'deep-in-sync')


class ModuleFailException(Exception):
    def __init__(self, message):
        super(ModuleFailException, self).__init__(message)
        self.message = message


class NsoException(Exception):
    def __init__(self, message, error):
        super(NsoException, self).__init__(message)
        self.message = message
        self.error = error


class JsonRpc(object):
    def __init__(self, url, timeout, validate_certs):
        self._url = url
        self._timeout = timeout
        self._validate_certs = validate_certs
        self._id = 0
        self._trans = {}
        self._headers = {'Content-Type': 'application/json'}
        self._conn = None
        self._system_settings = {}

    def login(self, user, passwd):
        payload = {
            'method': 'login',
            'params': {'user': user, 'passwd': passwd}
        }
        resp, resp_json = self._call(payload)
        self._headers['Cookie'] = resp.headers['set-cookie']

    def logout(self):
        payload = {'method': 'logout', 'params': {}}
        self._call(payload)

    def get_system_setting(self, setting):
        if setting not in self._system_settings:
            payload = {'method': 'get_system_setting', 'params': {'operation': setting}}
            resp, resp_json = self._call(payload)
            self._system_settings[setting] = resp_json['result']
        return self._system_settings[setting]

    def new_trans(self, **kwargs):
        payload = {'method': 'new_trans', 'params': kwargs}
        resp, resp_json = self._call(payload)
        return resp_json['result']['th']

    def get_trans(self, mode):
        if mode not in self._trans:
            th = self.new_trans(mode=mode)
            self._trans[mode] = th
        return self._trans[mode]

    def delete_trans(self, th):
        payload = {'method': 'delete_trans', 'params': {'th': th}}
        resp, resp_json = self._call(payload)
        self._maybe_delete_trans(th)

    def validate_trans(self, th):
        payload = {'method': 'validate_trans', 'params': {'th': th}}
        resp, resp_json = self._write_call(payload)
        return resp_json['result']

    def get_trans_changes(self, th):
        payload = {'method': 'get_trans_changes', 'params': {'th': th}}
        resp, resp_json = self._write_call(payload)
        return resp_json['result']['changes']

    def validate_commit(self, th):
        payload = {'method': 'validate_commit', 'params': {'th': th}}
        resp, resp_json = self._write_call(payload)
        return resp_json['result'].get('warnings', [])

    def commit(self, th):
        payload = {'method': 'commit', 'params': {'th': th}}
        resp, resp_json = self._write_call(payload)
        if len(resp_json['result']) == 0:
            self._maybe_delete_trans(th)
        return resp_json['result']

    def get_schema(self, **kwargs):
        payload = {'method': 'get_schema', 'params': kwargs}
        resp, resp_json = self._maybe_write_call(payload)
        return resp_json['result']

    def get_module_prefix_map(self, path=None):
        if path is None:
            payload = {'method': 'get_module_prefix_map', 'params': {}}
            resp, resp_json = self._call(payload)
        else:
            payload = {'method': 'get_module_prefix_map', 'params': {'path': path}}
            resp, resp_json = self._maybe_write_call(payload)
        return resp_json['result']

    def get_value(self, path):
        payload = {
            'method': 'get_value',
            'params': {'path': path}
        }
        resp, resp_json = self._read_call(payload)
        return resp_json['result']

    def exists(self, path):
        payload = {'method': 'exists', 'params': {'path': path}}
        try:
            resp, resp_json = self._read_call(payload)
            return resp_json['result']['exists']
        except NsoException as ex:
            # calling exists on a sub-list when the parent list does
            # not exist will cause data.not_found errors on recent
            # NSO versions
            if 'type' in ex.error and ex.error['type'] == 'data.not_found':
                return False
            raise

    def create(self, th, path):
        payload = {'method': 'create', 'params': {'th': th, 'path': path}}
        self._write_call(payload)

    def delete(self, th, path):
        payload = {'method': 'delete', 'params': {'th': th, 'path': path}}
        self._write_call(payload)

    def set_value(self, th, path, value):
        payload = {
            'method': 'set_value',
            'params': {'th': th, 'path': path, 'value': value}
        }
        resp, resp_json = self._write_call(payload)
        return resp_json['result']

    def show_config(self, path, operational=False):
        payload = {
            'method': 'show_config',
            'params': {
                'path': path,
                'result_as': 'json',
                'with_oper': operational}
        }
        resp, resp_json = self._read_call(payload)
        return resp_json['result']

    def query(self, xpath, fields):
        payload = {
            'method': 'query',
            'params': {
                'xpath_expr': xpath,
                'selection': fields
            }
        }
        resp, resp_json = self._read_call(payload)
        return resp_json['result']['results']

    def run_action(self, th, path, params=None):
        if params is None:
            params = {}

        if is_version(self, [(4, 5), (4, 4, 3)]):
            result_format = 'json'
        else:
            result_format = 'normal'

        payload = {
            'method': 'run_action',
            'params': {
                'format': result_format,
                'path': path,
                'params': params
            }
        }
        if th is None:
            resp, resp_json = self._read_call(payload)
        else:
            payload['params']['th'] = th
            resp, resp_json = self._call(payload)

        if result_format == 'normal':
            # this only works for one-level results; list entries,
            # containers etc. will have / in their name.
            result = {}
            for info in resp_json['result']:
                result[info['name']] = info['value']
        else:
            result = resp_json['result']

        return result
    def _call(self, payload):
        self._id += 1
        if 'id' not in payload:
            payload['id'] = self._id

        if 'jsonrpc' not in payload:
            payload['jsonrpc'] = '2.0'

        data = json.dumps(payload)
        try:
            resp = open_url(
                self._url, timeout=self._timeout,
                method='POST', data=data, headers=self._headers,
                validate_certs=self._validate_certs)
            if resp.code != 200:
                raise NsoException(
                    'NSO returned HTTP code {0}, expected 200'.format(resp.code), {})
        except socket.timeout:
            raise NsoException('request timed out against NSO at {0}'.format(self._url), {})

        resp_body = resp.read()
        resp_json = json.loads(resp_body)

        if 'error' in resp_json:
            self._handle_call_error(payload, resp_json)
        return resp, resp_json

    def _handle_call_error(self, payload, resp_json):
        method = payload['method']

        error = resp_json['error']
        error_type = error['type'][len('rpc.method.'):]
        if error_type in ('unexpected_params',
                          'unknown_params_value',
                          'invalid_params',
                          'invalid_params_type',
                          'data_not_found'):
            key = error['data']['param']
            error_type_s = error_type.replace('_', ' ')
            if key == 'path':
                msg = 'NSO {0} {1}. path = {2}'.format(
                    method, error_type_s, payload['params']['path'])
            else:
                path = payload['params'].get('path', 'unknown')
                msg = 'NSO {0} {1}. path = {2}. {3} = {4}'.format(
                    method, error_type_s, path, key, payload['params'][key])
        else:
            msg = 'NSO {0} returned JSON-RPC error: {1}'.format(method, error)

        raise NsoException(msg, error)

    def _read_call(self, payload):
        if 'th' not in payload['params']:
            payload['params']['th'] = self.get_trans(mode='read')
        return self._call(payload)

    def _write_call(self, payload):
        if 'th' not in payload['params']:
            payload['params']['th'] = self.get_trans(mode='read_write')
        return self._call(payload)

    def _maybe_write_call(self, payload):
        if 'read_write' in self._trans:
            return self._write_call(payload)
        else:
            return self._read_call(payload)

    def _maybe_delete_trans(self, th):
        for mode in ('read', 'read_write'):
            if th == self._trans.get(mode, None):
                del self._trans[mode]
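Putting the pieces together, a minimal end-to-end sketch; the URL, credentials, and keypath are hypothetical:

client = JsonRpc('https://nso.example.com/jsonrpc', timeout=300, validate_certs=False)
client.login('admin', 'admin')
th = client.get_trans(mode='read_write')   # cached and reused by the *_call helpers
client.set_value(th, '/ncs:devices/device{ce0}/description', 'managed by Ansible')
client.validate_commit(th)
client.commit(th)                          # also drops the cached handle on success
client.logout()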
|
||||
|
||||
|
||||


class ValueBuilder(object):
    PATH_RE = re.compile('{[^}]*}')
    PATH_RE_50 = re.compile('{[^}]*}$')

    class Value(object):
        __slots__ = ['path', 'tag_path', 'state', 'value', 'deps']

        def __init__(self, path, state, value, deps):
            self.path = path
            self.tag_path = ValueBuilder.PATH_RE.sub('', path)
            self.state = state
            self.value = value
            self.deps = deps

            # nodes can depend on themselves
            if self.tag_path in self.deps:
                self.deps.remove(self.tag_path)

        def __lt__(self, rhs):
            l_len = len(self.path.split('/'))
            r_len = len(rhs.path.split('/'))
            if l_len == r_len:
                return self.path.__lt__(rhs.path)
            return l_len < r_len

        def __str__(self):
            return 'Value<path={0}, state={1}, value={2}>'.format(
                self.path, self.state, self.value)
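
    # Illustrative note (not part of the original module): PATH_RE strips list
    # keys, so a hypothetical path '/devices/device{ce0}/address' gets the
    # tag_path '/devices/device/address'; sort_values() resolves dependencies
    # on these key-less tag paths.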

    class ValueIterator(object):
        def __init__(self, client, values, delayed_values):
            self._client = client
            self._values = values
            self._delayed_values = delayed_values
            self._pos = 0

        def __iter__(self):
            return self

        def __next__(self):
            return self.next()

        def next(self):
            if self._pos >= len(self._values):
                if len(self._delayed_values) == 0:
                    raise StopIteration()

                builder = ValueBuilder(self._client, delay=False)
                for (parent, maybe_qname, value) in self._delayed_values:
                    builder.build(parent, maybe_qname, value)
                del self._delayed_values[:]
                self._values.extend(builder.values)

                return self.next()

            value = self._values[self._pos]
            self._pos += 1
            return value

    def __init__(self, client, mode='config', delay=None):
        self._client = client
        self._mode = mode
        self._schema_cache = {}
        self._module_prefix_map_cache = {}
        self._values = []
        self._values_dirty = False
        self._delay = delay is None and mode == 'config' and is_version(self._client, [(5, 0)])
        self._delayed_values = []

    def build(self, parent, maybe_qname, value, schema=None):
        qname, name = self.get_prefix_name(parent, maybe_qname)
        if name is None:
            path = parent
        else:
            path = '{0}/{1}'.format(parent, qname)

        if schema is None:
            schema = self._get_schema(path)

        if self._delay and schema.get('is_mount_point', False):
            # delay conversion of mounted values; this is required to
            # get schema information on NSO 5.0 and later.
            self._delayed_values.append((parent, maybe_qname, value))
        elif self._is_leaf_list(schema) and is_version(self._client, [(4, 5)]):
            self._build_leaf_list(path, schema, value)
        elif self._is_leaf(schema):
            deps = schema.get('deps', [])
            if self._is_empty_leaf(schema):
                exists = self._client.exists(path)
                if exists and value != [None]:
                    self._add_value(path, State.ABSENT, None, deps)
                elif not exists and value == [None]:
                    self._add_value(path, State.PRESENT, None, deps)
            else:
                if maybe_qname is None:
                    value_type = self.get_type(path)
                else:
                    value_type = self._get_child_type(parent, qname)

                if 'identityref' in value_type:
                    if isinstance(value, list):
                        value = [ll_v for ll_v, t_ll_v
                                 in [self.get_prefix_name(parent, v) for v in value]]
                    else:
                        value, t_value = self.get_prefix_name(parent, value)
                self._add_value(path, State.SET, value, deps)
        elif isinstance(value, dict):
            self._build_dict(path, schema, value)
        elif isinstance(value, list):
            self._build_list(path, schema, value)
        else:
            raise ModuleFailException(
                'unsupported schema {0} at {1}'.format(
                    schema['kind'], path))
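
    # Illustrative usage sketch (hypothetical paths and data, not part of the
    # original module): build() recursively maps a config dict onto Value
    # entries, e.g.
    #
    #   builder = ValueBuilder(client)
    #   builder.build('/ncs:devices/device{ce0}', None,
    #                 {'address': '10.0.0.1', 'port': 830})
    #   # yields roughly:
    #   #   Value<path=/ncs:devices/device{ce0}/address, state=set, value=10.0.0.1>
    #   #   Value<path=/ncs:devices/device{ce0}/port, state=set, value=830>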

    @property
    def values(self):
        if self._values_dirty:
            self._values = ValueBuilder.sort_values(self._values)
            self._values_dirty = False

        return ValueBuilder.ValueIterator(self._client, self._values, self._delayed_values)

    @staticmethod
    def sort_values(values):
        class N(object):
            def __init__(self, v):
                self.tmp_mark = False
                self.mark = False
                self.v = v

        sorted_values = []
        nodes = [N(v) for v in sorted(values)]

        def get_node(tag_path):
            return next((m for m in nodes
                         if m.v.tag_path == tag_path), None)

        def is_cycle(n, dep, visited):
            visited.add(n.v.tag_path)
            if dep in visited:
                return True

            dep_n = get_node(dep)
            if dep_n is not None:
                for sub_dep in dep_n.v.deps:
                    if is_cycle(dep_n, sub_dep, visited):
                        return True

            return False

        # Check for dependency cycles and drop any edge that closes one.
        # The sort is then only best-effort, but that is enough to work
        # around the issue in NSO. Iterate over a copy of deps since the
        # loop mutates it.
        for n in nodes:
            for dep in list(n.v.deps):
                if is_cycle(n, dep, set()):
                    n.v.deps.remove(dep)

        def visit(n):
            if n.tmp_mark:
                return False
            if not n.mark:
                n.tmp_mark = True
                for m in nodes:
                    if m.v.tag_path in n.v.deps:
                        if not visit(m):
                            return False

                n.tmp_mark = False
                n.mark = True

                sorted_values.insert(0, n.v)

            return True

        n = next((n for n in nodes if not n.mark), None)
        while n is not None:
            visit(n)
            n = next((n for n in nodes if not n.mark), None)

        return sorted_values[::-1]
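
    # Note (illustrative, not part of the original module): visit() is the
    # classic depth-first topological sort with temporary marks for back-edge
    # detection. With hypothetical tag paths, if /a depends on /b, the value
    # for /b is returned before the value for /a so NSO applies them in a
    # dependency-respecting order.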

    def _build_dict(self, path, schema, value):
        keys = schema.get('key', [])
        for dict_key, dict_value in value.items():
            qname, name = self.get_prefix_name(path, dict_key)
            if dict_key in ('__state', ) or name in keys:
                continue

            child_schema = self._find_child(path, schema, qname)
            self.build(path, dict_key, dict_value, child_schema)

    def _build_leaf_list(self, path, schema, value):
        deps = schema.get('deps', [])
        entry_type = self.get_type(path, schema)

        if self._mode == 'verify':
            for entry in value:
                if 'identityref' in entry_type:
                    entry, t_entry = self.get_prefix_name(path, entry)
                entry_path = '{0}{{{1}}}'.format(path, entry)
                if not self._client.exists(entry_path):
                    self._add_value(entry_path, State.ABSENT, None, deps)
        else:
            # the leaf-list is treated as a list here: remove it
            # wholesale, then re-create the expected list entries.
            self._add_value(path, State.ABSENT, None, deps)

            for entry in value:
                if 'identityref' in entry_type:
                    entry, t_entry = self.get_prefix_name(path, entry)
                entry_path = '{0}{{{1}}}'.format(path, entry)
                self._add_value(entry_path, State.PRESENT, None, deps)

    def _build_list(self, path, schema, value):
        deps = schema.get('deps', [])
        for entry in value:
            entry_key = self._build_key(path, entry, schema['key'])
            entry_path = '{0}{{{1}}}'.format(path, entry_key)
            entry_state = entry.get('__state', 'present')
            entry_exists = self._client.exists(entry_path)

            if entry_state == 'absent':
                if entry_exists:
                    self._add_value(entry_path, State.ABSENT, None, deps)
            else:
                if not entry_exists:
                    self._add_value(entry_path, State.PRESENT, None, deps)
                if entry_state in State.SYNC_STATES:
                    self._add_value(entry_path, entry_state, None, deps)

                self.build(entry_path, None, entry)

    def _build_key(self, path, entry, schema_keys):
        key_parts = []
        for key in schema_keys:
            value = entry.get(key, None)
            if value is None:
                raise ModuleFailException(
                    'required leaf {0} in {1} not set in data'.format(
                        key, path))

            value_type = self._get_child_type(path, key)
            if 'identityref' in value_type:
                value, t_value = self.get_prefix_name(path, value)
            key_parts.append(self._quote_key(value))
        return ' '.join(key_parts)

    def _quote_key(self, key):
        if isinstance(key, bool):
            return key and 'true' or 'false'

        q_key = []
        for c in str(key):
            if c in ('{', '}', "'", '\\'):
                q_key.append('\\')
            q_key.append(c)
        q_key = ''.join(q_key)
        if ' ' in q_key:
            return '"{0}"'.format(q_key)
        return q_key
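
    # Illustrative examples (hypothetical keys, not part of the original
    # module): _quote_key(True) -> 'true', _quote_key('ce0') -> 'ce0',
    # _quote_key('my device') -> '"my device"', and braces, quotes and
    # backslashes are escaped, e.g. _quote_key('a{b}') -> 'a\\{b\\}'.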

    def _find_child(self, path, schema, qname):
        if 'children' not in schema:
            schema = self._get_schema(path)

        # look for the qualified name if : is in the name
        child_schema = self._get_child(schema, qname)
        if child_schema is not None:
            return child_schema

        # no direct child was found, look for a choice with a matching child
        for child_schema in schema['children']:
            if child_schema['kind'] != 'choice':
                continue
            choice_child_schema = self._get_choice_child(child_schema, qname)
            if choice_child_schema is not None:
                return choice_child_schema

        raise ModuleFailException(
            'no child in {0} with name {1}. children {2}'.format(
                path, qname, ','.join((c.get('qname', c.get('name', None)) for c in schema['children']))))

    def _add_value(self, path, state, value, deps):
        self._values.append(ValueBuilder.Value(path, state, value, deps))
        self._values_dirty = True

    def get_prefix_name(self, path, qname):
        if not isinstance(qname, (str, unicode)):
            return qname, None
        if ':' not in qname:
            return qname, qname

        module_prefix_map = self._get_module_prefix_map(path)
        module, name = qname.split(':', 1)
        if module not in module_prefix_map:
            raise ModuleFailException(
                'no module mapping for module {0}. loaded modules {1}'.format(
                    module, ','.join(sorted(module_prefix_map.keys()))))

        return '{0}:{1}'.format(module_prefix_map[module], name), name
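
    # Illustrative example (hypothetical module map, not part of the original
    # module): with a module-to-prefix map of {'tailf-ned-cisco-ios': 'ios'},
    # get_prefix_name(path, 'tailf-ned-cisco-ios:interface') returns
    # ('ios:interface', 'interface'); an unqualified name such as 'interface'
    # is returned as ('interface', 'interface').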

    def _get_schema(self, path):
        return self._ensure_schema_cached(path)['data']

    def _get_child_type(self, parent_path, key):
        all_schema = self._ensure_schema_cached(parent_path)
        parent_schema = all_schema['data']
        meta = all_schema['meta']
        schema = self._find_child(parent_path, parent_schema, key)
        return self.get_type(parent_path, schema, meta)

    def get_type(self, path, schema=None, meta=None):
        if schema is None or meta is None:
            all_schema = self._ensure_schema_cached(path)
            schema = all_schema['data']
            meta = all_schema['meta']

        if self._is_leaf(schema):
            def get_type(meta, curr_type):
                if curr_type.get('primitive', False):
                    return [curr_type['name']]
                if 'namespace' in curr_type:
                    curr_type_key = '{0}:{1}'.format(
                        curr_type['namespace'], curr_type['name'])
                    type_info = meta['types'][curr_type_key][-1]
                    return get_type(meta, type_info)
                if 'leaf_type' in curr_type:
                    return get_type(meta, curr_type['leaf_type'][-1])
                if 'union' in curr_type:
                    union_types = []
                    for union_type in curr_type['union']:
                        union_types.extend(get_type(meta, union_type[-1]))
                    return union_types
                return [curr_type.get('name', 'unknown')]

            return get_type(meta, schema['type'])
        return None
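
    # Illustrative note (hypothetical schema, not part of the original
    # module): for a leaf typed as a union of string and uint16, get_type()
    # resolves to ['string', 'uint16']; callers only check the result for
    # 'identityref' membership to decide whether prefix mapping is needed.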

    def _ensure_schema_cached(self, path):
        if not self._delay and is_version(self._client, [(5, 0)]):
            # newer versions of NSO support different schemas for
            # different devices, so the device key is needed to look up
            # the schema. Strip only the trailing key entry to keep the
            # schema logic working.
            path = ValueBuilder.PATH_RE_50.sub('', path)
        else:
            path = ValueBuilder.PATH_RE.sub('', path)

        if path not in self._schema_cache:
            schema = self._client.get_schema(path=path, levels=1)
            self._schema_cache[path] = schema
        return self._schema_cache[path]

    def _get_module_prefix_map(self, path):
        # newer versions of NSO support multiple mappings from module
        # to prefix depending on which device is used.
        if path != '' and is_version(self._client, [(5, 0)]):
            if path not in self._module_prefix_map_cache:
                self._module_prefix_map_cache[path] = self._client.get_module_prefix_map(path)
            return self._module_prefix_map_cache[path]

        if '' not in self._module_prefix_map_cache:
            self._module_prefix_map_cache[''] = self._client.get_module_prefix_map()
        return self._module_prefix_map_cache['']

    def _get_child(self, schema, qname):
        # no child specified, return parent
        if qname is None:
            return schema

        name_key = ':' in qname and 'qname' or 'name'
        return next((c for c in schema['children']
                     if c.get(name_key, None) == qname), None)

    def _get_choice_child(self, schema, qname):
        name_key = ':' in qname and 'qname' or 'name'
        for child_case in schema['cases']:
            # look for direct child
            choice_child_schema = next(
                (c for c in child_case['children']
                 if c.get(name_key, None) == qname), None)
            if choice_child_schema is not None:
                return choice_child_schema

            # look for nested choice
            for child_schema in child_case['children']:
                if child_schema['kind'] != 'choice':
                    continue
                choice_child_schema = self._get_choice_child(child_schema, qname)
                if choice_child_schema is not None:
                    return choice_child_schema
        return None

    def _is_leaf_list(self, schema):
        return schema.get('kind', None) == 'leaf-list'

    def _is_leaf(self, schema):
        # still checking for leaf-list here to be compatible with
        # pre-4.5 versions of NSO.
        return schema.get('kind', None) in ('key', 'leaf', 'leaf-list')

    def _is_empty_leaf(self, schema):
        return (schema.get('kind', None) == 'leaf' and
                schema['type'].get('primitive', False) and
                schema['type'].get('name', '') == 'empty')


def connect(params):
    client = JsonRpc(params['url'],
                     params['timeout'],
                     params['validate_certs'])
    client.login(params['username'], params['password'])
    return client
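
# Illustrative usage sketch (hypothetical URL and credentials, not part of the
# original module), mirroring what the nso_* modules do:
#
#   client = connect({'url': 'http://localhost:8080/jsonrpc', 'timeout': 300,
#                     'validate_certs': False,
#                     'username': 'admin', 'password': 'admin'})
#   verify_version(client, [(4, 5), (4, 4, 3)])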


def verify_version(client, required_versions):
    version_str = client.get_system_setting('version')
    if not verify_version_str(version_str, required_versions):
        supported_versions = ', '.join(
            ['.'.join([str(p) for p in required_version])
             for required_version in required_versions])
        raise ModuleFailException(
            'unsupported NSO version {0}. {1} or later supported'.format(
                version_str, supported_versions))


def is_version(client, required_versions):
    version_str = client.get_system_setting('version')
    return verify_version_str(version_str, required_versions)


def verify_version_str(version_str, required_versions):
    version_str = re.sub('_.*', '', version_str)

    version = [int(p) for p in version_str.split('.')]
    if len(version) < 2:
        raise ModuleFailException(
            'unsupported NSO version format {0}'.format(version_str))

    def check_version(required_version, version):
        for pos in range(len(required_version)):
            if pos >= len(version):
                return False
            if version[pos] > required_version[pos]:
                return True
            if version[pos] < required_version[pos]:
                return False
        return True

    for required_version in required_versions:
        if check_version(required_version, version):
            return True
    return False
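
# Illustrative examples (hypothetical version strings, not part of the
# original module): verify_version_str('4.7.5', [(4, 5), (4, 4, 3)]) is True
# since 4.7 >= 4.5, while verify_version_str('4.4.2', [(4, 5), (4, 4, 3)]) is
# False since neither requirement is met; a suffix such as '5.3_rc1' is
# stripped to '5.3' before comparison.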


def normalize_value(expected_value, value, key):
    if value is None:
        return None
    if (isinstance(expected_value, bool) and
            isinstance(value, (str, unicode))):
        return value == 'true'
    if isinstance(expected_value, int):
        try:
            return int(value)
        except (TypeError, ValueError):
            raise ModuleFailException(
                'returned value {0} for {1} is not a valid integer'.format(
                    value, key))
    if isinstance(expected_value, float):
        try:
            return float(value)
        except (TypeError, ValueError):
            raise ModuleFailException(
                'returned value {0} for {1} is not a valid float'.format(
                    value, key))
    if isinstance(expected_value, (list, tuple)):
        if not isinstance(value, (list, tuple)):
            raise ModuleFailException(
                'returned value {0} for {1} is not a list'.format(value, key))
        if len(expected_value) != len(value):
            raise ModuleFailException(
                'list length mismatch for {0}'.format(key))

        normalized_value = []
        for i in range(len(expected_value)):
            normalized_value.append(
                normalize_value(expected_value[i], value[i], '{0}[{1}]'.format(key, i)))
        return normalized_value

    if isinstance(expected_value, dict):
        if not isinstance(value, dict):
            raise ModuleFailException(
                'returned value {0} for {1} is not a dict'.format(value, key))
        if len(expected_value) != len(value):
            raise ModuleFailException(
                'dict length mismatch for {0}'.format(key))

        normalized_value = {}
        for k in expected_value.keys():
            n_k = normalize_value(k, k, '{0}[{1}]'.format(key, k))
            if n_k not in value:
                raise ModuleFailException('missing {0} in value'.format(n_k))
            normalized_value[n_k] = normalize_value(expected_value[k], value[k], '{0}[{1}]'.format(key, k))
        return normalized_value

    if HAVE_UNICODE:
        if isinstance(expected_value, unicode) and isinstance(value, str):
            return value.decode('utf-8')
        if isinstance(expected_value, str) and isinstance(value, unicode):
            return value.encode('utf-8')
    else:
        if hasattr(expected_value, 'encode') and hasattr(value, 'decode'):
            return value.decode('utf-8')
        if hasattr(expected_value, 'decode') and hasattr(value, 'encode'):
            return value.encode('utf-8')

    return value
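
# Illustrative examples (hypothetical leaves, not part of the original
# module): normalize_value(True, 'true', 'enabled') -> True,
# normalize_value(830, '830', 'port') -> 830, and
# normalize_value([1, 2], ['1', '2'], 'ids') -> [1, 2]; a non-numeric string
# for an int leaf raises ModuleFailException.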