# (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    cache: redis
    short_description: Use Redis DB for cache
    description:
        - This cache uses JSON formatted, per host records saved in Redis.
    version_added: "1.9"
    requirements:
      - redis (python lib)
    options:
      _uri:
        description:
          - A colon separated string of connection information for Redis.
        required: True
        env:
          - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
        ini:
          - key: fact_caching_connection
            section: defaults
      _prefix:
        description: User defined prefix to use when creating the DB entries
        env:
          - name: ANSIBLE_CACHE_PLUGIN_PREFIX
        ini:
          - key: fact_caching_prefix
            section: defaults
      _timeout:
        default: 86400
        description: Expiration timeout for the cache plugin data
        env:
          - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
        ini:
          - key: fact_caching_timeout
            section: defaults
        type: integer
'''

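# A minimal ansible.cfg sketch showing how the ini keys documented above are
# typically set to enable this plugin; the concrete values (localhost:6379:0,
# ansible_facts) are illustrative assumptions, not requirements:
#
#   [defaults]
#   fact_caching = redis
#   fact_caching_connection = localhost:6379:0
#   fact_caching_prefix = ansible_facts
#   fact_caching_timeout = 86400
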
import time
import json

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule

try:
    from redis import StrictRedis
except ImportError:
    raise AnsibleError("The 'redis' python module is required for the redis fact cache, 'pip install redis'")


class CacheModule(BaseCacheModule):
    """
    A caching module backed by redis.

    Keys are maintained in a zset with their score being the timestamp
    when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used instead of a pattern matched
    'scan' for performance.
    """
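    # Illustrative sketch of the bookkeeping described above (key names follow
    # self._prefix and self._keys_set below): set('host1', facts) issues roughly
    #     SETEX <prefix>host1 <timeout> '<json facts>'
    #     ZADD ansible_cache_keys <now> host1
    # while keys() and contains() first run
    #     ZREMRANGEBYSCORE ansible_cache_keys 0 <now - timeout>
    # so members older than the timeout fall out of the index without a SCAN.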
    def __init__(self, *args, **kwargs):
        if C.CACHE_PLUGIN_CONNECTION:
            # the connection string is split on ':' and handed to StrictRedis
            # positionally (host, port, db, ...)
            connection = C.CACHE_PLUGIN_CONNECTION.split(':')
        else:
            connection = []

        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = {}
        self._db = StrictRedis(*connection)
        self._keys_set = 'ansible_cache_keys'

    def _make_key(self, key):
        return self._prefix + key

    def get(self, key):

        if key not in self._cache:
            value = self._db.get(self._make_key(key))
            # guard against the key not being removed from the zset;
            # this could happen in cases where the timeout value is changed
            # between invocations
            if value is None:
                self.delete(key)
                raise KeyError
            self._cache[key] = json.loads(value)

        return self._cache.get(key)

    def set(self, key, value):

        value2 = json.dumps(value)
        if self._timeout > 0:  # a timeout of 0 is handled as meaning 'never expire'
            self._db.setex(self._make_key(key), int(self._timeout), value2)
        else:
            self._db.set(self._make_key(key), value2)

        # record the insertion time in the key index (redis-py 2.x style
        # zadd(name, score, member) arguments)
        self._db.zadd(self._keys_set, time.time(), key)
        self._cache[key] = value

    def _expire_keys(self):
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._db.zremrangebyscore(self._keys_set, 0, expiry_age)

    def keys(self):
        self._expire_keys()
        return self._db.zrange(self._keys_set, 0, -1)

    def contains(self, key):
        self._expire_keys()
        return (self._db.zrank(self._keys_set, key) is not None)

    def delete(self, key):
        # drop the in-memory copy if present, then remove the redis entry and
        # its member in the key index
        try:
            del self._cache[key]
        except KeyError:
            pass
        self._db.delete(self._make_key(key))
        self._db.zrem(self._keys_set, key)

    def flush(self):
        for key in self.keys():
            self.delete(key)

    def copy(self):
        # TODO: there is probably a better way to do this in redis
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret

    def __getstate__(self):
        return dict()

    def __setstate__(self, data):
        self.__init__()
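
# A minimal usage sketch (hypothetical; assumes the cache plugin options above
# are configured and a Redis server is reachable), exercising the
# BaseCacheModule interface implemented here:
#
#   cache = CacheModule()
#   cache.set('host1', {'ansible_os_family': 'Debian'})
#   cache.contains('host1')   # True
#   cache.get('host1')        # {'ansible_os_family': 'Debian'}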