made subcategories for cloud modules for better organization

Brian Coca 2014-11-04 11:18:09 -05:00 committed by Matt Clay
commit 32e85c0944
68 changed files with 0 additions and 0 deletions

@@ -0,0 +1,420 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gc_storage
version_added: "1.4"
short_description: This module manages objects/buckets in Google Cloud Storage.
description:
- This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project.
options:
bucket:
description:
- Bucket name.
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
required: false
default: null
aliases: []
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
force:
description:
- Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
aliases: [ 'overwrite' ]
permission:
description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', and 'authenticated-read'.
required: false
default: private
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only available when public-read is the ACL for the object.
required: false
default: null
aliases: []
mode:
description:
- Switches the module behaviour between upload, download, get_url (return download URL), get_str (download object as string), create (bucket) and delete (bucket).
required: true
default: null
aliases: []
choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
gcs_secret_key:
description:
- GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used.
required: true
default: null
gcs_access_key:
description:
- GCS access key. If not set then the value of the GCS_ACCESS_KEY environment variable is used.
required: true
default: null
requirements: [ "boto 2.9+" ]
author: benno@ansible.com. Note: most of the code has been taken from the S3 module.
'''
EXAMPLES = '''
# upload some content
- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read
# download some content
- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get
# Download an object as a string to use else where in your playbook
- gc_storage: bucket=mybucket object=key.txt mode=get_str
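# Retrieve a time-limited download URL for an object (illustrative example; expiration is in seconds)
- gc_storage: bucket=mybucket object=key.txt mode=get_url expiration=600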
# Create an empty bucket
- gc_storage: bucket=mybucket mode=create
# Create a bucket with key as directory
- gc_storage: bucket=mybucket object=/my/directory/path mode=create
# Delete a bucket and all contents
- gc_storage: bucket=mybucket mode=delete
'''
import sys
import os
import urlparse
import hashlib
try:
import boto
except ImportError:
print "failed=True msg='boto 2.9+ required for this module'"
sys.exit(1)
def grant_check(module, gs, obj):
try:
acp = obj.get_acl()
if module.params.get('permission') == 'public-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
return True
def key_check(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if key_check:
grant_check(module, gs, key_check)
return True
else:
return False
def keysum(module, gs, bucket, obj):
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, gs, bucket):
try:
result = gs.lookup(bucket)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
grant_check(module, gs, result)
return True
else:
return False
def create_bucket(module, gs, bucket):
try:
bucket = gs.create_bucket(bucket)
bucket.set_acl(module.params.get('permission'))
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def delete_bucket(module, gs, bucket):
try:
bucket = gs.lookup(bucket)
bucket_contents = bucket.list()
for key in bucket_contents:
bucket.delete_key(key.name)
bucket.delete()
return True
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def upload_file_check(module, src):
if os.path.exists(src):
file_exists = True
else:
file_exists = False
if os.path.isdir(src):
module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
return file_exists
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_gsfile(module, gs, bucket, obj, src, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_filename(src)
key.set_acl(module.params.get('permission'))
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_gsfile(module, gs, bucket, obj, dest):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_gsstr(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, gs, bucket, obj, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def handle_get(module, gs, bucket, obj, overwrite, dest):
md5_remote = keysum(module, gs, bucket, obj)
md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
if md5_local == md5_remote:
module.exit_json(changed=False)
if md5_local != md5_remote and not overwrite:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
else:
download_gsfile(module, gs, bucket, obj, dest)
def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# Lets check to see if bucket exists to get ground truth.
bucket_rc = bucket_check(module, gs, bucket)
key_rc = key_check(module, gs, bucket, obj)
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucket_rc and key_rc:
md5_remote = keysum(module, gs, bucket, obj)
md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
if md5_local == md5_remote:
module.exit_json(msg="Local and remote object are identical", changed=False)
if md5_local != md5_remote and not overwrite:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
else:
upload_gsfile(module, gs, bucket, obj, src, expiration)
if not bucket_rc:
create_bucket(module, gs, bucket)
upload_gsfile(module, gs, bucket, obj, src, expiration)
# If bucket exists but key doesn't, just upload.
if bucket_rc and not key_rc:
upload_gsfile(module, gs, bucket, obj, src, expiration)
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket))
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
if bucket and obj:
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, obj):
module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
else:
module.exit_json(msg="Object does not exists.", changed=False)
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
def handle_create(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
if bucket and obj:
if bucket_check(module, gs, bucket):
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, gs, bucket, dirobj)
else:
create_bucket(module, gs, bucket)
create_dirkey(module, gs, bucket, obj if obj.endswith('/') else obj + "/")
def main():
module = AnsibleModule(
argument_spec = dict(
bucket = dict(required=True),
object = dict(default=None),
src = dict(default=None),
dest = dict(default=None),
expiration = dict(default=600, aliases=['expiry']),
mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
gs_secret_key = dict(no_log=True, required=True),
gs_access_key = dict(required=True),
overwrite = dict(default=True, type='bool', aliases=['force']),
),
)
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
dest = module.params.get('dest')
if dest:
dest = os.path.expanduser(dest)
mode = module.params.get('mode')
expiry = module.params.get('expiration')
gs_secret_key = module.params.get('gs_secret_key')
gs_access_key = module.params.get('gs_access_key')
overwrite = module.params.get('overwrite')
if mode == 'put':
if not src or not obj:
module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
if mode == 'get':
if not dest or not obj:
module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
if obj:
obj = os.path.expanduser(module.params['object'])
try:
gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
module.fail_json(msg="Target bucket/key cannot be found", failed=True)
if not path_check(dest):
download_gsfile(module, gs, bucket, obj, dest)
else:
handle_get(module, gs, bucket, obj, overwrite, dest)
if mode == 'put':
if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist", failed=True)
handle_put(module, gs, bucket, obj, overwrite, src, expiry)
# Support for deleting an object if we have both params.
if mode == 'delete':
handle_delete(module, gs, bucket, obj)
if mode == 'create':
handle_create(module, gs, bucket, obj)
if mode == 'get_url':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
get_download_url(module, gs, bucket, obj, expiry)
else:
module.fail_json(msg="Key/Bucket does not exist", failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
# --------------------------- Get the String contents of an Object -------------------------
if mode == 'get_str':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
download_gsstr(module, gs, bucket, obj)
else:
module.fail_json(msg="Key/Bucket does not exist", failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
# import module snippets
from ansible.module_utils.basic import *
main()

@@ -0,0 +1,474 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/products/compute-engine) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance
required: false
default: "debian-7"
aliases: []
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
aliases: []
machine_type:
description:
- machine type to use for the instance; 'n1-standard-1' is used by default
required: false
default: "n1-standard-1"
aliases: []
metadata:
description:
- a hash/dictionary of custom data for the instance; '{"key":"value",...}'
required: false
default: null
aliases: []
service_account_email:
version_added: 1.5.1
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: 1.5.1
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: 1.5.1
description:
- your GCE project ID
required: false
default: null
aliases: []
name:
description:
- identifier when working with a single instance
required: false
aliases: []
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
aliases: []
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
aliases: []
disks:
description:
- a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE).
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
aliases: []
zone:
description:
- the GCE zone to use
required: true
default: "us-central1-a"
aliases: []
requirements: [ "libcloud" ]
notes:
- Either I(name) or I(instance_names) is required.
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 7 instance in the
# us-central1-a zone with the n1-standard-1 machine type.
- local_action:
module: gce
name: test-instance
zone: us-central1-a
machine_type: n1-standard-1
image: debian-7
# Example using defaults and with metadata to create a single 'foo' instance
- local_action:
module: gce
name: foo
metadata: '{"db":"postgres", "group":"qa", "id":500}'
# Launch instances from a control node, run some tasks on the new instances,
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
vars:
names: foo,bar
machine_type: n1-standard-1
image: debian-6
zone: us-central1-a
service_account_email: unique-email@developer.gserviceaccount.com
pem_file: /path/to/pem_file
project_id: project-id
tasks:
- name: Launch instances
local_action: gce instance_names={{names}} machine_type={{machine_type}}
image={{image}} zone={{zone}} service_account_email={{ service_account_email }}
pem_file={{ pem_file }} project_id={{ project_id }}
register: gce
- name: Wait for SSH to come up
local_action: wait_for host={{item.public_ip}} port=22 delay=10
timeout=60 state=started
with_items: '{{ gce.instance_data }}'
- name: Configure instance(s)
hosts: launched
sudo: True
roles:
- my_awesome_role
- my_awesome_tasks
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: gce
state: 'absent'
instance_names: '{{ gce.instance_names }}'
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
try:
from ast import literal_eval
except ImportError:
print("failed=True " + \
"msg='GCE module requires python's 'ast' module, python v2.6+'")
sys.exit(1)
def get_instance_info(inst):
"""Retrieves instance information from an instance object and returns it
as a dictionary.
"""
metadata = {}
if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
for md in inst.extra['metadata']['items']:
metadata[md['key']] = md['value']
try:
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
except:
netname = None
if 'disks' in inst.extra:
disk_names = [disk_info['source'].split('/')[-1]
for disk_info
in sorted(inst.extra['disks'],
key=lambda disk_info: disk_info['index'])]
else:
disk_names = []
return({
'image': inst.image is not None and inst.image.split('/')[-1] or None,
'disks': disk_names,
'machine_type': inst.size,
'metadata': metadata,
'name': inst.name,
'network': netname,
'private_ip': inst.private_ips[0],
'public_ip': inst.public_ips[0],
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
def create_instances(module, gce, instance_names):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
module : AnsibleModule object
gce: authenticated GCE libcloud driver
instance_names: python list of instance names to create
Returns:
A list of dictionaries with instance information
about the instances that were launched.
"""
image = module.params.get('image')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
disks = module.params.get('disks')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
new_instances = []
changed = False
lc_image = gce.ex_get_image(image)
lc_disks = []
disk_modes = []
for i, disk in enumerate(disks or []):
if isinstance(disk, dict):
lc_disks.append(gce.ex_get_volume(disk['name']))
disk_modes.append(disk['mode'])
else:
lc_disks.append(gce.ex_get_volume(disk))
# boot disk is implicitly READ_WRITE
disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
lc_network = gce.ex_get_network(network)
lc_machine_type = gce.ex_get_size(machine_type)
lc_zone = gce.ex_get_zone(zone)
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
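# For example (illustrative): metadata='{"db":"postgres", "group":"qa"}' becomes
# {'items': [{'key': 'db', 'value': 'postgres'}, {'key': 'group', 'value': 'qa'}]}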
if metadata:
try:
md = literal_eval(metadata)
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError, e:
print("failed=True msg='bad metadata: %s'" % str(e))
sys.exit(1)
except SyntaxError, e:
print("failed=True msg='bad metadata syntax'")
sys.exit(1)
items = []
for k,v in md.items():
items.append({"key": k,"value": v})
metadata = {'items': items}
# These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
for name in instance_names:
pd = None
if lc_disks:
pd = lc_disks[0]
elif persistent_boot_disk:
try:
pd = gce.create_volume(None, "%s" % name, image=lc_image)
except ResourceExistsError:
pd = gce.ex_get_volume("%s" % name, lc_zone)
inst = None
try:
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
except GoogleBaseError, e:
module.fail_json(msg='Unexpected error attempting to create ' + \
'instance %s, error: %s' % (name, e.value))
for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached
if (len(inst.extra['disks']) > i):
attached_disk = inst.extra['disks'][i]
if attached_disk['source'] != lc_disk.extra['selfLink']:
module.fail_json(
msg=("Disk at index %d does not match: requested=%s found=%s" % (
i, lc_disk.extra['selfLink'], attached_disk['source'])))
elif attached_disk['mode'] != disk_modes[i]:
module.fail_json(
msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
i, disk_modes[i], attached_disk['mode'])))
else:
continue
gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
# Work around libcloud bug: attached volumes don't get added
# to the instance metadata. get_instance_info() only cares about
# source and index.
if len(inst.extra['disks']) != i+1:
inst.extra['disks'].append(
{'source': lc_disk.extra['selfLink'], 'index': i})
if inst:
new_instances.append(inst)
instance_names = []
instance_json_data = []
for inst in new_instances:
d = get_instance_info(inst)
instance_names.append(d['name'])
instance_json_data.append(d)
return (changed, instance_json_data, instance_names)
def terminate_instances(module, gce, instance_names, zone_name):
"""Terminates a list of instances.
module: Ansible module object
gce: authenticated GCE connection object
instance_names: a list of instance names to terminate
zone_name: the zone where the instances reside prior to termination
Returns a dictionary of instance names that were terminated.
"""
changed = False
terminated_instance_names = []
for name in instance_names:
inst = None
try:
inst = gce.ex_get_node(name, zone_name)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if inst:
gce.destroy_node(inst)
terminated_instance_names.append(inst.name)
changed = True
return (changed, terminated_instance_names)
def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(default='debian-7'),
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
name = dict(),
network = dict(default='default'),
persistent_boot_disk = dict(type='bool', default=False),
disks = dict(type='list'),
state = dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags = dict(type='list'),
zone = dict(default='us-central1-a'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
image = module.params.get('image')
instance_names = module.params.get('instance_names')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
name = module.params.get('name')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
changed = False
inames = []
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
inames.append(name)
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)
json_output = {'zone': zone}
if state in ['absent', 'deleted']:
json_output['state'] = 'absent'
(changed, terminated_instance_names) = terminate_instances(module,
gce, inames, zone)
# based on what user specified, return the same variable, although
# value could be different if an instance could not be destroyed
if instance_names:
json_output['instance_names'] = terminated_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data,instance_name_list) = create_instances(
module, gce, inames)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@@ -0,0 +1,335 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_lb
version_added: "1.5"
short_description: create/destroy GCE load-balancer resources
description:
- This module can create and destroy Google Compute Engine C(loadbalancer)
and C(httphealthcheck) resources. The primary LB resource is the
C(load_balancer) resource and the health check parameters are all
prefixed with I(httphealthcheck).
The full documentation for Google Compute Engine load balancing is at
U(https://developers.google.com/compute/docs/load-balancing/). However,
the ansible module simplifies the configuration by following the
libcloud model.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
httphealthcheck_name:
description:
- the name identifier for the HTTP health check
required: false
default: null
httphealthcheck_port:
description:
- the TCP port to use for HTTP health checking
required: false
default: 80
httphealthcheck_path:
description:
- the url path to use for HTTP health checking
required: false
default: "/"
httphealthcheck_interval:
description:
- the duration in seconds between each health check request
required: false
default: 5
httphealthcheck_timeout:
description:
- the timeout in seconds before a request is considered a failed check
required: false
default: 5
httphealthcheck_unhealthy_count:
description:
- number of consecutive failed checks before marking a node unhealthy
required: false
default: 2
httphealthcheck_healthy_count:
description:
- number of consecutive successful checks before marking a node healthy
required: false
default: 2
httphealthcheck_host:
description:
- host header to pass through on HTTP check requests
required: false
default: null
name:
description:
- name of the load-balancer resource
required: false
default: null
protocol:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
- the GCE region where the load-balancer is defined
required: false
external_ip:
description:
- the external static IPv4 (or auto-assigned) address for the LB
required: false
default: null
port_range:
description:
- the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
required: false
default: null
members:
description:
- a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
required: false
aliases: ['nodes']
state:
description:
- desired state of the LB
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1
members: ["us-central1-a/www-a", "us-central1-b/www-b"]
httphealthcheck_name: hc
httphealthcheck_port: 80
httphealthcheck_path: "/up"
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
def main():
module = AnsibleModule(
argument_spec = dict(
httphealthcheck_name = dict(),
httphealthcheck_port = dict(default=80),
httphealthcheck_path = dict(default='/'),
httphealthcheck_interval = dict(default=5),
httphealthcheck_timeout = dict(default=5),
httphealthcheck_unhealthy_count = dict(default=2),
httphealthcheck_healthy_count = dict(default=2),
httphealthcheck_host = dict(),
name = dict(),
protocol = dict(default='tcp'),
region = dict(),
external_ip = dict(),
port_range = dict(),
members = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
httphealthcheck_name = module.params.get('httphealthcheck_name')
httphealthcheck_port = module.params.get('httphealthcheck_port')
httphealthcheck_path = module.params.get('httphealthcheck_path')
httphealthcheck_interval = module.params.get('httphealthcheck_interval')
httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
httphealthcheck_unhealthy_count = \
module.params.get('httphealthcheck_unhealthy_count')
httphealthcheck_healthy_count = \
module.params.get('httphealthcheck_healthy_count')
httphealthcheck_host = module.params.get('httphealthcheck_host')
name = module.params.get('name')
protocol = module.params.get('protocol')
region = module.params.get('region')
external_ip = module.params.get('external_ip')
port_range = module.params.get('port_range')
members = module.params.get('members')
state = module.params.get('state')
try:
gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
gcelb.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = False
json_output = {'name': name, 'state': state}
if not name and not httphealthcheck_name:
module.fail_json(msg='Nothing to do, please specify a "name" ' + \
'or "httphealthcheck_name" parameter', changed=False)
if state in ['active', 'present']:
# first, create the httphealthcheck if requested
hc = None
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
host=httphealthcheck_host, path=httphealthcheck_path,
port=httphealthcheck_port,
interval=httphealthcheck_interval,
timeout=httphealthcheck_timeout,
unhealthy_threshold=httphealthcheck_unhealthy_count,
healthy_threshold=httphealthcheck_healthy_count)
changed = True
except ResourceExistsError:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if hc is not None:
json_output['httphealthcheck_host'] = hc.extra['host']
json_output['httphealthcheck_path'] = hc.path
json_output['httphealthcheck_port'] = hc.port
json_output['httphealthcheck_interval'] = hc.interval
json_output['httphealthcheck_timeout'] = hc.timeout
json_output['httphealthcheck_unhealthy_count'] = \
hc.unhealthy_threshold
json_output['httphealthcheck_healthy_count'] = \
hc.healthy_threshold
# create the forwarding rule (and target pool under the hood)
lb = None
if name:
if not region:
module.fail_json(msg='Missing required region name',
changed=False)
nodes = []
output_nodes = []
json_output['name'] = name
# members is a python list of 'zone/inst' strings
if members:
for node in members:
try:
zone, node_name = node.split('/')
nodes.append(gce.ex_get_node(node_name, zone))
output_nodes.append(node)
except:
# skip nodes that are badly formatted or don't exist
pass
try:
if hc is not None:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_healthchecks=[hc],
ex_address=external_ip)
else:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_address=external_ip)
changed = True
except ResourceExistsError:
lb = gcelb.get_balancer(name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if lb is not None:
json_output['members'] = output_nodes
json_output['protocol'] = protocol
json_output['region'] = region
json_output['external_ip'] = lb.ip
json_output['port_range'] = lb.port
hc_names = []
if 'healthchecks' in lb.extra:
for hc in lb.extra['healthchecks']:
hc_names.append(hc.name)
json_output['httphealthchecks'] = hc_names
if state in ['absent', 'deleted']:
# first, delete the load balancer (forwarding rule and target pool)
# if specified.
if name:
json_output['name'] = name
try:
lb = gcelb.get_balancer(name)
gcelb.destroy_balancer(lb)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# destroy the health check if specified
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
gce.ex_destroy_healthcheck(hc)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@@ -0,0 +1,271 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800')
required: false
default: null
aliases: []
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
required: false
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
required: false
default: null
aliases: ['fwrule']
name:
description:
- name of the network
required: false
default: null
aliases: []
src_range:
description:
- the source IPv4 address range in CIDR notation
required: false
default: null
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the network or firewall rule
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
module: gce_net
name: privatenet
fwname: "allow-web"
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
def format_allowed(allowed):
"""Format the 'allowed' value so that it is GCE compatible."""
if allowed.count(":") == 0:
protocol = allowed
ports = []
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
else:
ports = [ports]
return_val = {"IPProtocol": protocol}
if ports:
return_val["ports"] = ports
return [return_val]
def main():
module = AnsibleModule(
argument_spec = dict(
allowed = dict(),
ipv4_range = dict(),
fwname = dict(),
name = dict(),
src_range = dict(),
src_tags = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
allowed = module.params.get('allowed')
ipv4_range = module.params.get('ipv4_range')
fwname = module.params.get('fwname')
name = module.params.get('name')
src_range = module.params.get('src_range')
src_tags = module.params.get('src_tags')
state = module.params.get('state')
changed = False
json_output = {'state': state}
if state in ['active', 'present']:
network = None
try:
network = gce.ex_get_network(name)
json_output['name'] = name
json_output['ipv4_range'] = network.cidr
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants to create a new network that doesn't yet exist
if name and not network:
if not ipv4_range:
module.fail_json(msg="Missing required 'ipv4_range' parameter",
changed=False)
try:
network = gce.ex_create_network(name, ipv4_range)
json_output['name'] = name
json_output['ipv4_range'] = ipv4_range
changed = True
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fwname:
# user creating a firewall rule
if not allowed and not src_range and not src_tags:
if changed and network:
module.fail_json(
msg="Network created, but missing required " + \
"firewall rule parameter(s)", changed=True)
module.fail_json(
msg="Missing required firewall rule parameter(s)",
changed=False)
allowed_list = format_allowed(allowed)
try:
gce.ex_create_firewall(fwname, allowed_list, network=name,
source_ranges=src_range, source_tags=src_tags)
changed = True
except ResourceExistsError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['fwname'] = fwname
json_output['allowed'] = allowed
json_output['src_range'] = src_range
json_output['src_tags'] = src_tags
if state in ['absent', 'deleted']:
if fwname:
json_output['fwname'] = fwname
fw = None
try:
fw = gce.ex_get_firewall(fwname)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fw:
gce.ex_destroy_firewall(fw)
changed = True
if name:
json_output['name'] = name
network = None
try:
network = gce.ex_get_network(name)
# json_output['d1'] = 'found network name %s' % name
except ResourceNotFoundError:
# json_output['d2'] = 'not found network name %s' % name
pass
except Exception, e:
# json_output['d3'] = 'error with %s' % name
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if network:
# json_output['d4'] = 'deleting %s' % name
gce.ex_destroy_network(network)
# json_output['d5'] = 'deleted %s' % name
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@@ -0,0 +1,285 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
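# Illustrative example: detach the disk from the instance without destroying it
- local_action:
module: gce_pd
instance_name: notlocalhost
name: pd
detach_only: yes
state: absent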
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support is required for this module.'")
sys.exit(1)
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError, e:
module.fail_json(msg=str(e.value), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()