aws_eks_cluster: New module for managing AWS EKS (#41183)
* aws_eks: New module for managing AWS EKS. The aws_eks module is used for creating and removing EKS clusters. Includes a full test suite and updates to IAM policies to enable it.
* Clean up all security groups
* appease shippable
* Rename aws_eks module to aws_eks_cluster
parent 2c2af87b2e
commit b235cb8734

11 changed files with 529 additions and 1 deletion
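For orientation before the diff: a minimal sketch of the new module's interface, using only the parameters exercised by the tests in this commit (the cluster name, role ARN, subnet and security group IDs are placeholders, not values from the commit):

- name: create an EKS cluster (illustrative sketch)
  aws_eks_cluster:
    name: example-cluster
    role_arn: arn:aws:iam::123456789012:role/eks-service-role  # placeholder
    subnets:
      - subnet-aaaa1111  # placeholder subnet IDs
      - subnet-bbbb2222
    security_groups:
      - sg-cccc3333      # placeholder security group ID
    state: present
  register: eks

- name: remove it again
  aws_eks_cluster:
    name: example-cluster
    state: absent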
3  test/integration/targets/aws_eks/aliases  Normal file
@@ -0,0 +1,3 @@
cloud/aws
unsupported
aws_eks_cluster
5  test/integration/targets/aws_eks/playbooks/full_test.yml  Normal file
@@ -0,0 +1,5 @@
- hosts: localhost
  connection: local

  roles:
    - aws_eks
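full_test.yml simply applies the aws_eks role against localhost. As a hypothetical variation (not part of this commit), role variables such as eks_cluster_name, which defaults/main.yml below derives from resource_prefix, could be overridden at the point of inclusion:

- hosts: localhost
  connection: local

  roles:
    - role: aws_eks
      eks_cluster_name: my-test-cluster  # hypothetical override for illustration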
16  test/integration/targets/aws_eks/playbooks/old_version.yml  Normal file
@@ -0,0 +1,16 @@
- hosts: localhost
  connection: local

  tasks:
    - name: try and use aws_eks_cluster module
      aws_eks_cluster:
        state: absent
        name: my_cluster
      ignore_errors: yes
      register: aws_eks_cluster

    - name: ensure that aws_eks fails with friendly error message
      assert:
        that:
          - '"msg" in aws_eks_cluster'
          - aws_eks_cluster is failed
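This playbook is run by runme.sh below inside a virtualenv that pins botocore below 1.10.0, so the expectation is that the module fails gracefully with a friendly msg rather than a traceback.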
33  test/integration/targets/aws_eks/defaults/main.yml  Normal file
@@ -0,0 +1,33 @@
eks_cluster_name: "{{ resource_prefix }}"
eks_subnets:
  - zone: a
    cidr: 10.0.1.0/24
  - zone: b
    cidr: 10.0.2.0/24
  - zone: c
    cidr: 10.0.3.0/24

eks_security_groups:
  - name: "{{ eks_cluster_name }}-control-plane-sg"
    description: "EKS Control Plane Security Group"
    rules:
      - group_name: "{{ eks_cluster_name }}-workers-sg"
        group_desc: "EKS Worker Security Group"
        ports: 443
        proto: tcp
    rules_egress:
      - group_name: "{{ eks_cluster_name }}-workers-sg"
        group_desc: "EKS Worker Security Group"
        from_port: 1025
        to_port: 65535
        proto: tcp
  - name: "{{ eks_cluster_name }}-worker-sg"
    description: "EKS Worker Security Group"
    rules:
      - group_name: "{{ eks_cluster_name }}-workers-sg"
        proto: tcp
        from_port: 1
        to_port: 65535
      - group_name: "{{ eks_cluster_name }}-control-plane-sg"
        ports: 10250
        proto: tcp
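Note that the rules in both groups refer to "{{ eks_cluster_name }}-workers-sg" by group_name even though the group defined here is "{{ eks_cluster_name }}-worker-sg" (singular); ec2_group creates a group named in a rule if it does not yet exist, which is why the teardown in tasks/main.yml below appends "-workers-sg" (as additional_eks_sg) to the list of groups to remove.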
12  test/integration/targets/aws_eks/files/eks-trust-policy.json  Normal file
@@ -0,0 +1,12 @@
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "eks.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}
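The policy above allows the EKS service principal (eks.amazonaws.com) to assume the role; tasks/main.yml below passes it to iam_role as the assume_role_policy_document via lookup('file', 'eks-trust-policy.json').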
1  test/integration/targets/aws_eks/meta/main.yml  Normal file
@@ -0,0 +1 @@
dependencies: []
182  test/integration/targets/aws_eks/tasks/main.yml  Normal file
@@ -0,0 +1,182 @@
---
# tasks file for aws_eks modules

- block:
    # FIXME: ap-south-1 only has two AZs, ap-south-1a and ap-south-1b
    # That makes it my best guess as to it being among the last to support EKS
    # If it does become supported, change this test to use an unsupported region
    # or if all regions are supported, delete this test
    - name: attempt to use eks in unsupported region
      aws_eks_cluster:
        name: "{{ eks_cluster_name }}"
        state: absent
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
        security_token: "{{ security_token }}"
        region: ap-south-1
      register: aws_eks_unsupported_region
      ignore_errors: yes

    - name: check that aws_eks_cluster did nothing
      assert:
        that:
          - aws_eks_unsupported_region is failed
          - '"msg" in aws_eks_unsupported_region'

    - name: set up aws connection info
      set_fact:
        aws_connection_info: &aws_connection_info
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token }}"
          region: "{{ aws_region }}"
      no_log: yes

    - name: delete an as yet non-existent EKS cluster
      aws_eks_cluster:
        name: "{{ eks_cluster_name }}"
        state: absent
        <<: *aws_connection_info
      register: aws_eks_delete_non_existent

    - name: check that aws_eks_cluster did nothing
      assert:
        that:
          - aws_eks_delete_non_existent is not changed

    - name: ensure IAM instance role exists
      iam_role:
        name: aws_eks_cluster_role
        assume_role_policy_document: "{{ lookup('file','eks-trust-policy.json') }}"
        state: present
        create_instance_profile: no
        managed_policies:
          - AmazonEKSServicePolicy
          - AmazonEKSClusterPolicy
        <<: *aws_connection_info
      register: iam_role

    - name: create a VPC to work in
      ec2_vpc_net:
        cidr_block: 10.0.0.0/16
        state: present
        name: '{{ resource_prefix }}_aws_eks'
        resource_tags:
          Name: '{{ resource_prefix }}_aws_eks'
        <<: *aws_connection_info
      register: setup_vpc

    - name: create subnets
      ec2_vpc_subnet:
        az: '{{ aws_region }}{{ item.zone }}'
        tags:
          Name: '{{ resource_prefix }}_aws_eks-subnet-{{ item.zone }}'
        vpc_id: '{{ setup_vpc.vpc.id }}'
        cidr: "{{ item.cidr }}"
        state: present
        <<: *aws_connection_info
      register: setup_subnets
      with_items:
        - "{{ eks_subnets }}"

    - name: create security groups to use for EKS
      ec2_group:
        name: "{{ item.name }}"
        description: "{{ item.description }}"
        state: present
        rules: "{{ item.rules }}"
        rules_egress: "{{ item.rules_egress|default(omit) }}"
        vpc_id: '{{ setup_vpc.vpc.id }}'
        <<: *aws_connection_info
      with_items: "{{ eks_security_groups }}"
      register: setup_security_groups

    - name: create EKS cluster
      aws_eks_cluster:
        name: "{{ eks_cluster_name }}"
        security_groups: "{{ eks_security_groups | json_query('[].name') }}"
        subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
        role_arn: "{{ iam_role.arn }}"
        <<: *aws_connection_info
      register: eks_create

    - name: check that EKS cluster was created
      assert:
        that:
          - eks_create is changed
          - eks_create.name == eks_cluster_name

    - name: create EKS cluster with same details but using SG ids
      aws_eks_cluster:
        name: "{{ eks_cluster_name }}"
        security_groups: "{{ setup_security_groups.results | json_query('[].group_id') }}"
        subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
        role_arn: "{{ iam_role.arn }}"
        <<: *aws_connection_info
      register: eks_create

    - name: check that EKS cluster did not change
      assert:
        that:
          - eks_create is not changed
          - eks_create.name == eks_cluster_name

    - name: remove EKS cluster
      aws_eks_cluster:
        name: "{{ eks_cluster_name }}"
        state: absent
        <<: *aws_connection_info
      register: eks_delete

    - name: check that EKS cluster was removed
      assert:
        that:
          - eks_delete is changed

  always:
    - name: Announce teardown start
      debug:
        msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"

    - name: remove EKS cluster
      aws_eks_cluster:
        name: "{{ eks_cluster_name }}"
        state: absent
        <<: *aws_connection_info
      register: eks_delete
      ignore_errors: yes

    - debug:
        msg: "{{ eks_security_groups|reverse|list }}"

    - name: create list of all additional EKS security groups
      set_fact:
        additional_eks_sg:
          - name: "{{ eks_cluster_name }}-workers-sg"

    - name: remove security groups
      ec2_group:
        name: '{{ item.name }}'
        state: absent
        vpc_id: '{{ setup_vpc.vpc.id }}'
        <<: *aws_connection_info
      with_items: "{{ eks_security_groups|reverse|list + additional_eks_sg }}"
      ignore_errors: yes

    - name: remove setup subnet
      ec2_vpc_subnet:
        az: '{{ aws_region }}{{ item.zone }}'
        vpc_id: '{{ setup_vpc.vpc.id }}'
        cidr: "{{ item.cidr }}"
        state: absent
        <<: *aws_connection_info
      with_items: "{{ eks_subnets }}"
      ignore_errors: yes

    - name: remove setup VPC
      ec2_vpc_net:
        cidr_block: 10.0.0.0/16
        state: absent
        name: '{{ resource_prefix }}_aws_eks'
        <<: *aws_connection_info
      ignore_errors: yes
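A note on the pattern used throughout tasks/main.yml: the connection parameters are defined once under a YAML anchor (&aws_connection_info) and spliced into every later task with the YAML merge key (<<: *aws_connection_info), so the credentials appear only once and are covered by a single no_log. A minimal standalone sketch of just the anchor/merge mechanics (all values are placeholders):

- hosts: localhost
  gather_facts: no
  tasks:
    - name: define the shared parameters once under an anchor
      set_fact:
        aws_connection_info: &aws_connection_info
          region: us-east-1            # placeholder
          aws_access_key: AKIAEXAMPLE  # placeholder
      no_log: yes

    - name: any later mapping can splice those keys in with the merge key
      debug:
        msg: "region is {{ merged.region }}"
      vars:
        merged:
          <<: *aws_connection_info     # copies the anchored mapping's keys here at parse time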
25  test/integration/targets/aws_eks/runme.sh  Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

# We don't set -u here, due to pypa/virtualenv#150
set -ex

MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')

trap 'rm -rf "${MYTMPDIR}"' EXIT

# This is needed for the ubuntu1604py3 tests
# Ubuntu patches virtualenv to make python2 the default,
# but for the python3 tests we need virtualenv to use python3
PYTHON=${ANSIBLE_TEST_PYTHON_INTERPRETER:-python}

# Test graceful failure for older versions of botocore
virtualenv --system-site-packages --python "${PYTHON}" "${MYTMPDIR}/botocore-1.7.40"
source "${MYTMPDIR}/botocore-1.7.40/bin/activate"
$PYTHON -m pip install 'botocore<1.10.0' boto3
ansible-playbook -i ../../inventory -e @../../integration_config.yml -e @../../cloud-config-aws.yml -v playbooks/old_version.yml "$@"

# Run full test suite
virtualenv --system-site-packages --python "${PYTHON}" "${MYTMPDIR}/botocore-recent"
source "${MYTMPDIR}/botocore-recent/bin/activate"
$PYTHON -m pip install 'botocore>=1.10.1' boto3
ansible-playbook -i ../../inventory -e @../../integration_config.yml -e @../../cloud-config-aws.yml -v playbooks/full_test.yml "$@"