Diffstat (limited to 'playbooks/aws/openshift-cluster')
-rw-r--r--   playbooks/aws/openshift-cluster/config.yml                     36
-rw-r--r--   playbooks/aws/openshift-cluster/launch.yml                     74
-rw-r--r--   playbooks/aws/openshift-cluster/launch_instances.yml           63
-rw-r--r--   playbooks/aws/openshift-cluster/library/ec2_ami_find.py       302
-rw-r--r--   playbooks/aws/openshift-cluster/list.yml                       15
-rw-r--r--   playbooks/aws/openshift-cluster/tasks/launch_instances.yml    132
-rw-r--r--   playbooks/aws/openshift-cluster/templates/user_data.j2         29
-rw-r--r--   playbooks/aws/openshift-cluster/terminate.yml                  20
-rw-r--r--   playbooks/aws/openshift-cluster/update.yml                     25
-rw-r--r--   playbooks/aws/openshift-cluster/vars.defaults.yml               1
-rw-r--r--   playbooks/aws/openshift-cluster/vars.online.int.yml             9
-rw-r--r--   playbooks/aws/openshift-cluster/vars.online.prod.yml            9
-rw-r--r--   playbooks/aws/openshift-cluster/vars.online.stage.yml           9
-rw-r--r--   playbooks/aws/openshift-cluster/vars.yml                       37
14 files changed, 622 insertions(+), 139 deletions(-)
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
new file mode 100644
index 000000000..b8961704e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 3561c1803..3eb5496e4 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -4,59 +4,27 @@
   connection: local
   gather_facts: no
   vars_files:
-      - vars.yml
+  - vars.yml
+  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
   tasks:
-    - set_fact: k8s_type="master"
-
-    - name: Generate master instance names(s)
-      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
-      register: master_names_output
-      with_sequence: start=1 end={{ num_masters }}
-
-    # These set_fact's cannot be combined
-    - set_fact:
-        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
-    - set_fact:
-        master_names: "{{ master_names_string.strip().split(' ') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: "{{ master_names }}"
-        cluster: "{{ cluster_id }}"
-        type: "{{ k8s_type }}"
-
-    - set_fact: k8s_type="node"
-
-    - name: Generate node instance names(s)
-      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
-      register: node_names_output
-      with_sequence: start=1 end={{ num_nodes }}
-
-    # These set_fact's cannot be combined
-    - set_fact:
-        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
-    - set_fact:
-        node_names: "{{ node_names_string.strip().split(' ') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: "{{ node_names }}"
-        cluster: "{{ cluster_id }}"
-        type: "{{ k8s_type }}"
-
-- hosts: "tag_env_{{ cluster_id }}"
-  roles:
-  - openshift_repos
-  - os_update_latest
-
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+  - fail:
+      msg: Deployment type not supported for aws provider yet
+    when: deployment_type == 'enterprise'
+
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+- include: update.yml
 
 - include: list.yml
diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml
deleted file mode 100644
index 9d645fbe5..000000000
--- a/playbooks/aws/openshift-cluster/launch_instances.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- set_fact:
-    machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
-    machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
-    machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
-    machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
-    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
-    security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
-    env: "{{ cluster }}"
-    host_type: "{{ type }}"
-    env_host_type: "{{ cluster }}-openshift-{{ type }}"
-
-- name: Launch instance(s)
-  ec2:
-    state: present
-    region: "{{ machine_region }}"
-    keypair: "{{ machine_keypair }}"
-    group: "{{ security_group }}"
-    instance_type: "{{ machine_type }}"
-    image: "{{ machine_image }}"
-    count: "{{ instances | oo_len }}"
-    wait: yes
-    instance_tags:
-      created-by: "{{ created_by }}"
-      env: "{{ env }}"
-      host-type: "{{ host_type }}"
-      env-host-type: "{{ env_host_type }}"
-  register: ec2
-
-- name: Add Name tag to instances
-  ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
-  with_together:
-  - instances
-  - ec2.instances
-  args:
-    tags:
-      Name: "{{ item.0 }}"
-
-- set_fact:
-    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
-
-- name: Add new instances groups and variables
-  add_host:
-    hostname: "{{ item.0 }}"
-    ansible_ssh_host: "{{ item.1.dns_name }}"
-    groups: "{{ instance_groups }}"
-    ec2_private_ip_address: "{{ item.1.private_ip }}"
-    ec2_ip_address: "{{ item.1.public_ip }}"
-  with_together:
-  - instances
-  - ec2.instances
-
-- name: Wait for ssh
-  wait_for: "port=22 host={{ item.dns_name }}"
-  with_items: ec2.instances
-
-- name: Wait for root user setup
-  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
-  register: result
-  until: result.rc == 0
-  retries: 20
-  delay: 10
-  with_items: ec2.instances
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
new file mode 100644
index 000000000..29e594a65
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+  - Returns list of matching AMIs with AMI ID, along with other useful information
+  - Can search AMIs with different owners
+  - Can search by matching tag(s), by AMI name and/or other criteria
+  - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+  - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+  - See the example below for a suggestion of how to search by distro/release.
+options:
+  region:
+    description:
+      - The AWS region to use.
+    required: true
+    aliases: [ 'aws_region', 'ec2_region' ]
+  owner:
+    description:
+      - Search AMIs owned by the specified owner
+      - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+      - If not specified, all EC2 AMIs in the specified region will be searched.
+      - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+    required: false
+    default: null
+  ami_id:
+    description:
+      - An AMI ID to match.
+    default: null
+    required: false
+  ami_tags:
+    description:
+      - A hash/dictionary of tags to match for the AMI.
+    default: null
+    required: false
+  architecture:
+    description:
+      - An architecture type to match (e.g. x86_64).
+    default: null
+    required: false
+  hypervisor:
+    description:
+      - A hypervisor type type to match (e.g. xen).
+    default: null
+    required: false
+  is_public:
+    description:
+      - Whether or not the image(s) are public.
+    choices: ['yes', 'no']
+    default: null
+    required: false
+  name:
+    description:
+      - An AMI name to match.
+    default: null
+    required: false
+  platform:
+    description:
+      - Platform type to match.
+    default: null
+    required: false
+  sort:
+    description:
+      - Optional attribute which with to sort the results.
+      - If specifying 'tag', the 'tag_name' parameter is required.
+    choices: ['name', 'description', 'tag']
+    default: null
+    required: false
+  sort_tag:
+    description:
+      - Tag name with which to sort results.
+      - Required when specifying 'sort=tag'.
+    default: null
+    required: false
+  sort_order:
+    description:
+      - Order in which to sort results.
+      - Only used when the 'sort' parameter is specified.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    required: false
+  sort_start:
+    description:
+      - Which result to start with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  sort_end:
+    description:
+      - Which result to end with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  state:
+    description:
+      - AMI state to match.
+    default: 'available'
+    required: false
+  virtualization_type:
+    description:
+      - Virtualization type to match (e.g. hvm).
+    default: null
+    required: false
+  no_result_action:
+    description:
+      - What to do when no results are found.
+      - "'success' reports success and returns an empty array"
+      - "'fail' causes the module to report failure"
+    choices: ['success', 'fail']
+    default: 'success'
+    required: false
+requirements:
+  - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+    owner: self
+    tags:
+      project: website
+    no_result_action: fail
+  register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+    name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+    owner: 099720109477
+    sort: name
+    sort_order: descending
+    sort_end: 1
+  register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+    image: "{{ ami_search.results[0].ami_id }}"
+    instance_type: m3.medium
+    key_name: mykey
+    wait: yes
+'''
+
+try:
+    import boto.ec2
+    HAS_BOTO=True
+except ImportError:
+    HAS_BOTO=False
+
+import json
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+            region = dict(required=True,
+                aliases = ['aws_region', 'ec2_region']),
+            owner = dict(required=False, default=None),
+            ami_id = dict(required=False),
+            ami_tags = dict(required=False, type='dict',
+                aliases = ['search_tags', 'image_tags']),
+            architecture = dict(required=False),
+            hypervisor = dict(required=False),
+            is_public = dict(required=False),
+            name = dict(required=False),
+            platform = dict(required=False),
+            sort = dict(required=False, default=None,
+                choices=['name', 'description', 'tag']),
+            sort_tag = dict(required=False),
+            sort_order = dict(required=False, default='ascending',
+                choices=['ascending', 'descending']),
+            sort_start = dict(required=False),
+            sort_end = dict(required=False),
+            state = dict(required=False, default='available'),
+            virtualization_type = dict(required=False),
+            no_result_action = dict(required=False, default='success',
+                choices = ['success', 'fail']),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+    ami_id = module.params.get('ami_id')
+    ami_tags = module.params.get('ami_tags')
+    architecture = module.params.get('architecture')
+    hypervisor = module.params.get('hypervisor')
+    is_public = module.params.get('is_public')
+    name = module.params.get('name')
+    owner = module.params.get('owner')
+    platform = module.params.get('platform')
+    sort = module.params.get('sort')
+    sort_tag = module.params.get('sort_tag')
+    sort_order = module.params.get('sort_order')
+    sort_start = module.params.get('sort_start')
+    sort_end = module.params.get('sort_end')
+    state = module.params.get('state')
+    virtualization_type = module.params.get('virtualization_type')
+    no_result_action = module.params.get('no_result_action')
+
+    filter = {'state': state}
+
+    if ami_id:
+        filter['image_id'] = ami_id
+    if ami_tags:
+        for tag in ami_tags:
+            filter['tag:'+tag] = ami_tags[tag]
+    if architecture:
+        filter['architecture'] = architecture
+    if hypervisor:
+        filter['hypervisor'] = hypervisor
+    if is_public:
+        filter['is_public'] = is_public
+    if name:
+        filter['name'] = name
+    if platform:
+        filter['platform'] = platform
+    if virtualization_type:
+        filter['virtualization_type'] = virtualization_type
+
+    ec2 = ec2_connect(module)
+
+    images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+    if no_result_action == 'fail' and len(images_result) == 0:
+        module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+    results = []
+    for image in images_result:
+        data = {
+            'ami_id': image.id,
+            'architecture': image.architecture,
+            'description': image.description,
+            'is_public': image.is_public,
+            'name': image.name,
+            'owner_id': image.owner_id,
+            'platform': image.platform,
+            'root_device_name': image.root_device_name,
+            'root_device_type': image.root_device_type,
+            'state': image.state,
+            'tags': image.tags,
+            'virtualization_type': image.virtualization_type,
+        }
+
+        if image.kernel_id:
+            data['kernel_id'] = image.kernel_id
+        if image.ramdisk_id:
+            data['ramdisk_id'] = image.ramdisk_id
+
+        results.append(data)
+
+    if sort == 'tag':
+        if not sort_tag:
+            module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+        results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+    elif sort:
+        results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+    try:
+        if sort and sort_start and sort_end:
+            results = results[int(sort_start):int(sort_end)]
+        elif sort and sort_start:
+            results = results[int(sort_start):]
+        elif sort and sort_end:
+            results = results[:int(sort_end)]
+    except TypeError:
+        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+    module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
+
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 08e9e2df4..04fcdc0a1 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
 - name: Generate oo_list_hosts group
   hosts: localhost
   gather_facts: no
+  vars_files:
+  - vars.yml
   tasks:
   - set_fact: scratch_group=tag_env_{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
-    when: scratch_group is not defined
-  - add_host: name={{ item }} groups=oo_list_hosts
-    with_items: groups[scratch_group] | difference(['localhost'])
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
 - name: List Hosts
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
-  - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"
+  - debug:
+      msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..666a8d1fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,132 @@
+---
+- set_fact:
+    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+    docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
+    env: "{{ cluster }}"
+    env_host_type: "{{ cluster }}-openshift-{{ type }}"
+    host_type: "{{ type }}"
+
+- set_fact:
+    ec2_region: "{{ lookup('env', 'ec2_region')
+                    | default(deployment_vars[deployment_type].region, true) }}"
+  when: ec2_region is not defined
+- set_fact:
+    ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+                        | default(deployment_vars[deployment_type].image_name, true) }}"
+  when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+    ec2_image: "{{ lookup('env', 'ec2_image')
+                   | default(deployment_vars[deployment_type].image, true) }}"
+  when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+                    | default(deployment_vars[deployment_type].type, true) }}"
+  when: ec2_instance_type is not defined
+- set_fact:
+    ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+                    | default(deployment_vars[deployment_type].keypair, true) }}"
+  when: ec2_keypair is not defined
+- set_fact:
+    ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+                    | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+  when: ec2_vpc_subnet is not defined
+- set_fact:
+    ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+                    | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+  when: ec2_assign_public_ip is not defined
+- set_fact:
+    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+                    | default(deployment_vars[deployment_type].security_groups, true) }}"
+  when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+  ec2_ami_find:
+    region: "{{ ec2_region }}"
+    ami_id: "{{ ec2_image | default(omit, true) }}"
+    name: "{{ ec2_image_name | default(omit, true) }}"
+  register: ami_result
+
+- fail: msg="Could not find requested ami"
+  when: not ami_result.results
+
+- set_fact:
+    latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+    user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+    volume_defs:
+      master:
+        root:
+          volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+      node:
+        root:
+          volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
+        docker:
+          volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
+          device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
+
+- set_fact:
+    volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
+
+- name: Launch instance(s)
+  ec2:
+    state: present
+    region: "{{ ec2_region }}"
+    keypair: "{{ ec2_keypair }}"
+    group: "{{ ec2_security_groups }}"
+    instance_type: "{{ ec2_instance_type }}"
+    image: "{{ latest_ami }}"
+    count: "{{ instances | oo_len }}"
+    vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+    assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+    user_data: "{{ user_data }}"
+    wait: yes
+    instance_tags:
+      created-by: "{{ created_by }}"
+      env: "{{ env }}"
+      host-type: "{{ host_type }}"
+      env-host-type: "{{ env_host_type }}"
+    volumes: "{{ volumes }}"
+  register: ec2
+
+- name: Add Name tag to instances
+  ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+  with_together:
+  - instances
+  - ec2.instances
+  args:
+    tags:
+      Name: "{{ item.0 }}"
+
+- set_fact:
+    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+  add_host:
+    hostname: "{{ item.0 }}"
+    ansible_ssh_host: "{{ item.1.dns_name }}"
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: "{{ instance_groups }}"
+    ec2_private_ip_address: "{{ item.1.private_ip }}"
+    ec2_ip_address: "{{ item.1.public_ip }}"
+  with_together:
+  - instances
+  - ec2.instances
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.dns_name }}"
+  with_items: ec2.instances
+
+- name: Wait for user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_together:
+  - instances
+  - ec2.instances
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
new file mode 100644
index 000000000..7dbc8f552
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -0,0 +1,29 @@
+#cloud-config
+yum_repos:
+  jdetiber-copr:
+    name: Copr repo for origin owned by jdetiber
+    baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
+    skip_if_unavailable: true
+    gpgcheck: true
+    gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
+    enabled: true
+
+packages:
+- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
+- docker-storage-setup
+
+mounts:
+- [ xvdb ]
+- [ ephemeral0 ]
+
+write_files:
+- content: |
+    DEVS=/dev/xvdb
+    VG=docker_vg
+  path: /etc/sysconfig/docker-storage-setup
+  owner: root:root
+  permissions: '0644'
+
+runcmd:
+- systemctl daemon-reload
+- systemctl enable lvm2-lvmetad.service docker-storage-setup.service
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 39607633a..617d0d456 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -1,14 +1,16 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
-
+  gather_facts: no
   vars_files:
-    - vars.yml
-
-- include: ../openshift-node/terminate.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
-- include: ../openshift-master/terminate.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 90ecdc6ab..5e7ab4e58 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
 ---
-- hosts: "tag_env_{{ cluster_id }}"
-  roles:
-  - openshift_repos
-  - os_update_latest
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
 
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.defaults.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ed97d539c..07e453f89 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1 +1,38 @@
 ---
+deployment_vars:
+  origin:
+    # fedora, since centos requires marketplace
+    image: ami-acd999c4
+    image_name:
+    region: us-east-1
+    ssh_user: fedora
+    sudo: yes
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
+  online:
+    # private ami
+    image: ami-7a9e9812
+    image_name: openshift-rhel7_*
+    region: us-east-1
+    ssh_user: root
+    sudo: no
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
+  enterprise:
+    # rhel-7.1, requires cloud access subscription
+    image: ami-10663b78
+    image_name:
+    region: us-east-1
+    ssh_user: ec2-user
+    sudo: yes
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
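
Usage sketch (not part of this commit): launch.yml above loads an optional per-cluster override file named vars.{{ deployment_type }}.{{ cluster_id }}.yml before falling back to vars.defaults.yml. A hypothetical vars.origin.mycluster.yml ("mycluster" is an illustrative cluster_id and the subnet id is a placeholder) would mirror the keys used by the vars.online.*.yml files, overriding the ec2_* facts consumed by tasks/launch_instances.yml:

---
# hypothetical per-cluster overrides; defaults come from deployment_vars in vars.yml
ec2_image: ami-acd999c4            # override the origin default AMI
ec2_region: us-east-1
ec2_keypair: libra
ec2_instance_type: m3.large
ec2_security_groups: [ 'public' ]
ec2_vpc_subnet: subnet-00000000    # placeholder subnet id
ec2_assign_public_ip: yes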
