From efe86b44bce679db38cca654818dc3837bb05f6a Mon Sep 17 00:00:00 2001
From: Kenny Woodson <kwoodson@redhat.com>
Date: Wed, 30 Aug 2017 17:47:48 -0400
Subject: Consolidating AWS roles and variables underneath openshift_aws role.

---
 roles/openshift_aws/README.md                      |  84 +++++++++
 roles/openshift_aws/defaults/main.yml              | 209 +++++++++++++++++++++
 roles/openshift_aws/filter_plugins/filters.py      |  28 +++
 roles/openshift_aws/meta/main.yml                  |   3 +
 roles/openshift_aws/tasks/ami_copy.yml             |  34 ++++
 roles/openshift_aws/tasks/build_ami.yml            |  48 +++++
 roles/openshift_aws/tasks/build_node_group.yml     |  34 ++++
 roles/openshift_aws/tasks/elb.yml                  |  68 +++++++
 roles/openshift_aws/tasks/iam_cert.yml             |  29 +++
 roles/openshift_aws/tasks/launch_config.yml        |  45 +++++
 roles/openshift_aws/tasks/provision.yml            |  54 ++++++
 roles/openshift_aws/tasks/provision_nodes.yml      |  66 +++++++
 roles/openshift_aws/tasks/s3.yml                   |   7 +
 roles/openshift_aws/tasks/scale_group.yml          |  32 ++++
 roles/openshift_aws/tasks/seal_ami.yml             |  49 +++++
 roles/openshift_aws/tasks/security_group.yml       |  45 +++++
 roles/openshift_aws/tasks/ssh_keys.yml             |   8 +
 roles/openshift_aws/tasks/vpc.yml                  |  52 +++++
 roles/openshift_aws_ami_copy/README.md             |  50 -----
 roles/openshift_aws_ami_copy/tasks/main.yml        |  26 ---
 roles/openshift_aws_elb/README.md                  |  75 --------
 roles/openshift_aws_elb/defaults/main.yml          |  33 ----
 roles/openshift_aws_elb/meta/main.yml              |  12 --
 roles/openshift_aws_elb/tasks/main.yml             |  57 ------
 roles/openshift_aws_iam_kms/README.md              |  43 -----
 roles/openshift_aws_iam_kms/defaults/main.yml      |   1 -
 roles/openshift_aws_iam_kms/meta/main.yml          |  13 --
 roles/openshift_aws_iam_kms/tasks/main.yml         |  18 --
 roles/openshift_aws_launch_config/README.md        |  72 -------
 .../openshift_aws_launch_config/defaults/main.yml  |   1 -
 roles/openshift_aws_launch_config/meta/main.yml    |  12 --
 roles/openshift_aws_launch_config/tasks/main.yml   |  50 -----
 .../templates/cloud-init.j2                        |   9 -
 roles/openshift_aws_node_group/README.md           |  77 --------
 roles/openshift_aws_node_group/defaults/main.yml   |  58 ------
 roles/openshift_aws_node_group/tasks/main.yml      |  32 ----
 roles/openshift_aws_s3/README.md                   |  43 -----
 roles/openshift_aws_s3/tasks/main.yml              |   6 -
 roles/openshift_aws_sg/README.md                   |  59 ------
 roles/openshift_aws_sg/defaults/main.yml           |  48 -----
 roles/openshift_aws_sg/tasks/main.yml              |  53 ------
 roles/openshift_aws_ssh_keys/README.md             |  49 -----
 roles/openshift_aws_ssh_keys/tasks/main.yml        |   8 -
 roles/openshift_aws_vpc/README.md                  |  62 ------
 roles/openshift_aws_vpc/defaults/main.yml          |   1 -
 roles/openshift_aws_vpc/tasks/main.yml             |  53 ------
 roles/openshift_node/tasks/bootstrap.yml           |  33 ++--
 roles/openshift_node/tasks/main.yml                |   7 +
 48 files changed, 924 insertions(+), 1032 deletions(-)
 create mode 100644 roles/openshift_aws/README.md
 create mode 100644 roles/openshift_aws/defaults/main.yml
 create mode 100644 roles/openshift_aws/filter_plugins/filters.py
 create mode 100644 roles/openshift_aws/meta/main.yml
 create mode 100644 roles/openshift_aws/tasks/ami_copy.yml
 create mode 100644 roles/openshift_aws/tasks/build_ami.yml
 create mode 100644 roles/openshift_aws/tasks/build_node_group.yml
 create mode 100644 roles/openshift_aws/tasks/elb.yml
 create mode 100644 roles/openshift_aws/tasks/iam_cert.yml
 create mode 100644 roles/openshift_aws/tasks/launch_config.yml
 create mode 100644 roles/openshift_aws/tasks/provision.yml
 create mode 100644 roles/openshift_aws/tasks/provision_nodes.yml
 create mode 100644 roles/openshift_aws/tasks/s3.yml
 create mode 100644 roles/openshift_aws/tasks/scale_group.yml
 create mode 100644 roles/openshift_aws/tasks/seal_ami.yml
 create mode 100644 roles/openshift_aws/tasks/security_group.yml
 create mode 100644 roles/openshift_aws/tasks/ssh_keys.yml
 create mode 100644 roles/openshift_aws/tasks/vpc.yml
 delete mode 100644 roles/openshift_aws_ami_copy/README.md
 delete mode 100644 roles/openshift_aws_ami_copy/tasks/main.yml
 delete mode 100644 roles/openshift_aws_elb/README.md
 delete mode 100644 roles/openshift_aws_elb/defaults/main.yml
 delete mode 100644 roles/openshift_aws_elb/meta/main.yml
 delete mode 100644 roles/openshift_aws_elb/tasks/main.yml
 delete mode 100644 roles/openshift_aws_iam_kms/README.md
 delete mode 100644 roles/openshift_aws_iam_kms/defaults/main.yml
 delete mode 100644 roles/openshift_aws_iam_kms/meta/main.yml
 delete mode 100644 roles/openshift_aws_iam_kms/tasks/main.yml
 delete mode 100644 roles/openshift_aws_launch_config/README.md
 delete mode 100644 roles/openshift_aws_launch_config/defaults/main.yml
 delete mode 100644 roles/openshift_aws_launch_config/meta/main.yml
 delete mode 100644 roles/openshift_aws_launch_config/tasks/main.yml
 delete mode 100644 roles/openshift_aws_launch_config/templates/cloud-init.j2
 delete mode 100644 roles/openshift_aws_node_group/README.md
 delete mode 100644 roles/openshift_aws_node_group/defaults/main.yml
 delete mode 100644 roles/openshift_aws_node_group/tasks/main.yml
 delete mode 100644 roles/openshift_aws_s3/README.md
 delete mode 100644 roles/openshift_aws_s3/tasks/main.yml
 delete mode 100644 roles/openshift_aws_sg/README.md
 delete mode 100644 roles/openshift_aws_sg/defaults/main.yml
 delete mode 100644 roles/openshift_aws_sg/tasks/main.yml
 delete mode 100644 roles/openshift_aws_ssh_keys/README.md
 delete mode 100644 roles/openshift_aws_ssh_keys/tasks/main.yml
 delete mode 100644 roles/openshift_aws_vpc/README.md
 delete mode 100644 roles/openshift_aws_vpc/defaults/main.yml
 delete mode 100644 roles/openshift_aws_vpc/tasks/main.yml

(limited to 'roles')

diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
new file mode 100644
index 000000000..696efbea5
--- /dev/null
+++ b/roles/openshift_aws/README.md
@@ -0,0 +1,84 @@
+openshift_aws
+==================================
+
+Provision AWS infrastructure helpers.
+
+Requirements
+------------
+
+* Ansible 2.3
+* Boto
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                              | Default value
+|---------------------------------------------------|-----------------------
+| openshift_aws_clusterid                           | default
+| openshift_aws_elb_scheme                          | internet-facing
+| openshift_aws_launch_config_bootstrap_token       | ''
+| openshift_aws_node_group_config                   | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
+| openshift_aws_ami_copy_wait                       | False
+| openshift_aws_users                               | []
+| openshift_aws_launch_config_name                  | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}
+| openshift_aws_create_vpc                          | True
+| openshift_aws_node_group_type                     | master
+| openshift_aws_elb_cert_arn                        | ''
+| openshift_aws_kubernetes_cluster_status           | owned
+| openshift_aws_s3_mode                             | create
+| openshift_aws_vpc                                 | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_ssh_keys                     | False
+| openshift_aws_iam_kms_alias                       | alias/{{ openshift_aws_clusterid }}_kms
+| openshift_aws_use_custom_ami                      | False
+| openshift_aws_ami_copy_src_region                 | {{ openshift_aws_region }}
+| openshift_aws_s3_bucket_name                      | {{ openshift_aws_clusterid }}-docker-registry
+| openshift_aws_elb_health_check                    | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
+| openshift_aws_node_security_groups                | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
+| openshift_aws_elb_security_groups                 | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
+| openshift_aws_vpc_tags                            | {'Name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_security_groups              | True
+| openshift_aws_create_iam_cert                     | True
+| openshift_aws_create_scale_group                  | True
+| openshift_aws_ami_encrypt                         | False
+| openshift_aws_node_group_config_node_volumes      | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_elb_instance_filter                 | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
+| openshift_aws_region                              | us-east-1
+| openshift_aws_elb_name                            | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_elb_idle_timout                     | 400
+| openshift_aws_subnet_name                         | us-east-1c
+| openshift_aws_node_group_config_tags              | {{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
+| openshift_aws_create_launch_config                | True
+| openshift_aws_ami_tags                            | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
+| openshift_aws_ami_name                            | openshift-gi
+| openshift_aws_node_group_config_master_volumes    | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_vpc_name                            | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_listeners                       | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
+|
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+```yaml
+- include_role:
+    name: openshift_aws
+    tasks_from: vpc.yml
+  vars:
+    openshift_aws_clusterid: test
+    openshift_aws_region: us-east-1
+    openshift_aws_create_vpc: true
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
new file mode 100644
index 000000000..4e7f54f79
--- /dev/null
+++ b/roles/openshift_aws/defaults/main.yml
@@ -0,0 +1,209 @@
+---
+openshift_aws_create_vpc: True
+openshift_aws_create_s3: True
+openshift_aws_create_iam_cert: True
+openshift_aws_create_security_groups: True
+openshift_aws_create_launch_config: True
+openshift_aws_create_scale_group: True
+openshift_aws_kubernetes_cluster_status: owned  # or shared
+openshift_aws_node_group_type: master
+
+openshift_aws_wait_for_ssh: True
+
+openshift_aws_clusterid: default
+openshift_aws_region: us-east-1
+openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
+openshift_aws_iam_cert_path: ''
+openshift_aws_iam_cert_chain_path: ''
+openshift_aws_iam_cert_key_path: ''
+openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+
+openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+openshift_aws_ami: ''
+openshift_aws_ami_copy_wait: False
+openshift_aws_ami_encrypt: False
+openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
+openshift_aws_ami_name: openshift-gi
+openshift_aws_base_ami_name: ami_base
+
+openshift_aws_launch_config_bootstrap_token: ''
+openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+
+openshift_aws_users: []
+
+openshift_aws_ami_tags:
+  bootstrap: "true"
+  openshift-created: "true"
+  clusterid: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_s3_mode: create
+openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
+
+openshift_aws_elb_health_check:
+  ping_protocol: tcp
+  ping_port: 443
+  response_timeout: 5
+  interval: 30
+  unhealthy_threshold: 2
+  healthy_threshold: 2
+
+openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_idle_timout: 400
+openshift_aws_elb_scheme: internet-facing
+openshift_aws_elb_cert_arn: ''
+
+openshift_aws_elb_listeners:
+  master:
+    external:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: ssl
+      instance_port: 443
+    - protocol: ssl
+      load_balancer_port: 443
+      instance_protocol: ssl
+      instance_port: 443
+      # ssl certificate required for https or ssl
+      ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+    internal:
+    - protocol: tcp
+      load_balancer_port: 80
+      instance_protocol: tcp
+      instance_port: 80
+    - protocol: tcp
+      load_balancer_port: 443
+      instance_protocol: tcp
+      instance_port: 443
+
+openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sdb
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: False
+
+openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sdb
+  volume_size: 100
+  device_type: gp2
+  delete_on_termination: True
+
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+
+openshift_aws_node_group_config:
+  tags: "{{ openshift_aws_node_group_config_tags }}"
+  master:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 3
+    max_size: 3
+    desired_size: 3
+    tags:
+      host-type: master
+      sub-host-type: default
+    wait_for_instances: True
+  compute:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 3
+    max_size: 100
+    desired_size: 3
+    tags:
+      host-type: node
+      sub-host-type: compute
+  infra:
+    instance_type: m4.xlarge
+    ami: "{{ openshift_aws_ami }}"
+    volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+    health_check:
+      period: 60
+      type: EC2
+    min_size: 2
+    max_size: 20
+    desired_size: 2
+    tags:
+      host-type: node
+      sub-host-type: infra
+
+openshift_aws_elb_security_groups:
+- "{{ openshift_aws_clusterid }}"
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+
+openshift_aws_elb_instance_filter:
+  "tag:clusterid": "{{ openshift_aws_clusterid }}"
+  "tag:host-type": "{{ openshift_aws_node_group_type }}"
+  instance-state-name: running
+
+openshift_aws_node_security_groups:
+  default:
+    name: "{{ openshift_aws_clusterid }}"
+    desc: "{{ openshift_aws_clusterid }} default"
+    rules:
+    - proto: tcp
+      from_port: 22
+      to_port: 22
+      cidr_ip: 0.0.0.0/0
+    - proto: all
+      from_port: all
+      to_port: all
+      group_name: "{{ openshift_aws_clusterid }}"
+  master:
+    name: "{{ openshift_aws_clusterid }}_master"
+    desc: "{{ openshift_aws_clusterid }} master instances"
+    rules:
+    - proto: tcp
+      from_port: 80
+      to_port: 80
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 443
+      to_port: 443
+      cidr_ip: 0.0.0.0/0
+  compute:
+    name: "{{ openshift_aws_clusterid }}_compute"
+    desc: "{{ openshift_aws_clusterid }} compute node instances"
+  infra:
+    name: "{{ openshift_aws_clusterid }}_infra"
+    desc: "{{ openshift_aws_clusterid }} infra node instances"
+    rules:
+    - proto: tcp
+      from_port: 80
+      to_port: 80
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 443
+      to_port: 443
+      cidr_ip: 0.0.0.0/0
+    - proto: tcp
+      from_port: 30000
+      to_port: 32000
+      cidr_ip: 0.0.0.0/0
+  etcd:
+    name: "{{ openshift_aws_clusterid }}_etcd"
+    desc: "{{ openshift_aws_clusterid }} etcd instances"
+
+openshift_aws_vpc_tags:
+  Name: "{{ openshift_aws_vpc_name }}"
+
+openshift_aws_subnet_name: us-east-1c
+
+openshift_aws_vpc:
+  name: "{{ openshift_aws_vpc_name }}"
+  cidr: 172.31.0.0/16
+  subnets:
+    us-east-1:
+    - cidr: 172.31.48.0/20
+      az: "us-east-1c"
+    - cidr: 172.31.32.0/20
+      az: "us-east-1e"
+    - cidr: 172.31.16.0/20
+      az: "us-east-1a"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py
new file mode 100644
index 000000000..06e1f9602
--- /dev/null
+++ b/roles/openshift_aws/filter_plugins/filters.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_aws
+'''
+
+
+class FilterModule(object):
+    ''' Custom ansible filters for use by openshift_aws role'''
+
+    @staticmethod
+    def build_instance_tags(clusterid, status='owned'):
+        ''' This function will return a dictionary of the instance tags.
+
+            The main desire to have this inside of a filter_plugin is that we
+            need to build the following key.
+
+            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+
+        '''
+        tags = {'clusterid': clusterid,
+                'kubernetes.io/cluster/{}'.format(clusterid): status}
+
+        return tags
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {'build_instance_tags': self.build_instance_tags}
diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml
new file mode 100644
index 000000000..875efcb8f
--- /dev/null
+++ b/roles/openshift_aws/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+    msg: "{{ item }} needs to be defined"
+  when: item is not defined
+  with_items:
+  - openshift_aws_ami_copy_src_ami
+  - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+  oo_iam_kms:
+    state: present
+    alias: "{{ openshift_aws_iam_kms_alias }}"
+    region: "{{ openshift_aws_region }}"
+  register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+  ec2_ami_copy:
+    name: "{{ openshift_aws_ami_copy_name }}"
+    region: "{{ openshift_aws_region }}"
+    source_region: "{{ openshift_aws_ami_copy_src_region }}"
+    source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+    encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+    kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+    wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+  register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with setfact
+  set_fact:
+    openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create a vpc
+  include: vpc.yml
+
+- when: openshift_aws_users | length  > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+  name: Create compute security_groups
+  include: security_group.yml
+
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: create instance for ami creation
+  ec2:
+    assign_public_ip: yes
+    region: "{{ openshift_aws_region }}"
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    group: "{{ openshift_aws_clusterid }}"
+    instance_type: m4.xlarge
+    vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+    image: "{{ openshift_aws_base_ami }}"
+    volumes:
+    - device_name: /dev/sdb
+      volume_type: gp2
+      volume_size: 100
+      delete_on_termination: true
+    wait: yes
+    exact_count: 1
+    count_tag:
+      Name: "{{ openshift_aws_base_ami_name }}"
+    instance_tags:
+      Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_ami is '' then
+# we retrieve the latest build AMI.
+# Then set openshift_aws_ami to the ami.
+- when: openshift_aws_ami == ''
+  block:
+  - name: fetch recently created AMI
+    ec2_ami_find:
+      region: "{{ openshift_aws_region }}"
+      sort: creationDate
+      sort_order: descending
+      name: "{{ openshift_aws_ami_name }}*"
+      ami_tags: "{{ openshift_aws_ami_tags }}"
+      no_result_action: fail
+    register: amiout
+
+  - name: Set the openshift_aws_ami
+    set_fact:
+      openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+    when:
+    - "'results' in amiout"
+    - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups | bool
+  name: "Create {{ openshift_aws_node_group_type }} security groups"
+  include: security_group.yml
+
+- when: openshift_aws_create_launch_config | bool
+  name: "Create {{ openshift_aws_node_group_type }} launch config"
+  include: launch_config.yml
+
+- when: openshift_aws_create_scale_group | bool
+  name: "Create {{ openshift_aws_node_group_type }} node group"
+  include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: debug
+  debug: var=vpcout
+
+- name: fetch the remote instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters: "{{ openshift_aws_elb_instance_filter }}"
+  register: instancesout
+
+- name: fetch the default subnet id
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: show the elb listeners that will be used
+  debug:
+    msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+                   if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+                   else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+  ec2_elb_lb:
+    name: "{{ openshift_aws_elb_name }}"
+    state: present
+    security_group_names: "{{ openshift_aws_elb_security_groups }}"
+    idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+    region: "{{ openshift_aws_region }}"
+    subnets:
+    - "{{ subnetout.subnets[0].id }}"
+    health_check: "{{ openshift_aws_elb_health_check }}"
+    listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+                   if 'master' in openshift_aws_node_group_type  or 'infra' in openshift_aws_node_group_type
+                   else openshift_aws_elb_listeners }}"
+    scheme: "{{ openshift_aws_elb_scheme }}"
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+  register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+#  state when first added to ELB
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+  ec2_elb:
+    instance_id: "{{ item.id }}"
+    ec2_elbs: "{{ openshift_aws_elb_name }}"
+    state: present
+    region: "{{ openshift_aws_region }}"
+    wait: False
+  with_items: "{{ instancesout.instances }}"
+  ignore_errors: True
+  retries: 10
+  register: elb_call
+  until: elb_call|succeeded
+
+- debug:
+    msg: "{{ item }}"
+  with_items:
+  - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,32 @@
+---
+- name: upload certificates to AWS IAM
+  iam_cert23:
+    state: present
+    name: "{{ openshift_aws_iam_cert_name }}"
+    cert: "{{ openshift_aws_iam_cert_path }}"
+    key: "{{ openshift_aws_iam_cert_key_path }}"
+    cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+  register: elb_cert_chain
+  failed_when:
+  - "'failed' in elb_cert_chain"
+  - elb_cert_chain.failed
+  - "'msg' in elb_cert_chain"
+  - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+  - "'BotoServerError' in elb_cert_chain.msg"
+  when:
+  - openshift_aws_create_iam_cert | bool
+  - openshift_aws_iam_cert_path != ''
+  - openshift_aws_iam_cert_key_path != ''
+  - openshift_aws_elb_cert_arn == ''
+
+- name: set_fact openshift_aws_elb_cert_arn
+  set_fact:
+    openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+  # when the upload task above is skipped, the registered result carries no
+  # 'arn' key and an unconditional set_fact would fail the play
+  when: "'arn' in elb_cert_chain"
+
+- name: wait for cert to propagate
+  pause:
+    seconds: 5
+  when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,47 @@
+---
+# NOTE: the role default for openshift_aws_ami is '' (defaults/main.yml), so a
+# plain 'is undefined' check would never trigger; treat empty string as missing
+- fail:
+    msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+  when:
+  - openshift_aws_ami | default('') == ''
+
+- name: fetch the security groups for launch config
+  ec2_group_facts:
+    filters:
+      group-name:
+      - "{{ openshift_aws_clusterid }}"  # default sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"  # node type sg
+      - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s"  # node type sg k8s
+    region: "{{ openshift_aws_region }}"
+  register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+  ec2_lc:
+    name: "{{ openshift_aws_launch_config_name }}"
+    region: "{{ openshift_aws_region }}"
+    image_id: "{{ openshift_aws_ami }}"
+    instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+    user_data: |-
+      #cloud-config
+      {%  if openshift_aws_node_group_type != 'master' %}
+      write_files:
+      - path: /root/csr_kubeconfig
+        owner: root:root
+        permissions: '0640'
+        content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+      - path: /root/openshift_settings
+        owner: root:root
+        permissions: '0640'
+        content:
+          openshift_type: "{{ openshift_aws_node_group_type }}"
+      runcmd:
+      - [ systemctl, enable, atomic-openshift-node]
+      - [ systemctl, start, atomic-openshift-node]
+      {% endif %}
+    key_name: "{{ openshift_aws_ssh_key_name }}"
+    ebs_optimized: False
+    volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+    assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+  name: create default vpc
+  include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+  name: create the iam_cert for elb certificate
+  include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+  name: create aws ssh keypair
+  include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+  name: create s3 bucket for registry
+  include: s3.yml
+
+- name: include scale group creation for master
+  include: build_node_group.yml
+
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": "{{ openshift_aws_node_group_type }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: internal
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+    openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+  include: elb.yml
+  vars:
+    openshift_aws_elb_direction: external
+    openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+    openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+  wait_for:
+    port: 22
+    host: "{{ item.public_ip_address }}"
+    timeout: 300
+    search_regex: OpenSSH
+  with_items: "{{ instancesout.instances }}"
+  when: openshift_aws_wait_for_ssh | bool
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:clusterid": "{{ openshift_aws_clusterid }}"
+      "tag:host-type": master
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+  slurp:
+    src: /etc/origin/master/bootstrap.kubeconfig
+  delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+  remote_user: root
+  register: bootstrap
+
+- name: set_fact for kubeconfig token
+  set_fact:
+    openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: infra
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+  include: build_node_group.yml
+  vars:
+    openshift_aws_node_group_type: compute
+    openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+    openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+  block:
+  - name: pause and allow for instances to scale before we query them
+    pause:
+      seconds: 10
+
+  - name: fetch newly created instances
+    ec2_remote_facts:
+      region: "{{ openshift_aws_region }}"
+      filters:
+        "tag:clusterid": "{{ openshift_aws_clusterid }}"
+        "tag:host-type": node
+        instance-state-name: running
+    register: instancesout
+    retries: 20
+    delay: 3
+    until: instancesout.instances|length > 0
+
+  - name: wait for ssh to become available
+    wait_for:
+      port: 22
+      host: "{{ item.public_ip_address }}"
+      timeout: 300
+      search_regex: OpenSSH
+    with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+  s3:
+    bucket: "{{ openshift_aws_s3_bucket_name }}"
+    mode: "{{ openshift_aws_s3_mode }}"
+    region: "{{ openshift_aws_region }}"
+  when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      'tag:Name': "{{ openshift_aws_vpc_name }}"
+  register: vpcout
+
+- name: fetch the subnet to use in scale group
+  ec2_vpc_subnet_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_subnet_name }}"
+      vpc-id: "{{ vpcout.vpcs[0].id }}"
+  register: subnetout
+
+- name: Create the scale group
+  ec2_asg:
+    name: "{{ openshift_aws_scale_group_name }}"
+    launch_config_name: "{{ openshift_aws_launch_config_name }}"
+    health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+    health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+    min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+    max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+    desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+    region: "{{ openshift_aws_region }}"
+    termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+    wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False) }}"
+    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+    tags:
+    - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+  ec2_remote_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_base_ami_name }}"
+      instance-state-name: running
+  register: instancesout
+  retries: 20
+  delay: 3
+  until: instancesout.instances|length > 0
+
+- name: bundle ami
+  ec2_ami:
+    instance_id: "{{ instancesout.instances.0.id }}"
+    region: "{{ openshift_aws_region }}"
+    state: present
+    description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+    name: "{{ openshift_aws_ami_name }}"
+    tags: "{{ openshift_aws_ami_tags }}"
+    wait: yes
+  register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+  block:
+  - name: augment the encrypted ami tags with source-ami
+    set_fact:
+      source_tag:
+        source-ami: "{{ amioutput.image_id }}"
+
+  - name: copy the ami for encrypted disks
+    include: ami_copy.yml
+    vars:
+      openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+      openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+      # TODO: How does the kms alias get passed to ec2_ami_copy
+      openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+      openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+      # this option currently fails due to boto waiters
+      # when supported this need to be reapplied
+      #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+  ec2:
+    state: absent
+    region: "{{ openshift_aws_region }}"
+    instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC for the vpc.id
+  ec2_vpc_net_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      "tag:Name": "{{ openshift_aws_clusterid }}"
+  register: vpcout
+
+- name: Create default security group for cluster
+  ec2_group:
+    name: "{{ openshift_aws_node_security_groups.default.name }}"
+    description: "{{ openshift_aws_node_security_groups.default.desc }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+    rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True) }}"
+  register: sg_default_created
+
+- name: create the node group sgs
+  ec2_group:
+    name: "{{ item.name }}"
+    description: "{{ item.desc }}"
+    rules: "{{ item.rules if 'rules' in item else [] }}"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type] }}"
+
+- name: create the k8s sgs for the node group
+  ec2_group:
+    name: "{{ item.name }}_k8s"
+    description: "{{ item.desc }} for k8s"
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpcout.vpcs[0].id }}"
+  register: k8s_sg_create
+  with_items:
+  - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type] }}"
+
+- name: tag sg groups with proper tags
+  ec2_tag:
+    tags:
+      KubernetesCluster: "{{ openshift_aws_clusterid }}"
+    resource: "{{ item.group_id }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws/tasks/ssh_keys.yml b/roles/openshift_aws/tasks/ssh_keys.yml
new file mode 100644
index 000000000..f439ce74e
--- /dev/null
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -0,0 +1,8 @@
+---
+- name: Add the public keys for the users
+  ec2_key:
+    name: "{{ item.key_name }}"
+    key_material: "{{ item.pub_key }}"
+    region: "{{ openshift_aws_region }}"
+  with_items: "{{ openshift_aws_users }}"
+  no_log: True
diff --git a/roles/openshift_aws/tasks/vpc.yml b/roles/openshift_aws/tasks/vpc.yml
new file mode 100644
index 000000000..ce2c8eac5
--- /dev/null
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -0,0 +1,52 @@
+---
+- name: Create AWS VPC
+  ec2_vpc_net:
+    state: present
+    cidr_block: "{{ openshift_aws_vpc.cidr }}"
+    dns_support: True
+    dns_hostnames: True
+    region: "{{ openshift_aws_region }}"
+    name: "{{ openshift_aws_clusterid }}"
+    tags: "{{ openshift_aws_vpc_tags }}"
+  register: vpc
+
+- name: Sleep to avoid a race condition when creating the vpc
+  pause:
+    seconds: 5
+  when: vpc.changed
+
+- name: assign the vpc igw
+  ec2_vpc_igw:
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+  register: igw
+
+- name: assign the vpc subnets
+  ec2_vpc_subnet:
+    region: "{{ openshift_aws_region }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+    cidr: "{{ item.cidr }}"
+    az: "{{ item.az }}"
+    resource_tags:
+      Name: "{{ item.az }}"
+  with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+- name: Grab the route tables from our VPC
+  ec2_vpc_route_table_facts:
+    region: "{{ openshift_aws_region }}"
+    filters:
+      vpc-id: "{{ vpc.vpc.id }}"
+  register: route_table
+
+- name: update the route table in the vpc
+  ec2_vpc_route_table:
+    lookup: id
+    route_table_id: "{{ route_table.route_tables[0].id }}"
+    vpc_id: "{{ vpc.vpc.id }}"
+    region: "{{ openshift_aws_region }}"
+    tags:
+      Name: "{{ openshift_aws_vpc_name }}"
+    routes:
+    - dest: 0.0.0.0/0
+      gateway_id: "{{ igw.gateway_id }}"
+  register: route_table_out
diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md
deleted file mode 100644
index 111818451..000000000
--- a/roles/openshift_aws_ami_copy/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-openshift_aws_ami_perms
-=========
-
-Ansible role for copying an AMI
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- openshift_aws_ami_copy_src_ami: source AMI id to copy from
-- openshift_aws_ami_copy_region: region where the AMI is found
-- openshift_aws_ami_copy_name: name to assign to new AMI
-- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption
-- openshift_aws_ami_copy_tags: dict with desired tags
-- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status.  This fails due to boto waiters.
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-    - name: copy the ami for encrypted disks
-      include_role:
-        name: openshift_aws_ami_copy
-      vars:
-        r_openshift_aws_ami_copy_region: us-east-1
-        r_openshift_aws_ami_copy_name: myami
-        r_openshift_aws_ami_copy_src_ami: ami-1234
-        r_openshift_aws_ami_copy_kms_arn: arn:xxxx
-        r_openshift_aws_ami_copy_tags: {}
-        r_openshift_aws_ami_copy_encrypt: False
-
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
deleted file mode 100644
index bcccd4042..000000000
--- a/roles/openshift_aws_ami_copy/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- fail:
-    msg: "{{ item }} needs to be defined"
-  when: item is not defined
-  with_items:
-  - r_openshift_aws_ami_copy_src_ami
-  - r_openshift_aws_ami_copy_name
-  - r_openshift_aws_ami_copy_region
-
-- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}"
-  ec2_ami_copy:
-    region: "{{ r_openshift_aws_ami_copy_region }}"
-    source_region: "{{ r_openshift_aws_ami_copy_region }}"
-    name: "{{ r_openshift_aws_ami_copy_name }}"
-    source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}"
-    encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}"
-    kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}"
-    wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}"
-    tags: "{{ r_openshift_aws_ami_copy_tags }}"
-  register: copy_result
-
-- debug: var=copy_result
-
-- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami
-  set_fact:
-    r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md
deleted file mode 100644
index ecc45fa14..000000000
--- a/roles/openshift_aws_elb/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-openshift_aws_elb
-=========
-
-Ansible role to provision and manage AWS ELB's for Openshift.
-
-Requirements
-------------
-
-Ansible Modules:
-
-- ec2_elb
-- ec2_elb_lb
-
-python package:
-
-python-boto
-
-Role Variables
---------------
-
-- r_openshift_aws_elb_instances: instances to put in ELB
-- r_openshift_aws_elb_elb_name: name of elb
-- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to
-- r_openshift_aws_elb_region: AWS Region
-- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb
-```yaml
-  ping_protocol: tcp
-  ping_port: 443
-  response_timeout: 5
-  interval: 30
-  unhealthy_threshold: 2
-  healthy_threshold: 2
-```
-- r_openshift_aws_elb_listeners: definition of the ELB listeners. See ansible docs for ec2_elb
-```yaml
-- protocol: tcp
-  load_balancer_port: 80
-  instance_protocol: ssl
-  instance_port: 443
-- protocol: ssl
-  load_balancer_port: 443
-  instance_protocol: ssl
-  instance_port: 443
-  # ssl certificate required for https or ssl
-  ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- include_role:
-    name: openshift_aws_elb
-  vars:
-    r_openshift_aws_elb_instances: aws_instances_to_put_in_elb
-    r_openshift_aws_elb_elb_name: elb_name
-    r_openshift_aws_elb_security_groups: security_group_names
-    r_openshift_aws_elb_region: aws_region
-    r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}"
-    r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml
deleted file mode 100644
index ed5d38079..000000000
--- a/roles/openshift_aws_elb/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-r_openshift_aws_elb_health_check:
-  ping_protocol: tcp
-  ping_port: 443
-  response_timeout: 5
-  interval: 30
-  unhealthy_threshold: 2
-  healthy_threshold: 2
-
-r_openshift_aws_elb_cert_arn: ''
-
-r_openshift_aws_elb_listeners:
-  master:
-    external:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: ssl
-      instance_port: 443
-    - protocol: ssl
-      load_balancer_port: 443
-      instance_protocol: ssl
-      instance_port: 443
-      # ssl certificate required for https or ssl
-      ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-    internal:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: tcp
-      instance_port: 80
-    - protocol: tcp
-      load_balancer_port: 443
-      instance_protocol: tcp
-      instance_port: 443
diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml
deleted file mode 100644
index 58be652a5..000000000
--- a/roles/openshift_aws_elb/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
-  author: OpenShift
-  description: Openshift ELB provisioning
-  company: Red Hat, Inc
-  license: ASL 2.0
-  min_ansible_version: 1.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies: []
diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml
deleted file mode 100644
index 64ec18545..000000000
--- a/roles/openshift_aws_elb/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: fetch the default subnet id
-  ec2_remote_facts:
-    region: "{{ r_openshift_aws_elb_region }}"
-    filters: "{{ r_openshift_aws_elb_instance_filter }}"
-  register: instancesout
-
-- name: fetch the default subnet id
-  ec2_vpc_subnet_facts:
-    region: "{{ r_openshift_aws_elb_region }}"
-    filters:
-      "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}"
-  register: subnetout
-
-- name:
-  debug:
-    msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
-                   if 'master' in r_openshift_aws_elb_type  or 'infra' in r_openshift_aws_elb_type
-                   else r_openshift_aws_elb_listeners }}"
-
-- name: "Create ELB {{ r_openshift_aws_elb_name }}"
-  ec2_elb_lb:
-    name: "{{ r_openshift_aws_elb_name }}"
-    state: present
-    security_group_names: "{{ r_openshift_aws_elb_security_groups }}"
-    idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}"
-    region: "{{ r_openshift_aws_elb_region }}"
-    subnets:
-    - "{{ subnetout.subnets[0].id }}"
-    health_check: "{{ r_openshift_aws_elb_health_check }}"
-    listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
-                   if 'master' in r_openshift_aws_elb_type  or 'infra' in r_openshift_aws_elb_type
-                   else r_openshift_aws_elb_listeners }}"
-    scheme: "{{ r_openshift_aws_elb_scheme }}"
-    tags:
-      KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}"
-  register: new_elb
-
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-#  state when first added to ELB
-- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}"
-  ec2_elb:
-    instance_id: "{{ item.id }}"
-    ec2_elbs: "{{ r_openshift_aws_elb_name }}"
-    state: present
-    region: "{{ r_openshift_aws_elb_region }}"
-    wait: False
-  with_items: "{{ instancesout.instances }}"
-  ignore_errors: True
-  retries: 10
-  register: elb_call
-  until: elb_call|succeeded
-
-- debug:
-    msg: "{{ item }}"
-  with_items:
-  - "{{ new_elb }}"
diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md
deleted file mode 100644
index 9468e785c..000000000
--- a/roles/openshift_aws_iam_kms/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_iam_kms
-=========
-
-Ansible role to create AWS IAM KMS keys for encryption
-
-Requirements
-------------
-
-Ansible Modules:
-
-oo_iam_kms
-
-Role Variables
---------------
-
-- r_openshift_aws_iam_kms_region: AWS region to create KMS key
-- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key
-
-Dependencies
-------------
-
-lib_utils
-
-Example Playbook
-----------------
-```yaml
-- include_role:
-    name: openshift_aws_iam_kms
-  vars:
-    r_openshift_aws_iam_kms_region: 'us-east-1'
-    r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms'
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_iam_kms/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml
deleted file mode 100644
index e29aaf96b..000000000
--- a/roles/openshift_aws_iam_kms/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
-  author: OpenShift
-  description: AWS IAM KMS setup and management
-  company: Red Hat, Inc
-  license: ASL 2.0
-  min_ansible_version: 1.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies:
-- lib_utils
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
deleted file mode 100644
index 32aac2666..000000000
--- a/roles/openshift_aws_iam_kms/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- fail:
-    msg: "{{ item.name }} needs to be defined."
-  when: item.cond | bool
-  with_items:
-  - name: "{{ r_openshift_aws_iam_kms_alias }}"
-    cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
-  - name: "{{ r_openshift_aws_iam_kms_region }}"
-    cond: "{{ r_openshift_aws_iam_kms_region is undefined }}"
-
-- name: Create IAM KMS key with alias
-  oo_iam_kms:
-    state: present
-    alias: "{{ r_openshift_aws_iam_kms_alias }}"
-    region: "{{ r_openshift_aws_iam_kms_region }}"
-  register: created_kms
-
-- debug: var=created_kms.results
diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md
deleted file mode 100644
index 52b7e83b6..000000000
--- a/roles/openshift_aws_launch_config/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-openshift_aws_launch_config
-=========
-
-Ansible role to create an AWS launch config for a scale group.
-
-This includes the AMI, volumes, user_data, etc.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-- r_openshift_aws_launch_config_region: "{{ region }}"
-- r_openshift_aws_launch_config: "{{ node_group_config }}"
-```yaml
-    master:
-      instance_type: m4.xlarge
-      ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced
-      volumes:
-      - device_name: /dev/sdb
-        volume_size: 100
-        device_type: gp2
-        delete_on_termination: False
-      health_check:
-        period: 60
-        type: EC2
-      min_size: 3
-      max_size: 3
-      desired_size: 3
-      tags:
-        host-type: master
-        sub-host-type: default
-      wait_for_instances: True
-```
-- r_openshift_aws_launch_config_type: compute
-- r_openshift_aws_launch_config_custom_image: ami-xxxxx
-- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig>
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-  - name: create compute nodes config
-    include_role:
-      name: openshift_aws_launch_config
-    vars:
-      r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-      r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-      r_openshift_aws_launch_config_region: "{{ region }}"
-      r_openshift_aws_launch_config: "{{ node_group_config }}"
-      r_openshift_aws_launch_config_type: compute
-      r_openshift_aws_launch_config_custom_image: ami-1234
-      r_openshift_aws_launch_config_bootstrap_token: abcd
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_launch_config/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml
deleted file mode 100644
index e61670cc2..000000000
--- a/roles/openshift_aws_launch_config/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
-  author: OpenShift
-  description: Openshift AWS VPC creation
-  company: Red Hat, Inc
-  license: ASL 2.0
-  min_ansible_version: 2.3
-  platforms:
-  - name: EL
-    versions:
-    - 7
-dependencies: []
diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml
deleted file mode 100644
index 437cf1f71..000000000
--- a/roles/openshift_aws_launch_config/tasks/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: fail when params are not set
-  fail:
-    msg: Please specify the role parameters.
-  when:
-  - r_openshift_aws_launch_config_cluseterid is undefined
-  - r_openshift_aws_launch_config_type is undefined
-  - r_openshift_aws_launch_config_region is undefined
-  - r_openshift_aws_launch_config is undefined
-
-- name: fetch the security groups for launch config
-  ec2_group_facts:
-    filters:
-      group-name:
-      - "{{ r_openshift_aws_launch_config_clusterid }}"  # default sg
-      - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}"  # node type sg
-      - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s"  # node type sg k8s
-    region: "{{ r_openshift_aws_launch_config_region }}"
-  register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group config
-  ec2_lc:
-    name: "{{ r_openshift_aws_launch_config_name }}"
-    region: "{{ r_openshift_aws_launch_config_region }}"
-    image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}"
-    instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}"
-    security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
-    user_data: |-
-      #cloud-config
-      {%  if r_openshift_aws_launch_config_type != 'master' %}
-      write_files:
-      - path: /root/csr_kubeconfig
-        owner: root:root
-        permissions: '0640'
-        content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
-      - path: /root/openshift_settings
-        owner: root:root
-        permissions: '0640'
-        content:
-          openshift_type: "{{ r_openshift_aws_launch_config_type }}"
-      runcmd:
-      - [ systemctl, enable, atomic-openshift-node]
-      - [ systemctl, start, atomic-openshift-node]
-      {% endif %}
-    key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}"
-    ebs_optimized: False
-    volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}"
-    assign_public_ip: True
-  register: test
diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2
deleted file mode 100644
index 1a1e29550..000000000
--- a/roles/openshift_aws_launch_config/templates/cloud-init.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token is not '' %}
-#cloud-config
-write_files:
-- path: /root/csr_kubeconfig
-  owner: root:root
-  permissions: '0640'
-  content: |-
-  {{ r_openshift_aws_launch_config_bootstrap_token }}
-{% endif %}
diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md
deleted file mode 100644
index c32c57bc5..000000000
--- a/roles/openshift_aws_node_group/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-openshift_aws_node_group
-=========
-
-Ansible role to create an aws node group.
-
-This includes the security group, launch config, and scale group.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-```yaml
-- r_openshift_aws_node_group_name: myscalegroup
-- r_openshift_aws_node_group_clusterid: myclusterid
-- r_openshift_aws_node_group_region: us-east-1
-- r_openshift_aws_node_group_lc_name: launch_config
-- r_openshift_aws_node_group_type: master|infra|compute
-- r_openshift_aws_node_group_config: "{{ node_group_config }}"
-```yaml
-master:
-  instance_type: m4.xlarge
-  ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced
-  volumes:
-  - device_name: /dev/sdb
-    volume_size: 100
-    device_type: gp2
-    delete_on_termination: False
-  health_check:
-    period: 60
-    type: EC2
-  min_size: 3
-  max_size: 3
-  desired_size: 3
-  tags:
-    host-type: master
-    sub-host-type: default
-  wait_for_instances: True
-```
-- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-
-```yaml
-us-east-1a  # name of subnet
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-  - name: "create {{ openshift_build_node_type }} node groups"
-    include_role:
-      name: openshift_aws_node_group
-    vars:
-      r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute"
-      r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
-      r_openshift_aws_node_group_clusterid: "{{ clusterid }}"
-      r_openshift_aws_node_group_region: "{{ region }}"
-      r_openshift_aws_node_group_config: "{{ node_group_config }}"
-      r_openshift_aws_node_group_type: compute
-      r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml
deleted file mode 100644
index 44c5116a1..000000000
--- a/roles/openshift_aws_node_group/defaults/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-r_openshift_aws_node_group_type: master
-
-r_openshift_aws_node_group_config:
-  tags:
-    clusterid: "{{ r_openshift_aws_node_group_clusterid }}"
-  master:
-    instance_type: m4.xlarge
-    ami: "{{ r_openshift_aws_node_group_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_size: 100
-      device_type: gp2
-      delete_on_termination: False
-    health_check:
-      period: 60
-      type: EC2
-    min_size: 3
-    max_size: 3
-    desired_size: 3
-    tags:
-      host-type: master
-      sub-host-type: default
-    wait_for_instances: True
-  compute:
-    instance_type: m4.xlarge
-    ami: "{{ r_openshift_aws_node_group_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_size: 100
-      device_type: gp2
-      delete_on_termination: True
-    health_check:
-      period: 60
-      type: EC2
-    min_size: 3
-    max_size: 100
-    desired_size: 3
-    tags:
-      host-type: node
-      sub-host-type: compute
-  infra:
-    instance_type: m4.xlarge
-    ami: "{{ r_openshift_aws_node_group_ami }}"
-    volumes:
-    - device_name: /dev/sdb
-      volume_size: 100
-      device_type: gp2
-      delete_on_termination: True
-    health_check:
-      period: 60
-      type: EC2
-    min_size: 2
-    max_size: 20
-    desired_size: 2
-    tags:
-      host-type: node
-      sub-host-type: infra
diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml
deleted file mode 100644
index 6f5364b03..000000000
--- a/roles/openshift_aws_node_group/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: validate role inputs
-  fail:
-    msg: Please pass in the required role variables
-  when:
-  - r_openshift_aws_node_group_clusterid is not defined
-  - r_openshift_aws_node_group_region is not defined
-  - r_openshift_aws_node_group_subnet_name is not defined
-
-- name: fetch the subnet to use in scale group
-  ec2_vpc_subnet_facts:
-    region: "{{ r_openshift_aws_node_group_region }}"
-    filters:
-      "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}"
-  register: subnetout
-
-- name: Create the scale group
-  ec2_asg:
-    name: "{{ r_openshift_aws_node_group_name }}"
-    launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}"
-    health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}"
-    health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}"
-    min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}"
-    max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}"
-    desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}"
-    region: "{{ r_openshift_aws_node_group_region }}"
-    termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in  r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
-    load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
-    wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}"
-    vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
-    tags:
-    - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md
deleted file mode 100644
index afafe61cf..000000000
--- a/roles/openshift_aws_s3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_s3
-=========
-
-Ansible role to create an s3 bucket
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_s3_clusterid: myclusterid
-- r_openshift_aws_s3_region: us-east-1
-- r_openshift_aws_s3_mode:  create|delete
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create an s3 bucket
-  include_role:
-    name: openshift_aws_s3
-  vars:
-    r_openshift_aws_s3_clusterid: mycluster
-    r_openshift_aws_s3_region: us-east-1
-    r_openshift_aws_s3_mode: create
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml
deleted file mode 100644
index 46bd781bd..000000000
--- a/roles/openshift_aws_s3/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create an s3 bucket
-  s3:
-    bucket: "{{ r_openshift_aws_s3_clusterid }}"
-    mode: "{{ r_openshift_aws_s3_mode }}"
-    region: "{{ r_openshift_aws_s3_region }}"
diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md
deleted file mode 100644
index eeb76bbb6..000000000
--- a/roles/openshift_aws_sg/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-openshift_aws_sg
-=========
-
-Ansible role to create an aws security groups
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_sg_clusterid: myclusterid
-- r_openshift_aws_sg_region: us-east-1
-- r_openshift_aws_sg_type: master|infra|compute
-```yaml
-# defaults/main.yml
-  default:
-    name: "{{ r_openshift_aws_sg_clusterid }}"
-    desc: "{{ r_openshift_aws_sg_clusterid }} default"
-    rules:
-    - proto: tcp
-      from_port: 22
-      to_port: 22
-      cidr_ip: 0.0.0.0/0
-    - proto: all
-      from_port: all
-      to_port: all
-      group_name: "{{ r_openshift_aws_sg_clusterid }}"
-```
-
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create security groups for master
-  include_role:
-    name: openshift_aws_sg
-  vars:
-    r_openshift_aws_sg_clusterid: mycluster
-    r_openshift_aws_sg_region: us-east-1
-    r_openshift_aws_sg_type: master
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml
deleted file mode 100644
index 9c480d337..000000000
--- a/roles/openshift_aws_sg/defaults/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-r_openshift_aws_sg_sg:
-  default:
-    name: "{{ r_openshift_aws_sg_clusterid }}"
-    desc: "{{ r_openshift_aws_sg_clusterid }} default"
-    rules:
-    - proto: tcp
-      from_port: 22
-      to_port: 22
-      cidr_ip: 0.0.0.0/0
-    - proto: all
-      from_port: all
-      to_port: all
-      group_name: "{{ r_openshift_aws_sg_clusterid }}"
-  master:
-    name: "{{ r_openshift_aws_sg_clusterid }}_master"
-    desc: "{{ r_openshift_aws_sg_clusterid }} master instances"
-    rules:
-    - proto: tcp
-      from_port: 80
-      to_port: 80
-      cidr_ip: 0.0.0.0/0
-    - proto: tcp
-      from_port: 443
-      to_port: 443
-      cidr_ip: 0.0.0.0/0
-  compute:
-    name: "{{ r_openshift_aws_sg_clusterid }}_compute"
-    desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances"
-  infra:
-    name: "{{ r_openshift_aws_sg_clusterid }}_infra"
-    desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances"
-    rules:
-    - proto: tcp
-      from_port: 80
-      to_port: 80
-      cidr_ip: 0.0.0.0/0
-    - proto: tcp
-      from_port: 443
-      to_port: 443
-      cidr_ip: 0.0.0.0/0
-    - proto: tcp
-      from_port: 30000
-      to_port: 32000
-      cidr_ip: 0.0.0.0/0
-  etcd:
-    name: "{{ r_openshift_aws_sg_clusterid }}_etcd"
-    desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances"
diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml
deleted file mode 100644
index 2294fdcc9..000000000
--- a/roles/openshift_aws_sg/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Validate role inputs
-  fail:
-    msg: Please ensure to pass the correct variables
-  when:
-  - r_openshift_aws_sg_region is undefined
-  - r_openshift_aws_sg_region is undefined
-
-
-- name: Fetch the VPC for vpc.id
-  ec2_vpc_net_facts:
-    region: "{{ r_openshift_aws_sg_region }}"
-    filters:
-      "tag:Name": "{{ r_openshift_aws_sg_clusterid }}"
-  register: vpcout
-
-- name: Create default security group for cluster
-  ec2_group:
-    name: "{{ r_openshift_aws_sg_sg.default.name }}"
-    description: "{{ r_openshift_aws_sg_sg.default.desc }}"
-    region: "{{ r_openshift_aws_sg_region }}"
-    vpc_id: "{{ vpcout.vpcs[0].id }}"
-    rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}"
-  register: sg_default_created
-
-- name: create the node group sgs
-  ec2_group:
-    name: "{{ item.name}}"
-    description: "{{ item.desc }}"
-    rules: "{{ item.rules if 'rules' in item else [] }}"
-    region: "{{ r_openshift_aws_sg_region }}"
-    vpc_id: "{{ vpcout.vpcs[0].id }}"
-  register: sg_create
-  with_items:
-  - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}"
-
-- name: create the k8s sgs for the node group
-  ec2_group:
-    name: "{{ item.name }}_k8s"
-    description: "{{ item.desc }} for k8s"
-    region: "{{ r_openshift_aws_sg_region }}"
-    vpc_id: "{{ vpcout.vpcs[0].id }}"
-  register: k8s_sg_create
-  with_items:
-  - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}"
-
-- name: tag sg groups with proper tags
-  ec2_tag:
-    tags:
-      KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}"
-    resource: "{{ item.group_id }}"
-    region: "{{ r_openshift_aws_sg_region }}"
-  with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md
deleted file mode 100644
index 4f8667918..000000000
--- a/roles/openshift_aws_ssh_keys/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-openshift_aws_ssh_keys
-=========
-
-Ansible role for sshind SSH keys
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_ssh_keys_users: list of dicts of users
-- r_openshift_aws_ssh_keys_region: ec2_region to install the keys
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-users:
-- username: user1
-  pub_key: <user1 ssh public key>
-- username: user2
-  pub_key: <user2 ssh public key>
-
-region: us-east-1
-
-- include_role:
-    name: openshift_aws_ssh_keys
-  vars:
-    r_openshift_aws_ssh_keys_users: "{{ users }}"
-    r_openshift_aws_ssh_keys_region: "{{ region }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws_ssh_keys/tasks/main.yml
deleted file mode 100644
index 232cf20ed..000000000
--- a/roles/openshift_aws_ssh_keys/tasks/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Add the public keys for the users
-  ec2_key:
-    name: "{{ item.key_name }}"
-    key_material: "{{ item.pub_key }}"
-    region: "{{ r_openshift_aws_ssh_keys_region }}"
-  with_items: "{{ r_openshift_aws_ssh_keys_users }}"
-  no_log: True
diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md
deleted file mode 100644
index d88cf0581..000000000
--- a/roles/openshift_aws_vpc/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-openshift_aws_vpc
-=========
-
-Ansible role to create a default AWS VPC
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_vpc_clusterid: "{{ clusterid }}"
-- r_openshift_aws_vpc_cidr: 172.31.48.0/20
-- r_openshift_aws_vpc_subnets: "{{ subnets }}"
-```yaml
-    subnets:
-      us-east-1:  # These are us-east-1 region defaults. Ensure this matches your region
-      - cidr: 172.31.48.0/20
-        az: "us-east-1c"
-      - cidr: 172.31.32.0/20
-        az: "us-east-1e"
-      - cidr: 172.31.16.0/20
-        az: "us-east-1a"
-```
-- r_openshift_aws_vpc_region: "{{ region }}"
-- r_openshift_aws_vpc_tags: dict of tags to apply to vpc
-- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}"
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-```yaml
-  - name: create default vpc
-    include_role:
-      name: openshift_aws_vpc
-    vars:
-      r_openshift_aws_vpc_clusterid: mycluster
-      r_openshift_aws_vpc_cidr: 172.31.48.0/20
-      r_openshift_aws_vpc_subnets: "{{ subnets }}"
-      r_openshift_aws_vpc_region: us-east-1
-      r_openshift_aws_vpc_tags: {}
-      r_openshift_aws_vpc_name: mycluster
-
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_vpc/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws_vpc/tasks/main.yml
deleted file mode 100644
index cfe08dae5..000000000
--- a/roles/openshift_aws_vpc/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Create AWS VPC
-  ec2_vpc_net:
-    state: present
-    cidr_block: "{{ r_openshift_aws_vpc_cidr }}"
-    dns_support: True
-    dns_hostnames: True
-    region: "{{ r_openshift_aws_vpc_region }}"
-    name: "{{ r_openshift_aws_vpc_clusterid }}"
-    tags:
-      Name: "{{ r_openshift_aws_vpc_clusterid }}"
-  register: vpc
-
-- name: Sleep to avoid a race condition when creating the vpc
-  pause:
-    seconds: 5
-  when: vpc.changed
-
-- name: assign the vpc igw
-  ec2_vpc_igw:
-    region: "{{ r_openshift_aws_vpc_region }}"
-    vpc_id: "{{ vpc.vpc.id }}"
-  register: igw
-
-- name: assign the vpc subnets
-  ec2_vpc_subnet:
-    region: "{{ r_openshift_aws_vpc_region }}"
-    vpc_id: "{{ vpc.vpc.id }}"
-    cidr: "{{ item.cidr }}"
-    az: "{{ item.az }}"
-    resource_tags:
-      Name: "{{ item.az }}"
-  with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}"
-
-- name: Grab the route tables from our VPC
-  ec2_vpc_route_table_facts:
-    region: "{{ r_openshift_aws_vpc_region }}"
-    filters:
-      vpc-id: "{{ vpc.vpc.id }}"
-  register: route_table
-
-- name: update the route table in the vpc
-  ec2_vpc_route_table:
-    lookup: id
-    route_table_id: "{{ route_table.route_tables[0].id }}"
-    vpc_id: "{{ vpc.vpc.id }}"
-    region: "{{ r_openshift_aws_vpc_region }}"
-    tags:
-      Name: "{{ r_openshift_aws_vpc_name }}"
-    routes:
-    - dest: 0.0.0.0/0
-      gateway_id: igw
-  register: route_table_out
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index cb1440283..b83b2c452 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -42,14 +42,25 @@
     path: /etc/origin/.config_managed
   register: rpmgenerated_config
 
-- name: Remove RPM generated config files if present
-  file:
-    path: "/etc/origin/{{ item }}"
-    state: absent
-  when:
-  - rpmgenerated_config.stat.exists
-  - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
-  with_items:
-  - master
-  - node
-  - .config_managed
+- when: rpmgenerated_config.stat.exists
+  block:
+  - name: Remove RPM generated config files if present
+    file:
+      path: "/etc/origin/{{ item }}"
+      state: absent
+    with_items:
+    - master
+
+  # with_fileglob doesn't work correctly due to a few issues.
+  # Could change this to fileglob when it gets fixed.
+  - name: find all files in /etc/origin/node so we can remove them
+    find:
+      path: /etc/origin/node/
+    register: find_results
+
+  - name: Remove everything except the resolv.conf required for node
+    file:
+      path: "{{ item.path }}"
+      state: absent
+    when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path"
+    with_items: "{{ find_results.files }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 10a44d2e0..22ff6dfd2 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -49,6 +49,13 @@
     state: restarted
   when: openshift_use_crio | default(false)
 
+- name: restart NetworkManager to ensure resolv.conf is present
+  systemd:
+    name: NetworkManager
+    enabled: yes
+    state: restarted
+  when: openshift_node_bootstrap | bool
+
 # The atomic-openshift-node service will set this parameter on
 # startup, but if the network service is restarted this setting is
 # lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
-- 
cgit v1.2.3