Diffstat (limited to 'playbooks/aws/openshift-cluster')
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml                    |  19
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml                 | 135
-rw-r--r--  playbooks/aws/openshift-cluster/build_node_group.yml          |  47
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml                   |  74
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml                 | 158
-rw-r--r--  playbooks/aws/openshift-cluster/provision_install.yml         |  16
-rw-r--r--  playbooks/aws/openshift-cluster/provision_nodes.yml           |  49
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml |  26
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml                      | 113
9 files changed, 182 insertions, 455 deletions
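
Taken together, these changes collapse the per-resource roles (openshift_aws_vpc, openshift_aws_ssh_keys, openshift_aws_elb, ...) and the monolithic provision.* dictionary from vars.yml into a single openshift_aws role driven by flat openshift_aws_* variables. A minimal sketch of the resulting workflow, assuming an inventory file and a vars file modeled on provisioning_vars.example.yml (the file names and AMI id below are illustrative, not part of this change):

    # build and seal the base AMI once
    ansible-playbook -i inventory build_ami.yml -e @provisioning_vars.yml -e openshift_aws_base_ami=ami-bdd5d6ab
    # then provision, install, add nodes, and accept them in one pass
    ansible-playbook -i inventory provision_install.yml -e @provisioning_vars.yml
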
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index d43c84205..ffc367f9f 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -1,12 +1,17 @@
+#!/usr/bin/ansible-playbook
 ---
 - name: Setup the vpc and the master node group
-  #hosts: oo_first_master
   hosts: localhost
   remote_user: root
   gather_facts: no
   tasks:
-  - name: get provisioning vars
-    include_vars: vars.yml
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
 
   - name: bring lib_openshift into scope
     include_role:
@@ -14,9 +19,9 @@
 
   - name: fetch masters
     ec2_remote_facts:
-      region: "{{ provision.region }}"
+      region: "{{ openshift_aws_region | default('us-east-1') }}"
       filters:
-        "tag:clusterid": "{{ provision.clusterid }}"
+        "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
         "tag:host-type": master
         instance-state-name: running
     register: mastersout
@@ -26,9 +31,9 @@
 
   - name: fetch new node instances
     ec2_remote_facts:
-      region: "{{ provision.region }}"
+      region: "{{ openshift_aws_region | default('us-east-1') }}"
       filters:
-        "tag:clusterid": "{{ provision.clusterid }}"
+        "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
         "tag:host-type": node
         instance-state-name: running
     register: instancesout
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index d27874200..d3c0057b5 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -3,67 +3,49 @@
   connection: local
   gather_facts: no
   tasks:
-  - name: get the necessary vars for ami building
-    include_vars: vars.yml
-
-  - name: create a vpc with the name <clusterid>
-    include_role:
-      name: openshift_aws_vpc
-    vars:
-      r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
-      r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
-      r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
-      r_openshift_aws_vpc_region: "{{ provision.region }}"
-      r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
-      r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
-
-  - name: create aws ssh keypair
+  - name: Require openshift_aws_base_ami
+    fail:
+      msg: "A base AMI is required for AMI building. Please ensure `openshift_aws_base_ami` is defined."
+    when: openshift_aws_base_ami is undefined
+
+  - name: "Alert user to variables needed and their values - {{ item.name }}"
+    debug:
+      msg: "{{ item.msg }}"
+    with_items:
+    - name: openshift_aws_clusterid
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+    - name: openshift_aws_region
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+  - name: create an instance and prepare for ami
     include_role:
-      name: openshift_aws_ssh_keys
+      name: openshift_aws
+      tasks_from: build_ami.yml
     vars:
-      r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
-      r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
+      openshift_aws_node_group_type: compute
 
-  - name: fetch the default subnet id
-    ec2_vpc_subnet_facts:
-      region: "{{ provision.region }}"
+  - name: fetch newly created instances
+    ec2_remote_facts:
+      region: "{{ openshift_aws_region | default('us-east-1') }}"
       filters:
-        "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}"
-    register: subnetout
-
-  - name: create instance for ami creation
-    ec2:
-      assign_public_ip: yes
-      region: "{{ provision.region }}"
-      key_name: "{{ provision.node_group_config.ssh_key_name }}"
-      group: "{{ provision.clusterid }}"
-      instance_type: m4.xlarge
-      vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
-      image: "{{ provision.build.base_image }}"
-      volumes:
-      - device_name: /dev/sdb
-        volume_type: gp2
-        volume_size: 100
-        delete_on_termination: true
-      wait: yes
-      exact_count: 1
-      count_tag:
-        Name: ami_base
-      instance_tags:
-        Name: ami_base
-    register: amibase
+        "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
+        instance-state-name: running
+    register: instancesout
+    retries: 20
+    delay: 3
+    until: instancesout.instances|length > 0
 
   - name: wait for ssh to become available
     wait_for:
       port: 22
-      host: "{{ amibase.tagged_instances.0.public_ip }}"
+      host: "{{ instancesout.instances[0].public_ip_address }}"
       timeout: 300
       search_regex: OpenSSH
 
   - name: add host to nodes
     add_host:
       groups: nodes
-      name: "{{ amibase.tagged_instances.0.public_dns_name }}"
+      name: "{{ instancesout.instances[0].public_dns_name }}"
 
   - name: set the user to perform installation
     set_fact:
@@ -84,9 +66,6 @@
 - hosts: nodes
   remote_user: root
   tasks:
-  - name: get the necessary vars for ami building
-    include_vars: vars.yml
-
   - set_fact:
       openshift_node_bootstrap: True
 
@@ -98,53 +77,9 @@
   connection: local
   become: no
   tasks:
-  - name: bundle ami
-    ec2_ami:
-      instance_id: "{{ amibase.tagged_instances.0.id }}"
-      region: "{{ provision.region }}"
-      state: present
-      description: "This was provisioned {{ ansible_date_time.iso8601 }}"
-      name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
-      tags: "{{ provision.build.openshift_ami_tags }}"
-      wait: yes
-    register: amioutput
-
-  - debug: var=amioutput
-
-  - when: provision.build.use_encryption | default(False)
-    block:
-    - name: setup kms key for encryption
-      include_role:
-        name: openshift_aws_iam_kms
-      vars:
-        r_openshift_aws_iam_kms_region: "{{ provision.region }}"
-        r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms"
-
-    - name: augment the encrypted ami tags with source-ami
-      set_fact:
-        source_tag:
-          source-ami: "{{ amioutput.image_id }}"
-
-    - name: copy the ami for encrypted disks
-      include_role:
-        name: openshift_aws_ami_copy
-      vars:
-        r_openshift_aws_ami_copy_region: "{{ provision.region }}"
-        r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted"
-        r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
-        r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms"
-        r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}"
-        r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}"
-        # this option currently fails due to boto waiters
-        # when supported this need to be reapplied
-        #r_openshift_aws_ami_copy_wait: True
-
-    - name: Display newly created encrypted ami id
-      debug:
-        msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}"
-
-  - name: terminate temporary instance
-    ec2:
-      state: absent
-      region: "{{ provision.region }}"
-      instance_ids: "{{ amibase.tagged_instances.0.id }}"
+  - name: seal the ami
+    include_role:
+      name: openshift_aws
+      tasks_from: seal_ami.yml
+    vars:
+      openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml
deleted file mode 100644
index 3ef492238..000000000
--- a/playbooks/aws/openshift-cluster/build_node_group.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: fetch recently created AMI
-  ec2_ami_find:
-    region: "{{ provision.region }}"
-    sort: creationDate
-    sort_order: descending
-    name: "{{ provision.build.ami_name }}*"
-    ami_tags: "{{ provision.build.openshift_ami_tags }}"
-    #no_result_action: fail
-  register: amiout
-
-- block:
-  - name: "Create {{ openshift_build_node_type }} sgs"
-    include_role:
-      name: openshift_aws_sg
-    vars:
-      r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}"
-      r_openshift_aws_sg_region: "{{ provision.region }}"
-      r_openshift_aws_sg_type: "{{ openshift_build_node_type }}"
-
-  - name: "generate a launch config name for {{ openshift_build_node_type }}"
-    set_fact:
-      launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}"
-
-  - name: create "{{ openshift_build_node_type }} launch config"
-    include_role:
-      name: openshift_aws_launch_config
-    vars:
-      r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-      r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}"
-      r_openshift_aws_launch_config_region: "{{ provision.region }}"
-      r_openshift_aws_launch_config: "{{ provision.node_group_config }}"
-      r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}"
-      r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}"
-      r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}"
-
-  - name: "create {{ openshift_build_node_type }} node groups"
-    include_role:
-      name: openshift_aws_node_group
-    vars:
-      r_openshift_aws_node_group_name: "{{ provision.clusterid }} openshift {{ openshift_build_node_type }}"
-      r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
-      r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}"
-      r_openshift_aws_node_group_region: "{{ provision.region }}"
-      r_openshift_aws_node_group_config: "{{ provision.node_group_config }}"
-      r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}"
-      r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
new file mode 100644
index 000000000..86d58a68e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -0,0 +1,74 @@
+---
+- name: Setup the vpc and the master node group
+  hosts: localhost
+  tasks:
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
+
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
+
+  - name: fetch newly created instances
+    ec2_remote_facts:
+      region: "{{ openshift_aws_region | default('us-east-1') }}"
+      filters:
+        "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
+        "tag:host-type": master
+        instance-state-name: running
+    register: instancesout
+    retries: 20
+    delay: 3
+    until: instancesout.instances|length > 0
+
+  - name: add new master to masters group
+    add_host:
+      groups: "masters,etcd,nodes"
+      name: "{{ item.public_ip_address }}"
+      hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
+    with_items: "{{ instancesout.instances }}"
+
+  - name: wait for ssh to become available
+    wait_for:
+      port: 22
+      host: "{{ item.public_ip_address }}"
+      timeout: 300
+      search_regex: OpenSSH
+    with_items: "{{ instancesout.instances }}"
+
+- name: set the master facts for hostname to elb
+  hosts: masters
+  gather_facts: no
+  remote_user: root
+  tasks:
+  - name: fetch elbs
+    ec2_elb_facts:
+      region: "{{ openshift_aws_region | default('us-east-1') }}"
+      names:
+      - "{{ item }}"
+    with_items:
+    - "{{ openshift_aws_clusterid | default('default') }}-master-external"
+    - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
+    delegate_to: localhost
+    register: elbs
+
+  - debug: var=elbs
+
+  - name: set fact
+    set_fact:
+      openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+      osm_custom_cors_origins:
+      - "{{ elbs.results[1].elbs[0].dns_name }}"
+      - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+      - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+    with_items: "{{ groups['masters'] }}"
+
+- name: normalize groups
+  include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+  include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+  include: ../../common/openshift-cluster/config.yml
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index dfbf61cc7..db7afac6f 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -2,156 +2,16 @@
 - name: Setup the vpc and the master node group
   hosts: localhost
   tasks:
-  - name: get provisioning vars
-    include_vars: vars.yml
-  - name: create default vpc
-    include_role:
-      name: openshift_aws_vpc
-    vars:
-      r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}"
-      r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}"
-      r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}"
-      r_openshift_aws_vpc_region: "{{ provision.region }}"
-      r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}"
-      r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}"
-
-  - name: create aws ssh keypair
-    include_role:
-      name: openshift_aws_ssh_keys
-    vars:
-      r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}"
-      r_openshift_aws_ssh_keys_region: "{{ provision.region }}"
-
-  - when: provision.openshift_registry_s3 | default(false)
-    name: create s3 bucket for registry
-    include_role:
-      name: openshift_aws_s3
-    vars:
-      r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry"
-      r_openshift_aws_s3_region: "{{ provision.region }}"
-      r_openshift_aws_s3_mode: create
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
 
-  - name: include scale group creation for master
-    include: build_node_group.yml
-    vars:
-      openshift_build_node_type: master
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
 
-  - name: fetch new master instances
-    ec2_remote_facts:
-      region: "{{ provision.region }}"
-      filters:
-        "tag:clusterid": "{{ provision.clusterid }}"
-        "tag:host-type": master
-        instance-state-name: running
-    register: instancesout
-    retries: 20
-    delay: 3
-    until: instancesout.instances|length > 0
-
-  - name: bring iam_cert23 into scope
-    include_role:
-      name: lib_utils
-
-  - name: upload certificates to AWS IAM
-    iam_cert23:
-      state: present
-      name: "{{ provision.clusterid }}-master-external"
-      cert: "{{ provision.iam_cert_ca.cert_path }}"
-      key: "{{ provision.iam_cert_ca.key_path }}"
-      cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}"
-    register: elb_cert_chain
-    failed_when:
-    - "'failed' in elb_cert_chain"
-    - elb_cert_chain.failed
-    - "'msg' in elb_cert_chain"
-    - "'already exists' not in elb_cert_chain.msg"
-    when: provision.iam_cert_ca is defined
-
-  - debug: var=elb_cert_chain
-
-  - name: create our master external and internal load balancers
+  - name: create default vpc
     include_role:
-      name: openshift_aws_elb
-    vars:
-      r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}"
-      r_openshift_aws_elb_region: "{{ provision.region }}"
-      r_openshift_aws_elb_instance_filter:
-        "tag:clusterid": "{{ provision.clusterid }}"
-        "tag:host-type": master
-        instance-state-name: running
-      r_openshift_aws_elb_type: master
-      r_openshift_aws_elb_direction: "{{ elb_item }}"
-      r_openshift_aws_elb_idle_timout: 400
-      r_openshift_aws_elb_scheme: internet-facing
-      r_openshift_aws_elb_security_groups:
-      - "{{ provision.clusterid }}"
-      - "{{ provision.clusterid }}_master"
-      r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}"
-      r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}"
-      r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
-    with_items:
-    - internal
-    - external
-    loop_control:
-      loop_var: elb_item
-
-  - name: add new master to masters group
-    add_host:
-      groups: "masters,etcd,nodes"
-      name: "{{ item.public_ip_address }}"
-      hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}"
-    with_items: "{{ instancesout.instances }}"
-
-  - name: set facts for group normalization
-    set_fact:
-      cluster_id: "{{ provision.clusterid }}"
-      cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}"
-
-  - name: wait for ssh to become available
-    wait_for:
-      port: 22
-      host: "{{ item.public_ip_address }}"
-      timeout: 300
-      search_regex: OpenSSH
-    with_items: "{{ instancesout.instances }}"
-
-
-- name: set the master facts for hostname to elb
-  hosts: masters
-  gather_facts: no
-  remote_user: root
-  tasks:
-  - name: include vars
-    include_vars: vars.yml
-
-  - name: fetch elbs
-    ec2_elb_facts:
-      region: "{{ provision.region }}"
-      names:
-      - "{{ item }}"
-    with_items:
-    - "{{ provision.clusterid }}-master-external"
-    - "{{ provision.clusterid }}-master-internal"
-    delegate_to: localhost
-    register: elbs
-
-  - debug: var=elbs
-
-  - name: set fact
-    set_fact:
-      openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
-      osm_custom_cors_origins:
-      - "{{ elbs.results[1].elbs[0].dns_name }}"
-      - "console.{{ provision.clusterid }}.openshift.com"
-      - "api.{{ provision.clusterid }}.openshift.com"
-    with_items: "{{ groups['masters'] }}"
-
-- name: normalize groups
-  include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
-  include: ../../common/openshift-cluster/std_include.yml
-
-- name: run the config
-  include: ../../common/openshift-cluster/config.yml
+      name: openshift_aws
+      tasks_from: provision.yml
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
new file mode 100644
index 000000000..e787deced
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -0,0 +1,16 @@
+---
+# Once an AMI is built, this playbook is the one-stop shop
+# to provision and install a cluster.
+# It is run with the following parameters:
+# ansible-playbook -i openshift-ansible-inventory provision_install.yml
+- name: Include the provision.yml playbook to create cluster
+  include: provision.yml
+
+- name: Include the install.yml playbook to install cluster
+  include: install.yml
+
+- name: Include the provision_nodes.yml playbook to provision the cluster nodes
+  include: provision_nodes.yml
+
+- name: Include the accept.yml playbook to accept nodes into the cluster
+  include: accept.yml
diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml
index 5428fb307..44c686e08 100644
--- a/playbooks/aws/openshift-cluster/provision_nodes.yml
+++ b/playbooks/aws/openshift-cluster/provision_nodes.yml
@@ -1,47 +1,18 @@
 ---
-# Get bootstrap config token
-# bootstrap should be created on first master
-# need to fetch it and shove it into cloud data
 - name: create the node scale groups
   hosts: localhost
   connection: local
   gather_facts: yes
   tasks:
-  - name: get provisioning vars
-    include_vars: vars.yml
+  - name: Alert user to variables needed - clusterid
+    debug:
+      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
 
-  - name: fetch master instances
-    ec2_remote_facts:
-      region: "{{ provision.region }}"
-      filters:
-        "tag:clusterid": "{{ provision.clusterid }}"
-        "tag:host-type": master
-        instance-state-name: running
-    register: instancesout
-    retries: 20
-    delay: 3
-    until: instancesout.instances|length > 0
+  - name: Alert user to variables needed - region
+    debug:
+      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
 
-  - name: slurp down the bootstrap.kubeconfig
-    slurp:
-      src: /etc/origin/master/bootstrap.kubeconfig
-    delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
-    remote_user: root
-    register: bootstrap
-
-  - name: set_fact on localhost for kubeconfig
-    set_fact:
-      local_bootstrap: "{{ bootstrap }}"
-      launch_config_name:
-        infra: "infra-{{ ansible_date_time.epoch }}"
-        compute: "compute-{{ ansible_date_time.epoch }}"
-
-  - name: include build node group
-    include: build_node_group.yml
-    vars:
-      openshift_build_node_type: infra
-
-  - name: include build node group
-    include: build_node_group.yml
-    vars:
-      openshift_build_node_type: compute
+  - name: create the node groups
+    include_role:
+      name: openshift_aws
+      tasks_from: provision_nodes.yml
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
new file mode 100644
index 000000000..5a30ad3a5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
@@ -0,0 +1,26 @@
+---
+openshift_node_bootstrap: True
+
+# specify a clusterid
+#openshift_aws_clusterid: default
+
+# must specify a base_ami when building an AMI
+#openshift_aws_base_ami:
+
+# when creating an encrypted AMI please specify openshift_aws_ami_encrypt
+#openshift_aws_ami_encrypt: False
+
+# custom certificates are required for the ELB
+#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
+#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
+#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
+
+# This is required for any ec2 instances
+#openshift_aws_ssh_key_name: myuser_key
+
+# This will ensure these users are created
+#openshift_aws_users:
+#- key_name: myuser_key
+#  username: myuser
+#  pub_key: |
+#         ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
deleted file mode 100644
index 47da03cb7..000000000
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-
-clusterid: mycluster
-region: us-east-1
-
-provision:
-  clusterid: "{{ clusterid }}"
-  region: "{{ region }}"
-
-  build:  # build specific variables here
-    ami_name: "openshift-gi-"
-    base_image: ami-bdd5d6ab  # base image for AMI to build from
-
-    # when creating an encrypted AMI please specify use_encryption
-    use_encryption: False
-
-    openshift_ami_tags:
-      bootstrap: "true"
-      openshift-created: "true"
-      clusterid: "{{ clusterid }}"
-
-  # Use s3 backed registry storage
-  openshift_registry_s3: True
-
-  # if using custom certificates these are required for the ELB
-  iam_cert_ca:
-    name: "{{ clusterid }}_openshift"
-    cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
-    key_path: '/path/to/wildcard.<clusterid>.example.com.key'
-    chain_path: '/path/to/cert.ca.crt'
-
-  instance_users:
-  - key_name: myuser_key
-    username: myuser
-    pub_key: |
-           ssh-rsa AAAA== myuser@system
-
-  node_group_config:
-    tags:
-      clusterid: "{{ clusterid }}"
-      environment: stg
-
-    ssh_key_name: myuser_key
-
-    # master specific cluster node settings
-    master:
-      instance_type: m4.xlarge
-      ami: ami-cdeec8b6  # if using an encrypted AMI this will be replaced
-      volumes:
-      - device_name: /dev/sdb
-        volume_size: 100
-        device_type: gp2
-        delete_on_termination: False
-      health_check:
-        period: 60
-        type: EC2
-      min_size: 3
-      max_size: 3
-      desired_size: 3
-      tags:
-        host-type: master
-        sub-host-type: default
-      wait_for_instances: True
-
-    # compute specific cluster node settings
-    compute:
-      instance_type: m4.xlarge
-      ami: ami-cdeec8b6
-      volumes:
-      - device_name: /dev/sdb
-        volume_size: 100
-        device_type: gp2
-        delete_on_termination: True
-      health_check:
-        period: 60
-        type: EC2
-      min_size: 3
-      max_size: 100
-      desired_size: 3
-      tags:
-        host-type: node
-        sub-host-type: compute
-
-    # infra specific cluster node settings
-    infra:
-      instance_type: m4.xlarge
-      ami: ami-cdeec8b6
-      volumes:
-      - device_name: /dev/sdb
-        volume_size: 100
-        device_type: gp2
-        delete_on_termination: True
-      health_check:
-        period: 60
-        type: EC2
-      min_size: 2
-      max_size: 20
-      desired_size: 2
-      tags:
-        host-type: node
-        sub-host-type: infra
-
-  # vpc settings
-  vpc:
-    cidr: 172.31.0.0/16
-    subnets:
-      us-east-1:  # These are us-east-1 region defaults. Ensure this matches your region
-      - cidr: 172.31.48.0/20
-        az: "us-east-1c"
-      - cidr: 172.31.32.0/20
-        az: "us-east-1e"
-      - cidr: 172.31.16.0/20
-        az: "us-east-1a"
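
For completeness, a filled-in provisioning_vars.yml in the spirit of the new example file might look like the following sketch; every value is illustrative, borrowed from the defaults the deleted vars.yml carried:

    ---
    openshift_node_bootstrap: True
    openshift_aws_clusterid: mycluster
    openshift_aws_region: us-east-1
    # required only when building an AMI
    openshift_aws_base_ami: ami-bdd5d6ab
    openshift_aws_ami_encrypt: False
    # custom certificates for the master ELB
    openshift_aws_iam_cert_path: '/path/to/wildcard.mycluster.example.com.crt'
    openshift_aws_iam_key_path: '/path/to/wildcard.mycluster.example.com.key'
    openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
    # ssh access to the ec2 instances
    openshift_aws_ssh_key_name: myuser_key
    openshift_aws_users:
    - key_name: myuser_key
      username: myuser
      pub_key: |
        ssh-rsa AAAA== myuser@system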
