Diffstat (limited to 'playbooks')
41 files changed, 1058 insertions, 183 deletions
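Most of the changes below repeat a single refactor: cluster variable files that were previously attached to the common include via vars_files are now loaded by an explicit localhost play, which also copies g_all_hosts into a new in-memory group, l_oo_all_hosts, so a second play can load the same vars as facts on every host. A minimal annotated sketch of the idiom, assuming a vars.yml and a cluster_hosts.yml that defines g_all_hosts (file names mirror the diff):

- hosts: localhost
  gather_facts: no
  tasks:
  # Load the cluster definition on the control machine first...
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml
  # ...then materialize every known host into an in-memory group.
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: g_all_hosts

# Running a play against the new group attaches the same vars to each
# host, replacing the vars_files that used to sit on the include.
- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml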
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index a407e326b..dbf924683 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -25,11 +25,6 @@      - set_fact:          is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" -    - name: Remove br0 interface -      shell: ovs-vsctl del-br br0 -      changed_when: False -      failed_when: False -      - name: Stop services        service: name={{ item }} state=stopped        with_items: @@ -108,82 +103,12 @@          - tuned-profiles-openshift-node          - tuned-profiles-origin-node -    - name: Remove linux interfaces -      shell: ip link del "{{ item }}" -      changed_when: False -      failed_when: False -      with_items: -        - lbr0 -        - vlinuxbr -        - vovsbr -      - shell: systemctl reset-failed        changed_when: False      - shell: systemctl daemon-reload        changed_when: False -    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true -      changed_when: False - -    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true -      changed_when: False - -    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true -      changed_when: False - -    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node -      changed_when: False -      failed_when: False -      with_items: -        - openshift-enterprise -        - atomic-enterprise -        - origin - -    - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}' -      changed_when: False -      failed_when: False -      register: exited_containers_to_delete -      with_items: -        - aep3.*/aep -        - aep3.*/node -        - aep3.*/openvswitch -        - openshift3/ose -        - openshift3/node -        - openshift3/openvswitch -        - openshift/origin - -    - shell: "docker rm {{ item.stdout_lines | join(' ') }}" -      changed_when: False -      failed_when: False -      with_items: "{{ exited_containers_to_delete.results }}" - -    - shell: docker images | egrep {{ item }} | awk '{ print $3 }' -      changed_when: False -      failed_when: False -      register: images_to_delete -      with_items: -        - registry\.access\..*redhat\.com/openshift3 -        - registry\.access\..*redhat\.com/aep3 -        - registry\.qe\.openshift\.com/.* -        - registry\.access\..*redhat\.com/rhel7/etcd -        - docker.io/openshift - -    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}" -      changed_when: False -      failed_when: False -      with_items: "{{ images_to_delete.results }}" -     -    - name: Remove sdn drop files -      file:  -        path: /run/openshift-sdn -        state: absent -         -    - name: restart docker -      service: -        name: docker -        state: restarted -      - name: Remove remaining files        file: path={{ item }} state=absent        with_items: @@ -199,6 +124,12 @@          - /etc/systemd/system/atomic-openshift-master-api.service          - /etc/systemd/system/atomic-openshift-master-controllers.service          - /etc/systemd/system/atomic-openshift-node.service +        - /etc/systemd/system/atomic-openshift-node-dep.service +        - /etc/systemd/system/origin-master.service +        - /etc/systemd/system/origin-master-api.service +        - /etc/systemd/system/origin-master-controllers.service +        - 
/etc/systemd/system/origin-node.service +        - /etc/systemd/system/origin-node-dep.service          - /etc/systemd/system/etcd_container.service          - /etc/systemd/system/openvswitch.service          - /etc/sysconfig/atomic-enterprise-master @@ -209,8 +140,15 @@          - /etc/sysconfig/atomic-openshift-master-api          - /etc/sysconfig/atomic-openshift-master-controllers          - /etc/sysconfig/atomic-openshift-node +        - /etc/sysconfig/atomic-openshift-node-dep +        - /etc/sysconfig/origin-master +        - /etc/sysconfig/origin-master-api +        - /etc/sysconfig/origin-master-controllers +        - /etc/sysconfig/origin-node +        - /etc/sysconfig/origin-node-dep          - /etc/sysconfig/openshift-master          - /etc/sysconfig/openshift-node +        - /etc/sysconfig/openshift-node-dep          - /etc/sysconfig/openvswitch          - /etc/sysconfig/origin-master          - /etc/sysconfig/origin-master-api @@ -246,7 +184,76 @@  - hosts: nodes    become: yes    tasks: +    - name: Remove br0 interface +      shell: ovs-vsctl del-br br0 +      changed_when: False +      failed_when: False +    - name: Remove linux interfaces +      shell: ip link del "{{ item }}" +      changed_when: False +      failed_when: False +      with_items: +        - lbr0 +        - vlinuxbr +        - vovsbr      - name: restart docker        service: name=docker state=restarted +            - name: restart NetworkManager        service: name=NetworkManager state=restarted +       +    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +      changed_when: False + +    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +      changed_when: False + +    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +      changed_when: False + +    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node +      changed_when: False +      failed_when: False +      with_items: +        - openshift-enterprise +        - atomic-enterprise +        - origin + +    - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}' +      changed_when: False +      failed_when: False +      register: exited_containers_to_delete +      with_items: +        - aep3.*/aep +        - aep3.*/node +        - aep3.*/openvswitch +        - openshift3/ose +        - openshift3/node +        - openshift3/openvswitch +        - openshift/origin + +    - shell: "docker rm {{ item.stdout_lines | join(' ') }}" +      changed_when: False +      failed_when: False +      with_items: "{{ exited_containers_to_delete.results }}" + +    - shell: docker images | egrep {{ item }} | awk '{ print $3 }' +      changed_when: False +      failed_when: False +      register: images_to_delete +      with_items: +        - registry\.access\..*redhat\.com/openshift3 +        - registry\.access\..*redhat\.com/aep3 +        - registry\.qe\.openshift\.com/.* +        - registry\.access\..*redhat\.com/rhel7/etcd +        - docker.io/openshift + +    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}" +      changed_when: False +      failed_when: False +      with_items: "{{ images_to_delete.results }}" + +    - name: Remove sdn drop files +      file: +        path: /run/openshift-sdn +        state: absent diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 8402b3579..4839c100b 100644 --- 
a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -1,7 +1,20 @@ +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../aws/openshift-cluster/vars.yml -  - ../../aws/openshift-cluster/cluster_hosts.yml    vars:      g_ssh_user:     "{{ deployment_vars[deployment_type].ssh_user }}"      g_sudo:         "{{ deployment_vars[deployment_type].become }}" @@ -21,3 +34,4 @@      os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"      openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"      openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" +    openshift_use_dnsmasq: false diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 7d5776ae6..d22c86cda 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -150,6 +150,7 @@      groups: "{{ instance_groups }}"      ec2_private_ip_address: "{{ item.1.private_ip }}"      ec2_ip_address: "{{ item.1.public_ip }}" +    ec2_tag_sub-host-type: "{{ sub_host_type }}"      openshift_node_labels: "{{ node_label }}"      logrotate_scripts: "{{ logrotate }}"    with_together: diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 index 2a3974a8c..b1087f9c4 100644 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -3,8 +3,10 @@  mounts:  - [ xvdb ]  - [ ephemeral0 ] +{% endif %}  write_files: +{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}  - content: |      DEVS=/dev/xvdb      VG=docker_vg @@ -12,8 +14,7 @@ write_files:    owner: root:root    permissions: '0644'  {% endif %} - -{% if deployment_vars[deployment_type].become %} +{% if deployment_vars[deployment_type].become | bool %}  - path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty    permissions: 440    content: | diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index bd31c42dd..d762203b2 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -1,12 +1,25 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - name: Update - Populate oo_hosts_to_update group    hosts: localhost    connection: local    become: no    gather_facts: no -  vars_files: -  - vars.yml -  - cluster_hosts.yml    tasks:    - name: Update - Evaluate oo_hosts_to_update      add_host: @@ -14,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: "{{ g_all_hosts | 
default([]) }}" +    with_items: g_all_hosts | default([])  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index 8bda72ac2..d774187f0 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -17,7 +17,7 @@ deployment_rhel7_ent_base:  deployment_vars:    origin:      # centos-7, requires marketplace -    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-61bbf104', True) }}" +    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"      image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"      region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"      ssh_user: centos diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml new file mode 100644 index 000000000..6c12e8245 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -0,0 +1,105 @@ + +- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade +  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config +  roles: +  - openshift_facts +  tasks: +  - name: Determine available Docker version +    script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker +    register: g_docker_version_result +    when: not openshift.common.is_atomic | bool + +  - name: Check if Docker is installed +    command: rpm -q docker +    register: pkg_check +    failed_when: pkg_check.rc > 1 +    changed_when: no +    when: not openshift.common.is_atomic | bool + +  - set_fact: +      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}" +    when: not openshift.common.is_atomic | bool + +  - name: Set fact if docker requires an upgrade +    set_fact: +      docker_upgrade: true +    when: not openshift.common.is_atomic | bool and pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<') + +  - fail: +      msg: This playbook requires access to Docker 1.10 or later +    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<') + +# If a node fails, halt everything, the admin will need to clean up and we +# don't want to carry on, potentially taking out every node. The playbook can safely be re-run +# and will not take any action on a node already running 1.10+. +- name: Evacuate and upgrade nodes +  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config +  serial: 1 +  any_errors_fatal: true +  tasks: +  - debug: var=docker_upgrade + +  - name: Prepare for Node evacuation +    command: > +      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false +    delegate_to: "{{ groups.oo_first_master.0 }}" +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config + +# TODO: skip all node evac stuff for non-nodes (i.e. 
separate containerized etcd hosts) +  - name: Evacuate Node for Kubelet upgrade +    command: > +      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force +    delegate_to: "{{ groups.oo_first_master.0 }}" +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config + +  - name: Stop containerized services +    service: name={{ item }} state=stopped +    with_items: +      - "{{ openshift.common.service_type }}-master" +      - "{{ openshift.common.service_type }}-master-api" +      - "{{ openshift.common.service_type }}-master-controllers" +      - "{{ openshift.common.service_type }}-node" +      - etcd +      - openvswitch +    failed_when: false +    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool + +  - name: Remove all containers and images +    script: files/nuke_images.sh docker +    register: nuke_images_result +    when: docker_upgrade is defined and docker_upgrade | bool + +  - name: Upgrade Docker +    command: "{{ ansible_pkg_mgr }} update -y docker" +    register: docker_upgrade_result +    when: docker_upgrade is defined and docker_upgrade | bool + +  - name: Restart containerized services +    service: name={{ item }} state=started +    with_items: +      - etcd +      - openvswitch +      - "{{ openshift.common.service_type }}-master" +      - "{{ openshift.common.service_type }}-master-api" +      - "{{ openshift.common.service_type }}-master-controllers" +      - "{{ openshift.common.service_type }}-node" +    failed_when: false +    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool + +  - name: Wait for master API to come back online +    become: no +    local_action: +      module: wait_for +        host="{{ inventory_hostname }}" +        state=started +        delay=10 +        port="{{ openshift.master.api_port }}" +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config + +  - name: Set node schedulability +    command: > +      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true +    delegate_to: "{{ groups.oo_first_master.0 }}" +    # the openshift.node.schedulable check is folded into the condition below +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool + diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh new file mode 100644 index 000000000..9a5ee2276 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Stop any running containers +running_container_count=`docker ps -q | wc -l` +if test $running_container_count -gt 0 +then +    docker stop $(docker ps -q) +fi + +# Delete all containers +container_count=`docker ps -a -q | wc -l` +if test $container_count -gt 0 +then +    docker rm -f -v $(docker ps -a -q) +fi + +# Delete all images (forcefully) +image_count=`docker images -q | wc -l` +if test $image_count -gt 0 +then +    # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144 +    docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge." 
+fi diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml new file mode 100644 index 000000000..0f86abd89 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -0,0 +1,29 @@ +# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. +# +# Currently only supports upgrading 1.9.x to >= 1.10.x. +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: ../../cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts | default([]) +    changed_when: false + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../cluster_hosts.yml + +- include: ../../../../common/openshift-cluster/evaluate_groups.yml +  vars: +    # Do not allow adding hosts during upgrade. +    g_new_master_hosts: [] +    g_new_node_hosts: [] +    openshift_cluster_id: "{{ cluster_id | default('default') }}" +    openshift_deployment_type: "{{ deployment_type }}" + +- include: docker_upgrade.yml diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 990ddd2f2..f093411ef 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,5 +1,23 @@  --- -- hosts: all +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: openshift-cluster/cluster_hosts.yml +   +- include: ../common/openshift-cluster/evaluate_groups.yml  +   +- hosts: l_oo_all_hosts    vars:      openshift_deployment_type: "{{ deployment_type }}"    roles: diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index 5ed1d3b3c..ebddc7841 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -28,25 +28,4 @@    - role: flannel_register      when: openshift.common.use_flannel | bool -- name: Create persistent volumes and create hosted services -  hosts: oo_first_master -  vars: -    attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" -    deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" -    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" -    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" -  roles: -  - role: openshift_persistent_volumes -    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 -  - role: openshift_serviceaccounts -    openshift_serviceaccounts_names: -    - router -    - registry -    openshift_serviceaccounts_namespace: default -    openshift_serviceaccounts_sccs: -    - privileged -  - role: openshift_registry -    registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" -    when: deploy_infra | bool and attach_registry_volume | bool -  - role: openshift_metrics -    when: openshift.hosted.metrics.deploy | bool + diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 903babc45..5fec11541 100644 --- 
a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -31,6 +31,8 @@  - include: ../openshift-nfs/config.yml +- include: ../openshift-loadbalancer/config.yml +  - include: ../openshift-master/config.yml  - include: additional_config.yml diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 1cbc0f544..811b3d685 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -1,5 +1,30 @@ +- name: Create persistent volumes and create hosted services +  hosts: oo_first_master +  vars: +    attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" +    deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" +    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" +    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" +  roles: +  - role: openshift_persistent_volumes +    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 +  - role: openshift_serviceaccounts +    openshift_serviceaccounts_names: +    - router +    - registry +    openshift_serviceaccounts_namespace: default +    openshift_serviceaccounts_sccs: +    - privileged +  - role: openshift_registry +    registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" +    when: deploy_infra | bool and attach_registry_volume | bool +  - role: openshift_metrics +    when: openshift.hosted.metrics.deploy | bool +  - name: Create Hosted Resources    hosts: oo_first_master +  pre_tasks: +  - set_fact: +      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"    roles:    - role: openshift_hosted -    openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index 1474bb3ca..0a37d4597 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -1,4 +1,6 @@  --- +- include: evaluate_groups.yml +  - hosts: oo_hosts_to_update    vars:      openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh index 96944a78b..9bbeff660 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh +++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh @@ -2,7 +2,7 @@  # Here we don't really care if this is a master, api, controller or node image.  # We just need to know the version of one of them. 
-unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1) +unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)  if [ ${1} == "origin" ]; then      image_name="openshift/origin" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index a72749a2b..3a4c58e43 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -54,7 +54,7 @@    - script: ../files/pre-upgrade-check -- name: Verify upgrade can proceed +- name: Verify upgrade targets    hosts: oo_masters_to_config:oo_nodes_to_config    vars:      target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml index 66935e061..85d7073f2 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -29,7 +29,7 @@          valid version for a {{ target_version }} upgrade      when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') -- name: Verify upgrade can proceed +- name: Verify upgrade targets    hosts: oo_masters_to_config:oo_nodes_to_config    vars:      target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml deleted file mode 100644 index d9177e8a0..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml +++ /dev/null @@ -1,14 +0,0 @@ -- name: Check if Docker is installed -  command: rpm -q docker -  register: pkg_check -  failed_when: pkg_check.rc > 1 -  changed_when: no - -- name: Upgrade Docker -  command: "{{ ansible_pkg_mgr}} update -y docker" -  when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<') -  register: docker_upgrade - -- name: Restart Docker -  service: name=docker state=restarted -  when: docker_upgrade | changed diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml index 12e2edfb9..31e76805c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml @@ -37,7 +37,7 @@    - name: Update router image to current version      when: all_routers.rc == 0      command: > -      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p +      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'        --api-version=v1      with_items: haproxy_routers @@ -52,7 +52,7 @@    - name: Update registry image to current version      when: _default_registry.rc == 0      command: > -      {{ oc_cmd }} patch dc/docker-registry -p +      {{ oc_cmd }} patch dc/docker-registry -n default -p        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ 
registry_image }}"}]}}}}'        --api-version=v1 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml index dd9843290..6bff16674 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml @@ -53,7 +53,7 @@          valid version for a {{ target_version }} upgrade      when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<') -- name: Verify upgrade can proceed +- name: Verify master processes    hosts: oo_masters_to_config    roles:    - openshift_facts @@ -84,7 +84,7 @@        enabled: yes      when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool -- name: Verify upgrade can proceed +- name: Verify node processes    hosts: oo_nodes_to_config    roles:    - openshift_facts @@ -96,7 +96,7 @@        enabled: yes      when: openshift.common.is_containerized | bool -- name: Verify upgrade can proceed +- name: Verify upgrade targets    hosts: oo_masters_to_config:oo_nodes_to_config    vars:      target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml index c93bf2a17..156e80c0f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml @@ -3,13 +3,13 @@  # The restart playbook should be run after this playbook completes.  ############################################################################### -- name: Upgrade docker +- include: ../../../../byo/openshift-cluster/upgrades/docker/docker_upgrade.yml + +- name: Update Docker facts    hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config    roles:    - openshift_facts    tasks: -  - include: docker_upgrade.yml -    when: not openshift.common.is_atomic | bool    - name: Set post docker install facts      openshift_facts:        role: "{{ item.role }}" diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..f4392173a --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -0,0 +1,5 @@ +--- +- name: Configure load balancers +  hosts: oo_lb_to_config +  roles: +  - role: openshift_loadbalancer diff --git a/playbooks/common/openshift-loadbalancer/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml new file mode 100644 index 000000000..19fffd5e9 --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/service.yml @@ -0,0 +1,20 @@ +--- +- name: Populate g_service_nodes host group if needed +  hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - fail: msg="new_cluster_state is required to be injected in this playbook" +    when: new_cluster_state is not defined + +  - name: Evaluate g_service_lb +    add_host: name={{ item }} groups=g_service_lb +    with_items: oo_host_group_exp | default([]) + +- name: Change state on lb instance(s) +  hosts: g_service_lb +  connection: ssh +  gather_facts: no +  tasks: +    - service: name=haproxy state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 91d66a9cb..0ca148169 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -235,33 +235,6 @@        validate_checksum: yes      with_items: "{{ masters_needing_certs | default([]) }}" -- name: Configure load balancers -  hosts: oo_lb_to_config -  vars: -    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" -    haproxy_limit_nofile: 100000 -    haproxy_global_maxconn: 20000 -    haproxy_default_maxconn: 20000 -    haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" -    haproxy_frontends: -    - name: atomic-openshift-api -      mode: tcp -      options: -      - tcplog -      binds: -      - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" -      default_backend: atomic-openshift-api -    haproxy_backends: -    - name: atomic-openshift-api -      mode: tcp -      option: tcplog -      balance: source -      servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}" -  roles: -  - role: openshift_facts -  - role: haproxy -    when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool -  - name: Check for cached session secrets    hosts: oo_first_master    roles: @@ -348,6 +321,13 @@      openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"      openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and  +            openshift_generate_no_proxy_hosts | default(True) | bool }}"    pre_tasks:    - name: Ensure certificate directory exists      file: diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index a41fca45a..b3491ef8d 100644 --- a/playbooks/common/openshift-node/config.yml +++ 
b/playbooks/common/openshift-node/config.yml @@ -116,6 +116,13 @@      openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"      openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and  +            openshift_generate_no_proxy_hosts | default(True) | bool }}"    roles:    - openshift_node @@ -125,6 +132,13 @@      openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"      openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and  +            openshift_generate_no_proxy_hosts | default(True) | bool }}"    roles:    - openshift_node diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 475d29293..b973c513f 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -1,8 +1,23 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" +      ansible_become: "{{ deployment_vars[deployment_type].become }}" +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../gce/openshift-cluster/vars.yml -  - ../../gce/openshift-cluster/cluster_hosts.yml    vars:      g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"      g_sudo: "{{ deployment_vars[deployment_type].become }}" diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py new file mode 100644 index 000000000..fcaa3b850 --- /dev/null +++ b/playbooks/gce/openshift-cluster/library/gce.py @@ -0,0 +1,543 @@ +#!/usr/bin/python +# Copyright 2013 Google Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible.  If not, see <http://www.gnu.org/licenses/>. + +DOCUMENTATION = ''' +--- +module: gce +version_added: "1.4" +short_description: create or terminate GCE instances +description: +     - Creates or terminates Google Compute Engine (GCE) instances.  See +       U(https://cloud.google.com/products/compute-engine) for an overview. +       Full install/configuration instructions for the gce* modules can +       be found in the comments of ansible/test/gce_tests.py. +options: +  image: +    description: +       - image string to use for the instance +    required: false +    default: "debian-7" +  instance_names: +    description: +      - a comma-separated list of instance names to create or destroy +    required: false +    default: null +  machine_type: +    description: +      - machine type to use for the instance, use 'n1-standard-1' by default +    required: false +    default: "n1-standard-1" +  metadata: +    description: +      - a hash/dictionary of custom data for the instance; +        '{"key":"value", ...}' +    required: false +    default: null +  service_account_email: +    version_added: "1.5.1" +    description: +      - service account email +    required: false +    default: null +  service_account_permissions: +    version_added: "2.0" +    description: +      - service account permissions (see +        U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), +        --scopes section for detailed information) +    required: false +    default: null +    choices: [ +      "bigquery", "cloud-platform", "compute-ro", "compute-rw", +      "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", +      "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", +      "storage-rw", "taskqueue", "userinfo-email" +    ] +  pem_file: +    version_added: "1.5.1" +    description: +      - path to the pem file associated with the service account email +    required: false +    default: null +  project_id: +    version_added: "1.5.1" +    description: +      - your GCE project ID +    required: false +    default: null +  name: +    description: +      - identifier when working with a single instance +    required: false +  network: +    description: +      - name of the network, 'default' will be used if not specified +    required: false +    default: "default" +  persistent_boot_disk: +    description: +      - if set, create the instance with a persistent boot disk +    required: false +    default: "false" +  disks: +    description: +      - a list of persistent disks to attach to the instance; a string value +        gives the name of the disk; alternatively, a dictionary value can +        define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry +        will be the boot disk (which must be READ_WRITE). 
+    required: false +    default: null +    version_added: "1.7" +  state: +    description: +      - desired state of the resource +    required: false +    default: "present" +    choices: ["active", "present", "absent", "deleted"] +  tags: +    description: +      - a comma-separated list of tags to associate with the instance +    required: false +    default: null +  zone: +    description: +      - the GCE zone to use +    required: true +    default: "us-central1-a" +  ip_forward: +    version_added: "1.9" +    description: +      - set to true if the instance can forward ip packets (useful for +        gateways) +    required: false +    default: "false" +  external_ip: +    version_added: "1.9" +    description: +      - type of external ip, ephemeral by default +    required: false +    default: "ephemeral" +  disk_auto_delete: +    version_added: "1.9" +    description: +      - if set boot disk will be removed after instance destruction +    required: false +    default: "true" + +requirements: +    - "python >= 2.6" +    - "apache-libcloud >= 0.13.3" +notes: +  - Either I(name) or I(instance_names) is required. +author: "Eric Johnson (@erjohnso) <erjohnso@google.com>" +''' + +EXAMPLES = ''' +# Basic provisioning example.  Create a single Debian 7 instance in the +# us-central1-a Zone of n1-standard-1 machine type. +- local_action: +    module: gce +    name: test-instance +    zone: us-central1-a +    machine_type: n1-standard-1 +    image: debian-7 + +# Example using defaults and with metadata to create a single 'foo' instance +- local_action: +    module: gce +    name: foo +    metadata: '{"db":"postgres", "group":"qa", "id":500}' + + +# Launch instances from a control node, runs some tasks on the new instances, +# and then terminate them +- name: Create a sandbox instance +  hosts: localhost +  vars: +    names: foo,bar +    machine_type: n1-standard-1 +    image: debian-6 +    zone: us-central1-a +    service_account_email: unique-email@developer.gserviceaccount.com +    pem_file: /path/to/pem_file +    project_id: project-id +  tasks: +    - name: Launch instances +      local_action: gce instance_names={{names}} machine_type={{machine_type}} +                    image={{image}} zone={{zone}} +                    service_account_email={{ service_account_email }} +                    pem_file={{ pem_file }} project_id={{ project_id }} +      register: gce +    - name: Wait for SSH to come up +      local_action: wait_for host={{item.public_ip}} port=22 delay=10 +                    timeout=60 state=started +      with_items: {{gce.instance_data}} + +- name: Configure instance(s) +  hosts: launched +  sudo: True +  roles: +    - my_awesome_role +    - my_awesome_tasks + +- name: Terminate instances +  hosts: localhost +  connection: local +  tasks: +    - name: Terminate instances that were previously launched +      local_action: +        module: gce +        state: 'absent' +        instance_names: {{gce.instance_names}} + +''' + +try: +    import libcloud +    from libcloud.compute.types import Provider +    from libcloud.compute.providers import get_driver +    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ +        ResourceExistsError, ResourceInUseError, ResourceNotFoundError +    _ = Provider.GCE +    HAS_LIBCLOUD = True +except ImportError: +    HAS_LIBCLOUD = False + +try: +    from ast import literal_eval +    HAS_PYTHON26 = True +except ImportError: +    HAS_PYTHON26 = False + + +def get_instance_info(inst): +    """Retrieves instance 
information from an instance object and returns it +    as a dictionary. + +    """ +    metadata = {} +    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: +        for md in inst.extra['metadata']['items']: +            metadata[md['key']] = md['value'] + +    try: +        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] +    except: +        netname = None +    if 'disks' in inst.extra: +        disk_names = [disk_info['source'].split('/')[-1] +                      for disk_info +                      in sorted(inst.extra['disks'], +                                key=lambda disk_info: disk_info['index'])] +    else: +        disk_names = [] + +    if len(inst.public_ips) == 0: +        public_ip = None +    else: +        public_ip = inst.public_ips[0] + +    return({ +        'image': inst.image is not None and inst.image.split('/')[-1] or None, +        'disks': disk_names, +        'machine_type': inst.size, +        'metadata': metadata, +        'name': inst.name, +        'network': netname, +        'private_ip': inst.private_ips[0], +        'public_ip': public_ip, +        'status': ('status' in inst.extra) and inst.extra['status'] or None, +        'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], +        'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, +    }) + + +def create_instances(module, gce, instance_names): +    """Creates new instances. Attributes other than instance_names are picked +    up from 'module' + +    module : AnsibleModule object +    gce: authenticated GCE libcloud driver +    instance_names: python list of instance names to create + +    Returns: +        A list of dictionaries with instance information +        about the instances that were launched. + +    """ +    image = module.params.get('image') +    machine_type = module.params.get('machine_type') +    metadata = module.params.get('metadata') +    network = module.params.get('network') +    persistent_boot_disk = module.params.get('persistent_boot_disk') +    disks = module.params.get('disks') +    state = module.params.get('state') +    tags = module.params.get('tags') +    zone = module.params.get('zone') +    ip_forward = module.params.get('ip_forward') +    external_ip = module.params.get('external_ip') +    disk_auto_delete = module.params.get('disk_auto_delete') +    service_account_permissions = module.params.get('service_account_permissions') +    service_account_email = module.params.get('service_account_email') + +    if external_ip == "none": +        external_ip = None + +    new_instances = [] +    changed = False + +    lc_image = gce.ex_get_image(image) +    lc_disks = [] +    disk_modes = [] +    for i, disk in enumerate(disks or []): +        if isinstance(disk, dict): +            lc_disks.append(gce.ex_get_volume(disk['name'])) +            disk_modes.append(disk['mode']) +        else: +            lc_disks.append(gce.ex_get_volume(disk)) +            # boot disk is implicitly READ_WRITE +            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') +    lc_network = gce.ex_get_network(network) +    lc_machine_type = gce.ex_get_size(machine_type) +    lc_zone = gce.ex_get_zone(zone) + +    # Try to convert the user's metadata value into the format expected +    # by GCE.  First try to ensure user has proper quoting of a +    # dictionary-like syntax using 'literal_eval', then convert the python +    # dict into a python list of 'key' / 'value' dicts.  
Should end up +    # with: +    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] +    if metadata: +        if isinstance(metadata, dict): +            md = metadata +        else: +            try: +                md = literal_eval(str(metadata)) +                if not isinstance(md, dict): +                    raise ValueError('metadata must be a dict') +            except ValueError as e: +                module.fail_json(msg='bad metadata: %s' % str(e)) +            except SyntaxError as e: +                module.fail_json(msg='bad metadata syntax') + +    if metadata and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': +        items = [] +        for k, v in md.items(): +            items.append({"key": k, "value": v}) +        metadata = {'items': items} +    elif metadata: +        metadata = md + +    ex_sa_perms = [] +    bad_perms = [] +    if service_account_permissions: +        for perm in service_account_permissions: +            if perm not in gce.SA_SCOPES_MAP.keys(): +                bad_perms.append(perm) +        if len(bad_perms) > 0: +            module.fail_json(msg='bad permissions: %s' % str(bad_perms)) +        if service_account_email: +            ex_sa_perms.append({'email': service_account_email}) +        else: +            ex_sa_perms.append({'email': "default"}) +        ex_sa_perms[0]['scopes'] = service_account_permissions + +    # These variables all have default values but check just in case +    if not lc_image or not lc_network or not lc_machine_type or not lc_zone: +        module.fail_json(msg='Missing required create instance variable', +                         changed=False) + +    for name in instance_names: +        pd = None +        if lc_disks: +            pd = lc_disks[0] +        elif persistent_boot_disk: +            try: +                pd = gce.create_volume(None, "%s" % name, image=lc_image) +            except ResourceExistsError: +                pd = gce.ex_get_volume("%s" % name, lc_zone) +        inst = None +        try: +            inst = gce.create_node( +                name, lc_machine_type, lc_image, location=lc_zone, +                ex_network=network, ex_tags=tags, ex_metadata=metadata, +                ex_boot_disk=pd, ex_can_ip_forward=ip_forward, +                external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, +                ex_service_accounts=ex_sa_perms +            ) +            changed = True +        except ResourceExistsError: +            inst = gce.ex_get_node(name, lc_zone) +        except GoogleBaseError as e: +            module.fail_json(msg='Unexpected error attempting to create ' + +                             'instance %s, error: %s' % (name, e.value)) + +        for i, lc_disk in enumerate(lc_disks): +            # Check whether the disk is already attached +            if (len(inst.extra['disks']) > i): +                attached_disk = inst.extra['disks'][i] +                if attached_disk['source'] != lc_disk.extra['selfLink']: +                    module.fail_json( +                        msg=("Disk at index %d does not match: requested=%s found=%s" % ( +                            i, lc_disk.extra['selfLink'], attached_disk['source']))) +                elif attached_disk['mode'] != disk_modes[i]: +                    module.fail_json( +                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( +                            i, disk_modes[i], attached_disk['mode']))) +                else: +                    
continue +            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) +            # Work around libcloud bug: attached volumes don't get added +            # to the instance metadata. get_instance_info() only cares about +            # source and index. +            if len(inst.extra['disks']) != i+1: +                inst.extra['disks'].append( +                    {'source': lc_disk.extra['selfLink'], 'index': i}) + +        if inst: +            new_instances.append(inst) + +    instance_names = [] +    instance_json_data = [] +    for inst in new_instances: +        d = get_instance_info(inst) +        instance_names.append(d['name']) +        instance_json_data.append(d) + +    return (changed, instance_json_data, instance_names) + + +def terminate_instances(module, gce, instance_names, zone_name): +    """Terminates a list of instances. + +    module: Ansible module object +    gce: authenticated GCE connection object +    instance_names: a list of instance names to terminate +    zone_name: the zone where the instances reside prior to termination + +    Returns a dictionary of instance names that were terminated. + +    """ +    changed = False +    terminated_instance_names = [] +    for name in instance_names: +        inst = None +        try: +            inst = gce.ex_get_node(name, zone_name) +        except ResourceNotFoundError: +            pass +        except Exception as e: +            module.fail_json(msg=unexpected_error_msg(e), changed=False) +        if inst: +            gce.destroy_node(inst) +            terminated_instance_names.append(inst.name) +            changed = True + +    return (changed, terminated_instance_names) + + +def main(): +    module = AnsibleModule( +        argument_spec=dict( +            image=dict(default='debian-7'), +            instance_names=dict(), +            machine_type=dict(default='n1-standard-1'), +            metadata=dict(), +            name=dict(), +            network=dict(default='default'), +            persistent_boot_disk=dict(type='bool', default=False), +            disks=dict(type='list'), +            state=dict(choices=['active', 'present', 'absent', 'deleted'], +                       default='present'), +            tags=dict(type='list'), +            zone=dict(default='us-central1-a'), +            service_account_email=dict(), +            service_account_permissions=dict(type='list'), +            pem_file=dict(), +            project_id=dict(), +            ip_forward=dict(type='bool', default=False), +            external_ip=dict(choices=['ephemeral', 'none'], +                             default='ephemeral'), +            disk_auto_delete=dict(type='bool', default=True), +        ) +    ) + +    if not HAS_PYTHON26: +        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+") +    if not HAS_LIBCLOUD: +        module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module') + +    gce = gce_connect(module) + +    image = module.params.get('image') +    instance_names = module.params.get('instance_names') +    machine_type = module.params.get('machine_type') +    metadata = module.params.get('metadata') +    name = module.params.get('name') +    network = module.params.get('network') +    persistent_boot_disk = module.params.get('persistent_boot_disk') +    state = module.params.get('state') +    tags = module.params.get('tags') +    zone = module.params.get('zone') +    ip_forward = module.params.get('ip_forward') +    changed = False + +    
inames = [] +    if isinstance(instance_names, list): +        inames = instance_names +    elif isinstance(instance_names, str): +        inames = instance_names.split(',') +    if name: +        inames.append(name) +    if not inames: +        module.fail_json(msg='Must specify a "name" or "instance_names"', +                         changed=False) +    if not zone: +        module.fail_json(msg='Must specify a "zone"', changed=False) + +    json_output = {'zone': zone} +    if state in ['absent', 'deleted']: +        json_output['state'] = 'absent' +        (changed, terminated_instance_names) = terminate_instances( +            module, gce, inames, zone) + +        # based on what user specified, return the same variable, although +        # value could be different if an instance could not be destroyed +        if instance_names: +            json_output['instance_names'] = terminated_instance_names +        elif name: +            json_output['name'] = name + +    elif state in ['active', 'present']: +        json_output['state'] = 'present' +        (changed, instance_data, instance_name_list) = create_instances( +            module, gce, inames) +        json_output['instance_data'] = instance_data +        if instance_names: +            json_output['instance_names'] = instance_name_list +        elif name: +            json_output['name'] = name + +    json_output['changed'] = changed +    module.exit_json(**json_output) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.gce import * +if __name__ == '__main__': +    main() diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index e3efd8566..c5c479052 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -17,6 +17,11 @@        - clusterid-{{ cluster_id }}        - host-type-{{ type }}        - sub-host-type-{{ g_sub_host_type }} +    metadata: +      startup-script: | +        #!/bin/bash +        echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }} +    when: instances |length > 0    register: gce diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 9b7a2777a..332f27da7 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -1,12 +1,25 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - name: Populate oo_hosts_to_update group    hosts: localhost    connection: local    become: no    gather_facts: no -  vars_files: -  - vars.yml -  - cluster_hosts.yml    tasks:    - name: Evaluate oo_hosts_to_update      add_host: @@ -14,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: "{{ g_all_hosts | default([]) }}" +    with_items: g_all_hosts | default([])  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/config.yml 
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 81a6fff0d..032d4cf68 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,10 +2,23 @@
 # TODO: need to figure out a plan for setting hostname, currently the default
 # is localhost, so no hostname value (or public_hostname) value is getting
 # assigned
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: ../../common/openshift-cluster/config.yml
-  vars_files:
-  - ../../libvirt/openshift-cluster/vars.yml
-  - ../../libvirt/openshift-cluster/cluster_hosts.yml
   vars:
     g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     g_sudo: "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
     os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
     openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
     openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+    openshift_use_dnsmasq: false
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 7231f255a..833586ffa 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -83,7 +83,7 @@
   with_items: instances

 - name: Wait for the VMs to get an IP
-  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
+  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
   register: nb_allocated_ips
   until: nb_allocated_ips.stdout == '{{ instances | length }}'
   retries: 60
@@ -91,7 +91,7 @@
   when: instances | length != 0

 - name: Collect IP addresses of the VMs
-  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
+  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
   register: scratch_ip
   with_items: instances
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 9b7a2777a..28362c984 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   connection: local
@@ -14,7 +30,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: "{{ g_all_hosts | default([]) }}"
+    with_items: g_all_hosts | default([])

 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
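
The launch_instances.yml hunks above swap the hard-coded openshift-ansible
network name for {{ libvirt_network }}. The patch does not show where that
variable is defined; presumably the cluster's vars.yml keeps the old name as the
default, along the lines of this sketch (assumed, not confirmed by this diff):

    libvirt_network: openshift-ansible
    libvirt_uri: 'qemu:///system'
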
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 9c0ca9af9..6e4f414d6 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,8 +1,21 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: ../../common/openshift-cluster/config.yml
-  vars_files:
-  - ../../openstack/openshift-cluster/vars.yml
-  - ../../openstack/openshift-cluster/cluster_hosts.yml
   vars:
     g_nodeonmaster: true
     g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 2f05c3adc..422e6dafe 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -288,6 +288,14 @@ resources:
           port_range_max: 53
         - direction: ingress
           protocol: tcp
+          port_range_min: 8053
+          port_range_max: 8053
+        - direction: ingress
+          protocol: udp
+          port_range_min: 8053
+          port_range_max: 8053
+        - direction: ingress
+          protocol: tcp
           port_range_min: 24224
           port_range_max: 24224
         - direction: ingress
@@ -591,11 +599,17 @@ resources:
     type: OS::Heat::MultipartMime
     properties:
       parts:
-        - config: { get_file: user-data }
         - config:
             str_replace:
               template: |
                 #cloud-config
+                disable_root: true
+
+                system_info:
+                  default_user:
+                    name: openshift
+                    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
                 write_files:
                   - path: /etc/sudoers.d/00-openshift-no-requiretty
                     permissions: 440
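
The heat_stack.yaml hunks above open 8053/tcp and 8053/udp toward the masters;
8053 is the port the OpenShift master's SkyDNS serves on when dnsmasq-based
resolution is enabled (note that the libvirt config.yml above opts out via
openshift_use_dnsmasq: false). A smoke-test task, with master_ip standing in for
a real address (illustrative, not part of this patch), might look like:

    - name: Check that master DNS answers on 8053
      command: dig +short -p 8053 @{{ master_ip }} kubernetes.default.svc.cluster.local
      changed_when: false
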
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 3d4fe42d0..b9aae2f4c 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -46,7 +46,7 @@
              -P master_flavor={{ openstack_flavor["master"] }}
              -P node_flavor={{ openstack_flavor["node"] }}
              -P infra_flavor={{ openstack_flavor["infra"] }}
-             -P dns_flavor=m1.small
+             -P dns_flavor={{ openstack_flavor["dns"] }}
              openshift-ansible-{{ cluster_id }}-stack'

  - name: Wait for OpenStack Stack readiness
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 539af6524..6d4d23963 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: dns.yml

 - name: Populate oo_hosts_to_update group
@@ -6,9 +22,6 @@
   connection: local
   become: no
   gather_facts: no
-  vars_files:
-  - vars.yml
-  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -16,7 +29,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: "{{ g_all_hosts | default([]) }}"
+    with_items: g_all_hosts | default([])

 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 84cba0506..bc53a51b0 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -13,6 +13,7 @@ openstack_ssh_public_key:       "{{ lookup('file', lookup('oo_option', 'public_k
 openstack_ssh_access_from:      "{{ lookup('oo_option', 'ssh_from')          |
                                     default('0.0.0.0/0',                     True) }}"
 openstack_flavor:
+  dns:    "{{ lookup('oo_option', 'dns_flavor'       ) | default('m1.small',  True) }}"
   etcd:   "{{ lookup('oo_option', 'etcd_flavor'      ) | default('m1.small',  True) }}"
   master: "{{ lookup('oo_option', 'master_flavor'    ) | default('m1.small',  True) }}"
   infra:  "{{ lookup('oo_option', 'infra_flavor'     ) | default('m1.small',  True) }}"
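
The added dns entry follows the same convention as its siblings: try an
oo_option lookup, then fall back to m1.small. Any future instance role would be
wired in the same way; a hypothetical lb flavor, for illustration only:

    openstack_flavor:
      lb:     "{{ lookup('oo_option', 'lb_flavor'        ) | default('m1.small',  True) }}"
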
