diff options
139 files changed, 1164 insertions, 741 deletions
@@ -35,12 +35,12 @@ fixes.  Older branches will receive only critical fixes.  Follow this release pattern and you can't go wrong: -| Origin        | OpenShift-Ansible | -| ------------- | ----------------- | -| 1.3           | 3.3               | -| 1.4           | 3.4               | -| 1.5           | 3.5               | -| 3.*X*         | 3.*X*             | +| Origin/OCP    | OpenShift-Ansible version | openshift-ansible branch | +| ------------- | ----------------- |----------------------------------| +| 1.3 / 3.3          | 3.3               | release-1.3 | +| 1.4 / 3.4          | 3.4               | release-1.4 | +| 1.5 / 3.5          | 3.5               | release-1.5 | +| 3.*X*         | 3.*X*             | release-3.x |  If you're running from the openshift-ansible **master branch** we can  only guarantee compatibility with the newest origin releases **in diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml index a9fc18958..6f95b4e2d 100644 --- a/playbooks/byo/openshift-cluster/service-catalog.yml +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -5,6 +5,12 @@  # currently supported method.  
#  - include: initialize_groups.yml +  tags: +  - always + +- include: ../../common/openshift-cluster/std_include.yml +  tags: +  - always  - include: ../../common/openshift-cluster/service_catalog.yml    vars: diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml index f03854c2a..c35fd9f37 100644 --- a/playbooks/byo/openshift-etcd/scaleup.yml +++ b/playbooks/byo/openshift-etcd/scaleup.yml @@ -1,19 +1,5 @@  --- -- hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml -  - add_host: -      name: "{{ item }}" -      groups: l_oo_all_hosts -    with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts -  gather_facts: no -  tasks: -  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: ../openshift-cluster/initialize_groups.yml  - include: ../../common/openshift-cluster/evaluate_groups.yml  - include: ../../common/openshift-etcd/scaleup.yml diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 7136f1c1f..14d7d9822 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -22,6 +22,12 @@        - docker_image_availability        - docker_storage +- hosts: localhost +  tasks: +  - fail: +      msg: No etcd hosts defined. Running an all-in-one master is deprecated and will no longer be supported in a future upgrade. 
+    when: groups.oo_etcd_to_config | default([]) | length == 0 and not openshift_master_unsupported_all_in_one | default(False) +  - include: initialize_oo_option_facts.yml    tags:    - always diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index 5425f448f..50351588f 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -37,7 +37,7 @@        dest: "{{ openshift.common.config_base }}/master/master-config.yaml"        yaml_key: dnsConfig.bindAddress        yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}" -    notify: restart master +    notify: restart master api    - meta: flush_handlers  - name: Configure nodes for dnsmasq diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 8accda8c7..c56b07037 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -8,7 +8,7 @@    - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required      fail:        msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set -    when: "{{ g_etcd_hosts is not defined and g_new_etcd_hosts is not defined}}" +    when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined    - name: Evaluate groups - g_master_hosts or g_new_master_hosts required      fail: diff --git a/playbooks/common/openshift-cluster/initialize_firewall.yml b/playbooks/common/openshift-cluster/initialize_firewall.yml new file mode 100644 index 000000000..f0374fbc7 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_firewall.yml @@ -0,0 +1,7 @@ +--- +- name: Initialize host firewall +  hosts: oo_all_hosts +  tasks: +  - name: Install and configure the proper firewall settings +    include_role: +      name: os_firewall diff --git 
a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml new file mode 100644 index 000000000..a7114fc80 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml @@ -0,0 +1,8 @@ +--- +- name: Setup yum repositories for all hosts +  hosts: oo_all_hosts +  gather_facts: no +  tasks: +  - name: initialize openshift repos +    include_role: +      name: openshift_repos diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index f4e52869e..7112a6084 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,24 +1,5 @@  ---  # NOTE: requires openshift_facts be run -- name: Verify compatible yum/subscription-manager combination -  hosts: oo_all_hosts -  gather_facts: no -  tasks: -  # See: -  #   https://bugzilla.redhat.com/show_bug.cgi?id=1395047 -  #   https://bugzilla.redhat.com/show_bug.cgi?id=1282961 -  #   https://github.com/openshift/openshift-ansible/issues/1138 -  #   Consider the repoquery module for this work -  - name: Check for bad combinations of yum and subscription-manager -    command: > -      {{ repoquery_cmd }} --installed --qf '%{version}' "yum" -    register: yum_ver_test -    changed_when: false -    when: not openshift.common.is_atomic | bool -  - fail: -      msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. -    when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' 
in yum_ver_test.stdout -  - name: Determine openshift_version to configure on first master    hosts: oo_first_master    roles: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index ce7f981ab..99a634970 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -29,6 +29,7 @@    - role: openshift_default_storage_class      when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')    - role: openshift_hosted +    r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}"    - role: openshift_metrics      when: openshift_hosted_metrics_deploy | default(false) | bool    - role: openshift_logging diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml index 6c12875fe..599350258 100644 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -1,5 +1,4 @@  --- -- include: evaluate_groups.yml  - name: Update Master configs    hosts: oo_masters diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 6ed31a644..5a1187ec7 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -11,6 +11,14 @@    tags:    - node +- include: initialize_openshift_repos.yml +  tags: +  - always +  - include: initialize_openshift_version.yml    tags:    - always + +- include: initialize_firewall.yml +  tags: +  - always diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml index 13313377e..83f16ac0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml @@ -15,7 +15,6 @@    with_items:      - etcd_container      - openvswitch -    - "{{ openshift.common.service_type }}-master"      - "{{ openshift.common.service_type }}-master-api"      - "{{ openshift.common.service_type }}-master-controllers"      - "{{ openshift.common.service_type }}-node" diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml index 35d000e49..808cc562c 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml @@ -4,7 +4,6 @@  - name: Stop containerized services    service: name={{ item }} state=stopped    with_items: -    - "{{ openshift.common.service_type }}-master"      - "{{ openshift.common.service_type }}-master-api"      - "{{ openshift.common.service_type }}-master-controllers"      - "{{ openshift.common.service_type }}-node" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 06eb5f936..45022cd61 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,23 +9,16 @@        local_facts:          ha: "{{ groups.oo_masters_to_config | length > 1 }}" -  - name: Ensure Master is running -    service: -      name: "{{ openshift.common.service_type }}-master" -      state: started -      enabled: yes -    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool -    - name: Ensure HA Master is running      service:        name: "{{ openshift.common.service_type }}-master-api"        state: started        enabled: yes -    when: openshift.master.ha is defined and openshift.master.ha | bool and 
openshift.common.is_containerized | bool +    when: openshift.common.is_containerized | bool    - name: Ensure HA Master is running      service:        name: "{{ openshift.common.service_type }}-master-controllers"        state: started        enabled: yes -    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool +    when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 6a0471948..abcd21c90 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -91,6 +91,9 @@    - include_vars: ../../../../roles/openshift_master/vars/main.yml +  - name: Remove any legacy systemd units +    include: ../../../../roles/openshift_master/tasks/clean_systemd_units.yml +    - name: Update systemd units      include: ../../../../roles/openshift_master/tasks/systemd_units.yml diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 83d2cec81..8558bf3e9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -74,18 +74,21 @@    - block:      - debug:          msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" -      when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and -                openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] 
}}" +      when: +      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates +      - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]      - set_fact:          openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" -      when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and -                openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}" +      when: +      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates +      - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]      - set_fact:          openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" -      when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and -                openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" +      when: +      - openshift_master_scheduler_current_predicates != default_predicates_no_region +      - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]      when: openshift_master_scheduler_predicates | default(none) is none @@ -131,18 +134,21 @@    - block:      - debug:          msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" -      when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and -                openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}" +      when: 
+      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities +      - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]      - set_fact:          openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" -      when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and -                openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}" +      when: +      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities +      - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]      - set_fact:          openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" -      when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and -                openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" +      when: +      - openshift_master_scheduler_current_priorities != default_priorities_no_zone +      - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]      when: openshift_master_scheduler_priorities | default(none) is none @@ -162,5 +168,6 @@        content: "{{ scheduler_config | to_nice_json }}"        dest: "{{ openshift_master_scheduler_conf }}"        backup: true -  when: "{{ openshift_upgrade_scheduler_predicates is defined or -            openshift_upgrade_scheduler_priorities is defined }}" +  when: > +    openshift_upgrade_scheduler_predicates is defined or +    openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml index f1245aa2e..0f6fb46a4 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -39,8 +39,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index b693ab55c..cfba788a8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -47,8 +47,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index 4fd029107..1054f430e 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -40,8 +40,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml index 965e39482..783289c87 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -39,8 +39,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index 7830f462c..8aa443c3c 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -47,8 +47,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index 4364ff8e3..436795694 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -40,8 +40,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index 4e7c14e94..9a000265e 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -39,8 +39,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 45b664d06..2dd9676c7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -47,8 +47,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index 036d3fcf5..d5fe8285e 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -40,8 +40,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index da4444867..8ceab09f4 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -39,8 +39,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index a470c7595..f765e9064 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -47,8 +47,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 25eceaf90..8bed6a8c2 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -40,8 +40,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index b8fc1b13c..4f05d0c64 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -39,8 +39,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index c571a03c8..2ef95e778 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -47,8 +47,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index 8a2bd19c9..abc4c245b 100644 --- 
a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -40,8 +40,9 @@                                                      | union(groups['oo_etcd_to_config'] | default([])))                                                  | oo_collect('openshift.common.hostname') | default([]) | join (',')                                                  }}" -    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and -            openshift_generate_no_proxy_hosts | default(True) | bool }}" +    when: +    - openshift_http_proxy is defined or openshift_https_proxy is defined +    - openshift_generate_no_proxy_hosts | default(True) | bool  - include: ../pre/verify_inventory_vars.yml    tags: diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index 47fa8cdf5..192305bc8 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -1,4 +1,13 @@  --- +- name: Gather facts +  hosts: oo_etcd_to_config:oo_new_etcd_to_config +  roles: +  - openshift_etcd_facts +  post_tasks: +  - set_fact: +      etcd_hostname: "{{ etcd_hostname }}" +      etcd_ip: "{{ etcd_ip }}" +  - name: Configure etcd    hosts: oo_new_etcd_to_config    serial: 1 @@ -8,11 +17,11 @@    pre_tasks:    - name: Add new etcd members to cluster      command: > -      /usr/bin/etcdctl  --cert-file {{ etcd_peer_cert_file }} -                        --key-file {{ etcd_peer_key_file }} -                        --ca-file {{ etcd_peer_ca_file }} -                        -C {{ etcd_peer_url_scheme }}://{{ etcd_ca_host }}:{{ etcd_client_port }} -                        member add {{ inventory_hostname }} {{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ etcd_peer_port }} +      /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} +                       --key-file 
{{ etcd_peer_key_file }} +                       --ca-file {{ etcd_peer_ca_file }} +                       -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} +                       member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}      delegate_to: "{{ etcd_ca_host }}"      register: etcd_add_check    roles: @@ -23,7 +32,6 @@      etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"      etcd_initial_cluster_state: "existing"      initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') }}" -    etcd_hostname: "{{ inventory_hostname }}"      etcd_ca_setup: False      r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"    - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index edc15a3f2..d9de578f3 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,40 +1,26 @@  ---  - name: Open firewall ports for GlusterFS nodes    hosts: glusterfs -  vars: -    os_firewall_allow: -    - service: glusterfs_sshd -      port: "2222/tcp" -    - service: glusterfs_daemon -      port: "24007/tcp" -    - service: glusterfs_management -      port: "24008/tcp" -    - service: glusterfs_bricks -      port: "49152-49251/tcp" -  roles: -  - role: os_firewall +  tasks: +  - include_role: +      name: openshift_storage_glusterfs +      tasks_from: firewall.yml      when:      - openshift_storage_glusterfs_is_native | default(True) | bool  - name: Open firewall ports for GlusterFS registry nodes    hosts: glusterfs_registry -  vars: -    os_firewall_allow: -    - service: glusterfs_sshd -      port: "2222/tcp" -    - service: glusterfs_daemon -      port: "24007/tcp" -    - service: glusterfs_management -      port: "24008/tcp" -    - service: glusterfs_bricks 
-      port: "49152-49251/tcp" -  roles: -  - role: os_firewall +  tasks: +  - include_role: +      name: openshift_storage_glusterfs +      tasks_from: firewall.yml      when:      - openshift_storage_glusterfs_registry_is_native | default(True) | bool  - name: Configure GlusterFS    hosts: oo_first_master -  roles: -  - role: openshift_storage_glusterfs +  tasks: +  - name: setup glusterfs +    include_role: +      name: openshift_storage_glusterfs      when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index a844fb369..4f8b758fd 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,9 +1,4 @@  --- -- name: Restart master -  service: -    name: "{{ openshift.common.service_type }}-master" -    state: restarted -  when: not openshift_master_ha | bool  - name: Restart master API    service:      name: "{{ openshift.common.service_type }}-master-api" diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml index ed2473a43..c3beb59b7 100644 --- a/playbooks/common/openshift-node/restart.yml +++ b/playbooks/common/openshift-node/restart.yml @@ -27,7 +27,6 @@      with_items:      - etcd_container      - openvswitch -    - "{{ openshift.common.service_type }}-master"      - "{{ openshift.common.service_type }}-master-api"      - "{{ openshift.common.service_type }}-master-controllers"      - "{{ openshift.common.service_type }}-node" diff --git a/roles/calico/README.md b/roles/calico/README.md index 9b9458bfa..65f66ebfa 100644 --- a/roles/calico/README.md +++ b/roles/calico/README.md @@ -6,12 +6,6 @@ Configure Calico components for the Master host.  
* Ansible 2.2 -## Warning: This Calico Integration is in Alpha - -Calico shares the etcd instance used by OpenShift, and distributes client etcd certificates to each node. -For this reason, **we do not (yet) recommend running Calico on any production-like -cluster, or using it for any purpose besides early access testing.** -  ## Installation  To install, set the following inventory configuration parameters: @@ -20,7 +14,19 @@ To install, set the following inventory configuration parameters:  * `openshift_use_openshift_sdn=False`  * `os_sdn_network_plugin_name='cni'` -## Additional Calico/Node and Felix Configuration Options +For more information, see [Calico's official OpenShift Installation Documentation](https://docs.projectcalico.org/latest/getting-started/openshift/installation#bring-your-own-etcd) + +## Improving security with BYO-etcd + +By default, Calico uses the etcd set up by OpenShift. To accomplish this, it generates and distributes client etcd certificates to each node. +Distributing these certs across the cluster in this way weakens the overall security, +so Calico should not be deployed in production in this mode. + +Instead, Calico can be installed in BYO-etcd mode, where it connects to an externally +set up etcd. 
For information on deploying Calico in BYO-etcd mode, see  +[Calico's official OpenShift Installation Documentation](https://docs.projectcalico.org/latest/getting-started/openshift/installation#bring-your-own-etcd) + +## Calico Configuration Options  Additional parameters that can be defined in the inventory are: diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml index 207dee068..e7a9db92f 100644 --- a/roles/calico/defaults/main.yaml +++ b/roles/calico/defaults/main.yaml @@ -5,11 +5,11 @@ cni_conf_dir: "/etc/cni/net.d/"  cni_bin_dir: "/opt/cni/bin/"  cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz" -calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico" -calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam" +calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.10.0/calico" +calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.10.0/calico-ipam"  calico_ipv4pool_ipip: "always"  calico_ipv4pool_cidr: "192.168.0.0/16"  calico_log_dir: "/var/log/calico" -calico_node_image: "calico/node:v1.2.1" +calico_node_image: "calico/node:v2.4.1" diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml index b2df0105f..d40286aba 100644 --- a/roles/calico_master/defaults/main.yaml +++ b/roles/calico_master/defaults/main.yaml @@ -3,5 +3,5 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf  calicoctl_bin_dir: "/usr/local/bin/" -calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl" -calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4" +calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.4.0/calicoctl" +calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.7.0" diff --git 
a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml new file mode 100644 index 000000000..cbe5bb92b --- /dev/null +++ b/roles/cockpit/defaults/main.yml @@ -0,0 +1,8 @@ +--- +r_cockpit_firewall_enabled: True +r_cockpit_use_firewalld: False + +r_cockpit_os_firewall_deny: [] +r_cockpit_os_firewall_allow: +- service: cockpit-ws +  port: 9090/tcp diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml index 0f507e75e..8c0ed3cb8 100644 --- a/roles/cockpit/meta/main.yml +++ b/roles/cockpit/meta/main.yml @@ -12,7 +12,4 @@ galaxy_info:    categories:    - cloud  dependencies: -- role: os_firewall -  os_firewall_allow: -  - service: cockpit-ws -    port: 9090/tcp +- role: lib_os_firewall diff --git a/roles/cockpit/tasks/firewall.yml b/roles/cockpit/tasks/firewall.yml new file mode 100644 index 000000000..e597ac84d --- /dev/null +++ b/roles/cockpit/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_cockpit_firewall_enabled | bool and not r_cockpit_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_cockpit_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_cockpit_os_firewall_deny }}" + +- when: r_cockpit_firewall_enabled | bool and r_cockpit_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_cockpit_os_firewall_allow }}" + +  - name: Remove firewalld allow 
rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_cockpit_os_firewall_deny }}" diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index 57f49ea11..066ee3f3b 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -1,4 +1,8 @@  --- +- name: setup firewall +  include: firewall.yml +  static: yes +  - name: Install cockpit-ws    package: name={{ item }} state=present    with_items: diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index cd4083572..b773a417c 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -10,5 +10,4 @@ galaxy_info:      versions:      - 7  dependencies: -- role: os_firewall  - role: lib_openshift diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index c0d1d5946..d12d7a358 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -1,4 +1,7 @@  --- +r_etcd_firewall_enabled: True +r_etcd_use_firewalld: False +  etcd_initial_cluster_state: new  etcd_initial_cluster_token: etcd-cluster-1 @@ -7,4 +10,13 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_  etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"  etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" +etcd_client_port: 2379 +etcd_peer_port: 2380 +  etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" +r_etcd_os_firewall_deny: [] +r_etcd_os_firewall_allow: +- service: etcd +  port: "{{etcd_client_port}}/tcp" +- service: etcd peering +  port: "{{ etcd_peer_port }}/tcp" diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 689c07a84..9a955c822 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -17,11 +17,6 @@ galaxy_info:    - system  dependencies:  - role: lib_openshift -- 
role: os_firewall -  os_firewall_allow: -  - service: etcd -    port: "{{etcd_client_port}}/tcp" -  - service: etcd peering -    port: "{{ etcd_peer_port }}/tcp" +- role: lib_os_firewall  - role: etcd_server_certificates  - role: etcd_common diff --git a/roles/etcd/tasks/firewall.yml b/roles/etcd/tasks/firewall.yml new file mode 100644 index 000000000..4d0f6290a --- /dev/null +++ b/roles/etcd/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_etcd_firewall_enabled | bool and not r_etcd_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_etcd_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_etcd_os_firewall_deny }}" + +- when: r_etcd_firewall_enabled | bool and r_etcd_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_etcd_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_etcd_os_firewall_deny }}" diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 8c2f392ee..78e543ef1 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -6,6 +6,10 @@      etcd_hostname: "{{ etcd_hostname }}"      etcd_ip: "{{ etcd_ip }}" +- name: setup firewall +  include: 
firewall.yml +  static: yes +  - name: Install etcd    package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present    when: not etcd_is_containerized | bool diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml index fb9280c9e..dfb1c7a2c 100644 --- a/roles/etcd_common/meta/main.yml +++ b/roles/etcd_common/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info:    categories:    - cloud    - system -dependencies: -- { role: openshift_repos } +dependencies: [] diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 9b0c0e0e4..7d9392af9 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1478,7 +1478,16 @@ class OCObject(OpenShiftCLI):          if files:              return self._create(files[0]) -        content['data'] = yaml.dump(content['data']) +        # pylint: disable=no-member +        # The purpose of this change is twofold: +        # - we need a check to only use the ruamel specific dumper if ruamel is loaded +        # - the dumper or the flow style change is needed so openshift is able to parse +        # the resulting yaml, at least until gopkg.in/yaml.v2 is updated +        if hasattr(yaml, 'RoundTripDumper'): +            content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper) +        else: +            content['data'] = yaml.safe_dump(content['data'], default_flow_style=False) +          content_file = Utils.create_tmp_files_from_contents(content)[0]          return self._create(content_file['path']) diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index a78bc06d2..bf65ef603 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1657,6 +1657,9 @@ class OCProject(OpenShiftCLI):                  # Create it here                  api_rval = oadm_project.create() +                if 
api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} +                  # return the created object                  api_rval = oadm_project.get() diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py index 5e423bea9..68f7818e4 100644 --- a/roles/lib_openshift/src/class/oc_obj.py +++ b/roles/lib_openshift/src/class/oc_obj.py @@ -50,7 +50,16 @@ class OCObject(OpenShiftCLI):          if files:              return self._create(files[0]) -        content['data'] = yaml.dump(content['data']) +        # pylint: disable=no-member +        # The purpose of this change is twofold: +        # - we need a check to only use the ruamel specific dumper if ruamel is loaded +        # - the dumper or the flow style change is needed so openshift is able to parse +        # the resulting yaml, at least until gopkg.in/yaml.v2 is updated +        if hasattr(yaml, 'RoundTripDumper'): +            content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper) +        else: +            content['data'] = yaml.safe_dump(content['data'], default_flow_style=False) +          content_file = Utils.create_tmp_files_from_contents(content)[0]          return self._create(content_file['path']) diff --git a/roles/lib_openshift/src/class/oc_project.py b/roles/lib_openshift/src/class/oc_project.py index 9ad8111a8..298597067 100644 --- a/roles/lib_openshift/src/class/oc_project.py +++ b/roles/lib_openshift/src/class/oc_project.py @@ -156,6 +156,9 @@ class OCProject(OpenShiftCLI):                  # Create it here                  api_rval = oadm_project.create() +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} +                  # return the created object                  api_rval = oadm_project.get() diff --git a/roles/lib_os_firewall/README.md b/roles/lib_os_firewall/README.md new file mode 100644 index 000000000..ba8c84865 --- /dev/null +++ 
b/roles/lib_os_firewall/README.md @@ -0,0 +1,63 @@ +lib_os_firewall +=========== + +lib_os_firewall manages iptables firewall settings for a minimal use +case (Adding/Removing rules based on protocol and port number). + +Note: firewalld is not supported on Atomic Host +https://bugzilla.redhat.com/show_bug.cgi?id=1403331 + +Requirements +------------ + +Ansible 2.2 + +Role Variables +-------------- + +| Name                      | Default |                                        | +|---------------------------|---------|----------------------------------------| +| os_firewall_allow         | []      | List of service,port mappings to allow | +| os_firewall_deny          | []      | List of service, port mappings to deny | + +Dependencies +------------ + +None. + +Example Playbook +---------------- + +Use iptables and open tcp ports 80 and 443: +``` +--- +- hosts: servers +  vars: +    os_firewall_use_firewalld: false +    os_firewall_allow: +    - service: httpd +      port: 80/tcp +    - service: https +      port: 443/tcp +  tasks: +  - include_role: +      name: lib_os_firewall + +  - name: set allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    with_items: "{{ os_firewall_allow }}" +``` + + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ +Jason DeTiberus - jdetiber@redhat.com diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/lib_os_firewall/library/os_firewall_manage_iptables.py index aeee3ede8..aeee3ede8 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/lib_os_firewall/library/os_firewall_manage_iptables.py diff --git a/roles/nuage_master/defaults/main.yml b/roles/nuage_master/defaults/main.yml new file mode 100644 index 000000000..ffab25775 --- /dev/null +++ b/roles/nuage_master/defaults/main.yml @@ -0,0 +1,10 @@ 
+--- +r_nuage_master_firewall_enabled: True +r_nuage_master_use_firewalld: False + +nuage_mon_rest_server_port: '9443' + +r_nuage_master_os_firewall_deny: [] +r_nuage_master_os_firewall_allow: +- service: openshift-monitor +  port: "{{ nuage_mon_rest_server_port }}/tcp" diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 162aaae1a..ad7bbb111 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -3,10 +3,6 @@    become: yes    systemd: name=nuage-openshift-monitor state=restarted -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false)) -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted    when: > diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index e3ed9ac71..3da340c85 100644 --- a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -16,8 +16,5 @@ dependencies:  - role: nuage_ca  - role: nuage_common  - role: openshift_etcd_client_certificates -- role: os_firewall  - role: lib_openshift -  os_firewall_allow: -  - service: openshift-monitor -    port: "{{ nuage_mon_rest_server_port }}/tcp" +- role: lib_os_firewall diff --git a/roles/nuage_master/tasks/firewall.yml b/roles/nuage_master/tasks/firewall.yml new file mode 100644 index 000000000..0057dc9ab --- /dev/null +++ b/roles/nuage_master/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_nuage_master_firewall_enabled | bool and not r_nuage_master_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ 
r_nuage_master_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_nuage_master_os_firewall_deny }}" + +- when: r_nuage_master_firewall_enabled | bool and r_nuage_master_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_nuage_master_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_nuage_master_os_firewall_deny }}" diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index 4f8adb63e..d0363c981 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -1,4 +1,8 @@  --- +- name: setup firewall +  include: firewall.yml +  static: yes +  - name: Create directory /usr/share/nuage-openshift-monitor    become: yes    file: path=/usr/share/nuage-openshift-monitor state=directory @@ -45,7 +49,6 @@    become: yes    template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644    notify: -    - restart master      - restart master api      - restart master controllers      - restart nuage-openshift-monitor diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml index eee448e2c..fbf2c4f8d 100644 --- a/roles/nuage_master/tasks/serviceaccount.yml +++ b/roles/nuage_master/tasks/serviceaccount.yml @@ -1,26 +1,6 @@  --- -- name: Create temporary directory for admin kubeconfig -  
command: mktemp -u /tmp/openshift-ansible-XXXXXXX.kubeconfig -  register: nuage_tmp_conf_mktemp -  changed_when: False -  run_once: True -  delegate_to: "{{ nuage_ca_master }}" - -- set_fact: -    nuage_tmp_conf: "{{ nuage_tmp_conf_mktemp.stdout }}" -  run_once: True -  delegate_to: "{{ nuage_ca_master }}" - -- name: Copy Configuration to temporary conf -  command: > -    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{nuage_tmp_conf}} -  changed_when: false -  run_once: True -  delegate_to: "{{ nuage_ca_master }}" -  - name: Create Admin Service Account    oc_serviceaccount: -    kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"      name: nuage      namespace: default      state: present @@ -28,13 +8,12 @@    delegate_to: "{{ nuage_ca_master }}"  - name: Configure role/user permissions -  command: > -    {{ openshift.common.client_binary }} adm {{item}} -    --config={{ nuage_tmp_conf }} -  with_items: "{{nuage_tasks}}" -  register: osnuage_perm_task -  failed_when: "'the object has been modified' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0" -  changed_when: osnuage_perm_task.rc == 0 +  oc_adm_policy_user: +    namespace: default +    resource_name: "{{ item.resource_name }}" +    resource_kind: "{{ item.resource_kind }}" +    user: "{{ item.user }}" +  with_items: "{{ nuage_tasks }}"    run_once: True    delegate_to: "{{ nuage_ca_master }}" @@ -52,10 +31,3 @@        --user={{ nuage_service_account }}    delegate_to: "{{ nuage_ca_master }}"    run_once: True - -- name: Clean temporary configuration file -  command: > -    rm -f {{nuage_tmp_conf}} -  changed_when: false -  delegate_to: "{{ nuage_ca_master }}" -  run_once: True diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml index 651d5775c..57d5d2595 100644 --- a/roles/nuage_master/vars/main.yaml +++ b/roles/nuage_master/vars/main.yaml @@ -23,4 +23,6 @@ nuage_master_crt_dir: /usr/share/nuage-openshift-monitor  
nuage_service_account: system:serviceaccount:default:nuage  nuage_tasks: -  - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }} +- resource_kind: cluster-role +  resource_name: cluster-reader +  user: "{{ nuage_service_account }}" diff --git a/roles/nuage_node/defaults/main.yml b/roles/nuage_node/defaults/main.yml new file mode 100644 index 000000000..b3d2e3cec --- /dev/null +++ b/roles/nuage_node/defaults/main.yml @@ -0,0 +1,12 @@ +--- +r_nuage_node_firewall_enabled: True +r_nuage_node_use_firewalld: False + +nuage_mon_rest_server_port: '9443' + +r_nuage_node_os_firewall_deny: [] +r_nuage_node_os_firewall_allow: +- service: vxlan +  port: 4789/udp +- service: nuage-monitor +  port: "{{ nuage_mon_rest_server_port }}/tcp" diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml index 3e2a5e0c9..9b0315054 100644 --- a/roles/nuage_node/meta/main.yml +++ b/roles/nuage_node/meta/main.yml @@ -15,9 +15,4 @@ galaxy_info:  dependencies:  - role: nuage_common  - role: nuage_ca -- role: os_firewall -  os_firewall_allow: -  - service: vxlan -    port: 4789/udp -  - service: nuage-monitor -    port: "{{ nuage_mon_rest_server_port }}/tcp" +- role: lib_os_firewall diff --git a/roles/nuage_node/tasks/firewall.yml b/roles/nuage_node/tasks/firewall.yml new file mode 100644 index 000000000..baf600d57 --- /dev/null +++ b/roles/nuage_node/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_nuage_node_firewall_enabled | bool and not r_nuage_node_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_nuage_node_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ 
item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_nuage_node_os_firewall_deny }}" + +- when: r_nuage_node_firewall_enabled | bool and r_nuage_node_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_nuage_node_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_nuage_node_os_firewall_deny }}" diff --git a/roles/nuage_node/tasks/iptables.yml b/roles/nuage_node/tasks/iptables.yml index 847c8395d..95ee8643a 100644 --- a/roles/nuage_node/tasks/iptables.yml +++ b/roles/nuage_node/tasks/iptables.yml @@ -15,3 +15,9 @@    when: "'nuage-underlay-overlay' not in iptablesrules.stdout"    notify:      - save iptable rules + +- name: Allow docker daemon traffic from underlay to overlay +  command: /sbin/iptables -t nat -A POSTROUTING ! 
-s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -o svc-pat-tap -j MASQUERADE -m comment --comment "nuage-docker-underlay-overlay" +  when: "'nuage-docker-underlay-overlay' not in iptablesrules.stdout" +  notify: +    - save iptable rules diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index 928f9e2e6..66d6ef4ca 100644 --- a/roles/nuage_node/tasks/main.yaml +++ b/roles/nuage_node/tasks/main.yaml @@ -11,6 +11,10 @@    become: yes    lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}' +- name: Set the K8S/OSE Cluster service CIDR +  become: yes +  lineinfile: dest={{ vrs_config }} regexp=^K8S_SERVICE_IPV4_SUBNET line='K8S_SERVICE_IPV4_SUBNET={{ k8s_cluster_service_cidr }}' +  - name: Set the Standby Controller    become: yes    lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}' @@ -54,3 +58,7 @@      - restart node  - include: iptables.yml + +- name: setup firewall +  include: firewall.yml +  static: yes diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2 index 9fab53906..f6bccebc2 100644 --- a/roles/nuage_node/templates/vsp-openshift.j2 +++ b/roles/nuage_node/templates/vsp-openshift.j2 @@ -9,7 +9,7 @@ enterpriseName: {{ enterprise }}  # Name of the domain in which pods will reside  domainName: {{ domain }}  # Name of the VSD user in admin group -vsdUser: {{ vsduser }} +vsdUser: {{ vsd_user }}  # IP address and port number of master API server  masterApiServer: {{ api_server }}  # REST server URL  diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index 444c5b77e..dfbdf0cc7 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -13,6 +13,5 @@ galaxy_info:    - cloud    - system  dependencies: -- role: openshift_repos  - role: openshift_cli  - role: openshift_named_certificates 
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml index 79e59b410..393bee1f3 100644 --- a/roles/openshift_cfme/defaults/main.yml +++ b/roles/openshift_cfme/defaults/main.yml @@ -1,6 +1,7 @@  --- -# Namespace for the CFME project -openshift_cfme_project: cfme +# Namespace for the CFME project (Note: changed post-3.6 to use +# reserved 'openshift-' namespace prefix) +openshift_cfme_project: openshift-cfme  # Namespace/project description  openshift_cfme_project_description: ManageIQ - CloudForms Management Engine  # Basic user assigned the `admin` role for the project @@ -35,9 +36,9 @@ openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"  # --template=manageiq). If False everything UP TO 'new-app' is ran.  openshift_cfme_install_app: False  # Docker image to pull -openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods:app-latest-fine' }}" -openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods:app-latest-fine' }}" -openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods:app-latest-fine' }}" +openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" +openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}" +openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if 
openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"  openshift_cfme_application_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'app-latest-fine' }}"  openshift_cfme_memcached_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'memcached-latest-fine' }}"  openshift_cfme_postgresql_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'postgresql-latest-fine' }}" diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml index 476a5e030..7e90b09a4 100644 --- a/roles/openshift_cfme/handlers/main.yml +++ b/roles/openshift_cfme/handlers/main.yml @@ -6,19 +6,14 @@  # See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782  ###################################################################### -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) -  notify: Verify API Server -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers    systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not 
(master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server    # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml index cd8c75ec5..7cc95d8fa 100644 --- a/roles/openshift_common/meta/main.yml +++ b/roles/openshift_common/meta/main.yml @@ -13,5 +13,3 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_facts -- role: openshift_repos -- role: openshift_version diff --git a/roles/openshift_docker/meta/main.yml b/roles/openshift_docker/meta/main.yml index 10131f717..60efd4e45 100644 --- a/roles/openshift_docker/meta/main.yml +++ b/roles/openshift_docker/meta/main.yml @@ -12,6 +12,5 @@ galaxy_info:    categories:    - cloud  dependencies: -- role: openshift_version  - role: openshift_docker_facts  - role: docker diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml index ed97d539c..bc8e7bdcf 100644 --- a/roles/openshift_health_checker/meta/main.yml +++ b/roles/openshift_health_checker/meta/main.yml @@ -1 +1,3 @@  --- +dependencies: +- role: openshift_facts diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py index f26008c9f..07ec6f7ef 100644 --- a/roles/openshift_health_checker/openshift_checks/__init__.py +++ b/roles/openshift_health_checker/openshift_checks/__init__.py @@ -10,6 +10,7 @@ from importlib import import_module  from ansible.module_utils import six  from ansible.module_utils.six.moves import reduce  # pylint: disable=import-error,redefined-builtin +from ansible.plugins.filter.core import to_bool as ansible_to_bool  class OpenShiftCheckException(Exception): @@ -119,16 +120,59 @@ class OpenShiftCheck(object):          Ansible task_vars structures are Python dicts, often mapping strings to          other dicts. 
This helper makes it easier to get a nested value, raising -        OpenShiftCheckException when a key is not found or returning a default value -        provided as a keyword argument. +        OpenShiftCheckException when a key is not found. + +        Keyword args: +          default: +            On missing key, return this as default value instead of raising exception. +          convert: +            Supply a function to apply to normalize the value before returning it. +            None is the default (return as-is). +            This function should raise ValueError if the user has provided a value +            that cannot be converted, or OpenShiftCheckException if some other +            problem needs to be described to the user.          """ +        if len(keys) == 1: +            keys = keys[0].split(".") +          try:              value = reduce(operator.getitem, keys, self.task_vars)          except (KeyError, TypeError): -            if "default" in kwargs: -                return kwargs["default"] -            raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys)))) -        return value +            if "default" not in kwargs: +                raise OpenShiftCheckException( +                    "This check expects the '{}' inventory variable to be defined\n" +                    "in order to proceed, but it is undefined. There may be a bug\n" +                    "in Ansible, the checks, or their dependencies." 
+                    "".format(".".join(map(str, keys))) +                ) +            value = kwargs["default"] + +        convert = kwargs.get("convert", None) +        try: +            if convert is None: +                return value +            elif convert is bool:  # interpret bool as Ansible does, instead of python truthiness +                return ansible_to_bool(value) +            else: +                return convert(value) + +        except ValueError as error:  # user error in specifying value +            raise OpenShiftCheckException( +                'Cannot convert inventory variable to expected type:\n' +                '  "{var}={value}"\n' +                '{error}'.format(var=".".join(keys), value=value, error=error) +            ) + +        except OpenShiftCheckException:  # some other check-specific problem +            raise + +        except Exception as error:  # probably a bug in the function +            raise OpenShiftCheckException( +                'There is a bug in this check. 
While trying to convert variable \n' +                '  "{var}={value}"\n' +                'the given converter cannot be used or failed unexpectedly:\n' +                '{error}'.format(var=".".join(keys), value=value, error=error) +            )      @staticmethod      def get_major_minor_version(openshift_image_tag): @@ -153,6 +197,31 @@ class OpenShiftCheck(object):          components = tuple(int(x) for x in components[:2])          return components +    def find_ansible_mount(self, path): +        """Return the mount point for path from ansible_mounts.""" + +        # reorganize list of mounts into dict by path +        mount_for_path = { +            mount['mount']: mount +            for mount +            in self.get_var('ansible_mounts') +        } + +        # NOTE: including base cases '/' and '' to ensure the loop ends +        mount_targets = set(mount_for_path.keys()) | {'/', ''} +        mount_point = path +        while mount_point not in mount_targets: +            mount_point = os.path.dirname(mount_point) + +        try: +            return mount_for_path[mount_point] +        except KeyError: +            known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path)) +            raise OpenShiftCheckException( +                'Unable to determine mount point for path "{}".\n' +                'Known mount points: {}.'.format(path, known_mounts or 'none') +            ) +  LOADER_EXCLUDES = (      "__init__.py", diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py index 39ac0e4ec..6d1dea9ce 100644 --- a/roles/openshift_health_checker/openshift_checks/disk_availability.py +++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py @@ -1,6 +1,5 @@  """Check that there is enough disk space in predefined paths.""" -import os.path  import tempfile  from openshift_checks import OpenShiftCheck, 
OpenShiftCheckException @@ -55,9 +54,6 @@ class DiskAvailability(OpenShiftCheck):      def run(self):          group_names = self.get_var("group_names") -        ansible_mounts = self.get_var("ansible_mounts") -        ansible_mounts = {mount['mount']: mount for mount in ansible_mounts} -          user_config = self.get_var("openshift_check_min_host_disk_gb", default={})          try:              # For backwards-compatibility, if openshift_check_min_host_disk_gb @@ -80,7 +76,7 @@ class DiskAvailability(OpenShiftCheck):          # not part of the official recommendation but present in the user          # configuration.          for path, recommendation in self.recommended_disk_space_bytes.items(): -            free_bytes = self.free_bytes(path, ansible_mounts) +            free_bytes = self.free_bytes(path)              recommended_bytes = max(recommendation.get(name, 0) for name in group_names)              config = user_config.get(path, {}) @@ -127,22 +123,17 @@ class DiskAvailability(OpenShiftCheck):          return {} -    @staticmethod -    def free_bytes(path, ansible_mounts): +    def free_bytes(self, path):          """Return the size available in path based on ansible_mounts.""" -        mount_point = path -        # arbitry value to prevent an infinite loop, in the unlike case that '/' -        # is not in ansible_mounts. -        max_depth = 32 -        while mount_point not in ansible_mounts and max_depth > 0: -            mount_point = os.path.dirname(mount_point) -            max_depth -= 1 - +        mount = self.find_ansible_mount(path)          try: -            free_bytes = ansible_mounts[mount_point]['size_available'] +            return mount['size_available']          except KeyError: -            known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(ansible_mounts)) or 'none' -            msg = 'Unable to determine disk availability for "{}". Known mount points: {}.' 
-            raise OpenShiftCheckException(msg.format(path, known_mounts)) - -        return free_bytes +            raise OpenShiftCheckException( +                'Unable to retrieve disk availability for "{path}".\n' +                'Ansible facts included a matching mount point for this path:\n' +                '  {mount}\n' +                'however it is missing the size_available field.\n' +                'To investigate, you can inspect the output of `ansible -m setup <host>`' +                ''.format(path=path, mount=mount) +            ) diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py index 7ae384bd7..0558ddf14 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_storage.py +++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py @@ -1,6 +1,5 @@  """Check Docker storage driver and usage."""  import json -import os.path  import re  from openshift_checks import OpenShiftCheck, OpenShiftCheckException  from openshift_checks.mixins import DockerHostMixin @@ -252,7 +251,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):                  "msg": "Specified 'max_overlay_usage_percent' is not a percentage: {}".format(threshold),              } -        mount = self.find_ansible_mount(path, self.get_var("ansible_mounts")) +        mount = self.find_ansible_mount(path)          try:              free_bytes = mount['size_available']              total_bytes = mount['size_total'] @@ -275,22 +274,3 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):              }          return {} - -    # TODO(lmeyer): migrate to base class -    @staticmethod -    def find_ansible_mount(path, ansible_mounts): -        """Return the mount point for path from ansible_mounts.""" - -        mount_for_path = {mount['mount']: mount for mount in ansible_mounts} -        mount_point = path -        while mount_point not in mount_for_path: -       
     if mount_point in ["/", ""]:  # "/" not in ansible_mounts??? -                break -            mount_point = os.path.dirname(mount_point) - -        try: -            return mount_for_path[mount_point] -        except KeyError: -            known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path)) or 'none' -            msg = 'Unable to determine mount point for path "{}". Known mount points: {}.' -            raise OpenShiftCheckException(msg.format(path, known_mounts)) diff --git a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py index ae8460b7e..f4296753a 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py @@ -2,7 +2,7 @@  Ansible module for determining if the size of OpenShift image data exceeds a specified limit in an etcd cluster.  """ -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck  class EtcdImageDataSize(OpenShiftCheck): @@ -12,7 +12,7 @@ class EtcdImageDataSize(OpenShiftCheck):      tags = ["etcd"]      def run(self): -        etcd_mountpath = self._get_etcd_mountpath(self.get_var("ansible_mounts")) +        etcd_mountpath = self.find_ansible_mount("/var/lib/etcd")          etcd_avail_diskspace = etcd_mountpath["size_available"]          etcd_total_diskspace = etcd_mountpath["size_total"] @@ -68,18 +68,5 @@ class EtcdImageDataSize(OpenShiftCheck):          return {}      @staticmethod -    def _get_etcd_mountpath(ansible_mounts): -        valid_etcd_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"] - -        mount_for_path = {mnt.get("mount"): mnt for mnt in ansible_mounts} -        for path in valid_etcd_mount_paths: -            if path in mount_for_path: -                return mount_for_path[path] - -        paths = ', 
'.join(sorted(mount_for_path)) or 'none' -        msg = "Unable to determine a valid etcd mountpath. Paths mounted: {}.".format(paths) -        raise OpenShiftCheckException(msg) - -    @staticmethod      def _to_gigabytes(byte_size):          return float(byte_size) / 10.0**9 diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py index e55d55e91..e5d93ff3f 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py @@ -1,6 +1,6 @@  """A health check for OpenShift clusters.""" -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck  class EtcdVolume(OpenShiftCheck): @@ -11,8 +11,8 @@ class EtcdVolume(OpenShiftCheck):      # Default device usage threshold. Value should be in the range [0, 100].      default_threshold_percent = 90 -    # Where to find ectd data, higher priority first. 
-    supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"] +    # Where to find etcd data +    etcd_mount_path = "/var/lib/etcd"      def is_active(self):          etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or [] @@ -20,7 +20,7 @@ class EtcdVolume(OpenShiftCheck):          return super(EtcdVolume, self).is_active() and is_etcd_host      def run(self): -        mount_info = self._etcd_mount_info() +        mount_info = self.find_ansible_mount(self.etcd_mount_path)          available = mount_info["size_available"]          total = mount_info["size_total"]          used = total - available @@ -41,15 +41,3 @@ class EtcdVolume(OpenShiftCheck):              return {"failed": True, "msg": msg}          return {} - -    def _etcd_mount_info(self): -        ansible_mounts = self.get_var("ansible_mounts") -        mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts} - -        for path in self.supported_mount_paths: -            if path in mounts: -                return mounts[path] - -        paths = ', '.join(sorted(mounts)) or 'none' -        msg = "Unable to find etcd storage mount point. 
Paths mounted: {}.".format(paths) -        raise OpenShiftCheckException(msg) diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py index 3b7c39760..ecd8adb64 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/logging.py +++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py @@ -27,7 +27,7 @@ class LoggingCheck(OpenShiftCheck):      name = "logging"      def is_active(self): -        logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False) +        logging_deployed = self.get_var("openshift_hosted_logging_deploy", convert=bool, default=False)          return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()      def is_first_master(self): diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py index 5720eeacf..f4fd2dfed 100644 --- a/roles/openshift_health_checker/test/disk_availability_test.py +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -20,12 +20,24 @@ def test_is_active(group_names, is_active):      assert DiskAvailability(None, task_vars).is_active() == is_active -@pytest.mark.parametrize('ansible_mounts,extra_words', [ -    ([], ['none']),  # empty ansible_mounts -    ([{'mount': '/mnt'}], ['/mnt']),  # missing relevant mount paths -    ([{'mount': '/var'}], ['/var']),  # missing size_available +@pytest.mark.parametrize('desc, ansible_mounts, expect_chunks', [ +    ( +        'empty ansible_mounts', +        [], +        ['determine mount point', 'none'], +    ), +    ( +        'missing relevant mount paths', +        [{'mount': '/mnt'}], +        ['determine mount point', '/mnt'], +    ), +    ( +        'missing size_available', +        [{'mount': '/var'}, {'mount': '/usr'}, {'mount': '/tmp'}], +        ['missing', 'size_available'], +    ),  ]) -def 
test_cannot_determine_available_disk(ansible_mounts, extra_words): +def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):      task_vars = dict(          group_names=['masters'],          ansible_mounts=ansible_mounts, @@ -34,8 +46,8 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):      with pytest.raises(OpenShiftCheckException) as excinfo:          DiskAvailability(fake_execute_module, task_vars).run() -    for word in 'determine disk availability'.split() + extra_words: -        assert word in str(excinfo.value) +    for chunk in expect_chunks: +        assert chunk in str(excinfo.value)  @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [ @@ -97,7 +109,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib      assert not result.get('failed', False) -@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,extra_words', [ +@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [      (          'test with no space available',          ['masters'], @@ -164,7 +176,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib          ['0.0 GB'],      ),  ], ids=lambda argval: argval[0]) -def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, extra_words): +def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, expect_chunks):      task_vars = dict(          group_names=group_names,          openshift_check_min_host_disk_gb=configured_min, @@ -174,8 +186,8 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a      result = DiskAvailability(fake_execute_module, task_vars).run()      assert result['failed'] -    for word in 'below recommended'.split() + extra_words: -        assert word in result.get('msg', '') +    for chunk in 'below recommended'.split() + expect_chunks: +        
assert chunk in result.get('msg', '')  @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [ diff --git a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py index e3d6706fa..d3aae98f2 100644 --- a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py +++ b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py @@ -1,7 +1,8 @@  import pytest  from collections import namedtuple -from openshift_checks.etcd_imagedata_size import EtcdImageDataSize, OpenShiftCheckException +from openshift_checks.etcd_imagedata_size import EtcdImageDataSize +from openshift_checks import OpenShiftCheckException  from etcdkeysize import check_etcd_key_size @@ -56,7 +57,7 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):      with pytest.raises(OpenShiftCheckException) as excinfo:          check.run() -    for word in 'determine valid etcd mountpath'.split() + extra_words: +    for word in ['Unable to determine mount point'] + extra_words:          assert word in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/etcd_volume_test.py b/roles/openshift_health_checker/test/etcd_volume_test.py index 0b255136e..077cea3ea 100644 --- a/roles/openshift_health_checker/test/etcd_volume_test.py +++ b/roles/openshift_health_checker/test/etcd_volume_test.py @@ -1,6 +1,7 @@  import pytest -from openshift_checks.etcd_volume import EtcdVolume, OpenShiftCheckException +from openshift_checks.etcd_volume import EtcdVolume +from openshift_checks import OpenShiftCheckException  @pytest.mark.parametrize('ansible_mounts,extra_words', [ @@ -15,7 +16,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):      with pytest.raises(OpenShiftCheckException) as excinfo:          EtcdVolume(fake_execute_module, task_vars).run() -    for word in 'Unable to find etcd storage mount point'.split() + extra_words: +    for word in 
['Unable to determine mount point'] + extra_words:          assert word in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py index 43aa875f4..789784c77 100644 --- a/roles/openshift_health_checker/test/openshift_check_test.py +++ b/roles/openshift_health_checker/test/openshift_check_test.py @@ -81,6 +81,7 @@ def dummy_check(task_vars):  @pytest.mark.parametrize("keys,expected", [      (("foo",), 42),      (("bar", "baz"), "openshift"), +    (("bar.baz",), "openshift"),  ])  def test_get_var_ok(task_vars, keys, expected):      assert dummy_check(task_vars).get_var(*keys) == expected @@ -94,3 +95,24 @@ def test_get_var_error(task_vars, missing_keys):  def test_get_var_default(task_vars, missing_keys):      default = object()      assert dummy_check(task_vars).get_var(*missing_keys, default=default) == default + + +@pytest.mark.parametrize("keys, convert, expected", [ +    (("foo",), str, "42"), +    (("foo",), float, 42.0), +    (("bar", "baz"), bool, False), +]) +def test_get_var_convert(task_vars, keys, convert, expected): +    assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected + + +@pytest.mark.parametrize("keys, convert", [ +    (("bar", "baz"), int), +    (("bar.baz"), float), +    (("foo"), "bogus"), +    (("foo"), lambda a, b: 1), +    (("foo"), lambda a: 1 / 0), +]) +def test_get_var_convert_error(task_vars, keys, convert): +    with pytest.raises(OpenShiftCheckException): +        dummy_check(task_vars).get_var(*keys, convert=convert) diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 0391e5602..13cbfb14e 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -1,4 +1,10 @@  --- +r_openshift_hosted_router_firewall_enabled: True +r_openshift_hosted_router_use_firewalld: False + +r_openshift_hosted_registry_firewall_enabled: True 
+r_openshift_hosted_registry_use_firewalld: False +  registry_volume_claim: 'registry-claim'  openshift_hosted_router_edits: @@ -26,12 +32,15 @@ openshift_hosted_routers:    - 443:443    certificate: "{{ openshift_hosted_router_certificate | default({}) }}" -  openshift_hosted_router_certificate: {}  openshift_hosted_registry_cert_expire_days: 730  openshift_hosted_router_create_certificate: True -os_firewall_allow: +r_openshift_hosted_router_os_firewall_deny: [] +r_openshift_hosted_router_os_firewall_allow: [] + +r_openshift_hosted_registry_os_firewall_deny: [] +r_openshift_hosted_registry_os_firewall_allow:  - service: Docker Registry Port    port: 5000/tcp -  when: openshift.common.use_calico | bool +  cond: "{{ r_openshift_hosted_use_calico }}" diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index 9e3f37130..28fd396d6 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -15,8 +15,4 @@ dependencies:  - role: openshift_cli  - role: openshift_hosted_facts  - role: lib_openshift -- role: os_firewall -  os_firewall_allow: -  - service: Docker Registry Port -    port: 5000/tcp -  when: openshift.common.use_calico | bool +- role: lib_os_firewall diff --git a/roles/openshift_hosted/tasks/registry/firewall.yml b/roles/openshift_hosted/tasks/registry/firewall.yml new file mode 100644 index 000000000..775b7d6d7 --- /dev/null +++ b/roles/openshift_hosted/tasks/registry/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_hosted_registry_firewall_enabled | bool and not r_openshift_hosted_registry_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}" + +  - name: Remove iptables rules +    
os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}" + +- when: r_openshift_hosted_registry_firewall_enabled | bool and r_openshift_hosted_registry_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}" diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index b946ec8ca..dcd9c87fc 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -1,6 +1,10 @@  --- -- block: +- name: setup firewall +  include: firewall.yml +  static: yes +- when: openshift.hosted.registry.replicas | default(none) is none +  block:    - name: Retrieve list of openshift nodes matching registry selector      oc_obj:        state: list @@ -28,7 +32,6 @@        l_default_replicas: "{{ l_node_count if openshift.hosted.registry.storage.kind | default(none) is not none else 1 }}"      when: l_node_count | int > 0 -  when: openshift.hosted.registry.replicas | default(none) is none  - name: set openshift_hosted facts    set_fact: diff --git a/roles/openshift_hosted/tasks/router/firewall.yml b/roles/openshift_hosted/tasks/router/firewall.yml new file mode 100644 index 000000000..ff90f3372 --- /dev/null +++ 
b/roles/openshift_hosted/tasks/router/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_hosted_router_firewall_enabled | bool and not r_openshift_hosted_router_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}" + +- when: r_openshift_hosted_router_firewall_enabled | bool and r_openshift_hosted_router_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}" diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index dd485a64a..72a1ead80 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -1,4 +1,8 @@  --- +- name: setup firewall +  include: firewall.yml +  static: yes +  - name: Retrieve list of openshift nodes matching router selector    oc_obj:      state: list diff --git a/roles/openshift_hosted_logging/handlers/main.yml 
b/roles/openshift_hosted_logging/handlers/main.yml index ffb812271..d7e83fe9a 100644 --- a/roles/openshift_hosted_logging/handlers/main.yml +++ b/roles/openshift_hosted_logging/handlers/main.yml @@ -1,9 +1,4 @@  --- -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) -  notify: Verify API Server -  - name: Verify API Server    # Using curl here since the uri module requires python-httplib2 and    # wait_for port doesn't provide health information. diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml index 69c5a1663..ce7688581 100644 --- a/roles/openshift_hosted_metrics/handlers/main.yml +++ b/roles/openshift_hosted_metrics/handlers/main.yml @@ -1,17 +1,12 @@  --- -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) -  notify: Verify API Server -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers    systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not 
(master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server    # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index 6190383b6..3f6409233 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -1,4 +1,7 @@  --- +r_openshift_loadbalancer_firewall_enabled: True +r_openshift_loadbalancer_use_firewalld: False +  haproxy_frontends:  - name: main    binds: @@ -12,3 +15,13 @@ haproxy_backends:    - name: web01      address: 127.0.0.1:9000      opts: check + +r_openshift_loadbalancer_os_firewall_deny: [] +r_openshift_loadbalancer_os_firewall_allow: +- service: haproxy stats +  port: "9000/tcp" +- service: haproxy balance +  port: "{{ openshift_master_api_port | default(8443) }}/tcp" +- service: nuage mon +  port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" +  cond: "{{ openshift_use_nuage | default(false) | bool }}" diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml index 0dffb545f..72298b599 100644 --- a/roles/openshift_loadbalancer/meta/main.yml +++ b/roles/openshift_loadbalancer/meta/main.yml @@ -10,16 +10,5 @@ galaxy_info:      versions:      - 7  dependencies: +- role: lib_os_firewall  - role: openshift_facts -- role: os_firewall -  os_firewall_allow: -  - service: haproxy stats -    port: "9000/tcp" -  - service: haproxy balance -    port: "{{ openshift_master_api_port | default(8443) }}/tcp" -- role: os_firewall -  os_firewall_allow: -  - service: nuage mon -    port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp" -  when: openshift_use_nuage | default(false) | bool -- role: openshift_repos diff --git a/roles/openshift_loadbalancer/tasks/firewall.yml b/roles/openshift_loadbalancer/tasks/firewall.yml new file mode 100644 index 
000000000..7d6e8ff36 --- /dev/null +++ b/roles/openshift_loadbalancer/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_loadbalancer_firewall_enabled | bool and not r_openshift_loadbalancer_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_loadbalancer_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_loadbalancer_os_firewall_deny }}" + +- when: r_openshift_loadbalancer_firewall_enabled | bool and r_openshift_loadbalancer_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_loadbalancer_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_loadbalancer_os_firewall_deny }}" diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 68bb4ace8..69b061fc5 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -1,4 +1,8 @@  --- +- name: setup firewall +  include: firewall.yml +  static: yes +  - name: Install haproxy    package: name=haproxy state=present    when: not openshift.common.is_containerized | bool diff --git 
a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml index 69c5a1663..ce7688581 100644 --- a/roles/openshift_logging/handlers/main.yml +++ b/roles/openshift_logging/handlers/main.yml @@ -1,17 +1,12 @@  --- -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) -  notify: Verify API Server -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers    systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server    # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index 10f522b61..b96b8e29d 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -5,7 +5,6 @@      yaml_key: assetConfig.loggingPublicURL      yaml_value: "https://{{ openshift_logging_kibana_hostname }}"    notify: -  - restart master    - restart master api    - 
restart master controllers    tags: diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 9dfc6fc86..74b4d7db4 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -1,7 +1,7 @@  ---  - fail:      msg: Only one Fluentd nodeselector key pair should be provided -  when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" +  when: openshift_logging_fluentd_nodeselector.keys() | count > 1  - fail:      msg: Application logs destination is required diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 62bc26e37..166f102f7 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -99,17 +99,17 @@  # TODO: set up these certs differently?  - set_fact:      kibana_key: "{{ lookup('file', openshift_logging_kibana_key) | b64encode }}" -  when: "{{ openshift_logging_kibana_key | trim | length > 0 }}" +  when: openshift_logging_kibana_key | trim | length > 0    changed_when: false  - set_fact:      kibana_cert: "{{ lookup('file', openshift_logging_kibana_cert) | b64encode }}" -  when: "{{ openshift_logging_kibana_cert | trim | length > 0 }}" +  when: openshift_logging_kibana_cert | trim | length > 0    changed_when: false  - set_fact:      kibana_ca: "{{ lookup('file', openshift_logging_kibana_ca) | b64encode }}" -  when: "{{ openshift_logging_kibana_ca | trim | length > 0 }}" +  when: openshift_logging_kibana_ca | trim | length > 0    changed_when: false  - set_fact: diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 2d3ce5bcd..a4c178908 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,4 +1,21 @@  --- +r_openshift_master_firewall_enabled: True +r_openshift_master_use_firewalld: False +  openshift_node_ips: []  
r_openshift_master_clean_install: false  r_openshift_master_etcd3_storage: false +r_openshift_master_os_firewall_enable: true +r_openshift_master_os_firewall_deny: [] +r_openshift_master_os_firewall_allow: +- service: api server https +  port: "{{ openshift.master.api_port }}/tcp" +- service: api controllers https +  port: "{{ openshift.master.controllers_port }}/tcp" +- service: skydns tcp +  port: "{{ openshift.master.dns_port }}/tcp" +- service: skydns udp +  port: "{{ openshift.master.dns_port }}/udp" +- service: etcd embedded +  port: 4001/tcp +  cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" diff --git a/roles/openshift_master/files/atomic-openshift-master.service b/roles/openshift_master/files/atomic-openshift-master.service deleted file mode 100644 index 02af4dd16..000000000 --- a/roles/openshift_master/files/atomic-openshift-master.service +++ /dev/null @@ -1,23 +0,0 @@ -[Unit] -Description=Atomic OpenShift Master -Documentation=https://github.com/openshift/origin -After=network-online.target -After=etcd.service -Before=atomic-openshift-node.service -Requires=network-online.target - -[Service] -Type=notify -EnvironmentFile=/etc/sysconfig/atomic-openshift-master -Environment=GOTRACEBACK=crash -ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS -LimitNOFILE=131072 -LimitCORE=infinity -WorkingDirectory=/var/lib/origin/ -SyslogIdentifier=atomic-openshift-master -Restart=always -RestartSec=5s - -[Install] -WantedBy=multi-user.target -WantedBy=atomic-openshift-node.service diff --git a/roles/openshift_master/files/origin-master.service b/roles/openshift_master/files/origin-master.service deleted file mode 100644 index cf79dda02..000000000 --- a/roles/openshift_master/files/origin-master.service +++ /dev/null @@ -1,23 +0,0 @@ -[Unit] -Description=Origin Master Service -Documentation=https://github.com/openshift/origin -After=network-online.target -After=etcd.service -Before=origin-node.service 
-Requires=network-online.target - -[Service] -Type=notify -EnvironmentFile=/etc/sysconfig/origin-master -Environment=GOTRACEBACK=crash -ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS -LimitNOFILE=131072 -LimitCORE=infinity -WorkingDirectory=/var/lib/origin/ -SyslogIdentifier=origin-master -Restart=always -RestartSec=5s - -[Install] -WantedBy=multi-user.target -WantedBy=origin-node.service diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 69c5a1663..ce7688581 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,17 +1,12 @@  --- -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) -  notify: Verify API Server -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers    systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server    # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_master/meta/main.yml 
b/roles/openshift_master/meta/main.yml index 907f25bc5..bd2383f61 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -13,6 +13,7 @@ galaxy_info:    - cloud  dependencies:  - role: lib_openshift +- role: lib_os_firewall  - role: openshift_master_facts  - role: openshift_hosted_facts  - role: openshift_master_certificates @@ -25,21 +26,6 @@ dependencies:  - role: openshift_cloud_provider  - role: openshift_builddefaults  - role: openshift_buildoverrides -- role: os_firewall -  os_firewall_allow: -  - service: api server https -    port: "{{ openshift.master.api_port }}/tcp" -  - service: api controllers https -    port: "{{ openshift.master.controllers_port }}/tcp" -  - service: skydns tcp -    port: "{{ openshift.master.dns_port }}/tcp" -  - service: skydns udp -    port: "{{ openshift.master.dns_port }}/udp" -- role: os_firewall -  os_firewall_allow: -  - service: etcd embedded -    port: 4001/tcp -  when: groups.oo_etcd_to_config | default([]) | length == 0  - role: nickhammond.logrotate  - role: contiv    contiv_role: netmaster diff --git a/roles/openshift_master/tasks/clean_systemd_units.yml b/roles/openshift_master/tasks/clean_systemd_units.yml new file mode 100644 index 000000000..590692c10 --- /dev/null +++ b/roles/openshift_master/tasks/clean_systemd_units.yml @@ -0,0 +1,5 @@ +--- + +- name: Disable master service +  systemd: name={{ openshift.common.service_type }}-master state=stopped enabled=no masked=yes +  ignore_errors: true diff --git a/roles/openshift_master/tasks/files b/roles/openshift_master/tasks/files deleted file mode 120000 index feb122881..000000000 --- a/roles/openshift_master/tasks/files +++ /dev/null @@ -1 +0,0 @@ -../files
\ No newline at end of file diff --git a/roles/openshift_master/tasks/firewall.yml b/roles/openshift_master/tasks/firewall.yml new file mode 100644 index 000000000..e51eeb56e --- /dev/null +++ b/roles/openshift_master/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_master_firewall_enabled | bool and not r_openshift_master_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_master_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_master_os_firewall_deny }}" + +- when: r_openshift_master_firewall_enabled | bool and r_openshift_master_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_master_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_master_os_firewall_deny }}" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 1f182a25c..b1412c3d9 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -12,16 +12,20 @@  # HA Variable Validation  - fail:      msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master 
installations" -  when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"])) +  when: openshift.master.ha | bool and ((openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"]))  - fail:      msg: "'native' high availability is not supported for the requested OpenShift version" -  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool +  when: openshift.master.ha | bool and openshift.master.cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool  - fail:      msg: "openshift_master_cluster_password must be set for multi-master installations" -  when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password) +  when: openshift.master.ha | bool and openshift.master.cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)  - fail:      msg: "Pacemaker based HA is not supported at this time when used with containerized installs" -  when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool +  when: openshift.master.ha | bool and openshift.master.cluster_method == "pacemaker" and openshift.common.is_containerized | bool + +- name: Open up firewall ports +  include: firewall.yml +  static: yes  - name: Install Master package    package: @@ -57,7 +61,6 @@    args:      creates: "{{ openshift_master_policy }}"    notify: -    - restart master      - restart master api      - restart master controllers @@ -67,7 +70,6 @@      dest: "{{ openshift_master_scheduler_conf }}"      
backup: true    notify: -    - restart master      - restart master api      - restart master controllers @@ -146,6 +148,9 @@      local_facts:        no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}" +- name: Remove the legacy master service if it exists +  include: clean_systemd_units.yml +  - name: Install the systemd units    include: systemd_units.yml @@ -162,7 +167,6 @@      mode: 0600    when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined    notify: -    - restart master      - restart master api  - set_fact: @@ -178,66 +182,18 @@      group: root      mode: 0600    notify: -    - restart master      - restart master api      - restart master controllers  - include: set_loopback_context.yml    when: openshift.common.version_gte_3_2_or_1_2 -# TODO: Master startup can fail when ec2 transparently reallocates the block -# storage, causing etcd writes to temporarily fail. Retry failures blindly just -# once to allow time for this transient condition to to resolve and for systemd -# to restart the master (which will eventually succeed). 
-# -# https://github.com/coreos/etcd/issues/3864 -# https://github.com/openshift/origin/issues/6065 -# https://github.com/openshift/origin/issues/6447 -- name: Start and enable master -  systemd: -    daemon_reload: yes -    name: "{{ openshift.common.service_type }}-master" -    enabled: yes -    state: started -  when: not openshift_master_ha | bool -  register: start_result -  until: not start_result | failed -  retries: 1 -  delay: 60 -  notify: Verify API Server - -- name: Dump logs from master service if it failed -  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master -  when: start_result | failed - -- name: Stop and disable non-HA master when running HA -  systemd: -    name: "{{ openshift.common.service_type }}-master" -    enabled: no -    state: stopped -  when: openshift_master_ha | bool -  register: task_result -  failed_when: task_result|failed and 'could not' not in task_result.msg|lower - -- set_fact: -    master_service_status_changed: "{{ start_result | changed }}" -  when: not openshift_master_ha | bool - -- name: Mask master service -  systemd: -    name: "{{ openshift.common.service_type }}-master" -    masked: yes -  when: > -    openshift_master_ha | bool and -    openshift.master.cluster_method == 'native' and -    not openshift.common.is_containerized | bool -  - name: Start and enable master api on first master    systemd:      name: "{{ openshift.common.service_type }}-master-api"      enabled: yes      state: started -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0] +  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]    register: start_result    until: not start_result | failed    retries: 1 @@ -249,18 +205,18 @@  - set_fact:      master_api_service_status_changed: "{{ start_result | changed }}" -  when: openshift_master_ha | bool and openshift.master.cluster_method == 
'native' and inventory_hostname == openshift_master_hosts[0] +  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]  - pause:      seconds: 15 -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' +  when: openshift.master.ha | bool and openshift.master.cluster_method == 'native'  - name: Start and enable master api all masters    systemd:      name: "{{ openshift.common.service_type }}-master-api"      enabled: yes      state: started -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0] +  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]    register: start_result    until: not start_result | failed    retries: 1 @@ -272,7 +228,7 @@  - set_fact:      master_api_service_status_changed: "{{ start_result | changed }}" -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0] +  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]  # A separate wait is required here for native HA since notifies will  # be resolved after all tasks in the role. 
@@ -293,14 +249,14 @@    delay: 1    run_once: true    changed_when: false -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool +  when: openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool  - name: Start and enable master controller on first master    systemd:      name: "{{ openshift.common.service_type }}-master-controllers"      enabled: yes      state: started -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0] +  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]    register: start_result    until: not start_result | failed    retries: 1 @@ -313,14 +269,14 @@  - name: Wait for master controller service to start on first master    pause:      seconds: 15 -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' +  when: openshift.master.cluster_method == 'native'  - name: Start and enable master controller on all masters    systemd:      name: "{{ openshift.common.service_type }}-master-controllers"      enabled: yes      state: started -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0] +  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]    register: start_result    until: not start_result | failed    retries: 1 @@ -332,11 +288,11 @@  - set_fact:      master_controllers_service_status_changed: "{{ start_result | changed }}" -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' +  when: openshift.master.cluster_method == 'native'  - name: Install cluster packages    package: name=pcs state=present -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' +  when: openshift.master.cluster_method == 
'pacemaker'      and not openshift.common.is_containerized | bool    register: install_result @@ -345,7 +301,7 @@      name: pcsd      enabled: yes      state: started -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' +  when: openshift.master.cluster_method == 'pacemaker'      and not openshift.common.is_containerized | bool  - name: Set the cluster user password diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index 9944682cc..8d343336f 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -10,14 +10,6 @@      atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master    register: result -- name: Install or Update master system container -  oc_atomic_container: -    name: "{{ openshift.common.service_type }}-master" -    image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" -    state: latest -  when: -    - not l_is_ha -  # HA  - name: Install or Update HA api master system container    oc_atomic_container: @@ -26,15 +18,11 @@      state: latest      values:        - COMMAND=api -  when: -    - l_is_ha  - name: Install or Update HA controller master system container    oc_atomic_container:      name: "{{ openshift.common.service_type }}-master-controllers" -    image: "{{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"      state: latest      values:        - COMMAND=controllers -  
when: -    - l_is_ha diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index d71ad3459..723bdb0c4 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -22,34 +22,12 @@    changed_when: "'Downloaded newer image' in pull_result.stdout"    when: openshift.common.is_containerized | bool and not openshift.common.is_master_system_container | bool -# workaround for missing systemd unit files -- name: "Create the {{ openshift.common.service_type }} systemd unit file" -  template: -    src: "master_docker/master.docker.service.j2" -    dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service" -  when: -  - openshift.common.is_containerized | bool and (openshift.master.ha is not defined or not openshift.master.ha) | bool -  - not openshift.common.is_master_system_container | bool -  register: create_master_unit_file - -- name: "Install {{ openshift.common.service_type }} systemd unit file" -  copy: -    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service" -    src: "{{ openshift.common.service_type }}-master.service" -  register: create_master_unit_file -  when: -  - not openshift.common.is_containerized | bool -  - (openshift.master.ha is not defined or not openshift.master.ha) | bool - -- command: systemctl daemon-reload -  when: create_master_unit_file | changed - -- name: Create the ha systemd unit files for api and controller services +- name: Create the ha systemd unit files    template:      src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"      dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"    when: -  - openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  - openshift.master.cluster_method == "native"    - not openshift.common.is_master_system_container | 
bool    with_items:    - api @@ -63,14 +41,14 @@  - name: Preserve Master API Proxy Config options    command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api    register: master_api_proxy -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"    failed_when: false    changed_when: false  - name: Preserve Master API AWS options    command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api    register: master_api_aws -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"    failed_when: false    changed_when: false @@ -79,12 +57,12 @@      src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api      backup: true -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"    notify:    - restart master api  - name: Restore Master API Proxy Config Options -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"        and master_api_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common    lineinfile:      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api @@ -92,7 +70,7 @@    with_items: "{{ master_api_proxy.stdout_lines | default([]) }}"  - name: Restore Master API AWS Options -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"        and master_api_aws.rc == 0 and        not (openshift_cloudprovider_kind is 
defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)    lineinfile: @@ -104,14 +82,14 @@  - name: Preserve Master Controllers Proxy Config options    command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers    register: master_controllers_proxy -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"    failed_when: false    changed_when: false  - name: Preserve Master Controllers AWS options    command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers    register: master_controllers_aws -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"    failed_when: false    changed_when: false @@ -120,7 +98,7 @@      src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers      backup: true -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"    notify:    - restart master controllers @@ -129,7 +107,7 @@      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers      line: "{{ item }}"    with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}" -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"          and master_controllers_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common  - name: Restore Master Controllers AWS Options @@ -137,39 +115,6 @@      dest: 
/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers      line: "{{ item }}"    with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}" -  when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native" +  when: openshift.master.cluster_method == "native"        and master_controllers_aws.rc == 0 and        not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) - -- name: Install Master docker service file -  template: -    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service" -    src: master_docker/master.docker.service.j2 -  register: install_result -  when: openshift.common.is_containerized | bool and openshift.master.ha is defined and not openshift.master.ha | bool and not openshift.common.is_master_system_container | bool - -- name: Preserve Master Proxy Config options -  command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master -  register: master_proxy_result -  failed_when: false -  changed_when: false - -- set_fact: -    master_proxy: "{{ master_proxy_result.stdout_lines | default([]) }}" - -- name: Preserve Master AWS options -  command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master -  register: master_aws_result -  failed_when: false -  changed_when: false - -- set_fact: -    master_aws: "{{ master_aws_result.stdout_lines | default([]) }}" - -- name: Create the master service env file -  template: -    src: "atomic-openshift-master.j2" -    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master -    backup: true -  notify: -  - restart master diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 7964bbb48..c14579435 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ 
b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -47,11 +47,10 @@ assetConfig:  {% if openshift.master.audit_config | default(none) is not none and openshift.common.version_gte_3_2_or_1_2 | bool %}  auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}  {% endif %} -{% if openshift_master_ha | bool %} -controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }} -{% endif %}  {% if openshift.common.version_gte_3_3_or_1_3 | bool %}  controllerConfig: +  election: +    lockName: openshift-master-controllers    serviceServingCert:      signer:        certFile: service-signer.crt diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2 deleted file mode 100644 index 31c1dfc33..000000000 --- a/roles/openshift_master/templates/master_docker/master.docker.service.j2 +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -After={{ openshift.docker.service_name }}.service -Requires={{ openshift.docker.service_name }}.service -PartOf={{ openshift.docker.service_name }}.service -After=etcd_container.service -Wants=etcd_container.service - -[Service] -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master -ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master -ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master 
--config=${CONFIG_FILE} $OPTIONS -ExecStartPost=/usr/bin/sleep 10 -ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master -Restart=always -RestartSec=5s - -[Install] -WantedBy={{ openshift.docker.service_name }}.service diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index 7745d014f..cf39b73f6 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -19,5 +19,4 @@ openshift_master_valid_grant_methods:  - prompt  - deny -l_is_ha: "{{ openshift.master.ha is defined and openshift.master.ha | bool }}"  openshift_master_is_scaleup_host: False diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml index f2a67bc54..c452b165e 100644 --- a/roles/openshift_master_cluster/meta/main.yml +++ b/roles/openshift_master_cluster/meta/main.yml @@ -12,5 +12,4 @@ galaxy_info:    categories:    - cloud    - system -dependencies: -- { role: openshift_repos } +dependencies: [] diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index ef8dcd5fd..fa228af2a 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -32,7 +32,7 @@    openshift_facts:      role: master      local_facts: -      cluster_method: "{{ openshift_master_cluster_method | default(None) }}" +      cluster_method: "{{ openshift_master_cluster_method | default('native') }}"        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"        cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"        debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}" diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index 69c5a1663..ce7688581 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ 
-1,17 +1,12 @@  --- -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) -  notify: Verify API Server -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers    systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted -  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' +  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server    # Using curl here since the uri module requires python-httplib2 and diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index be1e3c3a0..5059d8d94 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -5,7 +5,6 @@      yaml_key: assetConfig.metricsPublicURL      yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"    notify: -  - restart master    - restart master api    - restart master controllers    tags: diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 47073ee0f..973b3a619 100644 --- a/roles/openshift_node/defaults/main.yml 
+++ b/roles/openshift_node/defaults/main.yml @@ -1,5 +1,8 @@  --- -os_firewall_allow: +r_openshift_node_firewall_enabled: True +r_openshift_node_use_firewalld: False +r_openshift_node_os_firewall_deny: [] +r_openshift_node_os_firewall_allow:  - service: Kubernetes kubelet    port: 10250/tcp  - service: http @@ -8,7 +11,13 @@ os_firewall_allow:    port: 443/tcp  - service: OpenShift OVS sdn    port: 4789/udp -  when: openshift.common.use_openshift_sdn | default(true) | bool +  cond: openshift.common.use_openshift_sdn | default(true) | bool  - service: Calico BGP Port    port: 179/tcp -  when: openshift.common.use_calico | bool +  cond: "{{ openshift.common.use_calico | bool }}" +- service: Kubernetes service NodePort TCP +  port: "{{ openshift_node_port_range | default('') }}/tcp" +  cond: "{{ openshift_node_port_range is defined }}" +- service: Kubernetes service NodePort UDP +  port: "{{ openshift_node_port_range | default('') }}/udp" +  cond: "{{ openshift_node_port_range is defined }}" diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 4fb841add..06373de04 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -14,36 +14,11 @@ galaxy_info:  dependencies:  - role: openshift_node_facts  - role: lib_openshift +- role: lib_os_firewall  - role: openshift_common  - role: openshift_clock  - role: openshift_docker  - role: openshift_node_certificates  - role: openshift_cloud_provider -- role: os_firewall -  os_firewall_allow: -  - service: Kubernetes kubelet -    port: 10250/tcp -  - service: http -    port: 80/tcp -  - service: https -    port: 443/tcp -- role: os_firewall -  os_firewall_allow: -  - service: OpenShift OVS sdn -    port: 4789/udp -  when: openshift.common.use_openshift_sdn | default(true) | bool -- role: os_firewall -  os_firewall_allow: -  - service: Calico BGP Port -    port: 179/tcp -  when: openshift.common.use_calico | bool - -- role: os_firewall -  os_firewall_allow: -  - 
service: Kubernetes service NodePort TCP -    port: "{{ openshift_node_port_range | default('') }}/tcp" -  - service: Kubernetes service NodePort UDP -    port: "{{ openshift_node_port_range | default('') }}/udp" -  when: openshift_node_port_range is defined  - role: openshift_node_dnsmasq    when: openshift.common.use_dnsmasq | bool diff --git a/roles/openshift_node/tasks/firewall.yml b/roles/openshift_node/tasks/firewall.yml new file mode 100644 index 000000000..255aa886a --- /dev/null +++ b/roles/openshift_node/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_node_firewall_enabled | bool and not r_openshift_node_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_node_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_node_os_firewall_deny }}" + +- when: r_openshift_node_firewall_enabled | bool and r_openshift_node_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_node_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_node_os_firewall_deny }}" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 
ca4fef360..3353a22e3 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -6,6 +6,38 @@      - (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']      - not openshift_docker_use_crio | default(false) +- name: setup firewall +  include: firewall.yml +  static: yes + +- name: Set node facts +  openshift_facts: +    role: "{{ item.role }}" +    local_facts: "{{ item.local_facts }}" +  with_items: +    # Reset node labels to an empty dictionary. +    - role: node +      local_facts: +        labels: {} +    - role: node +      local_facts: +        annotations: "{{ openshift_node_annotations | default(none) }}" +        debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" +        iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" +        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" +        labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" +        registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" +        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" +        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" +        storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" +        set_node_ip: "{{ openshift_set_node_ip | default(None) }}" +        node_image: "{{ osn_image | default(None) }}" +        ovs_image: "{{ osn_ovs_image | default(None) }}" +        proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" +        local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" +        dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" +        env_vars: "{{ openshift_node_env_vars | default(None) }}" +  
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory  - name: Check for swap usage    command: grep "^[^#].*swap" /etc/fstab diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index f984a04b2..bc092c26c 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -24,7 +24,6 @@      name: "{{ item }}"      state: stopped    with_items: -  - "{{ openshift.common.service_type }}-master"    - "{{ openshift.common.service_type }}-master-controllers"    - "{{ openshift.common.service_type }}-master-api"    - etcd_container @@ -81,7 +80,6 @@    with_items:    - etcd_container    - openvswitch -  - "{{ openshift.common.service_type }}-master"    - "{{ openshift.common.service_type }}-master-api"    - "{{ openshift.common.service_type }}-master-controllers"    - "{{ openshift.common.service_type }}-node" diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml index f228b6e08..a4fa51172 100644 --- a/roles/openshift_node_upgrade/tasks/restart.yml +++ b/roles/openshift_node_upgrade/tasks/restart.yml @@ -31,7 +31,6 @@    with_items:      - etcd_container      - openvswitch -    - "{{ openshift.common.service_type }}-master"      - "{{ openshift.common.service_type }}-master-api"      - "{{ openshift.common.service_type }}-master-controllers"      - "{{ openshift.common.service_type }}-node" diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index b53b6afa1..4a6e00513 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -67,4 +67,4 @@    register: efs_output    failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr    check_mode: no -  when: 
efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1 +  when: efs_anyuid.stdout.find("system:serviceaccount:" + openshift_provisioners_project + ":provisioners-efs") == -1 diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml index d5291a99a..1c788470a 100644 --- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml +++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml @@ -156,24 +156,16 @@    register: yedit_output  #restart master serially here -- name: restart master -  systemd: name={{ openshift.common.service_type }}-master state=restarted -  when: -  - yedit_output.changed -  - openshift.master.ha is not defined or not openshift.master.ha | bool -  - name: restart master api    systemd: name={{ openshift.common.service_type }}-master-api state=restarted    when:    - yedit_output.changed -  - openshift.master.ha is defined and openshift.master.ha | bool    - openshift.master.cluster_method == 'native'  - name: restart master controllers    systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted    when:    - yedit_output.changed -  - openshift.master.ha is defined and openshift.master.ha | bool    - openshift.master.cluster_method == 'native'  - name: Verify API Server diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index ff2c18812..a5887465e 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -52,3 +52,15 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus  openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"  openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"  
openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}" +r_openshift_master_firewall_enabled: True +r_openshift_master_use_firewalld: False +r_openshift_storage_glusterfs_os_firewall_deny: [] +r_openshift_storage_glusterfs_os_firewall_allow: +- service: glusterfs_sshd +  port: "2222/tcp" +- service: glusterfs_daemon +  port: "24007/tcp" +- service: glusterfs_management +  port: "24008/tcp" +- service: glusterfs_bricks +  port: "49152-49251/tcp" diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml index aab9851f9..0cdd33880 100644 --- a/roles/openshift_storage_glusterfs/meta/main.yml +++ b/roles/openshift_storage_glusterfs/meta/main.yml @@ -11,5 +11,5 @@ galaxy_info:      - 7  dependencies:  - role: openshift_hosted_facts -- role: openshift_repos  - role: lib_openshift +- role: lib_os_firewall diff --git a/roles/openshift_storage_glusterfs/tasks/firewall.yml b/roles/openshift_storage_glusterfs/tasks/firewall.yml new file mode 100644 index 000000000..09dcf1ef9 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_storage_glusterfs_firewall_enabled | bool and not r_openshift_storage_glusterfs_use_firewalld | bool +  block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_glusterfs_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_glusterfs_os_firewall_deny }}" + +- when: 
r_openshift_storage_glusterfs_firewall_enabled | bool and r_openshift_storage_glusterfs_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_glusterfs_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_glusterfs_os_firewall_deny }}" diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml index 7f3c054e7..4a2bc6141 100644 --- a/roles/openshift_storage_nfs/defaults/main.yml +++ b/roles/openshift_storage_nfs/defaults/main.yml @@ -1,4 +1,12 @@  --- +r_openshift_storage_nfs_firewall_enabled: True +r_openshift_storage_nfs_use_firewalld: False + +r_openshift_storage_nfs_os_firewall_deny: [] +r_openshift_storage_nfs_os_firewall_allow: +- service: nfs +  port: "2049/tcp" +  openshift:    hosted:      registry: diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml index 62e38bd8c..98f7c317e 100644 --- a/roles/openshift_storage_nfs/meta/main.yml +++ b/roles/openshift_storage_nfs/meta/main.yml @@ -10,9 +10,5 @@ galaxy_info:      versions:      - 7  dependencies: -- role: os_firewall -  os_firewall_allow: -  - service: nfs -    port: "2049/tcp" +- role: lib_os_firewall  - role: openshift_hosted_facts -- role: openshift_repos diff --git a/roles/openshift_storage_nfs/tasks/firewall.yml b/roles/openshift_storage_nfs/tasks/firewall.yml new file mode 100644 index 000000000..c1c318ff4 --- /dev/null +++ b/roles/openshift_storage_nfs/tasks/firewall.yml @@ -0,0 +1,40 @@ +--- +- when: r_openshift_storage_nfs_firewall_enabled | bool and not r_openshift_storage_nfs_use_firewalld | bool +  
block: +  - name: Add iptables allow rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: add +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_nfs_os_firewall_allow }}" + +  - name: Remove iptables rules +    os_firewall_manage_iptables: +      name: "{{ item.service }}" +      action: remove +      protocol: "{{ item.port.split('/')[1] }}" +      port: "{{ item.port.split('/')[0] }}" +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_nfs_os_firewall_deny }}" + +- when: r_openshift_storage_nfs_firewall_enabled | bool and r_openshift_storage_nfs_use_firewalld | bool +  block: +  - name: Add firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: enabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_nfs_os_firewall_allow }}" + +  - name: Remove firewalld allow rules +    firewalld: +      port: "{{ item.port }}" +      permanent: true +      immediate: true +      state: disabled +    when: item.cond | default(True) +    with_items: "{{ r_openshift_storage_nfs_os_firewall_deny }}" diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 019ada2fb..51f8f4e0e 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -1,4 +1,8 @@  --- +- name: setup firewall +  include: firewall.yml +  static: yes +  - name: Install nfs-utils    package: name=nfs-utils state=present diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml index ca896addd..38b398343 100644 --- a/roles/openshift_version/meta/main.yml +++ b/roles/openshift_version/meta/main.yml @@ -12,7 +12,6 @@ galaxy_info:    categories:    - cloud  dependencies: -- role: openshift_repos  - role: 
openshift_docker_facts  - role: docker    when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index e7ef544f4..be0b8291a 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -1,8 +1,8 @@  OS Firewall  =========== -OS Firewall manages firewalld and iptables firewall settings for a minimal use -case (Adding/Removing rules based on protocol and port number). +OS Firewall manages firewalld and iptables installation. +case.  Note: firewalld is not supported on Atomic Host  https://bugzilla.redhat.com/show_bug.cgi?id=1403331 @@ -18,8 +18,6 @@ Role Variables  | Name                      | Default |                                        |  |---------------------------|---------|----------------------------------------|  | os_firewall_use_firewalld | False   | If false, use iptables                 | -| os_firewall_allow         | []      | List of service,port mappings to allow | -| os_firewall_deny          | []      | List of service, port mappings to deny |  Dependencies  ------------ @@ -29,34 +27,27 @@ None.  
Example Playbook  ---------------- -Use iptables and open tcp ports 80 and 443: +Use iptables:  ```  ---  - hosts: servers -  vars: -    os_firewall_use_firewalld: false -    os_firewall_allow: -    - service: httpd -      port: 80/tcp -    - service: https -      port: 443/tcp -  roles: -  - os_firewall +  task: +  - include_role: +      name: os_firewall +    vars: +      os_firewall_use_firewalld: false  ``` -Use firewalld and open tcp port 443 and close previously open tcp port 80: +Use firewalld:  ```  ---  - hosts: servers    vars: -    os_firewall_allow: -    - service: https -      port: 443/tcp -    os_firewall_deny: -    - service: httpd -      port: 80/tcp -  roles: -  - os_firewall +  tasks: +  - include_role: +      name: os_firewall +    vars: +      os_firewall_use_firewalld: true  ```  License diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml index 01859e5fc..f96a80f1c 100644 --- a/roles/os_firewall/defaults/main.yml +++ b/roles/os_firewall/defaults/main.yml @@ -3,5 +3,3 @@ os_firewall_enabled: True  # firewalld is not supported on Atomic Host  # https://bugzilla.redhat.com/show_bug.cgi?id=1403331  os_firewall_use_firewalld: "{{ False }}" -os_firewall_allow: [] -os_firewall_deny: [] diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 509655b0c..2cc7af478 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -49,19 +49,3 @@    until: pkaction.rc == 0    retries: 6    delay: 10 - -- name: Add firewalld allow rules -  firewalld: -    port: "{{ item.port }}" -    permanent: true -    immediate: true -    state: enabled -  with_items: "{{ os_firewall_allow }}" - -- name: Remove firewalld allow rules -  firewalld: -    port: "{{ item.port }}" -    permanent: true -    immediate: true -    state: disabled -  with_items: "{{ os_firewall_deny }}" diff --git 
a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 55f2fc471..7e1fa2c02 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -29,23 +29,10 @@      masked: no      daemon_reload: yes    register: result +  delegate_to: "{{item}}" +  run_once: true +  with_items: "{{ ansible_play_hosts }}"  - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail    pause: seconds=10    when: result | changed - -- name: Add iptables allow rules -  os_firewall_manage_iptables: -    name: "{{ item.service }}" -    action: add -    protocol: "{{ item.port.split('/')[1] }}" -    port: "{{ item.port.split('/')[0] }}" -  with_items: "{{ os_firewall_allow }}" - -- name: Remove iptables rules -  os_firewall_manage_iptables: -    name: "{{ item.service }}" -    action: remove -    protocol: "{{ item.port.split('/')[1] }}" -    port: "{{ item.port.split('/')[0] }}" -  with_items: "{{ os_firewall_deny }}" @@ -221,27 +221,43 @@ class OpenShiftAnsibleSyntaxCheck(Command):          ''' run command '''          has_errors = False +        playbooks = set() +        included_playbooks = set()          for yaml_file in find_files(                  os.path.join(os.getcwd(), 'playbooks', 'byo'),                  None, None, r'\.ya?ml$'):              with open(yaml_file, 'r') as contents: -                for line in contents: -                    # initialize_groups.yml is used to identify entry point playbooks -                    if re.search(r'initialize_groups\.yml', line): -                        print('-' * 60) -                        print('Syntax checking playbook: %s' % yaml_file) -                        try: -                            subprocess.check_output( -                                ['ansible-playbook', '-i localhost,', -                                 '--syntax-check', yaml_file] -                            ) -             
           except subprocess.CalledProcessError as cpe: -                            print('{}Execution failed: {}{}'.format( -                                self.FAIL, cpe, self.ENDC)) -                            has_errors = True -                        # Break for loop, no need to continue looping lines -                        break +                for task in yaml.safe_load(contents): +                    if not isinstance(task, dict): +                        # Skip yaml files which do not contain plays or includes +                        continue +                    if 'include' in task: +                        # Add the playbook and capture included playbooks +                        playbooks.add(yaml_file) +                        included_file_name = task['include'].split()[0] +                        included_file = os.path.normpath( +                            os.path.join(os.path.dirname(yaml_file), +                                         included_file_name)) +                        included_playbooks.add(included_file) +                    elif 'hosts' in task: +                        playbooks.add(yaml_file) +        # Evaluate the difference between all playbooks and included playbooks +        entrypoint_playbooks = sorted(playbooks.difference(included_playbooks)) +        print('Entry point playbook count: {}'.format(len(entrypoint_playbooks))) +        # Syntax each entry point playbook +        for playbook in entrypoint_playbooks: +            print('-' * 60) +            print('Syntax checking playbook: {}'.format(playbook)) +            try: +                subprocess.check_output( +                    ['ansible-playbook', '-i localhost,', +                     '--syntax-check', playbook] +                ) +            except subprocess.CalledProcessError as cpe: +                print('{}Execution failed: {}{}'.format( +                    self.FAIL, cpe, self.ENDC)) +                has_errors = True          if has_errors:   
            raise SystemExit(1)
