Diffstat (limited to 'roles')
34 files changed, 509 insertions, 383 deletions
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index d7eb8663f..dd185cb38 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -115,7 +115,7 @@ l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio
 # systemcontainers_docker #
 # ----------------------- #
 l_crt_docker_image_prepend_dict:
-  Fedora: "registry.fedoraproject.org/f25"
+  Fedora: "registry.fedoraproject.org/latest"
   Centos: "docker.io/gscrivano"
   RedHat: "registry.access.redhat.com/openshift3"
diff --git a/roles/container_runtime/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2
index 3f066a17f..0a1ff2e0a 100644
--- a/roles/container_runtime/templates/crio.conf.j2
+++ b/roles/container_runtime/templates/crio.conf.j2
@@ -27,7 +27,7 @@ storage_option = [
 [crio.api]
 
 # listen is the path to the AF_LOCAL socket on which crio will listen.
-listen = "/var/run/crio.sock"
+listen = "/var/run/crio/crio.sock"
 
 # stream_address is the IP address on which the stream server will listen
 stream_address = ""
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 62b4716a3..a4d260279 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -27,7 +27,7 @@
 
 - name: PkgMgr RHEL/CentOS | Install ovs
   yum:
-    pkg=openvswitch-2.5.0-2.el7.x86_64
+    pkg=openvswitch
     state=present
   environment:
     http_proxy: "{{ http_proxy|default('') }}"
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 90bb98001..9b3f12567 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift
 ExecStart={{ bin_dir }}/aci_gw.sh start
 ExecStop={{ bin_dir }}/aci_gw.sh stop
 KillMode=control-group
-Restart=on-failure
+Restart=always
 RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
index a602c955e..ce7d0c75e 100644
--- a/roles/contiv/templates/netmaster.service
+++ b/roles/contiv/templates/netmaster.service
@@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
 EnvironmentFile=/etc/default/netmaster
 ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
 KillMode=control-group
-Restart=on-failure
+Restart=always
 RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service
index dc7b95bb5..6358d89ec 100644
--- a/roles/contiv/templates/netplugin.service
+++ b/roles/contiv/templates/netplugin.service
@@ -6,3 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
 EnvironmentFile=/etc/default/netplugin
 ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS
 KillMode=control-group
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 3038ed9f6..86cea5c46 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -10,7 +10,7 @@ r_etcd_common_embedded_etcd: false
 
 osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
 
 etcd_image_dict:
-  origin: "registry.fedoraproject.org/f26/etcd"
+  origin: "registry.fedoraproject.org/latest/etcd"
   openshift-enterprise: "{{ osm_etcd_image }}"
 etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index fcb4c94d3..59d6098d4 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,17 +1,20 @@
 ---
-- oc_obj:
-    state: list
-    kind: project
-    name: "{{ item }}"
-  with_items: "{{ __default_logging_ops_projects }}"
+- command: >
+    {{ openshift.common.client_binary }}
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
   register: __logging_ops_projects
 
 - name: Annotate Operations Projects
   oc_edit:
     kind: ns
-    name: "{{ item.item }}"
+    name: "{{ project }}"
     separator: '#'
     content:
       metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
-  with_items: "{{ __logging_ops_projects.results }}"
-  when: item.results.stderr is not defined
+  with_items: "{{ __logging_ops_projects.stdout.split(' ') }}"
+  loop_control:
+    loop_var: project
+  when:
+  - __logging_ops_projects.stderr | length == 0
+  - openshift_logging_use_ops | default(false) | bool
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ffed956a4..af36d67c6 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -107,6 +107,24 @@
     - logging-fluentd
     - logging-mux
 
+# remove annotations added by logging
+- command: >
+    {{ openshift.common.client_binary }}
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
+  register: __logging_ops_projects
+
+- name: Remove Annotation of Operations Projects
+  command: >
+    {{ openshift.common.client_binary }}
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    annotate {{ project }} openshift.io/logging.ui.hostname-
+  with_items: "{{ __logging_ops_projects.stdout_lines }}"
+  loop_control:
+    loop_var: project
+  when:
+    - __logging_ops_projects.stderr | length == 0
+
 ## EventRouter
 - include_role:
     name: openshift_logging_eventrouter
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 38b2fd8b8..efd119299 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -54,6 +54,48 @@ ha_svc_template_path: "native-cluster"
 
 openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
 
+openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+scheduler_config:
+  kind: Policy
+  apiVersion: v1
+  predicates: "{{ openshift_master_scheduler_predicates
+                  | default(openshift_master_scheduler_current_predicates
+                            | default(openshift_master_scheduler_default_predicates)) }}"
+  priorities: "{{ openshift_master_scheduler_priorities
+                  | default(openshift_master_scheduler_current_priorities
+                            | default(openshift_master_scheduler_default_priorities)) }}"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
+
+openshift_master_is_scaleup_host: False
+
+# These defaults assume forcing journald persistence, fsync to disk once
+# a second, rate-limiting to 10,000 logs a second, no forwarding to
+# syslog or wall, using 8GB of disk space maximum, using 10MB journal
+# files, keeping only a day's worth of logs per journal file, and
+# retaining journal files no longer than a month.
+journald_vars_to_replace:
+- { var: Storage, val: persistent }
+- { var: Compress, val: yes }
+- { var: SyncIntervalSec, val: 1s }
+- { var: RateLimitInterval, val: 1s }
+- { var: RateLimitBurst, val: 10000 }
+- { var: SystemMaxUse, val: 8G }
+- { var: SystemKeepFree, val: 20% }
+- { var: SystemMaxFileSize, val: 10M }
+- { var: MaxRetentionSec, val: 1month }
+- { var: MaxFileSec, val: 1day }
+- { var: ForwardToSyslog, val: no }
+- { var: ForwardToWall, val: no }
+
+
 # NOTE
 # r_openshift_master_*_default may be defined external to this role.
 # openshift_use_*, if defined, may affect other roles or play behavior.
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
deleted file mode 100644
index 0c681c764..000000000
--- a/roles/openshift_master/vars/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
-loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
-openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
-openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
-
-scheduler_config:
-  kind: Policy
-  apiVersion: v1
-  predicates: "{{ openshift_master_scheduler_predicates
-                  | default(openshift_master_scheduler_current_predicates
-                            | default(openshift_master_scheduler_default_predicates)) }}"
-  priorities: "{{ openshift_master_scheduler_priorities
-                  | default(openshift_master_scheduler_current_priorities
-                            | default(openshift_master_scheduler_default_priorities)) }}"
-
-openshift_master_valid_grant_methods:
-- auto
-- prompt
-- deny
-
-openshift_master_is_scaleup_host: False
-
-# These defaults assume forcing journald persistence, fsync to disk once
-# a second, rate-limiting to 10,000 logs a second, no forwarding to
-# syslog or wall, using 8GB of disk space maximum, using 10MB journal
-# files, keeping only a days worth of logs per journal file, and
-# retaining journal files no longer than a month.
-journald_vars_to_replace:
-- { var: Storage, val: persistent }
-- { var: Compress, val: yes }
-- { var: SyncIntervalSec, val: 1s }
-- { var: RateLimitInterval, val: 1s }
-- { var: RateLimitBurst, val: 10000 }
-- { var: SystemMaxUse, val: 8G }
-- { var: SystemKeepFree, val: 20% }
-- { var: SystemMaxFileSize, val: 10M }
-- { var: MaxRetentionSec, val: 1month }
-- { var: MaxFileSec, val: 1day }
-- { var: ForwardToSyslog, val: no }
-- { var: ForwardToWall, val: no }
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 170a3dc6e..1d9797f84 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -4,11 +4,15 @@
     name: NetworkManager
     state: restarted
     enabled: True
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
 
 - name: restart dnsmasq
   systemd:
     name: dnsmasq
     state: restarted
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
 
 - name: restart openvswitch
   systemd:
@@ -47,3 +51,5 @@
 
 - name: reload systemd units
   command: systemctl daemon-reload
+  when:
+  - (not skip_node_svc_handlers | default(False) | bool)
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index e5c80bd09..5d66de0a3 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -2,6 +2,10 @@
 - name: Install the systemd units
   include_tasks: systemd_units.yml
 
+- name: Pull container images
+  include_tasks: container_images.yml
+  when: openshift.common.is_containerized | bool
+
 - name: Start and enable openvswitch service
   systemd:
     name: openvswitch.service
diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml
new file mode 100644
index 000000000..0b8c806ae
--- /dev/null
+++ b/roles/openshift_node/tasks/container_images.yml
@@ -0,0 +1,20 @@
+---
+- name: Install Node system container
+  include_tasks: node_system_container.yml
+  when:
+  - l_is_node_system_container | bool
+
+- name: Install OpenvSwitch system containers
+  include_tasks: openvswitch_system_container.yml
+  when:
+  - openshift_node_use_openshift_sdn | bool
+  - l_is_openvswitch_system_container | bool
+
+- name: Pre-pull openvswitch image
+  command: >
+    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+  register: pull_result
+  changed_when: "'Downloaded newer image' in pull_result.stdout"
+  when:
+  - openshift_node_use_openshift_sdn | bool
+  - not l_is_openvswitch_system_container | bool
diff --git a/roles/openshift_node/tasks/dnsmasq.yml b/roles/openshift_node/tasks/dnsmasq.yml
index f210a3a21..31ca46ec0 100644
--- a/roles/openshift_node/tasks/dnsmasq.yml
+++ b/roles/openshift_node/tasks/dnsmasq.yml
@@ -1,43 +1,4 @@
 ---
-- name: Check for NetworkManager service
-  command: >
-    systemctl show NetworkManager
-  register: nm_show
-  changed_when: false
-  ignore_errors: True
-
-- name: Set fact using_network_manager
-  set_fact:
-    network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
-
-- name: Install dnsmasq
-  package: name=dnsmasq state=installed
-  when: not openshift.common.is_atomic | bool
-  register: result
-  until: result | success
-
-- name: ensure origin/node directory exists
-  file:
-    state: directory
-    path: "{{ item }}"
-    owner: root
-    group: root
-    mode: '0700'
-  with_items:
-  - /etc/origin
-  - /etc/origin/node
-
-# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
-# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
-# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
-# newer we can use --server-file option to update the servers dynamically and
-# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
-# triggers a restart of dnsmasq but not a node restart.
-- name: Install node-dnsmasq.conf
-  template:
-    src: node-dnsmasq.conf.j2
-    dest: /etc/origin/node/node-dnsmasq.conf
-
 - name: Install dnsmasq configuration
   template:
     src: origin-dns.conf.j2
@@ -63,7 +24,3 @@
 # Dynamic NetworkManager based dispatcher
 - include_tasks: dnsmasq/network-manager.yml
   when: network_manager_active | bool
-
-# Relies on ansible in order to configure static config
-- include_tasks: dnsmasq/no-network-manager.yml
-  when: not network_manager_active | bool
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
new file mode 100644
index 000000000..9f66bf12d
--- /dev/null
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -0,0 +1,43 @@
+---
+- name: Check for NetworkManager service
+  command: >
+    systemctl show NetworkManager
+  register: nm_show
+  changed_when: false
+  ignore_errors: True
+
+- name: Set fact using_network_manager
+  set_fact:
+    network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
+
+- name: Install dnsmasq
+  package: name=dnsmasq state=installed
+  when: not openshift.common.is_atomic | bool
+  register: result
+  until: result | success
+
+- name: ensure origin/node directory exists
+  file:
+    state: directory
+    path: "{{ item }}"
+    owner: root
+    group: root
+    mode: '0700'
+  with_items:
+  - /etc/origin
+  - /etc/origin/node
+
+# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed
+# when the node stops. A dbus-message is sent to dnsmasq to add the same entries
+# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or
+# newer we can use --server-file option to update the servers dynamically and
+# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else
+# triggers a restart of dnsmasq but not a node restart.
+- name: Install node-dnsmasq.conf
+  template:
+    src: node-dnsmasq.conf.j2
+    dest: /etc/origin/node/node-dnsmasq.conf
+
+# Relies on ansible in order to configure static config
+- include_tasks: dnsmasq/no-network-manager.yml
+  when: not network_manager_active | bool
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
deleted file mode 100644
index bbe9c71f5..000000000
--- a/roles/openshift_node/tasks/docker/upgrade.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-# input variables:
-# - openshift_service_type
-# - openshift.common.is_containerized
-# - docker_version
-# - skip_docker_restart
-
-- name: Check Docker image count
-  shell: "docker images -aq | wc -l"
-  register: docker_image_count
-
-- debug: var=docker_image_count.stdout
-
-- service:
-    name: docker
-    state: stopped
-  register: l_openshift_node_upgrade_docker_stop_result
-  until: not l_openshift_node_upgrade_docker_stop_result | failed
-  retries: 3
-  delay: 30
-
-- name: Upgrade Docker
-  package: name=docker{{ '-' + docker_version }} state=present
-  register: result
-  until: result | success
-
-# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 32c5f495f..946deb4d3 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -6,6 +6,7 @@
     - deployment_type == 'openshift-enterprise'
     - not openshift_use_crio
 
+- include_tasks: dnsmasq_install.yml
 - include_tasks: dnsmasq.yml
 
 - name: setup firewall
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index c532147b1..262ee698b 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -16,29 +16,10 @@
   - name: include ovs service environment file
     include_tasks: config/install-ovs-service-env-file.yml
 
-  - name: Install Node system container
-    include_tasks: node_system_container.yml
-    when:
-    - l_is_node_system_container | bool
-
-  - name: Install OpenvSwitch system containers
-    include_tasks: openvswitch_system_container.yml
+  - include_tasks: config/install-ovs-docker-service-file.yml
     when:
     - openshift_node_use_openshift_sdn | bool
-    - l_is_openvswitch_system_container | bool
-
-- block:
-  - name: Pre-pull openvswitch image
-    command: >
-      docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
-    register: pull_result
-    changed_when: "'Downloaded newer image' in pull_result.stdout"
-
-  - include_tasks: config/install-ovs-docker-service-file.yml
-  when:
-  - openshift.common.is_containerized | bool
-  - openshift_node_use_openshift_sdn | bool
-  - not l_is_openvswitch_system_container | bool
+    - not l_is_openvswitch_system_container | bool
 
 - include_tasks: config/configure-node-settings.yml
 - include_tasks: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 9f333645a..87556533a 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -10,161 +10,29 @@
 
 # tasks file for openshift_node_upgrade
 
-- include_tasks: registry_auth.yml
+- name: stop services for upgrade
+  include_tasks: upgrade/stop_services.yml
 
-- name: Stop node and openvswitch services
-  service:
-    name: "{{ item }}"
-    state: stopped
-  with_items:
-  - "{{ openshift_service_type }}-node"
-  - openvswitch
-  failed_when: false
-
-- name: Stop additional containerized services
-  service:
-    name: "{{ item }}"
-    state: stopped
-  with_items:
-  - "{{ openshift_service_type }}-master-controllers"
-  - "{{ openshift_service_type }}-master-api"
-  - etcd_container
-  failed_when: false
-  when: openshift.common.is_containerized | bool
-
-- name: Pre-pull node image
-  command: >
-    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
-  register: pull_result
-  changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when: openshift.common.is_containerized | bool
-
-- name: Pre-pull openvswitch image
-  command: >
-    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
-  register: pull_result
-  changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when:
-  - openshift.common.is_containerized | bool
-  - openshift_use_openshift_sdn | bool
-
-- include_tasks: docker/upgrade.yml
-  vars:
-    # We will restart Docker ourselves after everything is ready:
-    skip_docker_restart: True
+# Ensure we actually install the latest package.
+- name: install pre-downloaded docker upgrade rpm
+  command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}"
+  register: result
+  until: result | success
   when:
   - l_docker_upgrade is defined
   - l_docker_upgrade | bool
 
-- include_tasks: "{{ node_config_hook }}"
-  when: node_config_hook is defined
-
-- include_tasks: upgrade/rpm_upgrade.yml
+- name: install pre-pulled rpms
+  include_tasks: upgrade/rpm_upgrade_install.yml
   vars:
-    component: "node"
     openshift_version: "{{ openshift_pkg_version | default('') }}"
   when: not openshift.common.is_containerized | bool
 
-- name: Remove obsolete docker-sdn-ovs.conf
-  file:
-    path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
-    state: absent
-
-- include_tasks: upgrade/containerized_node_upgrade.yml
-  when: openshift.common.is_containerized | bool
-
-- name: Ensure containerized services stopped before Docker restart
-  service:
-    name: "{{ item }}"
-    state: stopped
-  with_items:
-  - etcd_container
-  - openvswitch
-  - "{{ openshift_service_type }}-master-api"
-  - "{{ openshift_service_type }}-master-controllers"
-  - "{{ openshift_service_type }}-node"
-  failed_when: false
-  when: openshift.common.is_containerized | bool
-
-- name: Stop rpm based services
-  service:
-    name: "{{ item }}"
-    state: stopped
-  with_items:
-  - "{{ openshift_service_type }}-node"
-  - openvswitch
-  failed_when: false
-  when: not openshift.common.is_containerized | bool
-
-# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
-- name: Clean up dockershim data
-  file:
-    path: "/var/lib/dockershim/sandbox/"
-    state: absent
-
-- name: Upgrade openvswitch
-  package:
-    name: openvswitch
-    state: latest
-  when: not openshift.common.is_containerized | bool
-  register: result
-  until: result | success
-
-- name: Update oreg value
-  yedit:
-    src: "{{ openshift.common.config_base }}/node/node-config.yaml"
-    key: 'imageConfig.format'
-    value: "{{ oreg_url | default(oreg_url_node) }}"
-  when: oreg_url is defined or oreg_url_node is defined
-
-# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
-- name: Check for swap usage
-  command: grep "^[^#].*swap" /etc/fstab
-  # grep: match any lines which don't begin with '#' and contain 'swap'
-  changed_when: false
-  failed_when: false
-  register: swap_result
-
-  # Disable Swap Block
-- block:
-
-  - name: Disable swap
-    command: swapoff --all
-
-  - name: Remove swap entries from /etc/fstab
-    replace:
-      dest: /etc/fstab
-      regexp: '(^[^#].*swap.*)'
-      replace: '# \1'
-      backup: yes
-
-  - name: Add notice about disabling swap
-    lineinfile:
-      dest: /etc/fstab
-      line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
-      state: present
-
-  when:
-  - swap_result.stdout_lines | length > 0
-  - openshift_disable_swap | default(true) | bool
-  # End Disable Swap Block
-
-- name: Reset selinux context
-  command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
-  when:
-  - ansible_selinux is defined
-  - ansible_selinux.status == 'enabled'
+- include_tasks: "{{ node_config_hook }}"
+  when: node_config_hook is defined
 
-- name: Apply 3.6 dns config changes
-  yedit:
-    src: /etc/origin/node/node-config.yaml
-    key: "{{ item.key }}"
-    value: "{{ item.value }}"
-  with_items:
-  - key: "dnsBindAddress"
-    value: "127.0.0.1:53"
-  - key: "dnsRecursiveResolvConf"
-    value: "/etc/origin/node/resolv.conf"
+- include_tasks: upgrade/config_changes.yml
 
 # Restart all services
 - include_tasks: upgrade/restart.yml
@@ -181,4 +49,7 @@
   retries: 24
   delay: 5
 
+- include_tasks: dnsmasq_install.yml
 - include_tasks: dnsmasq.yml
+
+- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
new file mode 100644
index 000000000..e22018e6d
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -0,0 +1,77 @@
+---
+- name: Update systemd units
+  include_tasks: ../systemd_units.yml
+  when: openshift.common.is_containerized
+
+- name: Update oreg value
+  yedit:
+    src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+    key: 'imageConfig.format'
+    value: "{{ oreg_url | default(oreg_url_node) }}"
+  when: oreg_url is defined or oreg_url_node is defined
+
+- name: Remove obsolete docker-sdn-ovs.conf
+  file:
+    path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
+    state: absent
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
+- name: Clean up dockershim data
+  file:
+    path: "/var/lib/dockershim/sandbox/"
+    state: absent
+
+# Disable Swap Block (pre)
+- block:
+  - name: Remove swap entries from /etc/fstab
+    replace:
+      dest: /etc/fstab
+      regexp: '(^[^#].*swap.*)'
+      replace: '# \1'
+      backup: yes
+
+  - name: Add notice about disabling swap
+    lineinfile:
+      dest: /etc/fstab
+      line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+      state: present
+
+  - name: Disable swap
+    command: swapoff --all
+
+  when:
+  - openshift_node_upgrade_swap_result | default(False) | bool
+  - openshift_disable_swap | default(true) | bool
+# End Disable Swap Block
+
+- name: Apply 3.6 dns config changes
+  yedit:
+    src: /etc/origin/node/node-config.yaml
+    key: "{{ item.key }}"
+    value: "{{ item.value }}"
+  with_items:
+  - key: "dnsBindAddress"
+    value: "127.0.0.1:53"
+  - key: "dnsRecursiveResolvConf"
+    value: "/etc/origin/node/resolv.conf"
+
+- name: Install Node service file
+  template:
+    dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
+    src: "node.service.j2"
+  register: l_node_unit
+
+- name: Reset selinux context
+  command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
+  when:
+  - ansible_selinux is defined
+  - ansible_selinux.status == 'enabled'
+
+# NOTE: This is needed to make sure we are using the correct set
+#       of systemd unit files. The RPMs lay down defaults but
+#       the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+#       require a service to be part of the call.
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: l_node_unit | changed
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
deleted file mode 100644
index 245de60a7..000000000
--- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
-    skip_node_svc_handlers: True
-
-- name: Update systemd units
-  include_tasks: ../systemd_units.yml
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
new file mode 100644
index 000000000..71f00dcd2
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
@@ -0,0 +1,15 @@
+---
+- name: Pre-pull node image
+  command: >
+    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+  register: pull_result
+  changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+- name: Pre-pull openvswitch image
+  command: >
+    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+  register: pull_result
+  changed_when: "'Downloaded newer image' in pull_result.stdout"
+  when: openshift_use_openshift_sdn | bool
+
+- include_tasks: ../container_images.yml
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 65c301783..717cfa712 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -13,6 +13,15 @@
 - name: Reload systemd to ensure latest unit files
   command: systemctl daemon-reload
 
+- name: Restart support services
+  service:
+    name: "{{ item }}"
+    state: restarted
+    enabled: True
+  with_items:
+    - NetworkManager
+    - dnsmasq
+
 - name: Restart container runtime
   service:
     name: "{{ openshift_docker_service_name }}"
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index 120b93bc3..d2864e6b8 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -5,29 +5,20 @@
 # - openshift_pkg_version
 # - openshift.common.is_atomic
 
-# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
-  package: "name={{ openshift_service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+# Pre-pull new node rpm, but don't install
+- name: download new node packages
+  command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ') }}"
   register: result
   until: result | success
+  vars:
+    openshift_node_upgrade_rpm_list:
+      - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+      - "PyYAML"
+      - "dnsmasq"
 
-- name: Ensure python-yaml present for config upgrade
-  package: name=PyYAML state=present
-  when: not openshift.common.is_atomic | bool
+# Pre-pull the rpms for openvswitch, but don't install.
+# openvswitch requires the latest version to be installed.
+- name: download openvswitch upgrade rpm
+  command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch"
   register: result
   until: result | success
-
-- name: Install Node service file
-  template:
-    dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
-    src: "node.service.j2"
-  register: l_node_unit
-
-# NOTE: This is needed to make sure we are using the correct set
-#       of systemd unit files. The RPMs lay down defaults but
-#       the install/upgrade may override them in /etc/systemd/system/.
-# NOTE: We don't use the systemd module as some versions of the module
-#       require a service to be part of the call.
-- name: Reload systemd units
-  command: systemctl daemon-reload
-  when: l_node_unit | changed
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
new file mode 100644
index 000000000..6390be558
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -0,0 +1,19 @@
+---
+# input variables:
+# - openshift_service_type
+# - component
+# - openshift_pkg_version
+# - openshift.common.is_atomic
+
+# Install the pre-pulled RPMs.
+# Note: dnsmasq is covered in its own play. openvswitch is included here
+# because once we have the latest rpm downloaded, it will happily be installed.
+- name: install pre-pulled node packages
+  command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ') }}"
+  register: result
+  until: result | success
+  vars:
+    openshift_node_upgrade_rpm_list:
+      - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+      - "PyYAML"
+      - "openvswitch"
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
new file mode 100644
index 000000000..bbf1c5f25
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -0,0 +1,43 @@
+---
+- name: Stop node and openvswitch services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift_service_type }}-node"
+  - openvswitch
+  failed_when: false
+
+- name: Ensure containerized services stopped before Docker restart
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - etcd_container
+  - openvswitch
+  - "{{ openshift_service_type }}-master-api"
+  - "{{ openshift_service_type }}-master-controllers"
+  - "{{ openshift_service_type }}-node"
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
+- service:
+    name: docker
+    state: stopped
+  register: l_openshift_node_upgrade_docker_stop_result
+  until: not l_openshift_node_upgrade_docker_stop_result | failed
+  retries: 3
+  delay: 30
+  when:
+  - l_docker_upgrade is defined
+  - l_docker_upgrade | bool
+
+- name: Stop rpm based services
+  service:
+    name: "{{ item }}"
+    state: stopped
+  with_items:
+  - "{{ openshift_service_type }}-node"
+  - openvswitch
+  failed_when: false
+  when: not openshift.common.is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
new file mode 100644
index 000000000..3346b7c65
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -0,0 +1,56 @@
+---
+# This is a hack to allow us to update various components without restarting
+# services. This will persist into the upgrade play as well, so everything
+# needs to be restarted by hand.
+- set_fact:
+    skip_node_svc_handlers: True
+
+- include_tasks: registry_auth.yml
+
+- name: update package metadata to speed up install later
+  command: "{{ ansible_pkg_mgr }} makecache" +  register: result +  until: result | success +  when: not openshift.common.is_containerized | bool + +- name: Check Docker image count +  shell: "docker images -aq | wc -l" +  register: docker_image_count +  when: +  - l_docker_upgrade is defined +  - l_docker_upgrade | bool + +- debug: var=docker_image_count.stdout +  when: +  - l_docker_upgrade is defined +  - l_docker_upgrade | bool + +- include_tasks: upgrade/containerized_upgrade_pull.yml +  when: openshift.common.is_containerized | bool + +# Prepull the rpms for docker upgrade, but don't install +- name: download docker upgrade rpm +  command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}" +  register: result +  until: result | success +  when: +  - l_docker_upgrade is defined +  - l_docker_upgrade | bool + +- include_tasks: upgrade/rpm_upgrade.yml +  vars: +    openshift_version: "{{ openshift_pkg_version | default('') }}" +  when: not openshift.common.is_containerized | bool + +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage +  command: grep "^[^#].*swap" /etc/fstab +  # grep: match any lines which don't begin with '#' and contain 'swap' +  changed_when: false +  failed_when: false +  register: swap_result + +# Set this fact here so we can use it during the next play, which is serial. +- name: set_fact swap_result +  set_fact: +    openshift_node_upgrade_swap_result: "{{ swap_result.stdout_lines | length > 0 | bool }}" diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 16fdde02e..261cac6f1 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -20,9 +20,9 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam    container-runtime:    - remote    container-runtime-endpoint: -  - /var/run/crio.sock +  - /var/run/crio/crio.sock    image-service-endpoint: -  - /var/run/crio.sock +  - /var/run/crio/crio.sock    node-labels:    - router=true    - registry=true diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 5e7bde1e1..83954eaf8 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -37,6 +37,13 @@    - when: r_openshift_repos_has_run is not defined      block: +    - include_tasks: rhel_repos.yml +      when: +      - ansible_distribution == 'RedHat' +      - deployment_type == 'openshift-enterprise' +      - (rhel_subscription_user or rhsub_user) is defined +      - (rhel_subscription_password or rhsub_pass) is defined +      - include_tasks: centos_repos.yml        when:        - ansible_os_family == "RedHat" diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml new file mode 100644 index 000000000..c384cbe9a --- /dev/null +++ b/roles/openshift_repos/tasks/rhel_repos.yml @@ -0,0 +1,34 @@ +--- +- name: Ensure RHEL rhui repositories are disabled +  command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'rhui'" +  register: repo_rhui +  changed_when: "repo_rhui.rc != 1" +  failed_when: repo_rhui.rc == 11 + +- name: Disable RHEL rhui repositories +  command: bash -c "yum-config-manager \ +    --disable 'rhui-REGION-client-config-server-7' \ +    --disable 'rhui-REGION-rhel-server-rh-common' \ +    --disable 'rhui-REGION-rhel-server-releases' \ +    --disable 
'rhui-REGION-client-config-server-7'" +  when: repo_rhui.changed + +- name: Ensure RHEL repositories are enabled +  command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'Red Hat' | wc -l" +  register: repo_rhel +  changed_when: "'4' not in repo_rhel.stdout" +  failed_when: repo_rhel.rc == 11 + +- name: Disable all repositories +  command: bash -c "subscription-manager repos --disable='*'" +  when: repo_rhel.changed + +- name: Enable RHEL repositories +  command: subscription-manager repos \ +               --enable="rhel-7-server-rpms" \ +               --enable="rhel-7-server-extras-rpms" \ +               --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \ +               --enable="rhel-7-fast-datapath-rpms" +  register: subscribe_repos +  until: subscribe_repos | succeeded +  when: repo_rhel.changed diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml deleted file mode 100644 index 8acdfb969..000000000 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- set_fact: -    openshift_release: "{{ openshift_release[1:] }}" -  when: -  - openshift_release is defined -  - openshift_release[0] == 'v' - -- name: Disable all repositories -  command: subscription-manager repos --disable="*" - -- name: Enable RHEL repositories -  command: subscription-manager repos \ -               --enable="rhel-7-server-rpms" \ -               --enable="rhel-7-server-extras-rpms" \ -               --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \ -               --enable="rhel-7-fast-datapath-rpms" -  register: subscribe_repos -  until: subscribe_repos | succeeded diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index 3466b7e44..74ee8bbfe 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -1,30 +1,21 @@  --- -# TODO: Enhance redhat_subscription module -#       to make it able to attach to a pool -#       to make it able to enable repositories +- set_fact: +    rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}" +    rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}" +    rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}" +    rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}"  - fail:      msg: "This role is only supported for Red Hat hosts"    when: ansible_distribution != 'RedHat'  - fail: -    msg: The rhsub_user variable is required for this role. -  when: rhsub_user is not defined or not rhsub_user +    msg: The rhel_subscription_user variable is required for this role. +  when: rhel_subscription_user is not defined or not rhsub_user is not defined  - fail: -    msg: The rhsub_pass variable is required for this role. 
-  when: rhsub_pass is not defined or not rhsub_pass
-
-- name: Detecting Atomic Host Operating System
-  stat:
-    path: /run/ostree-booted
-  register: ostree_booted
-
-- name: Satellite preparation
-  command: "rpm -Uvh http://{{ rhsub_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
-  args:
-    creates: /etc/rhsm/ca/katello-server-ca.pem
-  when: rhsub_server is defined and rhsub_server
+    msg: The rhel_subscription_pass variable is required for this role.
+  when: rhel_subscription_pass is not defined or not rhel_subscription_pass
 
 - name: Install Red Hat Subscription manager
   yum:
@@ -33,36 +24,58 @@
   register: result
   until: result | success
 
-- name: RedHat subscriptions
+- name: Is host already registered?
+  command: bash -c "subscription-manager version"
+  register: rh_subscribed
+  changed_when: "'not registered' in rh_subscribed.stdout"
+  ignore_errors: yes
+
+- name: Register host
   redhat_subscription:
-    username: "{{ rhsub_user }}"
-    password: "{{ rhsub_pass }}"
+    username: "{{ rhel_subscription_user }}"
+    password: "{{ rhel_subscription_pass }}"
   register: rh_subscription
   until: rh_subscription | succeeded
+  when:
+    - "'not registered' in rh_subscribed.stdout"
+    - rhel_subscription_user is defined
+    - rhel_subscription_pass is defined
 
-- name: Retrieve the OpenShift Pool ID
-  command: subscription-manager list --available --matches="{{ rhsub_pool }}" --pool-only
-  register: openshift_pool_id
-  until: openshift_pool_id | succeeded
-  changed_when: False
+- fail:
+    msg: 'Unable to register host with Red Hat Subscription Manager'
+  when:
+    - "'not registered' in rh_subscribed.stdout"
+    - rh_subscription.failed
 
 - name: Determine if OpenShift Pool Already Attached
-  command: subscription-manager list --consumed --matches="{{ rhsub_pool }}" --pool-only
+  command: bash -c "subscription-manager list --consumed --pool-only --matches '*OpenShift*' | grep {{ rhel_subscription_pool }}"
   register: openshift_pool_attached
-  until: openshift_pool_attached | succeeded
-  changed_when: False
-  when: openshift_pool_id.stdout == ''
+  changed_when: rhel_subscription_pool not in openshift_pool_attached.stdout
+  failed_when: openshift_pool_attached.rc == 2
+  ignore_errors: yes
+
+- name: Retrieve the OpenShift Pool ID
+  command: bash -c "subscription-manager list --available --pool-only --matches '*OpenShift*' | grep {{ rhel_subscription_pool }}"
+  register: openshift_pool_retrieve
+  changed_when: rhel_subscription_pool in openshift_pool_retrieve.stdout
+  when: rhel_subscription_pool not in openshift_pool_attached.stdout
+  ignore_errors: yes
 
 - fail:
-    msg: "Unable to find pool matching {{ rhsub_pool }} in available or consumed pools"
-  when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == ''
+    msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available pools"
+  when:
+    - rhel_subscription_pool not in openshift_pool_attached.stdout
+    - rhel_subscription_pool not in openshift_pool_retrieve.stdout
 
 - name: Attach to OpenShift Pool
-  command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }}
-  register: subscribe_pool
-  until: subscribe_pool | succeeded
-  when: openshift_pool_id.stdout != ''
+  command: bash -c "subscription-manager attach --pool {{ rhel_subscription_pool }}"
+  register: openshift_pool_attached
+  changed_when: "'Successfully attached a subscription' in openshift_pool_attached.stdout"
+  when: rhel_subscription_pool not in openshift_pool_attached.stdout
 
-- include_tasks: enterprise.yml
+- include_role:
+    name: rhel_subscribe
+    tasks_from: satellite
   when:
-  - not ostree_booted.stat.exists | bool
+    - (rhel_subscription_server or rhsub_server) is defined
+    - (rhel_subscription_server or rhsub_server)
diff --git a/roles/rhel_subscribe/tasks/satellite.yml b/roles/rhel_subscribe/tasks/satellite.yml
new file mode 100644
index 000000000..b2b2a621d
--- /dev/null
+++ b/roles/rhel_subscribe/tasks/satellite.yml
@@ -0,0 +1,5 @@
+---
+- name: Satellite preparation
+  command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+  args:
+    creates: /etc/rhsm/ca/katello-server-ca.pem
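Several templates above move the CRI-O socket from /var/run/crio.sock to /var/run/crio/crio.sock, so a quick smoke test can confirm the runtime actually answers at the new path. A hedged sketch of such a task, assuming crictl is installed on the node (not something this changeset itself deploys):

- name: Verify CRI-O responds at the relocated socket
  command: crictl --runtime-endpoint unix:///var/run/crio/crio.sock version
  changed_when: false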
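The node upgrade refactor splits package handling into two phases: rpms are fetched with --downloadonly while the node is still serving traffic (upgrade_pre.yml), then installed from the local cache with -C during the outage window (rpm_upgrade_install.yml), keeping the window short and network-independent. A minimal standalone sketch of that pattern, assuming yum and a placeholder package name (example-pkg):

---
- hosts: nodes
  tasks:
  - name: download the rpm into the local yum cache only
    command: yum install -y --downloadonly example-pkg  # example-pkg is a placeholder
    register: result
    until: result | success

  - name: install strictly from the cache later; -C avoids any network access
    command: yum install -C -y example-pkg
    register: result
    until: result | success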
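For the reworked rhel_subscribe role, the set_fact at the top of main.yml is meant to resolve each rhel_subscription_* value from an environment variable first and fall back to the legacy rhsub_* variable. A hedged sketch of one way to invoke it; the user, password, and pool values are placeholders:

---
- hosts: all
  roles:
  - role: rhel_subscribe
    vars:
      rhsub_user: example-user      # placeholder credential
      rhsub_pass: example-password  # placeholder credential
      rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'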