diff options
Diffstat (limited to 'roles')
121 files changed, 1858 insertions, 608 deletions
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 0f4b71124..9a91927b8 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -30,8 +30,12 @@      ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"      ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}" +- set_fact: +    openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +  when: openshift_master_config_dir is undefined +  - slurp: -    src: "{{ ansible_service_broker_certs_dir }}/ca.crt" +    src: "{{ openshift_master_config_dir }}/service-signer.crt"    register: catalog_ca @@ -231,6 +235,20 @@                        value: /etc/ansible-service-broker/config.yaml                    resources: {}                    terminationMessagePath: /tmp/termination-log +                  readinessProbe: +                    httpGet: +                      port: 1338 +                      path: /healthz +                      scheme: HTTPS +                    initialDelaySeconds: 15 +                    timeoutSeconds: 1 +                  livenessProbe: +                    httpGet: +                      port: 1338 +                      path: /healthz +                      scheme: HTTPS +                    initialDelaySeconds: 15 +                    timeoutSeconds: 1                  - image: "{{ ansible_service_broker_etcd_image }}"                    name: etcd @@ -327,12 +345,12 @@    oc_obj:      name: ansible-service-broker      state: present -    kind: ServiceBroker +    kind: ClusterServiceBroker      content:        path: /tmp/brokerout        data: -        apiVersion: servicecatalog.k8s.io/v1alpha1 -        kind: ServiceBroker +        apiVersion: servicecatalog.k8s.io/v1beta1 +        kind: 
ClusterServiceBroker          metadata:            name: ansible-service-broker          spec: diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index e36dfa7b9..1c830cb4e 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1,5 +1,6 @@  ---  docker_cli_auth_config_path: '/root/.docker' +openshift_docker_signature_verification: False  # oreg_url is defined by user input.  oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index dbe0b0d28..7ccab37a5 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -115,11 +115,12 @@      dest: /etc/sysconfig/docker      regexp: '^OPTIONS=.*$'      line: "OPTIONS='\ -      {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\ -      {% if docker_log_driver is defined  %} --log-driver {{ docker_log_driver }}{% endif %}\ -      {% if docker_log_options is defined %} {{ docker_log_options |  oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ -      {% if docker_options is defined %} {{ docker_options }}{% endif %}\ -      {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" +      {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \ +      {% if docker_log_driver is defined  %} --log-driver {{ docker_log_driver }}{% endif %} \ +      {% if docker_log_options is defined %} {{ docker_log_options |  oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ +      {% if docker_options is defined %} {{ docker_options }}{% endif %} \ +      {% if docker_disable_push_dockerhub is defined %} 
--confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \ +      --signature-verification={{ openshift_docker_signature_verification | bool }}'"    when: docker_check.stat.isreg is defined and docker_check.stat.isreg    notify:    - restart docker @@ -139,6 +140,13 @@    notify:    - restart docker +# The following task is needed as the systemd module may report a change in +# state even though docker is already running. +- name: Detect if docker is already started +  command: "systemctl show docker -p ActiveState" +  changed_when: False +  register: r_docker_already_running_result +  - name: Start the Docker service    systemd:      name: docker @@ -151,7 +159,7 @@    delay: 30  - set_fact: -    docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}" +    docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"  - name: Check for credentials file for registry auth    stat: diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index fdc6cd24a..a79600930 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -36,6 +36,12 @@      state: present    when: not openshift.common.is_atomic | bool +- name: Check we are not using node as a Docker container with CRI-O +  fail: msg='Cannot use CRI-O with node configured as a Docker container' +  when: +    - openshift.common.is_containerized | bool +    - not openshift.common.is_node_system_container | bool +  # Used to pull and install the system container  - name: Ensure atomic is installed    package: diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 807b9541a..78f231416 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -70,7 +70,8 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_  
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"  etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_peer: 127.0.0.1 +# required role variable +#etcd_peer: 127.0.0.1  etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"  etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}" diff --git a/roles/etcd/tasks/auxiliary/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml index 95a0e7c0a..1ed2db5bc 100644 --- a/roles/etcd/tasks/auxiliary/clean_data.yml +++ b/roles/etcd/tasks/auxiliary/clean_data.yml @@ -1,5 +1,5 @@  ---  - name: Remove member data    file: -    path: /var/lib/etcd/member +    path: "{{ etcd_data_dir }}/member"      state: absent diff --git a/roles/etcd/tasks/auxiliary/disable_etcd.yml b/roles/etcd/tasks/auxiliary/disable_etcd.yml new file mode 100644 index 000000000..7c6d0409d --- /dev/null +++ b/roles/etcd/tasks/auxiliary/disable_etcd.yml @@ -0,0 +1,5 @@ +--- +- name: Disable etcd members +  service: +    name: "{{ etcd_service }}" +    state: stopped diff --git a/roles/etcd/tasks/auxiliary/force_new_cluster.yml b/roles/etcd/tasks/auxiliary/force_new_cluster.yml new file mode 100644 index 000000000..ae8a36130 --- /dev/null +++ b/roles/etcd/tasks/auxiliary/force_new_cluster.yml @@ -0,0 +1,31 @@ +--- +- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host +  lineinfile: +    line: "ETCD_FORCE_NEW_CLUSTER=true" +    dest: /etc/etcd/etcd.conf +    backup: true + +- name: Start etcd +  systemd: +    name: "{{ etcd_service }}" +    state: started + +- name: Wait for cluster to become healthy after bringing up first member +  command: > +    etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ 
etcd_client_port }} cluster-health +  register: l_etcd_migrate_health +  until: l_etcd_migrate_health.rc == 0 +  retries: 3 +  delay: 30 + +- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host +  lineinfile: +    line: "ETCD_FORCE_NEW_CLUSTER=true" +    dest: /etc/etcd/etcd.conf +    state: absent +    backup: true + +- name: Restart first etcd host +  systemd: +    name: "{{ etcd_service }}" +    state: restarted diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml new file mode 100644 index 000000000..6daa6dc51 --- /dev/null +++ b/roles/etcd/tasks/backup.archive.yml @@ -0,0 +1,3 @@ +--- +- include: backup/vars.yml +- include: backup/archive.yml diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml new file mode 100644 index 000000000..cc540cbca --- /dev/null +++ b/roles/etcd/tasks/backup.copy.yml @@ -0,0 +1,3 @@ +--- +- include: backup/vars.yml +- include: backup/copy.yml diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml new file mode 100644 index 000000000..26ec15043 --- /dev/null +++ b/roles/etcd/tasks/backup.fetch.yml @@ -0,0 +1,3 @@ +--- +- include: backup/vars.yml +- include: backup/fetch.yml diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml new file mode 100644 index 000000000..24bd0540d --- /dev/null +++ b/roles/etcd/tasks/backup.force_new_cluster.yml @@ -0,0 +1,12 @@ +--- +- include: backup/vars.yml + +- name: Move content of etcd backup under the etcd data directory +  command: > +    mv "{{ l_etcd_backup_dir }}/member" "{{ l_etcd_data_dir }}" + +- name: Set etcd group for the etcd data directory +  command: > +    chown -R etcd:etcd "{{ l_etcd_data_dir }}" + +- include: auxiliary/force_new_cluster.yml diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml new file mode 100644 index 000000000..77a637360 --- /dev/null +++ b/roles/etcd/tasks/backup.unarchive.yml @@ 
-0,0 +1,3 @@ +--- +- include: backup/vars.yml +- include: backup/unarchive.yml diff --git a/roles/etcd/tasks/backup/archive.yml b/roles/etcd/tasks/backup/archive.yml new file mode 100644 index 000000000..f6aa68a6e --- /dev/null +++ b/roles/etcd/tasks/backup/archive.yml @@ -0,0 +1,5 @@ +--- +- name: Archive backup +  archive: +    path: "{{ l_etcd_backup_dir }}" +    dest: "{{ l_etcd_backup_dir }}.tgz" diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml index 42d27c081..ec1a1989c 100644 --- a/roles/etcd/tasks/backup/backup.yml +++ b/roles/etcd/tasks/backup/backup.yml @@ -1,21 +1,5 @@  --- -# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time -# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes -# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different -- set_fact: -    l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}" - -- set_fact: -    l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}" - -- set_fact: -    l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}" - -- set_fact: -    l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}" - -- set_fact: -    l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}" +- include: vars.yml  # TODO: replace shell module with command and update later checks  - name: Check available disk space for etcd backup diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml new file mode 100644 index 000000000..16604bae8 --- /dev/null +++ b/roles/etcd/tasks/backup/copy.yml @@ -0,0 +1,5 @@ +--- +- name: Copy etcd backup +  copy: +    src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz" +    dest: "{{ l_etcd_data_dir }}" 
diff --git a/roles/etcd/tasks/backup/fetch.yml b/roles/etcd/tasks/backup/fetch.yml new file mode 100644 index 000000000..610ce1960 --- /dev/null +++ b/roles/etcd/tasks/backup/fetch.yml @@ -0,0 +1,8 @@ +--- +- name: Fetch etcd backup +  fetch: +    src: "{{ l_etcd_backup_dir }}.tgz" +    dest: "{{ etcd_backup_sync_directory }}/" +    flat: yes +    fail_on_missing: yes +    validate_checksum: yes diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml new file mode 100644 index 000000000..6c75d00a7 --- /dev/null +++ b/roles/etcd/tasks/backup/unarchive.yml @@ -0,0 +1,14 @@ +--- +- shell: ls /var/lib/etcd +  register: output + +- debug: +    msg: "output: {{ output }}" + +- name: Unarchive backup +  # can't use unarchive https://github.com/ansible/ansible/issues/30821 +  # unarchive: +  #   src: "{{ l_etcd_backup_dir }}.tgz" +  #   dest: "{{ l_etcd_backup_dir }}" +  command: > +    tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ l_etcd_data_dir }}" diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml new file mode 100644 index 000000000..3c009f557 --- /dev/null +++ b/roles/etcd/tasks/backup/vars.yml @@ -0,0 +1,18 @@ +--- +# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time +# e.g. 
openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes +# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different +- set_fact: +    l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}" + +- set_fact: +    l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}" + +- set_fact: +    l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}" + +- set_fact: +    l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}" + +- set_fact: +    l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}" diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml new file mode 100644 index 000000000..129e1831c --- /dev/null +++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml @@ -0,0 +1,2 @@ +--- +- include: certificates/backup_master_etcd_certificates.yml diff --git a/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml new file mode 100644 index 000000000..e65b3e5a2 --- /dev/null +++ b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml @@ -0,0 +1,7 @@ +--- +- name: Backup master etcd certificates +  shell: > +    tar -czvf /etc/origin/master/master-etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz +    /etc/origin/master/master.etcd-* +  args: +    warn: no diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml new file mode 100644 index 000000000..75c110972 --- /dev/null +++ b/roles/etcd/tasks/check_cluster_health.yml @@ -0,0 +1,2 @@ +--- +- include: migration/check_cluster_health.yml diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml new file mode 100644 index 
000000000..9202e6e48 --- /dev/null +++ b/roles/etcd/tasks/disable_etcd.yml @@ -0,0 +1,2 @@ +--- +- include: auxiliary/disable_etcd.yml diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml new file mode 100644 index 000000000..513eed17a --- /dev/null +++ b/roles/etcd/tasks/fetch_backup.yml @@ -0,0 +1,8 @@ +--- +- include: backup/vars.yml + +- include: backup/archive.yml + +- include: backup/sync_backup.yml + +- include: backup/ diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index e735bf50a..024479fb4 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -17,6 +17,7 @@        {{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},        {%- endif -%}        {% endfor -%} +  when: etcd_initial_cluster is undefined  - name: Check etcd system container package    command: > diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 8462bb4c8..3027a9447 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -29,8 +29,8 @@ ETCD_INITIAL_CLUSTER={{ etcd_hostname}}={{ etcd_initial_advertise_peer_urls }}  ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}  ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1  {% else %} -{% if initial_etcd_cluster is defined and initial_etcd_cluster %} -ETCD_INITIAL_CLUSTER={{ initial_etcd_cluster }} +{% if etcd_initial_cluster is defined and etcd_initial_cluster %} +ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}  {% else %}  ETCD_INITIAL_CLUSTER={{ initial_cluster() }}  {% endif %} diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md index 321acca21..83e00e504 100644 --- a/roles/installer_checkpoint/README.md +++ b/roles/installer_checkpoint/README.md @@ -92,8 +92,7 @@ phase/component and then a final play for setting `installer_hase_initialize` to  # 
common/openshift-cluster/std_include.yml  ---  - name: Initialization Checkpoint Start -  hosts: localhost -  connection: local +  hosts: oo_all_hosts    gather_facts: false    roles:    - installer_checkpoint diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py index ac369b882..25f9405af 100644 --- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py +++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py @@ -136,7 +136,7 @@ class CallbackModule(CallbackBase):              },              'installer_phase_management': {                  'title': 'Management Install', -                'playbook': 'playbooks/common/openshift-cluster/openshift_management.yml' +                'playbook': 'playbooks/byo/openshift-management/config.yml'              },          } diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md new file mode 100644 index 000000000..7b618f902 --- /dev/null +++ b/roles/kuryr/README.md @@ -0,0 +1,38 @@ +## OpenStack Kuryr + +Install Kuryr CNI components (kuryr-controller, kuryr-cni) on Master and worker +nodes. Kuryr uses OpenStack Networking service (Neutron) to provide network for +pods. This allows to have interconnectivity between pods and OpenStack VMs. 
+ +## Requirements + +* Ansible 2.2+ +* Centos/ RHEL 7.3+ + +## Current Kuryr restrictions when used with OpenShift + +* Openshift Origin only +* OpenShift on OpenStack Newton or newer (only with Trunk ports) + +## Key Ansible inventory Kuryr master configuration parameters + +* ``openshift_use_kuryr=True`` +* ``openshift_use_openshift_sdn=False`` +* ``openshift_sdn_network_plugin_name='cni'`` +* ``kuryr_cni_link_interface=eth0`` +* ``kuryr_openstack_auth_url=keystone_url`` +* ``kuryr_openstack_user_domain_name=Default`` +* ``kuryr_openstack_user_project_name=Default`` +* ``kuryr_openstack_project_id=project_uuid`` +* ``kuryr_openstack_username=kuryr`` +* ``kuryr_openstack_password=kuryr_pass`` +* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid`` +* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid`` +* ``kuryr_openstack_pod_service_id=service_subnet_uuid`` +* ``kuryr_openstack_pod_project_id=pod_project_uuid`` +* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid`` + +## Kuryr resources + +* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/) +* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html) diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml new file mode 100644 index 000000000..ff298dda0 --- /dev/null +++ b/roles/kuryr/defaults/main.yaml @@ -0,0 +1,72 @@ +--- +# Kuryr conf directory +kuryr_config_dir: /etc/kuryr + +# Kuryr username +kuryr_openstack_username: kuryr + +# Kuryr username domain +kuryr_openstack_user_domain_name: default + +# Kuryr username domain +kuryr_openstack_project_domain_name: default + +# Kuryr OpenShift namespace +kuryr_namespace: kube-system + +# Whether to run the cni plugin in debug mode +kuryr_cni_debug: "false" + +# The version of cni binaries +cni_version: v0.5.2 + +# Path to bin dir (where kuryr execs get installed) +bin_dir: /usr/bin + +# Path to the cni binaries +cni_bin_dir: /opt/cni/bin + +# URL 
for cni binaries +cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/" +cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz" +cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9" + +# Kuryr ClusterRole definiton +kuryr_clusterrole: +  name: kuryrctl +  state: present +  rules: +    - apiGroups: +        - "" +      attributeRestrictions: null +      verbs: +        - get +        - list +        - watch +      resources: +        - daemonsets +        - deployments +        - deploymentconfigs +        - endpoints +        - ingress +        - nodes +        - namespaces +        - pods +        - projects +        - routes +        - services +    - apiGroups: +        - "" +      attributeRestrictions: null +      verbs: +        - update +        - patch +      resources: +        - endpoints +        - ingress +        - pods +        - namespaces +        - nodes +        - services +        - services/status +        - routes diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml new file mode 100644 index 000000000..7fd5adf41 --- /dev/null +++ b/roles/kuryr/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: +  author: Red Hat +  description: Kuryr networking +  company: Red Hat +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +  - system +dependencies: +- { role: lib_openshift } +- { role: openshift_facts } diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml new file mode 100644 index 000000000..55ab16f74 --- /dev/null +++ b/roles/kuryr/tasks/master.yaml @@ -0,0 +1,52 @@ +--- +- name: Perform OpenShit ServiceAccount config +  include: serviceaccount.yaml + +- name: Create kuryr manifests tempdir +  command: mktemp -d +  register: manifests_tmpdir + +- name: Create kuryr ConfigMap manifest +  become: yes +  template: +    src: configmap.yaml.j2 +    dest: "{{ 
manifests_tmpdir.stdout }}/configmap.yaml" + +- name: Create kuryr-controller Deployment manifest +  become: yes +  template: +    src: controller-deployment.yaml.j2 +    dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml" + +- name: Create kuryr-cni DaemonSet manifest +  become: yes +  template: +    src: cni-daemonset.yaml.j2 +    dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml" + +- name: Apply ConfigMap manifest +  oc_obj: +    state: present +    kind: ConfigMap +    name: "kuryr-config" +    namespace: "{{ kuryr_namespace }}" +    files: +    - "{{ manifests_tmpdir.stdout }}/configmap.yaml" + +- name: Apply Controller Deployment manifest +  oc_obj: +    state: present +    kind: Deployment +    name: "kuryr-controller" +    namespace: "{{ kuryr_namespace }}" +    files: +    - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml" + +- name: Apply kuryr-cni DaemonSet manifest +  oc_obj: +    state: present +    kind: DaemonSet +    name: "kuryr-cni-ds" +    namespace: "{{ kuryr_namespace }}" +    files: +    - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml" diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml new file mode 100644 index 000000000..ffe814713 --- /dev/null +++ b/roles/kuryr/tasks/node.yaml @@ -0,0 +1,48 @@ +--- +- name: Create CNI bin directory +  file: +    state: directory +    path: "{{ cni_bin_dir }}" +    mode: 0755 +    owner: root +    group: root +    recurse: yes + +- name: Create CNI extraction tempdir +  command: mktemp -d +  register: cni_tmpdir + +- name: Download CNI +  get_url: +    url: "{{ cni_bin_url }}" +    checksum: "sha1:{{ cni_bin_checksum }}" +    mode: 0644 +    dest: "{{ cni_tmpdir.stdout }}" +  register: downloaded_tarball + +- name: Extract CNI +  become: yes +  unarchive: +    remote_src: True +    src: "{{ downloaded_tarball.dest }}" +    dest: "{{ cni_bin_dir }}" +  when: downloaded_tarball.changed + +- name: Ensure CNI net.d exists +  file: +    path: /etc/cni/net.d +  
  recurse: yes +    state: directory + +- name: Configure OpenShift node with disabled service proxy +  lineinfile: +    dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node" +    regexp: '^OPTIONS="?(.*?)"?$' +    backrefs: yes +    backup: yes +    line: 'OPTIONS="\1 --disable dns,proxy,plugins"' + +- name: force node restart to disable the proxy +  service: +    name: "{{ openshift.common.service_type }}-node" +    state: restarted diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml new file mode 100644 index 000000000..088f13091 --- /dev/null +++ b/roles/kuryr/tasks/serviceaccount.yaml @@ -0,0 +1,31 @@ +--- +- name: Create Controller service account +  oc_serviceaccount: +    name: kuryr-controller +    namespace: "{{ kuryr_namespace }}" +  register: saout + +- name: Create a role for the Kuryr +  oc_clusterrole: "{{ kuryr_clusterrole }}" + +- name: Fetch the created Kuryr controller cluster role +  oc_clusterrole: +    name: kuryrctl +    state: list +  register: crout + +- name: Grant Kuryr the privileged security context constraints +  oc_adm_policy_user: +    user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}" +    namespace: "{{ kuryr_namespace }}" +    resource_kind: scc +    resource_name: privileged +    state: present + +- name: Assign role to Kuryr service account +  oc_adm_policy_user: +    user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}" +    namespace: "{{ kuryr_namespace }}" +    resource_kind: cluster-role +    resource_name: "{{ crout.results.results.metadata.name }}" +    state: present diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2 new file mode 100644 index 000000000..39348ae90 --- /dev/null +++ b/roles/kuryr/templates/cni-daemonset.yaml.j2 @@ -0,0 +1,53 @@ +# More info about the template: 
https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes + +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: +  name: kuryr-cni-ds +  namespace: {{ kuryr_namespace }} +  labels: +    tier: node +    app: kuryr +spec: +  template: +    metadata: +      labels: +        tier: node +        app: kuryr +    spec: +      hostNetwork: true +      tolerations: +      - key: node-role.kubernetes.io/master +        operator: Exists +        effect: NoSchedule +      serviceAccountName: kuryr-controller +      containers: +      - name: kuryr-cni +        image: kuryr/cni:latest +        imagePullPolicy: IfNotPresent +        command: [ "cni_ds_init" ] +        securityContext: +          privileged: true +        volumeMounts: +        - name: bin +          mountPath: /opt/cni/bin +        - name: net-conf +          mountPath: /etc/cni/net.d +        - name: config-volume +          mountPath: /tmp/kuryr/kuryr.conf +          subPath: kuryr-cni.conf +        - name: etc +          mountPath: /etc +      volumes: +        - name: bin +          hostPath: +            path: {{ cni_bin_dir }} +        - name: net-conf +          hostPath: +            path: /etc/cni/net.d +        - name: config-volume +          configMap: +            name: kuryr-config +        - name: etc +          hostPath: +            path: /etc
\ No newline at end of file diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2 new file mode 100644 index 000000000..e874d6c25 --- /dev/null +++ b/roles/kuryr/templates/configmap.yaml.j2 @@ -0,0 +1,343 @@ +# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes + +apiVersion: v1 +kind: ConfigMap +metadata: +  name: kuryr-config +  namespace: {{ kuryr_namespace }} +data: +  kuryr.conf: |+ +    [DEFAULT] + +    # +    # From kuryr_kubernetes +    # + +    # Directory for Kuryr vif binding executables. (string value) +    #bindir = /usr/libexec/kuryr + +    # If set to true, the logging level will be set to DEBUG instead of the default +    # INFO level. (boolean value) +    # Note: This option can be changed without restarting. +    #debug = false + +    # DEPRECATED: If set to false, the logging level will be set to WARNING instead +    # of the default INFO level. (boolean value) +    # This option is deprecated for removal. +    # Its value may be silently ignored in the future. +    #verbose = true + +    # The name of a logging configuration file. This file is appended to any +    # existing logging configuration files. For details about logging configuration +    # files, see the Python logging module documentation. Note that when logging +    # configuration files are used then all logging configuration is set in the +    # configuration file and other logging configuration options are ignored (for +    # example, logging_context_format_string). (string value) +    # Note: This option can be changed without restarting. +    # Deprecated group/name - [DEFAULT]/log_config +    #log_config_append = <None> + +    # Defines the format string for %%(asctime)s in log records. Default: +    # %(default)s . This option is ignored if log_config_append is set. 
(string +    # value) +    #log_date_format = %Y-%m-%d %H:%M:%S + +    # (Optional) Name of log file to send logging output to. If no default is set, +    # logging will go to stderr as defined by use_stderr. This option is ignored if +    # log_config_append is set. (string value) +    # Deprecated group/name - [DEFAULT]/logfile +    #log_file = /var/log/kuryr/kuryr-controller.log + +    # (Optional) The base directory used for relative log_file  paths. This option +    # is ignored if log_config_append is set. (string value) +    # Deprecated group/name - [DEFAULT]/logdir +    #log_dir = <None> + +    # Uses logging handler designed to watch file system. When log file is moved or +    # removed this handler will open a new log file with specified path +    # instantaneously. It makes sense only if log_file option is specified and +    # Linux platform is used. This option is ignored if log_config_append is set. +    # (boolean value) +    #watch_log_file = false + +    # Use syslog for logging. Existing syslog format is DEPRECATED and will be +    # changed later to honor RFC5424. This option is ignored if log_config_append +    # is set. (boolean value) +    #use_syslog = false + +    # Syslog facility to receive log lines. This option is ignored if +    # log_config_append is set. (string value) +    #syslog_log_facility = LOG_USER + +    # Log output to standard error. This option is ignored if log_config_append is +    # set. (boolean value) +    #use_stderr = true + +    # Format string to use for log messages with context. (string value) +    #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +    # Format string to use for log messages when context is undefined. 
(string +    # value) +    #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +    # Additional data to append to log message when logging level for the message +    # is DEBUG. (string value) +    #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +    # Prefix each line of exception output with this format. (string value) +    #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +    # Defines the format string for %(user_identity)s that is used in +    # logging_context_format_string. (string value) +    #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +    # List of package logging levels in logger=LEVEL pairs. This option is ignored +    # if log_config_append is set. (list value) +    #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + +    # Enables or disables publication of error events. (boolean value) +    #publish_errors = false + +    # The format for an instance that is passed with the log message. (string +    # value) +    #instance_format = "[instance: %(uuid)s] " + +    # The format for an instance UUID that is passed with the log message. (string +    # value) +    #instance_uuid_format = "[instance: %(uuid)s] " + +    # Enables or disables fatal status of deprecations. 
(boolean value) +    #fatal_deprecations = false + + +    [binding] + +    driver = kuryr.lib.binding.drivers.vlan +    link_iface = eth0 + +    [kubernetes] + +    # +    # From kuryr_kubernetes +    # + +    # The root URL of the Kubernetes API (string value) +    api_root = {{ openshift.master.api_url }} + +    # Absolute path to client cert to connect to HTTPS K8S_API (string value) +    # ssl_client_crt_file = /etc/kuryr/controller.crt + +    # Absolute path client key file to connect to HTTPS K8S_API (string value) +    # ssl_client_key_file = /etc/kuryr/controller.key + +    # Absolute path to ca cert file to connect to HTTPS K8S_API (string value) +    ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + +    # The token to talk to the k8s API +    token_file = /var/run/secrets/kubernetes.io/serviceaccount/token + +    # HTTPS K8S_API server identity verification (boolean value) +    # TODO (apuimedo): Make configurable +    ssl_verify_server_crt = True + +    # The driver to determine OpenStack project for pod ports (string value) +    pod_project_driver = default + +    # The driver to determine OpenStack project for services (string value) +    service_project_driver = default + +    # The driver to determine Neutron subnets for pod ports (string value) +    pod_subnets_driver = default + +    # The driver to determine Neutron subnets for services (string value) +    service_subnets_driver = default + +    # The driver to determine Neutron security groups for pods (string value) +    pod_security_groups_driver = default + +    # The driver to determine Neutron security groups for services (string value) +    service_security_groups_driver = default + +    # The driver that provides VIFs for Kubernetes Pods. 
(string value) +    pod_vif_driver = nested-vlan + + +    [neutron] +    # Configuration options for OpenStack Neutron + +    # +    # From kuryr_kubernetes +    # + +    # Authentication URL (string value) +    auth_url = {{ kuryr_openstack_auth_url }} + +    # Authentication type to load (string value) +    # Deprecated group/name - [neutron]/auth_plugin +    auth_type = password + +    # Domain ID to scope to (string value) +    user_domain_name = {{ kuryr_openstack_user_domain_name }} + +    # User's password (string value) +    password = {{ kuryr_openstack_password }} + +    # Domain name containing project (string value) +    project_domain_name = {{ kuryr_openstack_project_domain_name }} + +    # Project ID to scope to (string value) +    # Deprecated group/name - [neutron]/tenant-id +    project_id = {{ kuryr_openstack_project_id }} + +    # Token (string value) +    #token = <None> + +    # Trust ID (string value) +    #trust_id = <None> + +    # User's domain id (string value) +    #user_domain_id = <None> + +    # User id (string value) +    #user_id = <None> + +    # Username (string value) +    # Deprecated group/name - [neutron]/user-name +    username = {{kuryr_openstack_username }} + +    # Whether a plugging operation is failed if the port to plug does not become +    # active (boolean value) +    #vif_plugging_is_fatal = false + +    # Seconds to wait for port to become active (integer value) +    #vif_plugging_timeout = 0 + +    [neutron_defaults] + +    pod_security_groups = {{ kuryr_openstack_pod_sg_id }} +    pod_subnet = {{ kuryr_openstack_pod_subnet_id }} +    service_subnet = {{ kuryr_openstack_service_subnet_id }} +    project = {{ kuryr_openstack_pod_project_id }} +    # TODO (apuimedo): Remove the duplicated line just after this one once the +    # RDO packaging contains the upstream patch +    worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }} + +    [pod_vif_nested] +    worker_nodes_subnet = {{ 
kuryr_openstack_worker_nodes_subnet_id }} +  kuryr-cni.conf: |+ +    [DEFAULT] + +    # +    # From kuryr_kubernetes +    # +    # If set to true, the logging level will be set to DEBUG instead of the default +    # INFO level. (boolean value) +    # Note: This option can be changed without restarting. +    #debug = false + +    # The name of a logging configuration file. This file is appended to any +    # existing logging configuration files. For details about logging configuration +    # files, see the Python logging module documentation. Note that when logging +    # configuration files are used then all logging configuration is set in the +    # configuration file and other logging configuration options are ignored (for +    # example, logging_context_format_string). (string value) +    # Note: This option can be changed without restarting. +    # Deprecated group/name - [DEFAULT]/log_config +    #log_config_append = <None> + +    # Defines the format string for %%(asctime)s in log records. Default: +    # %(default)s . This option is ignored if log_config_append is set. (string +    # value) +    #log_date_format = %Y-%m-%d %H:%M:%S + +    # (Optional) Name of log file to send logging output to. If no default is set, +    # logging will go to stderr as defined by use_stderr. This option is ignored if +    # log_config_append is set. (string value) +    # Deprecated group/name - [DEFAULT]/logfile +    #log_file = /var/log/kuryr/cni.log + +    # (Optional) The base directory used for relative log_file  paths. This option +    # is ignored if log_config_append is set. (string value) +    # Deprecated group/name - [DEFAULT]/logdir +    #log_dir = <None> + +    # Uses logging handler designed to watch file system. When log file is moved or +    # removed this handler will open a new log file with specified path +    # instantaneously. It makes sense only if log_file option is specified and +    # Linux platform is used. 
This option is ignored if log_config_append is set. +    # (boolean value) +    #watch_log_file = false + +    # Use syslog for logging. Existing syslog format is DEPRECATED and will be +    # changed later to honor RFC5424. This option is ignored if log_config_append +    # is set. (boolean value) +    #use_syslog = false + +    # Syslog facility to receive log lines. This option is ignored if +    # log_config_append is set. (string value) +    #syslog_log_facility = LOG_USER + +    # Log output to standard error. This option is ignored if log_config_append is +    # set. (boolean value) +    use_stderr = true + +    # Format string to use for log messages with context. (string value) +    #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +    # Format string to use for log messages when context is undefined. (string +    # value) +    #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +    # Additional data to append to log message when logging level for the message +    # is DEBUG. (string value) +    #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +    # Prefix each line of exception output with this format. (string value) +    #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +    # Defines the format string for %(user_identity)s that is used in +    # logging_context_format_string. (string value) +    #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +    # List of package logging levels in logger=LEVEL pairs. This option is ignored +    # if log_config_append is set. 
(list value) +    #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + +    # Enables or disables publication of error events. (boolean value) +    #publish_errors = false + +    # The format for an instance that is passed with the log message. (string +    # value) +    #instance_format = "[instance: %(uuid)s] " + +    # The format for an instance UUID that is passed with the log message. (string +    # value) +    #instance_uuid_format = "[instance: %(uuid)s] " + +    # Enables or disables fatal status of deprecations. (boolean value) +    #fatal_deprecations = false + + +    [binding] + +    driver = kuryr.lib.binding.drivers.vlan +    link_iface = {{ kuryr_cni_link_interface }} + +    [kubernetes] + +    # +    # From kuryr_kubernetes +    # + +    # The root URL of the Kubernetes API (string value) +    api_root = {{ openshift.master.api_url }} + +    # The token to talk to the k8s API +    token_file = /etc/kuryr/token + +    # Absolute path to ca cert file to connect to HTTPS K8S_API (string value) +    ssl_ca_crt_file = /etc/kuryr/ca.crt + +    # HTTPS K8S_API server identity verification (boolean value) +    # TODO (apuimedo): Make configurable +    ssl_verify_server_crt = True diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2 new file mode 100644 index 000000000..d970270b5 --- /dev/null +++ b/roles/kuryr/templates/controller-deployment.yaml.j2 @@ -0,0 +1,40 @@ +# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes + 
+apiVersion: apps/v1beta1 +kind: Deployment +metadata: +  labels: +    name: kuryr-controller +  name: kuryr-controller +  namespace: {{ kuryr_namespace }} +spec: +  replicas: 1 +  template: +    metadata: +      labels: +        name: kuryr-controller +      name: kuryr-controller +    spec: +      serviceAccountName: kuryr-controller +      automountServiceAccountToken: true +      hostNetwork: true +      containers: +      - image: kuryr/controller:latest +        imagePullPolicy: IfNotPresent +        name: controller +        terminationMessagePath: "/dev/termination-log" +        # FIXME(dulek): This shouldn't be required, but without it selinux is +        #               complaining about access to kuryr.conf. +        securityContext: +          privileged: true +          runAsUser: 0 +        volumeMounts: +        - name: config-volume +          mountPath: "/etc/kuryr/kuryr.conf" +          subPath: kuryr.conf +      volumes: +      - name: config-volume +        configMap: +          name: kuryr-config +          defaultMode: 0666 +      restartPolicy: Always diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index d1dc4caf8..324f52689 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -1478,11 +1478,23 @@ class OCcsr(OpenShiftCLI):          return False +    def get_csr_request(self, request): +        '''base64 decode the request object and call openssl to determine the +           subject and specifically the CN: from the request + +           Output: +           (0, '... 
+                Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal +                ...') +        ''' +        import base64 +        return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1] +      def match_node(self, csr):          '''match an inc csr to a node in self.nodes'''          for node in self.nodes: -            # we have a match -            if node['name'] in csr['metadata']['name']: +            # we need to match based upon the csr's request certificate's CN +            if node['name'] in self.get_csr_request(csr['spec']['request']):                  node['csrs'][csr['metadata']['name']] = csr                  # check that the username is the node and type is 'Approved' diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py index ea11c6ca9..22b8f9165 100644 --- a/roles/lib_openshift/src/class/oc_adm_csr.py +++ b/roles/lib_openshift/src/class/oc_adm_csr.py @@ -66,11 +66,23 @@ class OCcsr(OpenShiftCLI):          return False +    def get_csr_request(self, request): +        '''base64 decode the request object and call openssl to determine the +           subject and specifically the CN: from the request + +           Output: +           (0, '... 
+                Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal +                ...') +        ''' +        import base64 +        return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1] +      def match_node(self, csr):          '''match an inc csr to a node in self.nodes'''          for node in self.nodes: -            # we have a match -            if node['name'] in csr['metadata']['name']: +            # we need to match based upon the csr's request certificate's CN +            if node['name'] in self.get_csr_request(csr['spec']['request']):                  node['csrs'][csr['metadata']['name']] = csr                  # check that the username is the node and type is 'Approved' diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md index ff96081fe..4aca5c7a8 100644 --- a/roles/openshift_aws/README.md +++ b/roles/openshift_aws/README.md @@ -1,7 +1,29 @@  openshift_aws  ================================== -Provision AWS infrastructure helpers. +Provision AWS infrastructure and instances. + +This role contains many task-areas to provision resources and perform actions +against an AWS account for the purposes of dynamically building an openshift +cluster. + +This role is primarily intended to be used with "include_role" and "tasks_from". + +include_role can be called from the tasks section in a play.  See example +playbook below for reference. 
+ +These task-areas are: + +* provision a vpc: vpc.yml +* provision elastic load balancers: elb.yml +* upload IAM ssl certificates to use with load balancers: iam_cert.yml +* provision an S3 bucket: s3.yml +* provision an instance to build an AMI: provision_instance.yml +* provision a security group in AWS: security_group.yml +* provision ssh keys and users in AWS: ssh_keys.yml +* provision an AMI in AWS: seal_ami.yml +* provision scale groups: scale_group.yml +* provision launch configs: launch_config.yml  Requirements  ------------ @@ -9,56 +31,9 @@ Requirements  * Ansible 2.3  * Boto -Role Variables --------------- - -From this role: - -| Name                                              | Default value -|---------------------------------------------------|----------------------- -| openshift_aws_clusterid                           | default -| openshift_aws_elb_scheme                          | internet-facing -| openshift_aws_launch_config_bootstrap_token       | '' -| openshift_aws_node_group_config                   | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 
'max_size': 20}} -| openshift_aws_ami_copy_wait                       | False -| openshift_aws_users                               | [] -| openshift_aws_launch_config_name                  | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }} -| openshift_aws_node_group_type                     | master -| openshift_aws_elb_cert_arn                        | '' -| openshift_aws_kubernetes_cluster_status           | owned -| openshift_aws_s3_mode                             | create -| openshift_aws_vpc                                 | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'} -| openshift_aws_create_ssh_keys                     | False -| openshift_aws_iam_kms_alias                       | alias/{{ openshift_aws_clusterid }}_kms -| openshift_aws_use_custom_ami                      | False -| openshift_aws_ami_copy_src_region                 | {{ openshift_aws_region }} -| openshift_aws_s3_bucket_name                      | {{ openshift_aws_clusterid }} -| openshift_aws_elb_health_check                    | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2} -| openshift_aws_node_security_groups                | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 
'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}} -| openshift_aws_elb_security_groups                 | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}'] -| openshift_aws_vpc_tags                            | {'Name': '{{ openshift_aws_vpc_name }}'} -| openshift_aws_create_security_groups              | False -| openshift_aws_create_iam_cert                     | False -| openshift_aws_create_scale_group                  | True -| openshift_aws_ami_encrypt                         | False -| openshift_aws_node_group_config_node_volumes      | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}] -| openshift_aws_elb_instance_filter                 | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'} -| openshift_aws_region                              | us-east-1 -| openshift_aws_elb_name                            | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }} -| openshift_aws_elb_idle_timout                     | 400 -| openshift_aws_subnet_name                     | us-east-1c -| openshift_aws_node_group_config_tags              | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }} -| openshift_aws_create_launch_config                | True -| 
openshift_aws_ami_tags                            | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'} -| openshift_aws_ami_name                            | openshift-gi -| openshift_aws_node_group_config_master_volumes    | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}] -| openshift_aws_vpc_name                            | {{ openshift_aws_clusterid }} -| openshift_aws_elb_listeners                       | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}} -| - - -Dependencies ------------- +Appropriate AWS credentials and permissions are required. 
+ +  Example Playbook diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index ea09857b0..5371588cf 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -4,7 +4,6 @@ openshift_aws_create_iam_cert: True  openshift_aws_create_security_groups: True  openshift_aws_create_launch_config: True  openshift_aws_create_scale_group: True -openshift_aws_kubernetes_cluster_status: owned  # or shared  openshift_aws_node_group_type: master  openshift_aws_wait_for_ssh: True @@ -13,6 +12,7 @@ openshift_aws_clusterid: default  openshift_aws_region: us-east-1  openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"  openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" +openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"  openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"  openshift_aws_iam_cert_path: '' @@ -89,6 +89,10 @@ openshift_aws_node_group_config_node_volumes:    delete_on_termination: True  openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}" +openshift_aws_node_group_termination_policy: Default +openshift_aws_node_group_replace_instances: [] +openshift_aws_node_group_replace_all_instances: False +openshift_aws_node_group_config_extra_labels: {}  openshift_aws_node_group_config:    tags: "{{ openshift_aws_node_group_config_tags }}" @@ -105,7 +109,11 @@ openshift_aws_node_group_config:      tags:        host-type: master        sub-host-type: default +    labels: +      type: master      wait_for_instances: True +    termination_policy: "{{ openshift_aws_node_group_termination_policy }}" +    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"    compute:      instance_type: m4.xlarge      ami: "{{ openshift_aws_ami }}" @@ -119,6 +127,10 @@ openshift_aws_node_group_config:      tags:        host-type: node        sub-host-type: 
compute +    labels: +      type: compute +    termination_policy: "{{ openshift_aws_node_group_termination_policy }}" +    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"    infra:      instance_type: m4.xlarge      ami: "{{ openshift_aws_ami }}" @@ -132,6 +144,10 @@ openshift_aws_node_group_config:      tags:        host-type: node        sub-host-type: infra +    labels: +      type: infra +    termination_policy: "{{ openshift_aws_node_group_termination_policy }}" +    replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"  openshift_aws_elb_security_groups:  - "{{ openshift_aws_clusterid }}" @@ -211,3 +227,7 @@ openshift_aws_vpc:        az: "us-east-1e"      - cidr: 172.31.16.0/20        az: "us-east-1a" + +openshift_aws_node_run_bootstrap_startup: True +openshift_aws_node_user_data: '' +openshift_aws_node_config_namespace: openshift-node diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index e6be9969c..8b7b02a0e 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -4,6 +4,11 @@    when:    - openshift_aws_ami is undefined +- fail: +    msg: "Ensure that openshift_deployment_type is defined." 
+  when: +  - openshift_deployment_type is undefined +  - name: query vpc    ec2_vpc_net_facts:      region: "{{ openshift_aws_region }}" @@ -27,23 +32,7 @@      image_id: "{{ openshift_aws_ami }}"      instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"      security_groups: "{{ openshift_aws_launch_config_security_group_id  | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" -    user_data: |- -      #cloud-config -      {%  if openshift_aws_node_group_type != 'master' %} -      write_files: -      - path: /root/csr_kubeconfig -        owner: root:root -        permissions: '0640' -        content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }} -      - path: /root/openshift_settings -        owner: root:root -        permissions: '0640' -        content: -          openshift_type: "{{ openshift_aws_node_group_type }}" -      runcmd: -      - [ systemctl, enable, atomic-openshift-node] -      - [ systemctl, start, atomic-openshift-node] -      {% endif %} +    user_data: "{{ lookup('template', 'user_data.j2') }}"      key_name: "{{ openshift_aws_ssh_key_name }}"      ebs_optimized: False      volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}" diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 1384bae59..25ae6ce1c 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -1,4 +1,8 @@  --- +- name: set openshift_node_bootstrap to True when building AMI +  set_fact: +    openshift_node_bootstrap: True +  - name: query vpc    ec2_vpc_net_facts:      region: "{{ openshift_aws_region }}" @@ -53,10 +57,6 @@      timeout: 300      search_regex: OpenSSH -- name: Pause 10 seconds to ensure ssh actually accepts logins -  pause: -    seconds: 20 -  - name: add host to nodes    add_host:      groups: nodes diff --git 
a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml index 3e969fc43..eb31636e7 100644 --- a/roles/openshift_aws/tasks/scale_group.yml +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -28,5 +28,7 @@      load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"      wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"      vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" +    replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}" +    replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}"      tags:      - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}" diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index 0cb749dcc..d319fdd1a 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -1,4 +1,11 @@  --- +- name: Remove any ansible facts created during AMI creation +  file: +    path: "/etc/ansible/facts.d/{{ item }}" +    state: absent +  with_items: +  - openshift.fact +  - name: fetch newly created instances    ec2_remote_facts:      region: "{{ openshift_aws_region }}" diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 new file mode 100644 index 000000000..ed9c0ed0b --- /dev/null +++ b/roles/openshift_aws/templates/user_data.j2 @@ -0,0 +1,26 @@ +{% if openshift_aws_node_user_data is defined and openshift_aws_node_user_data != '' %} +{{ openshift_aws_node_user_data }} +{% else %} +#cloud-config +write_files: +- 
path: /root/openshift_bootstrap/openshift_settings.yaml +  owner: 'root:root' +  permissions: '0640' +  content: | +    openshift_group_type: {{ openshift_aws_node_group_type }} +{%   if openshift_aws_node_group_type != 'master' %} +- path: /etc/origin/node/csr_kubeconfig +  owner: 'root:root' +  permissions: '0640' +  encoding: b64 +  content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }} +{%   endif %} +runcmd: +{%     if openshift_aws_node_run_bootstrap_startup %} +- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] +{%     endif %} +{%     if openshift_aws_node_group_type != 'master' %} +- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] +- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] +{%     endif %} +{% endif %} diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index 3a866cedf..7a5bebf6f 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -6,19 +6,46 @@    block: -  - name: Install docker excluder +  - name: Install docker excluder - yum      package:        name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) +  '*' }}"        state: "{{ r_openshift_excluder_docker_package_state }}"      when:      - r_openshift_excluder_enable_docker_excluder | bool +    - ansible_pkg_mgr == "yum" -  - name: Install openshift excluder + +  # For DNF we do not need the "*" and if we add it, it causes an error because +  # it's not a valid pkg_spec +  # +  # https://bugzilla.redhat.com/show_bug.cgi?id=1199432 +  - name: Install docker excluder - dnf +    package: +      name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | 
oo_image_tag_to_rpm_version(include_dash=True) }}" +      state: "{{ r_openshift_excluder_docker_package_state }}" +    when: +    - r_openshift_excluder_enable_docker_excluder | bool +    - ansible_pkg_mgr == "dnf" + +  - name: Install openshift excluder - yum      package:        name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"        state: "{{ r_openshift_excluder_package_state }}"      when:      - r_openshift_excluder_enable_openshift_excluder | bool +    - ansible_pkg_mgr == "yum" + +  # For DNF we do not need the "*" and if we add it, it causes an error because +  # it's not a valid pkg_spec +  # +  # https://bugzilla.redhat.com/show_bug.cgi?id=1199432 +  - name: Install openshift excluder - dnf +    package: +      name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +      state: "{{ r_openshift_excluder_package_state }}" +    when: +    - r_openshift_excluder_enable_openshift_excluder | bool +    - ansible_pkg_mgr == "dnf"    - set_fact:        r_openshift_excluder_install_ran: True diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 215ff4b72..33028fea4 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -498,6 +498,20 @@ def set_selectors(facts):          facts['hosted']['etcd'] = {}      if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:          facts['hosted']['etcd']['selector'] = None +    if 'prometheus' not in facts: +        facts['prometheus'] = {} +    if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']: +        facts['prometheus']['selector'] = None +    if 'alertmanager' not in facts['prometheus']: +        
facts['prometheus']['alertmanager'] = {} +    # pylint: disable=line-too-long +    if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']: +        facts['prometheus']['alertmanager']['selector'] = None +    if 'alertbuffer' not in facts['prometheus']: +        facts['prometheus']['alertbuffer'] = {} +    # pylint: disable=line-too-long +    if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']: +        facts['prometheus']['alertbuffer']['selector'] = None      return facts @@ -1779,7 +1793,8 @@ class OpenShiftFacts(object):                     'node',                     'logging',                     'loggingops', -                   'metrics'] +                   'metrics', +                   'prometheus']      # Disabling too-many-arguments, this should be cleaned up as a TODO item.      # pylint: disable=too-many-arguments,no-value-for-parameter @@ -1907,7 +1922,6 @@ class OpenShiftFacts(object):                                    portal_net='172.30.0.0/16',                                    client_binary='oc', admin_binary='oadm',                                    dns_domain='cluster.local', -                                  debug_level=2,                                    config_base='/etc/origin')          if 'master' in roles: @@ -2069,6 +2083,66 @@ class OpenShiftFacts(object):                  )              ) +            defaults['prometheus'] = dict( +                storage=dict( +                    kind=None, +                    volume=dict( +                        name='prometheus', +                        size='10Gi' +                    ), +                    nfs=dict( +                        directory='/exports', +                        options='*(rw,root_squash)' +                    ), +                    host=None, +                    access=dict( +                        
modes=['ReadWriteOnce'] +                    ), +                    create_pv=True, +                    create_pvc=False +                ) +            ) + +            defaults['prometheus']['alertmanager'] = dict( +                storage=dict( +                    kind=None, +                    volume=dict( +                        name='prometheus-alertmanager', +                        size='10Gi' +                    ), +                    nfs=dict( +                        directory='/exports', +                        options='*(rw,root_squash)' +                    ), +                    host=None, +                    access=dict( +                        modes=['ReadWriteOnce'] +                    ), +                    create_pv=True, +                    create_pvc=False +                ) +            ) + +            defaults['prometheus']['alertbuffer'] = dict( +                storage=dict( +                    kind=None, +                    volume=dict( +                        name='prometheus-alertbuffer', +                        size='10Gi' +                    ), +                    nfs=dict( +                        directory='/exports', +                        options='*(rw,root_squash)' +                    ), +                    host=None, +                    access=dict( +                        modes=['ReadWriteOnce'] +                    ), +                    create_pv=True, +                    create_pvc=False +                ) +            ) +          return defaults      def guess_host_provider(self): diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh index d72a11de1..64c7cd019 100644 --- a/roles/openshift_gcp/templates/provision.j2.sh +++ b/roles/openshift_gcp/templates/provision.j2.sh @@ -313,7 +313,7 @@ fi  # wait until all node groups are stable  {% for node_group in openshift_gcp_node_group_config %}  # wait for stable {{ node_group.name }} -( gcloud 
--project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) & +( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &  {% endfor %} diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml index 47dc9171d..8fc70cecb 100644 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ b/roles/openshift_hosted_facts/tasks/main.yml @@ -16,4 +16,4 @@                         | oo_openshift_env }}"      openshift_env_structures:      - 'openshift.hosted.router.*' -  with_items: [hosted, logging, loggingops, metrics] +  with_items: [hosted, logging, loggingops, metrics, prometheus] diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 829c78728..69eb9283d 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -69,6 +69,9 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin  - `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.  - `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit.  Defaults to '1Gi' per destination. +- `openshift_logging_fluentd_audit_container_engine`: When `openshift_logging_fluentd_audit_container_engine` is set to `True`, the audit log of the container engine will be collected and stored in ES. +- `openshift_logging_fluentd_audit_file`: Location of audit log file. The default is `/var/log/audit/audit.log` +- `openshift_logging_fluentd_audit_pos_file`: Location of fluentd in_tail position file for the audit log file. 
The default is `/var/log/audit/audit.log.pos`  - `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.  - `openshift_logging_es_port`: The port for the ES service Fluentd should sent its logs to. Defaults to '9200'. diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml index 554aa5bb2..fc48b7f71 100644 --- a/roles/openshift_logging_elasticsearch/defaults/main.yml +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -40,8 +40,6 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_  # config the es plugin to write kibana index based on the index mode  openshift_logging_elasticsearch_kibana_index_mode: 'unique' -openshift_logging_elasticsearch_proxy_image_prefix: "openshift/oauth-proxy" -openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"  openshift_logging_elasticsearch_proxy_cpu_limit: "100m"  openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"  openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus" diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 8380a25f9..44f6b00f3 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -17,6 +17,17 @@  - include: determine_version.yaml +- name: Set default image variables based on deployment_type +  include_vars: "{{ item }}" +  with_first_found: +    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "default_images.yml" + +- name: Set elasticsearch_prefix image facts +  set_fact: +    openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}" +    
openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}" +  # allow passing in a tempdir  - name: Create temp directory for doing work in    command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX @@ -52,7 +63,7 @@      name: "aggregated-logging-elasticsearch"      namespace: "{{ openshift_logging_elasticsearch_namespace }}"    when: -  - openshift_logging_image_pull_secret == '' +    - openshift_logging_image_pull_secret == ''  # rolebinding reader  - copy: @@ -66,7 +77,7 @@      kind: clusterrole      namespace: "{{ openshift_logging_elasticsearch_namespace }}"      files: -    - "{{ tempdir }}/rolebinding-reader.yml" +      - "{{ tempdir }}/rolebinding-reader.yml"      delete_after: true  # SA roles @@ -107,8 +118,8 @@  - fail:      msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"    when: -  - "prometheus_out.stderr | length > 0" -  - "'already exists' not in prometheus_out.stderr" +    - "prometheus_out.stderr | length > 0" +    - "'already exists' not in prometheus_out.stderr"  # View role and binding  - name: Generate logging-elasticsearch-view-role @@ -120,8 +131,8 @@      roleRef:        name: view      subjects: -    - kind: ServiceAccount -      name: aggregated-logging-elasticsearch +      - kind: ServiceAccount +        name: aggregated-logging-elasticsearch    changed_when: no  - name: Set logging-elasticsearch-view-role role @@ -131,18 +142,18 @@      kind: rolebinding      namespace: "{{ openshift_logging_elasticsearch_namespace }}"      files: -    - "{{ tempdir }}/logging-elasticsearch-view-role.yaml" +      - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"      delete_after: true  # configmap  - assert:      that: -    - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes +      - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes      
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode  }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"  - assert:      that: -    - "{{ openshift_logging_es_log_appenders | length > 0 }}" +      - "{{ openshift_logging_es_log_appenders | length > 0 }}"      msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"  - template: @@ -198,22 +209,22 @@      name: "logging-elasticsearch"      namespace: "{{ openshift_logging_elasticsearch_namespace }}"      files: -    - name: key -      path: "{{ generated_certs_dir }}/logging-es.jks" -    - name: truststore -      path: "{{ generated_certs_dir }}/truststore.jks" -    - name: searchguard.key -      path: "{{ generated_certs_dir }}/elasticsearch.jks" -    - name: searchguard.truststore -      path: "{{ generated_certs_dir }}/truststore.jks" -    - name: admin-key -      path: "{{ generated_certs_dir }}/system.admin.key" -    - name: admin-cert -      path: "{{ generated_certs_dir }}/system.admin.crt" -    - name: admin-ca -      path: "{{ generated_certs_dir }}/ca.crt" -    - name: admin.jks -      path: "{{ generated_certs_dir }}/system.admin.jks" +      - name: key +        path: "{{ generated_certs_dir }}/logging-es.jks" +      - name: truststore +        path: "{{ generated_certs_dir }}/truststore.jks" +      - name: searchguard.key +        path: "{{ generated_certs_dir }}/elasticsearch.jks" +      - name: searchguard.truststore +        path: "{{ generated_certs_dir }}/truststore.jks" +      - name: admin-key +        path: "{{ generated_certs_dir }}/system.admin.key" +      - name: admin-cert +        path: "{{ generated_certs_dir }}/system.admin.crt" +      - name: admin-ca +        path: "{{ generated_certs_dir }}/ca.crt" +      - name: admin.jks +        path: "{{ generated_certs_dir 
}}/system.admin.jks"  # services  - name: Set logging-{{ es_component }}-cluster service @@ -227,7 +238,7 @@      labels:        logging-infra: 'support'      ports: -    - port: 9300 +      - port: 9300  - name: Set logging-{{ es_component }} service    oc_service: @@ -240,8 +251,8 @@      labels:        logging-infra: 'support'      ports: -    - port: 9200 -      targetPort: "restapi" +      - port: 9200 +        targetPort: "restapi"  - name: Set logging-{{ es_component}}-prometheus service    oc_service: @@ -251,9 +262,9 @@      labels:        logging-infra: 'support'      ports: -    - name: proxy -      port: 443 -      targetPort: 4443 +      - name: proxy +        port: 443 +        targetPort: 4443      selector:        component: "{{ es_component }}-prometheus"        provider: openshift @@ -281,46 +292,46 @@  # so we check for the presence of 'stderr' to determine if the obj exists or not  # the RC for existing and not existing is both 0  - when: -  - logging_elasticsearch_pvc.results.stderr is defined -  - openshift_logging_elasticsearch_storage_type == "pvc" +    - logging_elasticsearch_pvc.results.stderr is defined +    - openshift_logging_elasticsearch_storage_type == "pvc"    block: -  # storageclasses are used by default but if static then disable -  # storageclasses with the storageClassName set to "" in pvc.j2 -  - name: Creating ES storage template - static -    template: -      src: pvc.j2 -      dest: "{{ tempdir }}/templates/logging-es-pvc.yml" -    vars: -      obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" -      size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" -      access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" -      pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" -      storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" -    when: -  
  - not openshift_logging_elasticsearch_pvc_dynamic | bool - -  # Storageclasses are used by default if configured -  - name: Creating ES storage template - dynamic -    template: -      src: pvc.j2 -      dest: "{{ tempdir }}/templates/logging-es-pvc.yml" -    vars: -      obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" -      size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" -      access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" -      pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" -    when: -    - openshift_logging_elasticsearch_pvc_dynamic | bool - -  - name: Set ES storage -    oc_obj: -      state: present -      kind: pvc -      name: "{{ openshift_logging_elasticsearch_pvc_name }}" -      namespace: "{{ openshift_logging_elasticsearch_namespace }}" -      files: -      - "{{ tempdir }}/templates/logging-es-pvc.yml" -      delete_after: true +    # storageclasses are used by default but if static then disable +    # storageclasses with the storageClassName set to "" in pvc.j2 +    - name: Creating ES storage template - static +      template: +        src: pvc.j2 +        dest: "{{ tempdir }}/templates/logging-es-pvc.yml" +      vars: +        obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" +        size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" +        access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" +        pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" +        storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" +      when: +        - not openshift_logging_elasticsearch_pvc_dynamic | bool + +    # Storageclasses are used by default if configured +    - name: Creating ES storage template - dynamic +      
template: +        src: pvc.j2 +        dest: "{{ tempdir }}/templates/logging-es-pvc.yml" +      vars: +        obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" +        size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}" +        access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" +        pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" +      when: +        - openshift_logging_elasticsearch_pvc_dynamic | bool + +    - name: Set ES storage +      oc_obj: +        state: present +        kind: pvc +        name: "{{ openshift_logging_elasticsearch_pvc_name }}" +        namespace: "{{ openshift_logging_elasticsearch_namespace }}" +        files: +          - "{{ tempdir }}/templates/logging-es-pvc.yml" +        delete_after: true  - set_fact:      es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" @@ -341,6 +352,7 @@      logging_component: elasticsearch      deploy_name: "{{ es_deploy_name }}"      image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}" +    proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"      es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"      es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"      es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" @@ -356,7 +368,7 @@      namespace: "{{ openshift_logging_elasticsearch_namespace }}"      kind: dc      files: -    - "{{ tempdir }}/templates/logging-es-dc.yml" +      - "{{ tempdir }}/templates/logging-es-dc.yml"      delete_after: true  - name: Retrieving the cert to use when generating secrets for the {{ 
es_component }} component @@ -364,37 +376,37 @@      src: "{{ generated_certs_dir }}/{{ item.file }}"    register: key_pairs    with_items: -  - { name: "ca_file", file: "ca.crt" } -  - { name: "es_key", file: "system.logging.es.key" } -  - { name: "es_cert", file: "system.logging.es.crt" } +    - { name: "ca_file", file: "ca.crt" } +    - { name: "es_key", file: "system.logging.es.key" } +    - { name: "es_cert", file: "system.logging.es.crt" }    when: openshift_logging_es_allow_external | bool  - set_fact:      es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"    when: -  - openshift_logging_es_key | trim | length > 0 -  - openshift_logging_es_allow_external | bool +    - openshift_logging_es_key | trim | length > 0 +    - openshift_logging_es_allow_external | bool    changed_when: false  - set_fact:      es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode  }}"    when: -  - openshift_logging_es_cert | trim | length > 0 -  - openshift_logging_es_allow_external | bool +    - openshift_logging_es_cert | trim | length > 0 +    - openshift_logging_es_allow_external | bool    changed_when: false  - set_fact:      es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode  }}"    when: -  - openshift_logging_es_ca_ext | trim | length > 0 -  - openshift_logging_es_allow_external | bool +    - openshift_logging_es_ca_ext | trim | length > 0 +    - openshift_logging_es_allow_external | bool    changed_when: false  - set_fact:      es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"    when: -  - es_ca is not defined -  - openshift_logging_es_allow_external | bool +    - es_ca is not defined +    - openshift_logging_es_allow_external | bool    changed_when: false  - name: Generating Elasticsearch {{ es_component }} route template @@ -425,7 +437,7 @@      namespace: "{{ openshift_logging_elasticsearch_namespace }}"      kind: route      files: -    - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml" +      - 
"{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"    when: openshift_logging_es_allow_external | bool  ## Placeholder for migration when necessary ## diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 1ed886627..ce3b2eb83 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -40,7 +40,7 @@ spec:  {% endif %}        containers:          - name: proxy -          image: {{openshift_logging_elasticsearch_proxy_image_prefix}}:{{openshift_logging_elasticsearch_proxy_image_version}} +          image: {{ proxy_image }}            imagePullPolicy: Always            args:             - --upstream-ca=/etc/elasticsearch/secret/admin-ca @@ -86,7 +86,7 @@ spec:              requests:                memory: "{{es_memory_limit}}"  {% if es_container_security_context %} -          securityContext: {{ es_container_security_context | to_yaml }}  +          securityContext: {{ es_container_security_context | to_yaml }}  {% endif %}            ports:              - diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml new file mode 100644 index 000000000..b7d105caf --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/" +__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0" diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml new file mode 100644 index 000000000..c87d48e27 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/" 
+__openshift_logging_elasticsearch_proxy_image_version: "v3.7" diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 index 9ff4c7e80..ea1fd3efd 100644 --- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 +++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 @@ -54,9 +54,9 @@ objects:            serviceAccount: aggregated-logging-eventrouter            serviceAccountName: aggregated-logging-eventrouter  {% if node_selector is iterable and node_selector | length > 0 %} -      nodeSelector: +          nodeSelector:  {% for key, value in node_selector.iteritems() %} -        {{ key }}: "{{ value }}" +            {{ key }}: "{{ value }}"  {% endfor %}  {% endif %}            containers: diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 82326bdd1..25f7580a4 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -56,3 +56,7 @@ openshift_logging_fluentd_aggregating_passphrase: none  #fluentd_secureforward_contents:  openshift_logging_fluentd_file_buffer_limit: 1Gi + +# Configure fluentd to tail audit log file and filter out container engine's logs from there +# These logs are then stored in ES operation index +openshift_logging_fluentd_audit_container_engine: False diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 37960afd1..06bb35dbc 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -108,7 +108,6 @@      src: secure-forward.conf      dest: "{{ tempdir }}/secure-forward.conf"    when: fluentd_secureforward_contents is undefined -    changed_when: no  - copy: @@ -173,6 +172,9 @@      ops_port: "{{ openshift_logging_fluentd_ops_port }}"      
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"      fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" +    audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}" +    audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}" +    audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"    check_mode: no    changed_when: no diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index b5f27b60d..644b70031 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -66,7 +66,9 @@ spec:            readOnly: true          - name: filebufferstorage            mountPath: /var/lib/fluentd -{% if openshift_logging_mux_client_mode is defined %} +{% if openshift_logging_mux_client_mode is defined and +     ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or +      (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}          - name: muxcerts            mountPath: /etc/fluent/muxkeys            readOnly: true @@ -114,7 +116,9 @@ spec:                resource: limits.memory          - name: "FILE_BUFFER_LIMIT"            value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}" -{% if openshift_logging_mux_client_mode is defined %} +{% if openshift_logging_mux_client_mode is defined and +     ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or +      (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}          - name: "MUX_CLIENT_MODE"            value: "{{ openshift_logging_mux_client_mode }}"  {% endif %} @@ -168,6 +172,28 @@ spec:            value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"  {% 
endif %} +{% if audit_container_engine %} +        - name: "AUDIT_CONTAINER_ENGINE" +          value: "{{ audit_container_engine | lower }}" +{% endif %} + +{% if audit_container_engine %} +        - name: "NODE_NAME" +          valueFrom: +            fieldRef: +              fieldPath: spec.nodeName +{% endif %} + +{% if audit_log_file != '' %} +        - name: AUDIT_FILE +          value: "{{ audit_log_file }}" +{% endif %} + +{% if audit_pos_log_file != '' %} +        - name: AUDIT_POS_FILE +          value: "{{ audit_pos_log_file }}" +{% endif %} +        volumes:        - name: runlogjournal          hostPath: @@ -196,7 +222,9 @@ spec:        - name: dockerdaemoncfg          hostPath:            path: /etc/docker -{% if openshift_logging_mux_client_mode is defined %} +{% if openshift_logging_mux_client_mode is defined and +     ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or +      (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}        - name: muxcerts          secret:            secretName: logging-mux diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 73e935d3f..3da861d03 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,4 +1,9 @@  --- +# openshift_master_defaults_in_use is a workaround to detect if we are consuming +# the plays from the role or outside of the role. 
+openshift_master_defaults_in_use: True +openshift_master_debug_level: "{{ debug_level | default(2) }}" +  r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"  r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" @@ -26,6 +31,9 @@ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"  oreg_auth_credentials_replace: False  l_bind_docker_reg_auth: False +containerized_svc_dir: "/usr/lib/systemd/system" +ha_svc_template_path: "native-cluster" +  # NOTE  # r_openshift_master_*_default may be defined external to this role.  # openshift_use_*, if defined, may affect other roles or play behavior. @@ -38,8 +46,99 @@ r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"  r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"  r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}" +r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}" +r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}" +  r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"  r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"  r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"  r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}" + +openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" +openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}" + +openshift_master_config_dir_default: "{{ (openshift.common.config_base | default('/etc/origin/master')) ~ '/master' }}" +openshift_master_config_dir: "{{ openshift_master_config_dir_default }}" +openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}" + 
+openshift_master_node_config_networkconfig_mtu: 1450 + +openshift_master_node_config_kubeletargs_cpu: 500m +openshift_master_node_config_kubeletargs_mem: 512M + +openshift_master_bootstrap_enabled: False + +openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}" + +openshift_master_config_imageconfig_format: "{{ oreg_url if oreg_url != '' else 'registry.access.redhat.com/openshift3/ose-${component}:${version}' }}" + +# these are for the default settings in a generated node-config.yaml +openshift_master_node_config_default_edits: +- key: nodeName +  state: absent +- key: dnsBindAddress +  value: 127.0.0.1:53 +- key: dnsDomain +  value: cluster.local +- key: dnsRecursiveResolvConf +  value: /etc/origin/node/resolv.conf +- key: imageConfig.format +  value: "{{ openshift_master_config_imageconfig_format }}" +- key: kubeletArguments.cloud-config +  value: +  - "/etc/origin/cloudprovider/{{ openshift_master_cloud_provider }}.conf" +- key: kubeletArguments.cloud-provider +  value: +  - "{{ openshift_master_cloud_provider }}" +- key: kubeletArguments.kube-reserved +  value: +  - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}" +- key: kubeletArguments.system-reserved +  value: +  - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}" +- key: enable-controller-attach-detach +  value: +  - 'true' +- key: networkConfig.mtu +  value: 8951 +- key: networkConfig.networkPluginName +  value: "{{ r_openshift_master_sdn_network_plugin_name }}" +- key: networkPluginName +  value: "{{ r_openshift_master_sdn_network_plugin_name }}" + + +# We support labels for all nodes here +openshift_master_node_config_kubeletargs_default_labels: [] +# We do support overrides for node group labels +openshift_master_node_config_kubeletargs_master_labels: [] +openshift_master_node_config_kubeletargs_infra_labels: [] 
+openshift_master_node_config_kubeletargs_compute_labels: [] + +openshift_master_node_config_master: +  type: master +  edits: +  - key: kubeletArguments.node-labels +    value: "{{ openshift_master_node_config_kubeletargs_default_labels | +               union(openshift_master_node_config_kubeletargs_master_labels) | +               union(['type=master']) }}" +openshift_master_node_config_infra: +  type: infra +  edits: +  - key: kubeletArguments.node-labels +    value: "{{ openshift_master_node_config_kubeletargs_default_labels | +               union(openshift_master_node_config_kubeletargs_infra_labels) | +               union(['type=infra']) }}" +openshift_master_node_config_compute: +  type: compute +  edits: +  - key: kubeletArguments.node-labels +    value: "{{ openshift_master_node_config_kubeletargs_default_labels | +               union(openshift_master_node_config_kubeletargs_compute_labels) | +               union(['type=compute']) }}" + +openshift_master_node_configs: +- "{{ openshift_master_node_config_infra }}" +- "{{ openshift_master_node_config_compute }}" + +openshift_master_bootstrap_namespace: openshift-node diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index a657668a9..a1cda2ad4 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info:    - cloud  dependencies:  - role: lib_openshift +- role: lib_utils  - role: lib_os_firewall diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml index 0013f5289..eee89743c 100644 --- a/roles/openshift_master/tasks/bootstrap.yml +++ b/roles/openshift_master/tasks/bootstrap.yml @@ -26,3 +26,66 @@    copy:      content: "{{ kubeconfig_out.stdout }}"      dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig" + +- name: create a temp dir for this work +  command: mktemp -d /tmp/openshift_node_config-XXXXXX +  register: mktempout +  run_once: true + +# 
This config generation exists so that we do not have to maintain +# our own copy of the template.  This is generated by +# the product and the following settings will be +# generated by the master +- name: generate a node-config dynamically +  command: > +    {{ openshift_master_client_binary }} adm create-node-config +    --node-dir={{ mktempout.stdout }}/ +    --node=CONFIGMAP +    --hostnames=test +    --certificate-authority={{ openshift_master_config_dir }}/ca.crt +    --signer-cert={{ openshift_master_config_dir }}/ca.crt +    --signer-key={{ openshift_master_config_dir }}/ca.key +    --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt +    --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt +  register: configgen +  run_once: true + +- name: remove the default settings +  yedit: +    state: "{{ item.state | default('present') }}" +    src: "{{ mktempout.stdout }}/node-config.yaml" +    key: "{{ item.key }}" +    value: "{{ item.value | default(omit) }}" +  with_items: "{{ openshift_master_node_config_default_edits }}" +  run_once: true + +- name: copy the generated config into each group +  copy: +    src: "{{ mktempout.stdout }}/node-config.yaml" +    remote_src: true +    dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" +  with_items: "{{ openshift_master_node_configs }}" +  run_once: true + +- name: "specialize the generated configs for node-config-{{ item.type }}" +  yedit: +    src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" +    edits: "{{ item.edits }}" +  with_items: "{{ openshift_master_node_configs }}" +  run_once: true + +- name: create node-config.yaml configmap +  oc_configmap: +    name: "node-config-{{ item.type }}" +    namespace: "{{ openshift_master_bootstrap_namespace }}" +    from_file: +      node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml" +  with_items: "{{ openshift_master_node_configs }}" +  run_once: true + +- name: remove templated files +  file: +    
dest: "{{ mktempout.stdout }}/" +    state: absent +  with_items: "{{ openshift_master_node_configs }}" +  run_once: true diff --git a/roles/openshift_master/tasks/check_master_api_is_ready.yml b/roles/openshift_master/tasks/check_master_api_is_ready.yml new file mode 100644 index 000000000..7e8a7a596 --- /dev/null +++ b/roles/openshift_master/tasks/check_master_api_is_ready.yml @@ -0,0 +1,14 @@ +--- +- name: Wait for API to become available +  # Using curl here since the uri module requires python-httplib2 and +  # wait_for port doesn't provide health information. +  command: > +    curl --silent --tlsv1.2 +    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt +    {{ openshift.master.api_url }}/healthz/ready +  register: l_api_available_output +  until: l_api_available_output.stdout == 'ok' +  retries: 120 +  delay: 1 +  run_once: true +  changed_when: false diff --git a/roles/openshift_master/tasks/configure_external_etcd.yml b/roles/openshift_master/tasks/configure_external_etcd.yml new file mode 100644 index 000000000..b0590ac84 --- /dev/null +++ b/roles/openshift_master/tasks/configure_external_etcd.yml @@ -0,0 +1,17 @@ +--- +- name: Remove etcdConfig section +  yedit: +    src: /etc/origin/master/master-config.yaml +    key: "etcdConfig" +    state: absent +- name: Set etcdClientInfo.ca to master.etcd-ca.crt +  yedit: +    src: /etc/origin/master/master-config.yaml +    key: etcdClientInfo.ca +    value: master.etcd-ca.crt +- name: Set etcdClientInfo.urls to the external etcd +  yedit: +    src: /etc/origin/master/master-config.yaml +    key: etcdClientInfo.urls +    value: +      - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 82b4b420c..824a5886e 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -311,23 +311,7 @@  # A separate wait is required here for native HA since notifies 
will  # be resolved after all tasks in the role. -- name: Wait for API to become available -  # Using curl here since the uri module requires python-httplib2 and -  # wait_for port doesn't provide health information. -  command: > -    curl --silent --tlsv1.2 -    {% if openshift.common.version_gte_3_2_or_1_2 | bool %} -    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt -    {% else %} -    --cacert {{ openshift.common.config_base }}/master/ca.crt -    {% endif %} -    {{ openshift.master.api_url }}/healthz/ready -  register: l_api_available_output -  until: l_api_available_output.stdout == 'ok' -  retries: 120 -  delay: 1 -  run_once: true -  changed_when: false +- include: check_master_api_is_ready.yml    when:    - openshift.master.cluster_method == 'native'    - master_api_service_status_changed | bool diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index 2644f235e..63d483760 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -1,14 +1,4 @@  --- -# We need to setup some variables as this play might be called directly -# from outside of the role. -- set_fact: -    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" -  when: oreg_auth_credentials_path is not defined - -- set_fact: -    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" -  when: oreg_host is not defined -  - name: Check for credentials file for registry auth    stat:      path: "{{ oreg_auth_credentials_path }}" diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 8de62c59a..fcc66044b 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -1,31 +1,9 @@  --- -# This file is included both in the openshift_master role and in the upgrade -# playbooks.  
For that reason the ha_svc variables are use set_fact instead of -# the vars directory on the role. +# systemd_units.yml is included both in the openshift_master role and in the upgrade +# playbooks. -# This play may be consumed outside the role, we need to ensure that -# openshift_master_config_dir is set. -- name: Set openshift_master_config_dir if unset -  set_fact: -    openshift_master_config_dir: '/etc/origin/master' -  when: openshift_master_config_dir is not defined - -# This play may be consumed outside the role, we need to ensure that -# r_openshift_master_data_dir is set. -- name: Set r_openshift_master_data_dir if unset -  set_fact: -    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}" -  when: r_openshift_master_data_dir is not defined - -- include: registry_auth.yml - -- name: Remove the legacy master service if it exists -  include: clean_systemd_units.yml - -- name: Init HA Service Info -  set_fact: -    containerized_svc_dir: "/usr/lib/systemd/system" -    ha_svc_template_path: "native-cluster" +- include: upgrade_facts.yml +  when: openshift_master_defaults_in_use is not defined  - name: Set HA Service Info for containerized installs    set_fact: @@ -34,6 +12,11 @@    when:    - openshift.common.is_containerized | bool +- include: registry_auth.yml + +- name: Remove the legacy master service if it exists +  include: clean_systemd_units.yml +  # This is the image used for both HA and non-HA clusters:  - name: Pre-pull master image    command: > diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml new file mode 100644 index 000000000..f6ad438aa --- /dev/null +++ b/roles/openshift_master/tasks/upgrade_facts.yml @@ -0,0 +1,33 @@ +--- +# This file exists because we call systemd_units.yml from outside of the role +# during upgrades.  When we remove this pattern, we can probably +# eliminate most of these set_fact items. 
+ +- name: Set openshift_master_config_dir if unset +  set_fact: +    openshift_master_config_dir: '/etc/origin/master' +  when: openshift_master_config_dir is not defined + +- name: Set r_openshift_master_data_dir if unset +  set_fact: +    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}" +  when: r_openshift_master_data_dir is not defined + +- set_fact: +    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker" +  when: oreg_auth_credentials_path is not defined + +- set_fact: +    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" +  when: oreg_host is not defined + +- name: Set openshift_master_debug_level +  set_fact: +    openshift_master_debug_level: "{{ debug_level | default(2) }}" +  when: +  - openshift_master_debug_level is not defined + +- name: Init HA Service Info +  set_fact: +    containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}" +    ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}" diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index b931f1414..7ec26ceb7 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -1,4 +1,4 @@ -OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }} +OPTIONS=--loglevel={{ openshift_master_debug_level }}  CONFIG_FILE={{ openshift_master_config_file }}  {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}  {% if openshift_master_is_scaleup_host %} diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 9b3fbcf49..40775571f 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -115,7 +115,7 @@ 
etcdStorageConfig:    openShiftStorageVersion: v1  imageConfig:    format: {{ openshift.master.registry_url }} -  latest: false +  latest: {{ openshift_master_image_config_latest }}  {% if 'image_policy_config' in openshift.master %}  imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}  {% endif %} @@ -179,7 +179,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}  networkConfig:    clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }} -{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %} +{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}    networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}  {% endif %}  # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet @@ -275,12 +275,5 @@ servingInfo:    - {{ cipher_suite }}  {% endfor %}  {% endif %} -{% if openshift_template_service_broker_namespaces is defined %} -templateServiceBrokerConfig: -  templateNamespaces: -{% for namespace in openshift_template_service_broker_namespaces %} -  - {{ namespace }} -{% endfor %} -{% endif %}  volumeConfig:    dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index 63eb3ea1b..cc21b37af 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -1,4 +1,4 @@ -OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if 
openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }} +OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}  CONFIG_FILE={{ openshift_master_config_file }}  {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}  {% if openshift_master_is_scaleup_host %} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 0adfd05b6..493fc510e 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -1,4 +1,4 @@ -OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }} +OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}  CONFIG_FILE={{ openshift_master_config_file }}  {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}  {% if openshift_master_is_scaleup_host %} diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index a95570d38..501be148e 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -34,7 +34,6 @@        cluster_method: "{{ openshift_master_cluster_method | default('native') }}"        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"        
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" -      debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"        api_port: "{{ openshift_master_api_port | default(None) }}"        api_url: "{{ openshift_master_api_url | default(None) }}"        api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 7928a0346..48584bd64 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -54,6 +54,7 @@      access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"      size: "{{ openshift_metrics_cassandra_pvc_size }}"      pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" +    storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"    with_sequence: count={{ openshift_metrics_cassandra_replicas }}    when: openshift_metrics_cassandra_storage_type == 'dynamic'    changed_when: false diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 1214c08e5..b9f16dfd4 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,8 +1,11 @@  --- +openshift_node_debug_level: "{{ debug_level | default(2) }}" +  r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"  r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" -openshift_service_type: "{{ openshift.common.service_type }}" +openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" +openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"  openshift_image_tag: '' @@ -15,7 +18,6 @@ openshift_node_ami_prep_packages:  - openvswitch  - docker  - etcd -#- pcs  - haproxy  - dnsmasq  - ntp 
@@ -52,7 +54,6 @@ openshift_node_ami_prep_packages:  # - container-selinux  # - atomic  # -openshift_deployment_type: origin  openshift_node_bootstrap: False @@ -103,5 +104,11 @@ openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"  openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"  openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}" +openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}" +openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}" +  openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"  openshift_node_data_dir: "{{ openshift_node_data_dir_default }}" + +openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}" +openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}" diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml new file mode 100644 index 000000000..ea280640f --- /dev/null +++ b/roles/openshift_node/files/bootstrap.yml @@ -0,0 +1,63 @@ +#!/usr/bin/ansible-playbook +--- +- hosts: localhost +  gather_facts: yes +  vars: +    origin_dns: +      file: /etc/dnsmasq.d/origin-dns.conf +      lines: +      - regex: ^listen-address +        state: present +        line: "listen-address={{ ansible_default_ipv4.address }}" +    node_dns: +      file: /etc/dnsmasq.d/node-dnsmasq.conf +      lines: +      - regex: "^server=/in-addr.arpa/127.0.0.1$" +        line: server=/in-addr.arpa/127.0.0.1 +      - regex: "^server=/cluster.local/127.0.0.1$" +        line: server=/cluster.local/127.0.0.1 + +  tasks: +  - include_vars: openshift_settings.yaml + +  - name: set the data for node_dns +    lineinfile: +      create: yes +      insertafter: EOF +      path: "{{ node_dns.file }}" +      regexp: "{{ item.regex }}" +      line: "{{ item.line | default(omit) }}" +    with_items: "{{ node_dns.lines }}" + +  - name: set 
the data for origin_dns +    lineinfile: +      create: yes +      state: "{{ item.state | default('present') }}" +      insertafter: "{{ item.after | default(omit) }}" +      path: "{{ origin_dns.file }}" +      regexp: "{{ item.regex }}" +      line: "{{ item.line | default(omit)}}" +    with_items: "{{ origin_dns.lines }}" + +  - when: +    - openshift_group_type is defined +    - openshift_group_type != '' +    - openshift_group_type != 'master' +    block: +    - name: determine the openshift_service_type +      stat: +        path: /etc/sysconfig/atomic-openshift-node +      register: service_type_results + +    - name: set openshift_service_type fact based on stat results +      set_fact: +        openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}" + +    - name: update the sysconfig to have necessary variables +      lineinfile: +        dest: "/etc/sysconfig/{{ openshift_service_type }}-node" +        line: "{{ item.line }}" +        regexp: "{{ item.regexp }}" +      with_items: +      - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}" +        regexp: "^BOOTSTRAP_CONFIG_NAME=.*" diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 25a6fc721..b102c1b18 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -3,7 +3,11 @@    systemd:      name: openvswitch      state: restarted -  when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool +  when: +  - (not skip_node_svc_handlers | default(False) | bool) +  - not (ovs_service_status_changed | default(false) | bool) +  - openshift_node_use_openshift_sdn | bool +  - not openshift_node_bootstrap    register: l_openshift_node_stop_openvswitch_result    until: not l_openshift_node_stop_openvswitch_result | failed    retries: 3 @@ -11,10 +15,11 @@   
 notify:    - restart openvswitch pause -  - name: restart openvswitch pause    pause: seconds=15 -  when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool +  when: +  - (not skip_node_svc_handlers | default(False) | bool) +  - openshift.common.is_containerized | bool  - name: restart node    systemd: diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml new file mode 100644 index 000000000..38c2b794d --- /dev/null +++ b/roles/openshift_node/tasks/aws.yml @@ -0,0 +1,21 @@ +--- +- name: Configure AWS Cloud Provider Settings +  lineinfile: +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node +    regexp: "{{ item.regex }}" +    line: "{{ item.line }}" +    create: true +  with_items: +    - regex: '^AWS_ACCESS_KEY_ID=' +      line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" +    - regex: '^AWS_SECRET_ACCESS_KEY=' +      line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" +  register: sys_env_update +  no_log: True +  when: +    - openshift_cloudprovider_kind is defined +    - openshift_cloudprovider_kind == 'aws' +    - openshift_cloudprovider_aws_access_key is defined +    - openshift_cloudprovider_aws_secret_key is defined +  notify: +    - restart node diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml index b83b2c452..8c03f6c41 100644 --- a/roles/openshift_node/tasks/bootstrap.yml +++ b/roles/openshift_node/tasks/bootstrap.yml @@ -17,19 +17,31 @@        [Unit]        After=cloud-init.service -- name: update the sysconfig to have KUBECONFIG +- name: update the sysconfig to have necessary variables    lineinfile:      dest: "/etc/sysconfig/{{ openshift_service_type }}-node" -    line: "KUBECONFIG=/root/csr_kubeconfig" +    line: "{{ item.line | default(omit) }}" +    regexp: "{{ item.regexp }}" +    state: "{{ item.state | default('present') }}" +  
with_items: +  # add the kubeconfig +  - line: "KUBECONFIG=/etc/origin/node/csr_kubeconfig"      regexp: "^KUBECONFIG=.*" +  # remove the config file.  This comes from openshift_facts +  - regexp: "^CONFIG_FILE=.*" +    state: absent -- name: update the ExecStart to have bootstrap -  lineinfile: -    dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service" -    line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" -    regexp: "^ExecStart=.*" +- name: include aws sysconfig credentials +  include: aws.yml +  static: yes + +#- name: update the ExecStart to have bootstrap +#  lineinfile: +#    dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service" +#    line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}" +#    regexp: "^ExecStart=.*" -- name: "systemctl enable {{ openshift_service_type }}-node" +- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"    systemd:      name: "{{ item }}"      enabled: no @@ -42,6 +54,30 @@      path: /etc/origin/.config_managed    register: rpmgenerated_config +- name: create directories for bootstrapping +  file: +    state: directory +    dest: "{{ item }}" +  with_items: +  - /root/openshift_bootstrap +  - /var/lib/origin/openshift.local.config +  - /var/lib/origin/openshift.local.config/node +  - "/etc/docker/certs.d/docker-registry.default.svc:5000" + +- name: laydown the bootstrap.yml file for on boot configuration +  copy: +    src: bootstrap.yml +    dest: /root/openshift_bootstrap/bootstrap.yml + +- name: symlink master ca for docker-registry +  file: +    src: "{{ item }}" +    dest: "/etc/docker/certs.d/docker-registry.default.svc:5000/{{ item | basename }}" +    state: link +    force: yes +  with_items: +  - /var/lib/origin/openshift.local.config/node/node-client-ca.crt +  - when: rpmgenerated_config.stat.exists    
block:    - name: Remove RPM generated config files if present @@ -50,6 +86,7 @@        state: absent      with_items:      - master +    - .config_managed    # with_fileglob doesn't work correctly due to a few issues.    # Could change this to fileglob when it gets fixed. @@ -62,5 +99,7 @@      file:        path: "{{ item.path }}"        state: absent -    when: "'resolv.conf' not in item.path or 'node-dnsmasq.conf' not in item.path" +    when: +    - "'resolv.conf' not in item.path" +    - "'node-dnsmasq.conf' not in item.path"      with_items: "{{ find_results.files }}" diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index e3898b520..c08f43118 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -46,26 +46,16 @@    notify:      - restart node -- name: Configure AWS Cloud Provider Settings -  lineinfile: -    dest: /etc/sysconfig/{{ openshift.common.service_type }}-node -    regexp: "{{ item.regex }}" -    line: "{{ item.line }}" -    create: true -  with_items: -    - regex: '^AWS_ACCESS_KEY_ID=' -      line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}" -    - regex: '^AWS_SECRET_ACCESS_KEY=' -      line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" -  no_log: True -  when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined -  notify: -    - restart node +- name: include aws provider credentials +  include: aws.yml +  static: yes  # Necessary because when you're on a node that's also a master the master will be  # restarted after the node restarts docker and it will take up to 60 seconds for  # systemd to start the master again -- when: openshift.common.is_containerized | bool +- when: +    - openshift.common.is_containerized | bool +    - not openshift_node_bootstrap  
  block:      - name: Wait for master API to become available before proceeding        # Using curl here since the uri module requires python-httplib2 and @@ -90,30 +80,28 @@          enabled: yes          state: started -- name: Start and enable node -  systemd: -    name: "{{ openshift.common.service_type }}-node" -    enabled: yes -    state: started -    daemon_reload: yes -  register: node_start_result -  until: not node_start_result | failed -  retries: 1 -  delay: 30 -  ignore_errors: true - -- name: Dump logs from node service if it failed -  command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node -  when: node_start_result | failed +- when: not openshift_node_bootstrap +  block: +    - name: Start and enable node +      systemd: +        name: "{{ openshift.common.service_type }}-node" +        enabled: yes +        state: started +        daemon_reload: yes +      register: node_start_result +      until: not node_start_result | failed +      retries: 1 +      delay: 30 +      ignore_errors: true -- name: Abort if node failed to start -  fail: -    msg: Node failed to start please inspect the logs and try again -  when: node_start_result | failed +    - name: Dump logs from node service if it failed +      command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node +      when: node_start_result | failed -- name: Setup tuned -  include: tuned.yml -  static: yes +    - name: Abort if node failed to start +      fail: +        msg: Node failed to start please inspect the logs and try again +      when: node_start_result | failed -- set_fact: -    node_service_status_changed: "{{ node_start_result | changed }}" +    - set_fact: +        node_service_status_changed: "{{ node_start_result | changed }}" diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml index 1186062eb..527580481 100644 --- 
a/roles/openshift_node/tasks/config/configure-node-settings.yml +++ b/roles/openshift_node/tasks/config/configure-node-settings.yml @@ -7,7 +7,7 @@      create: true    with_items:    - regex: '^OPTIONS=' -    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" +    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"    - regex: '^CONFIG_FILE='      line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"    - regex: '^IMAGE_VERSION=' diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml deleted file mode 100644 index f92ff79b5..000000000 --- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Install Node docker service file -  template: -    dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" -    src: openshift.docker.node.service -  notify: -  - reload systemd units -  - restart node diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 265bf2c46..6b7e40491 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -3,12 +3,12 @@    block:    - name: Install Node package      package: -      name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"        state: present    - name: Install sdn-ovs package      package: -      name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"        state: 
present      when:      - openshift_node_use_openshift_sdn | bool @@ -27,5 +27,3 @@        docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}      register: pull_result      changed_when: "'Downloaded newer image' in pull_result.stdout" - -  - include: config/install-node-docker-service-file.yml diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 59b8bb76e..eae9ca7bc 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -66,15 +66,10 @@      sysctl_file: "/etc/sysctl.d/99-openshift.conf"      reload: yes -- name: include bootstrap node config -  include: bootstrap.yml -  when: openshift_node_bootstrap -  - include: registry_auth.yml  - name: include standard node config    include: config.yml -  when: not openshift_node_bootstrap  #### Storage class plugins here ####  - name: NFS storage plugin configuration @@ -98,3 +93,7 @@  - include: config/workaround-bz1331590-ovs-oom-fix.yml    when: openshift_node_use_openshift_sdn | default(true) | bool + +- name: include bootstrap node config +  include: bootstrap.yml +  when: openshift_node_bootstrap diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 6b4490f61..9c182ade6 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -1,11 +1,9 @@  --- -# This file is included both in the openshift_master role and in the upgrade -# playbooks.  
- name: Install Node service file    template:      dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" -    src: "node.service.j2" -  when: not openshift.common.is_containerized | bool +    src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" +  when: not openshift.common.is_node_system_container | bool    notify:    - reload systemd units    - restart node diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 0856737f6..7602d8ee6 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -12,17 +12,17 @@ After=dnsmasq.service  [Service]  Type=notify -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node  Environment=GOTRACEBACK=crash  ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/  ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1  ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf  ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string: -ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS +ExecStart=/usr/bin/openshift start node {% if openshift_node_bootstrap %} --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME}{% endif %} --config=${CONFIG_FILE} $OPTIONS  LimitNOFILE=65536  LimitCORE=infinity  WorkingDirectory=/var/lib/origin/ -SyslogIdentifier={{ openshift.common.service_type }}-node +SyslogIdentifier={{ openshift_service_type }}-node  Restart=always  RestartSec=5s  TimeoutStartSec=300 diff --git 
a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 7049f7189..718d35dca 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -13,7 +13,7 @@ dockerConfig:  iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"  imageConfig:    format: {{ openshift.node.registry_url }} -  latest: false +  latest: {{ openshift_node_image_config_latest }}  kind: NodeConfig  kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}  {% if openshift_use_crio | default(False) %} @@ -44,7 +44,7 @@ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}  # deprecates networkPluginName above. The two should match.  networkConfig:     mtu: {{ openshift.node.sdn_mtu }} -{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %} +{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}     networkPluginName: {{ openshift_node_sdn_network_plugin_name }}  {% endif %}  {% if openshift.node.set_node_ip | bool %} @@ -67,9 +67,11 @@ servingInfo:  {% endfor %}  {% endif %}  volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes +{% if not (openshift_node_use_kuryr | default(False)) | bool %}  proxyArguments:    proxy-mode:       - {{ openshift.node.proxy_mode }} +{% endif %}  volumeConfig:    localQuota:      perFSGroup: {{ openshift.node.local_quota_per_fsgroup }} diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml index 0d5fa664c..b45130400 100644 --- a/roles/openshift_node_facts/tasks/main.yml +++ b/roles/openshift_node_facts/tasks/main.yml @@ -11,7 +11,6 @@    - role: node      local_facts:        annotations: 
"{{ openshift_node_annotations | default(none) }}" -      debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"        iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"        labels: "{{ openshift_node_labels | default(None) }}" diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md index c7c0ff34a..73b98ad90 100644 --- a/roles/openshift_node_upgrade/README.md +++ b/roles/openshift_node_upgrade/README.md @@ -49,7 +49,6 @@ From openshift.node:  | Name                               |  Default Value      |                     |  |------------------------------------|---------------------|---------------------| -| openshift.node.debug_level         |---------------------|---------------------|  | openshift.node.node_image          |---------------------|---------------------|  | openshift.node.ovs_image           |---------------------|---------------------| diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml index 6507b015d..10b4c6977 100644 --- a/roles/openshift_node_upgrade/defaults/main.yml +++ b/roles/openshift_node_upgrade/defaults/main.yml @@ -1,4 +1,6 @@  --- +openshift_node_debug_level: "{{ debug_level | default(2) }}" +  openshift_use_openshift_sdn: True  os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml index 1186062eb..527580481 100644 --- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml +++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml @@ -7,7 +7,7 @@      create: true    with_items:    - regex: '^OPTIONS=' -    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}" +    line: "OPTIONS=--loglevel={{ 
openshift_node_debug_level }}"    - regex: '^CONFIG_FILE='      line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"    - regex: '^IMAGE_VERSION=' diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml index afff2f8ba..226f5290c 100644 --- a/roles/openshift_node_upgrade/tasks/systemd_units.yml +++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml @@ -6,7 +6,7 @@  # - openshift.node.ovs_image  # - openshift_use_openshift_sdn  # - openshift.common.service_type -# - openshift.node.debug_level +# - openshift_node_debug_level  # - openshift.common.config_base  # - openshift.common.http_proxy  # - openshift.common.portal_net diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 5aa8aecec..c08bec4cb 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -10,50 +10,30 @@ openshift_prometheus_node_selector: {"region":"infra"}  # images  openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"  openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev" -openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev" +openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1"  openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"  # additional prometheus rules file  openshift_prometheus_additional_rules_file: null -# All the required exports -openshift_prometheus_pv_exports: -  - prometheus -  - prometheus-alertmanager -  - prometheus-alertbuffer -# PV template files and their created object names -openshift_prometheus_pv_data: -  - pv_name: prometheus -    pv_template: prom-pv-server.yml -    pv_label: Prometheus Server PV -  - pv_name: prometheus-alertmanager -    pv_template: prom-pv-alertmanager.yml -    pv_label: Prometheus Alertmanager PV -  - pv_name: 
prometheus-alertbuffer -    pv_template: prom-pv-alertbuffer.yml -    pv_label: Prometheus Alert Buffer PV - -# Hostname/IP of the NFS server. Currently defaults to first master -openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}" -  # storage  openshift_prometheus_storage_type: pvc  openshift_prometheus_pvc_name: prometheus -openshift_prometheus_pvc_size: 10G +openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"  openshift_prometheus_pvc_access_modes: [ReadWriteOnce] -openshift_prometheus_pvc_pv_selector: {} +openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"  openshift_prometheus_alertmanager_storage_type: pvc  openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager -openshift_prometheus_alertmanager_pvc_size: 10G +openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"  openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce] -openshift_prometheus_alertmanager_pvc_pv_selector: {} +openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"  openshift_prometheus_alertbuffer_storage_type: pvc  openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer -openshift_prometheus_alertbuffer_pvc_size: 10G +openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"  openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce] -openshift_prometheus_alertbuffer_pvc_pv_selector: {} +openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"  # container resources  openshift_prometheus_cpu_limit: null diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports deleted file mode 100644 index 3ccedb1fd..000000000 --- 
a/roles/openshift_prometheus/files/openshift_prometheus.exports +++ /dev/null @@ -1,3 +0,0 @@ -/exports/prometheus *(rw,no_root_squash,no_wdelay) -/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay) -/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay) diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml deleted file mode 100644 index 4e79da05f..000000000 --- a/roles/openshift_prometheus/tasks/create_pvs.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Check for existance and then conditionally: -# - evaluate templates -# - PVs -# -# These tasks idempotently create required Prometheus PV objects. Do not -# call this file directly. This file is intended to be ran as an -# include that has a 'with_items' attached to it. Hence the use below -# of variables like "{{ item.pv_label }}" - -- name: "Check if the {{ item.pv_label }} template has been created already" -  oc_obj: -    namespace: "{{ openshift_prometheus_namespace }}" -    state: list -    kind: pv -    name: "{{ item.pv_name }}" -  register: prom_pv_check - -# Skip all of this if the PV already exists -- block: -    - name: "Ensure the {{ item.pv_label }} template is evaluated" -      template: -        src: "{{ item.pv_template }}.j2" -        dest: "{{ tempdir }}/templates/{{ item.pv_template }}" - -    - name: "Ensure {{ item.pv_label }} is created" -      oc_obj: -        namespace: "{{ openshift_prometheus_namespace }}" -        kind: pv -        name: "{{ item.pv_name }}" -        state: present -        delete_after: True -        files: -          - "{{ tempdir }}/templates/{{ item.pv_template }}" -  when: -    - not prom_pv_check.results.results.0 diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index a9bce2fb1..cb75eedca 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ 
b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -54,15 +54,6 @@      resource_name: cluster-reader      user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus" - -###################################################################### -# NFS -# In the case that we are not running on a cloud provider, volumes must be statically provisioned - -- include: nfs.yaml -  when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')) - -  # create prometheus and alerts services  # TODO join into 1 task with loop  - name: Create prometheus service diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml deleted file mode 100644 index 0b45f2cee..000000000 --- a/roles/openshift_prometheus/tasks/nfs.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# Tasks to statically provision NFS volumes -# Include if not using dynamic volume provisioning -- name: Ensure the /exports/ directory exists -  file: -    path: /exports/ -    state: directory -    mode: 0755 -    owner: root -    group: root - -- name: Ensure the prom-pv0X export directories exist -  file: -    path: "/exports/{{ item }}" -    state: directory -    mode: 0777 -    owner: nfsnobody -    group: nfsnobody -  with_items: "{{ openshift_prometheus_pv_exports }}" - -- name: Ensure the NFS exports for Prometheus PVs exist -  copy: -    src: openshift_prometheus.exports -    dest: /etc/exports.d/openshift_prometheus.exports -  register: nfs_exports_updated - -- name: Ensure the NFS export table is refreshed if exports were added -  command: exportfs -ar -  when: -    - nfs_exports_updated.changed - - -###################################################################### -# Create the required Prometheus PVs. 
Check out these online docs if you -# need a refresher on includes looping with items: -# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0 -# * http://stackoverflow.com/a/35128533 -# -# TODO: Handle the case where a PV template is updated in -# openshift-ansible and the change needs to be landed on the managed -# cluster. - -- include: create_pvs.yaml -  with_items: "{{ openshift_prometheus_pv_data }}" diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 deleted file mode 100644 index 55a5e19c3..000000000 --- a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: -  name: prometheus-alertbuffer -  labels: -    storage: prometheus-alertbuffer -spec: -  capacity: -    storage: 15Gi -  accessModes: -    - ReadWriteOnce -  nfs: -    path: /exports/prometheus-alertbuffer -    server: {{ openshift_prometheus_nfs_server }} -  persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 deleted file mode 100644 index 4ee518735..000000000 --- a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: -  name: prometheus-alertmanager -  labels: -    storage: prometheus-alertmanager -spec: -  capacity: -    storage: 15Gi -  accessModes: -    - ReadWriteOnce -  nfs: -    path: /exports/prometheus-alertmanager -    server: {{ openshift_prometheus_nfs_server }} -  persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 deleted file mode 100644 index 933bf0f60..000000000 --- a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 +++ /dev/null 
@@ -1,15 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: -  name: prometheus -  labels: -    storage: prometheus -spec: -  capacity: -    storage: 15Gi -  accessModes: -    - ReadWriteOnce -  nfs: -    path: /exports/prometheus -    server: {{ openshift_prometheus_nfs_server }} -  persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2 index 98c117f19..66eab6df4 100644 --- a/roles/openshift_prometheus/templates/prometheus_deployment.j2 +++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2 @@ -38,7 +38,7 @@ spec:              cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"  {% endif %}            limits: -{% if openshift_prometheus_memory_requests_limit_proxy is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}              memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"  {% endif %}  {% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml index 71e21a269..56b2d1463 100644 --- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml +++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml @@ -4,22 +4,23 @@ metadata:    name: service-catalog  objects: -- kind: ClusterRole -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRole    metadata:      name: servicecatalog-serviceclass-viewer    rules:    - apiGroups:      - servicecatalog.k8s.io      resources: -    - serviceclasses +    - clusterserviceclasses +    - clusterserviceplans      verbs:      - 
list      - watch      - get -- kind: ClusterRoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRoleBinding    metadata:      name: servicecatalog-serviceclass-viewer-binding    roleRef: @@ -37,8 +38,8 @@ objects:    metadata:      name: service-catalog-apiserver -- kind: ClusterRole -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRole    metadata:      name: sar-creator    rules: @@ -49,17 +50,19 @@ objects:      verbs:      - create -- kind: ClusterRoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRoleBinding    metadata:      name: service-catalog-sar-creator-binding    roleRef:      name: sar-creator -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-apiserver +  subjects: +  - kind: ServiceAccount +    name: service-catalog-apiserver +    namespace: kube-service-catalog -- kind: ClusterRole -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRole    metadata:      name: namespace-viewer    rules: @@ -72,26 +75,30 @@ objects:      - watch      - get -- kind: ClusterRoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRoleBinding    metadata:      name: service-catalog-namespace-viewer-binding    roleRef:      name: namespace-viewer -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-apiserver +  subjects: +  - kind: ServiceAccount +    name: service-catalog-apiserver +    namespace: kube-service-catalog -- kind: ClusterRoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRoleBinding    metadata:      name: service-catalog-controller-namespace-viewer-binding    roleRef:      name: namespace-viewer -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-controller +  subjects: +  - kind: ServiceAccount +    name: service-catalog-controller +    namespace: 
kube-service-catalog -- kind: ClusterRole -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRole    metadata:      name: service-catalog-controller    rules: @@ -102,6 +109,7 @@ objects:      verbs:      - create      - update +    - patch      - delete      - get      - list @@ -109,19 +117,22 @@ objects:    - apiGroups:      - servicecatalog.k8s.io      resources: -    - brokers/status -    - instances/status -    - bindings/status +    - clusterservicebrokers/status +    - serviceinstances/status +    - servicebindings/status +    - servicebindings/finalizers +    - serviceinstances/reference      verbs:      - update    - apiGroups:      - servicecatalog.k8s.io      resources: -    - brokers -    - instances -    - bindings +    - clusterservicebrokers +    - serviceinstances +    - servicebindings      verbs:      - list +    - get      - watch    - apiGroups:      - "" @@ -133,7 +144,8 @@ objects:    - apiGroups:      - servicecatalog.k8s.io      resources: -    - serviceclasses +    - clusterserviceclasses +    - clusterserviceplans      verbs:      - create      - delete @@ -154,17 +166,19 @@ objects:      - list      - watch -- kind: ClusterRoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRoleBinding    metadata:      name: service-catalog-controller-binding    roleRef:      name: service-catalog-controller -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-controller - -- kind: Role -  apiVersion: v1 +  subjects: +  - kind: ServiceAccount +    name: service-catalog-controller +    namespace: kube-service-catalog +   +- apiVersion: authorization.openshift.io/v1 +  kind: Role    metadata:      name: endpoint-accessor    rules: @@ -179,21 +193,25 @@ objects:      - create      - update -- kind: RoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: RoleBinding    metadata: -    name: endpoint-accessor-binding +    name: 
endpointer-accessor-binding    roleRef:      name: endpoint-accessor      namespace: kube-service-catalog -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-controller +  subjects: +  - kind: ServiceAccount +    namespace: kube-service-catalog +    name: service-catalog-controller -- kind: ClusterRoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: ClusterRoleBinding    metadata:      name: system:auth-delegator-binding    roleRef:      name: system:auth-delegator -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-apiserver +  subjects: +  - kind: ServiceAccount +    name: service-catalog-apiserver +    namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml index f6ee0955d..e1af51ce6 100644 --- a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml +++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml @@ -4,8 +4,8 @@ metadata:    name: kube-system-service-catalog  objects: -- kind: Role -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: Role    metadata:      name: extension-apiserver-authentication-reader      namespace: ${KUBE_SYSTEM_NAMESPACE} @@ -19,16 +19,18 @@ objects:      verbs:      - get -- kind: RoleBinding -  apiVersion: v1 +- apiVersion: authorization.openshift.io/v1 +  kind: RoleBinding    metadata:      name: extension-apiserver-authentication-reader-binding      namespace: ${KUBE_SYSTEM_NAMESPACE}    roleRef:      name: extension-apiserver-authentication-reader -    namespace: kube-system -  userNames: -    - system:serviceaccount:kube-service-catalog:service-catalog-apiserver +    namespace: ${KUBE_SYSTEM_NAMESPACE} +  subjects: +  - kind: ServiceAccount +    name: service-catalog-apiserver +    namespace: kube-service-catalog  parameters:  - description: Do not change this value. 
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index cc897b032..416bdac70 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -36,19 +36,28 @@      - name: tls.key        path: "{{ generated_certs_dir }}/apiserver.key" +- name: Create service-catalog-ssl secret +  oc_secret: +    state: present +    name: service-catalog-ssl +    namespace: kube-service-catalog +    files: +    - name: tls.crt +      path: "{{ generated_certs_dir }}/apiserver.crt" +  - slurp:      src: "{{ generated_certs_dir }}/ca.crt"    register: apiserver_ca  - shell: > -    oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" +    oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"    register: get_apiservices    changed_when: no  - name: Create api service    oc_obj:      state: present -    name: v1alpha1.servicecatalog.k8s.io +    name: v1beta1.servicecatalog.k8s.io      kind: apiservices.apiregistration.k8s.io      namespace: "kube-service-catalog"      content: @@ -57,10 +66,10 @@          apiVersion: apiregistration.k8s.io/v1beta1          kind: APIService          metadata: -          name: v1alpha1.servicecatalog.k8s.io +          name: v1beta1.servicecatalog.k8s.io          spec:            group: servicecatalog.k8s.io -          version: v1alpha1 +          version: v1beta1            service:              namespace: "kube-service-catalog"              name: apiserver diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index e202ae173..1e94c8c5d 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -90,14 +90,14 @@    vars:      original_content: "{{ 
edit_yaml.results.results[0] | to_yaml }}"    when: -    - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  # only do this if we don't already have the updated role info  - name: update edit role for service catalog and pod preset access    command: >      oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml    when: -    - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  - oc_obj:      name: admin @@ -113,14 +113,14 @@    vars:      original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"    when: -    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 
'list', 'watch']) +    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  # only do this if we don't already have the updated role info  - name: update admin role for service catalog and pod preset access    command: >      oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml    when: -    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  - oc_adm_policy_user:      namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index 2fb1ec440..96ae61507 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@  ---  - name: Remove Service Catalog APIServer    command: > -    oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog +    oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog  - name: Remove Policy Binding    command: > @@ -13,7 +13,7 @@  #    state: absent  #    namespace: "kube-service-catalog"  #    kind: 
apiservices.apiregistration.k8s.io -#    name: v1alpha1.servicecatalog.k8s.io +#    name: v1beta1.servicecatalog.k8s.io  - name: Remove Service Catalog API Server route    oc_obj: diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 index c09834fd4..5d5352c1c 100644 --- a/roles/openshift_service_catalog/templates/api_server.j2 +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -41,7 +41,9 @@ spec:          - --cors-allowed-origins          - {{ cors_allowed_origin }}          - --admission-control -        - "KubernetesNamespaceLifecycle" +        - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck +        - --feature-gates +        - OriginatingIdentity=true          image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}          command: ["/usr/bin/apiserver"]          imagePullPolicy: Always diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 index 1bbc0fa2c..2272cbb44 100644 --- a/roles/openshift_service_catalog/templates/controller_manager.j2 +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -31,7 +31,12 @@ spec:          args:          - -v          - "5" -        - "--leader-election-namespace=$(K8S_NAMESPACE)" +        - --leader-election-namespace +        - kube-service-catalog +        - --broker-relist-interval +        - "5m" +        - --feature-gates +        - OriginatingIdentity=true          image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}          command: ["/usr/bin/controller-manager"]          imagePullPolicy: Always @@ -41,7 +46,19 @@ spec:            protocol: TCP          resources: {}          terminationMessagePath: /dev/termination-log +        
volumeMounts: +        - mountPath: /var/run/kubernetes-service-catalog +          name: service-catalog-ssl +          readOnly: true        dnsPolicy: ClusterFirst        restartPolicy: Always        securityContext: {}        terminationGracePeriodSeconds: 30 +      volumes: +      - name: service-catalog-ssl +        secret: +          defaultMode: 420 +          items: +          - key: tls.crt +            path: apiserver.crt +          secretName: apiserver-ssl diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml index 9ebb0d5ec..7b705c2d4 100644 --- a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml @@ -85,8 +85,6 @@ objects:            volumeMounts:            - name: db              mountPath: /var/lib/heketi -          - name: topology -            mountPath: ${TOPOLOGY_PATH}            - name: config              mountPath: /etc/heketi            readinessProbe: @@ -103,9 +101,6 @@ objects:                port: 8080          volumes:          - name: db -        - name: topology -          secret: -            secretName: heketi-${CLUSTER_NAME}-topology-secret          - name: config            secret:              secretName: heketi-${CLUSTER_NAME}-config-secret @@ -138,6 +133,3 @@ parameters:    displayName: GlusterFS cluster name    description: A unique name to identify this heketi service, useful for running multiple heketi instances    value: glusterfs -- name: TOPOLOGY_PATH -  displayName: heketi topology file location -  required: True diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 3047fbaf9..c4e023c1e 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -35,6 +35,9 @@      - "{{ openshift.logging }}"      - "{{ 
openshift.loggingops }}"      - "{{ openshift.hosted.etcd }}" +    - "{{ openshift.prometheus }}" +    - "{{ openshift.prometheus.alertmanager }}" +    - "{{ openshift.prometheus.alertbuffer }}"  - name: Configure exports    template: diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 0141e0d25..c2a741035 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -3,3 +3,6 @@  {{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}  {{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}  {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }} +{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }} +{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }} +{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }} diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml index fb407c4a2..a92a138b0 100644 --- a/roles/template_service_broker/defaults/main.yml +++ b/roles/template_service_broker/defaults/main.yml @@ -2,3 +2,4 @@  # placeholder file?  
template_service_broker_remove: False  template_service_broker_install: False +openshift_template_service_broker_namespaces: ['openshift'] diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index f5fd6487c..6a532a206 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -6,7 +6,7 @@      - "{{ openshift_deployment_type | default(deployment_type) }}.yml"      - "default_images.yml" -- name: set ansible_service_broker facts +- name: set template_service_broker facts    set_fact:      template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"      template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}" @@ -28,10 +28,24 @@      - "{{ __tsb_template_file }}"      - "{{ __tsb_rbac_file }}"      - "{{ __tsb_broker_file }}" +    - "{{ __tsb_config_file }}" + +- yedit: +    src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}" +    key: templateNamespaces +    value: "{{ openshift_template_service_broker_namespaces }}" +    value_type: list + +- slurp: +    src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}" +  register: config  - name: Apply template file    shell: > -    oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f - +    oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" +    --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}" +    --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" +    | kubectl apply -f -  # reconcile with rbac  - name: Reconcile with RBAC file @@ -62,7 +76,7 
@@    when: openshift_master_config_dir is undefined  - slurp: -    src: "{{ openshift_master_config_dir }}/ca.crt" +    src: "{{ openshift_master_config_dir }}/service-signer.crt"    register: __ca_bundle  # Register with broker diff --git a/roles/tuned/defaults/main.yml b/roles/tuned/defaults/main.yml new file mode 100644 index 000000000..418a4b521 --- /dev/null +++ b/roles/tuned/defaults/main.yml @@ -0,0 +1,3 @@ +--- +tuned_etc_directory: '/etc/tuned' +tuned_templates_source: '../templates' diff --git a/roles/tuned/meta/main.yml b/roles/tuned/meta/main.yml new file mode 100644 index 000000000..833d94c13 --- /dev/null +++ b/roles/tuned/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: +  author: Jiri Mencak +  description: Restart the tuned daemon if present and make it use the recommended profile +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.3 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/tuned/tasks/main.yml index 425bf6a26..e95d274d5 100644 --- a/roles/openshift_node/tasks/tuned.yml +++ b/roles/tuned/tasks/main.yml @@ -12,8 +12,6 @@    - name: Set tuned OpenShift variables      set_fact:        openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}" -      tuned_etc_directory: '/etc/tuned' -      tuned_templates_source: '../templates/tuned'    - name: Ensure directory structure exists      file: diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/tuned/templates/openshift-control-plane/tuned.conf index f22f21065..f22f21065 100644 --- a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf +++ b/roles/tuned/templates/openshift-control-plane/tuned.conf diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/tuned/templates/openshift-node/tuned.conf index 78c7d19c9..78c7d19c9 100644 --- 
a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf +++ b/roles/tuned/templates/openshift-node/tuned.conf diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/tuned/templates/openshift/tuned.conf index 68ac5dadb..68ac5dadb 100644 --- a/roles/openshift_node/templates/tuned/openshift/tuned.conf +++ b/roles/tuned/templates/openshift/tuned.conf diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/tuned/templates/recommend.conf index 5fa765798..086e5673d 100644 --- a/roles/openshift_node/templates/tuned/recommend.conf +++ b/roles/tuned/templates/recommend.conf @@ -1,8 +1,11 @@ -[openshift-node] -/etc/origin/node/node-config.yaml=.*region=primary -  [openshift-control-plane,master]  /etc/origin/master/master-config.yaml=.*  [openshift-control-plane,node]  /etc/origin/node/node-config.yaml=.*region=infra + +[openshift-control-plane,lb] +/etc/haproxy/haproxy.cfg=.* + +[openshift-node] +/etc/origin/node/node-config.yaml=.*  | 
