diff options
20 files changed, 307 insertions, 241 deletions
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index dde172c4a..0a1b8c5c4 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -89,6 +89,25 @@ openshift_release=v1.4  # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.  # docker_upgrade=False + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using +# absolute paths, if not the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. +# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + +  # Alternate image format string, useful if you've got your own registry mirror  #oreg_url=example.com/openshift3/ose-${component}:${version}  # If oreg_url points to a registry other than registry.access.redhat.com we can diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index c0dd8a1e8..89b9d7e48 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -89,6 +89,25 @@ openshift_release=v3.4  # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.  # docker_upgrade=False + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. 
Suggest using +# absolute paths, if not the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. +# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + +  # Alternate image format string, useful if you've got your own registry mirror  #oreg_url=example.com/openshift3/ose-${component}:${version}  # If oreg_url points to a registry other than registry.access.redhat.com we can diff --git a/openshift-ansible.spec b/openshift-ansible.spec index a2940e001..0b7c44660 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -18,6 +18,8 @@ Requires:      python2  Requires:      python-six  Requires:      tar  Requires:      openshift-ansible-docs = %{version}-%{release} +Requires:      java-1.8.0-openjdk-headless +Requires:      httpd-tools  %description  Openshift and Atomic Enterprise Ansible diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 7f738ea0f..77b37cdc2 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -51,6 +51,8 @@    roles:    - openshift_master_facts +# The main master upgrade play. Should handle all changes to the system in one pass, with +# support for optional hooks to be defined.  
- name: Upgrade master    hosts: oo_masters_to_config    vars: @@ -62,6 +64,14 @@    roles:    - openshift_facts    post_tasks: + +  # Run the pre-upgrade hook if defined: +  - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" +    when: openshift_master_upgrade_pre_hook is defined + +  - include: "{{ openshift_master_upgrade_pre_hook }}" +    when: openshift_master_upgrade_pre_hook is defined +    - include: rpm_upgrade.yml component=master      when: not openshift.common.is_containerized | bool @@ -102,12 +112,26 @@        state: link      when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists +  # Run the upgrade hook prior to restarting services/system if defined: +  - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" +    when: openshift_master_upgrade_hook is defined + +  - include: "{{ openshift_master_upgrade_hook }}" +    when: openshift_master_upgrade_hook is defined +    - include: ../../openshift-master/restart_hosts.yml      when: openshift.common.rolling_restart_mode == 'system'    - include: ../../openshift-master/restart_services.yml      when: openshift.common.rolling_restart_mode == 'services' +  # Run the post-upgrade hook if defined: +  - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}" +    when: openshift_master_upgrade_post_hook is defined + +  - include: "{{ openshift_master_upgrade_post_hook }}" +    when: openshift_master_upgrade_post_hook is defined +    - set_fact:        master_update_complete: True diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index de36fd263..7a334e771 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -75,17 +75,6 @@          ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"          master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- 
name: Create temp directory for syncing certs -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - name: Create local temp directory for syncing certs -    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX -    register: g_master_mktemp -    changed_when: False -  - name: Determine if session secrets must be generated    hosts: oo_first_master    roles: @@ -117,7 +106,6 @@    hosts: oo_masters_to_config    any_errors_fatal: true    vars: -    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"      openshift_master_ha: "{{ openshift.master.ha }}"      openshift_master_count: "{{ openshift.master.master_count }}"      openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" @@ -144,12 +132,3 @@    - name: Create group for deployment type      group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}      changed_when: False - -- name: Delete temporary directory on localhost -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - file: name={{ g_master_mktemp.stdout }} state=absent -    changed_when: False diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index d096019af..6190383b6 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -2,7 +2,7 @@  haproxy_frontends:  - name: main    binds: -  - "*:8443" +  - "*:{{ openshift_master_api_port | default(8443) }}"    default_backend: default  haproxy_backends: diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 400f80715..e9bc8b4ab 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -1,14 +1,31 @@  --- -- fail: msg="Cannot use containerized=true for load balancer hosts." 
-  when: openshift.common.is_containerized | bool -  - name: Install haproxy    package: name=haproxy state=present +  when: not openshift.common.is_containerized | bool + +- name: Pull haproxy image +  command: > +    docker pull {{ openshift.common.router_image }}:{{ openshift_image_tag }} +  when: openshift.common.is_containerized | bool + +- name: Create config directory for haproxy +  file: +    path: /etc/haproxy +    state: directory +  when: openshift.common.is_containerized | bool + +- name: Create the systemd unit files +  template: +    src: "haproxy.docker.service.j2" +    dest: "{{ containerized_svc_dir }}/haproxy.service" +  when: openshift.common.is_containerized | bool +  notify: restart haproxy  - name: Configure systemd service directory for haproxy    file:      path: /etc/systemd/system/haproxy.service.d      state: directory +  when: not openshift.common.is_containerized | bool  # Work around ini_file create option in 2.2 which defaults to no  - name: Create limits.conf file @@ -19,6 +36,7 @@      owner: root      group: root    changed_when: false +  when: not openshift.common.is_containerized | bool  - name: Configure the nofile limits for haproxy    ini_file: @@ -27,6 +45,7 @@      option: LimitNOFILE      value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"    notify: restart haproxy +  when: not openshift.common.is_containerized | bool  - name: Configure haproxy    template: diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 new file mode 100644 index 000000000..624876ab0 --- /dev/null +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -0,0 +1,17 @@ +[Unit] +After=docker.service +Requires=docker.service +PartOf=docker.service + +[Service] +ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer +ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) 
}}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint="haproxy -f /etc/haproxy/haproxy.cfg" {{ openshift.common.router_image }}:{{ openshift_image_tag }} +ExecStartPost=/usr/bin/sleep 10 +ExecStop=/usr/bin/docker stop openshift_loadbalancer +LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }} +LimitCORE=infinity +Restart=always +RestartSec=5s + +[Install] +WantedBy=docker.service diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 2cc2c48ee..9b71dc676 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -6,6 +6,9 @@ This role is used for installing the Aggregated Logging stack. It should be run  a single host, it will create any missing certificates and API objects that the current  [logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) does. +This role requires that the control host it is run on has Java installed as part of keystore +generation for Elasticsearch (it uses JKS) as well as openssl to sign certificates. +  As part of the installation, it is recommended that you add the Fluentd node selector label  to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels). diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh index 995ec0b98..9fe557f83 100644 --- a/roles/openshift_logging/files/generate-jks.sh +++ b/roles/openshift_logging/files/generate-jks.sh @@ -1,6 +1,10 @@  #! 
/bin/sh  set -ex +function usage() { +  echo Usage: `basename $0` cert_directory [logging_namespace] 1>&2 +} +  function generate_JKS_chain() {      dir=${SCRATCH_DIR:-_output}      ADD_OID=$1 @@ -147,8 +151,14 @@ function createTruststore() {      -noprompt -alias sig-ca  } -dir="$CERT_DIR" +if [ $# -lt 1 ]; then +  usage +  exit 1 +fi + +dir=$1  SCRATCH_DIR=$dir +PROJECT=${2:-logging}  if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then    generate_JKS_client_cert "system.admin" diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index e16071e46..20e50482e 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -85,82 +85,8 @@    loop_control:      loop_var: node_name -- name: Check for jks-generator service account -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}} -  register: serviceaccount_result -  ignore_errors: yes -  when: not ansible_check_mode -  changed_when: no - -- name: Create jks-generator service account -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount jks-generator -n {{openshift_logging_namespace}} -  when: not ansible_check_mode and "not found" in serviceaccount_result.stderr - -- name: Check for hostmount-anyuid scc entry -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}' -  register: scc_result -  when: not ansible_check_mode -  changed_when: no - -- name: Add to hostmount-anyuid scc -  command: > -    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator 
-n {{openshift_logging_namespace}} -  when: -    - not ansible_check_mode -    - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1 - -- name: Copy JKS generation script -  copy: -    src: generate-jks.sh -    dest: "{{generated_certs_dir}}/generate-jks.sh" -  check_mode: no - -- name: Generate JKS pod template -  template: -    src: jks_pod.j2 -    dest: "{{mktemp.stdout}}/jks_pod.yaml" -  check_mode: no -  changed_when: no - -# check if pod generated files exist -- if they all do don't run the pod -- name: Checking for elasticsearch.jks -  stat: path="{{generated_certs_dir}}/elasticsearch.jks" -  register: elasticsearch_jks -  check_mode: no - -- name: Checking for logging-es.jks -  stat: path="{{generated_certs_dir}}/logging-es.jks" -  register: logging_es_jks -  check_mode: no - -- name: Checking for system.admin.jks -  stat: path="{{generated_certs_dir}}/system.admin.jks" -  register: system_admin_jks -  check_mode: no - -- name: Checking for truststore.jks -  stat: path="{{generated_certs_dir}}/truststore.jks" -  register: truststore_jks -  check_mode: no - -- name: create JKS generation pod -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name -  register: podoutput -  check_mode: no -  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists - -- command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}} -  register: result -  until: result.stdout.find("Succeeded") != -1 -  retries: 5 -  delay: 10 -  changed_when: no -  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists +- 
name: Creating necessary JKS certs +  include: generate_jks.yaml  # check for secret/logging-kibana-proxy  - command: > diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml new file mode 100644 index 000000000..adb6c2b2d --- /dev/null +++ b/roles/openshift_logging/tasks/generate_jks.yaml @@ -0,0 +1,111 @@ +--- +# check if pod generated files exist -- if they all do don't run the pod +- name: Checking for elasticsearch.jks +  stat: path="{{generated_certs_dir}}/elasticsearch.jks" +  register: elasticsearch_jks +  check_mode: no + +- name: Checking for logging-es.jks +  stat: path="{{generated_certs_dir}}/logging-es.jks" +  register: logging_es_jks +  check_mode: no + +- name: Checking for system.admin.jks +  stat: path="{{generated_certs_dir}}/system.admin.jks" +  register: system_admin_jks +  check_mode: no + +- name: Checking for truststore.jks +  stat: path="{{generated_certs_dir}}/truststore.jks" +  register: truststore_jks +  check_mode: no + +- name: Create temp directory for doing work in +  local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: local_tmp +  changed_when: False +  check_mode: no + +- name: Create placeholder for previously created JKS certs to prevent recreating... +  file: +    path: "{{local_tmp.stdout}}/elasticsearch.jks" +    state: touch +    mode: "u=rw,g=r,o=r" +  when: elasticsearch_jks.stat.exists +  changed_when: False + +- name: Create placeholder for previously created JKS certs to prevent recreating... +  file: +    path: "{{local_tmp.stdout}}/logging-es.jks" +    state: touch +    mode: "u=rw,g=r,o=r" +  when: logging_es_jks.stat.exists +  changed_when: False + +- name: Create placeholder for previously created JKS certs to prevent recreating... 
+  file: +    path: "{{local_tmp.stdout}}/system.admin.jks" +    state: touch +    mode: "u=rw,g=r,o=r" +  when: system_admin_jks.stat.exists +  changed_when: False + +- name: Create placeholder for previously created JKS certs to prevent recreating... +  file: +    path: "{{local_tmp.stdout}}/truststore.jks" +    state: touch +    mode: "u=rw,g=r,o=r" +  when: truststore_jks.stat.exists +  changed_when: False + +- name: pulling down signing items from host +  fetch: +    src: "{{generated_certs_dir}}/{{item}}" +    dest: "{{local_tmp.stdout}}/{{item}}" +    flat: yes +  with_items: +    - ca.crt +    - ca.key +    - ca.serial.txt +    - ca.crl.srl +    - ca.db + +- local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf +  vars: +    - top_dir: "{{local_tmp.stdout}}" + +- name: Run JKS generation script +  local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}} +  check_mode: no +  become: yes +  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists + +- name: Pushing locally generated JKS certs to remote host... +  copy: +    src: "{{local_tmp.stdout}}/elasticsearch.jks" +    dest: "{{generated_certs_dir}}/elasticsearch.jks" +  when: not elasticsearch_jks.stat.exists + +- name: Pushing locally generated JKS certs to remote host... +  copy: +    src: "{{local_tmp.stdout}}/logging-es.jks" +    dest: "{{generated_certs_dir}}/logging-es.jks" +  when: not logging_es_jks.stat.exists + +- name: Pushing locally generated JKS certs to remote host... +  copy: +    src: "{{local_tmp.stdout}}/system.admin.jks" +    dest: "{{generated_certs_dir}}/system.admin.jks" +  when: not system_admin_jks.stat.exists + +- name: Pushing locally generated JKS certs to remote host... 
+  copy: +    src: "{{local_tmp.stdout}}/truststore.jks" +    dest: "{{generated_certs_dir}}/truststore.jks" +  when: not truststore_jks.stat.exists + +- name: Cleaning up temp dir +  file: +    path: "{{local_tmp.stdout}}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index c4ec1b255..4c718805e 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -3,7 +3,6 @@      msg: Only one Fluentd nodeselector key pair should be provided    when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" -  - name: Create temp directory for doing work in    command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX    register: mktemp diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md index 0f287e944..a61b0db5e 100644 --- a/roles/openshift_metrics/README.md +++ b/roles/openshift_metrics/README.md @@ -5,6 +5,10 @@ OpenShift Metrics Installation  Requirements  ------------ +This role has the following dependencies: + +- Java is required on the control node to generate keystores for the Java components +- httpd-tools is required on the control node to generate various passwords for the metrics components  The following variables need to be set and will be validated: diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh index bb046df87..f4315ef34 100755 --- a/roles/openshift_metrics/files/import_jks_certs.sh +++ b/roles/openshift_metrics/files/import_jks_certs.sh @@ -114,5 +114,3 @@ function import_certs() {  }  import_certs - -exit 0 diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml index f6bf6c1a6..f5192b005 100644 --- a/roles/openshift_metrics/tasks/import_jks_certs.yaml +++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml @@ -1,76 +1,4 @@  --- -- name: Check for 
jks-generator service account -  command: > -    {{ openshift.common.client_binary }} -    --config={{ mktemp.stdout }}/admin.kubeconfig -    -n {{openshift_metrics_project}} -    get serviceaccount/jks-generator --no-headers -  register: serviceaccount_result -  ignore_errors: yes -  when: not ansible_check_mode -  changed_when: no - -- name: Create jks-generator service account -  command: > -    {{ openshift.common.client_binary }} -    --config={{ mktemp.stdout }}/admin.kubeconfig -    -n {{openshift_metrics_project}} -    create serviceaccount jks-generator -  when: not ansible_check_mode and "not found" in serviceaccount_result.stderr - -- name: Check for hostmount-anyuid scc entry -  command: > -    {{ openshift.common.client_binary }} -    --config={{ mktemp.stdout }}/admin.kubeconfig -    get scc hostmount-anyuid -    -o jsonpath='{.users}' -  register: scc_result -  when: not ansible_check_mode -  changed_when: no - -- name: Add to hostmount-anyuid scc -  command: > -    {{ openshift.common.admin_binary }} -    --config={{ mktemp.stdout }}/admin.kubeconfig -    -n {{openshift_metrics_project}} -    policy add-scc-to-user hostmount-anyuid -    -z jks-generator -  when: -    - not ansible_check_mode -    - scc_result.stdout.find("system:serviceaccount:{{openshift_metrics_project}}:jks-generator") == -1 - -- name: Copy JKS generation script -  copy: -    src: import_jks_certs.sh -    dest: "{{openshift_metrics_certs_dir}}/import_jks_certs.sh" -  check_mode: no - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd -  register: metrics_keystore_password - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd -  register: cassandra_keystore_password - -- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd -  register: jgroups_keystore_password - -- name: Generate JKS pod template -  template: -    src: jks_pod.j2 -    dest: "{{mktemp.stdout}}/jks_pod.yaml" -  vars: -    
metrics_keystore_passwd: "{{metrics_keystore_password.content}}" -    cassandra_keystore_passwd: "{{cassandra_keystore_password.content}}" -    metrics_truststore_passwd: "{{hawkular_truststore_password.content}}" -    cassandra_truststore_passwd: "{{cassandra_truststore_password.content}}" -    jgroups_passwd: "{{jgroups_keystore_password.content}}" -  check_mode: no -  changed_when: no - -- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" -  register: metrics_keystore -  check_mode: no -  - stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.keystore"    register: cassandra_keystore    check_mode: no @@ -79,6 +7,10 @@    register: cassandra_truststore    check_mode: no +- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore" +  register: metrics_keystore +  check_mode: no +  - stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.truststore"    register: metrics_truststore    check_mode: no @@ -87,32 +19,52 @@    register: jgroups_keystore    check_mode: no -- name: create JKS pod -  command: > -    {{ openshift.common.client_binary }} -    --config={{ mktemp.stdout }}/admin.kubeconfig -    -n {{openshift_metrics_project}} -    create -f {{mktemp.stdout}}/jks_pod.yaml -    -o name -  register: podoutput -  check_mode: no -  when: not metrics_keystore.stat.exists or -        not metrics_truststore.stat.exists or -        not cassandra_keystore.stat.exists or -        not cassandra_truststore.stat.exists or -        not jgroups_keystore.stat.exists +- block: +  - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd +    register: metrics_keystore_password + +  - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd +    register: cassandra_keystore_password + +  - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd +    register: jgroups_keystore_password + +  - local_action: command mktemp -d +    register: local_tmp +    changed_when: False 
+ +  - fetch: +      dest: "{{local_tmp.stdout}}/" +      src: "{{ openshift_metrics_certs_dir }}/{{item}}" +      flat: yes +    changed_when: False +    with_items: +    - hawkular-metrics.pkcs12 +    - hawkular-cassandra.pkcs12 +    - hawkular-metrics.crt +    - hawkular-cassandra.crt +    - ca.crt + +  - local_action: command {{role_path}}/files/import_jks_certs.sh +    environment: +      CERT_DIR: "{{local_tmp.stdout}}" +      METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}" +      CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}" +      METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}" +      CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}" +      JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}" +    changed_when: False + +  - copy: +      dest: "{{openshift_metrics_certs_dir}}/" +      src: "{{item}}" +    with_fileglob: "{{local_tmp.stdout}}/*.*store" + +  - file: +      path: "{{local_tmp.stdout}}" +      state: absent +    changed_when: False -- command: > -    {{ openshift.common.client_binary }} -    --config={{ mktemp.stdout }}/admin.kubeconfig -    -n {{openshift_metrics_project}} -    get {{podoutput.stdout}} -    -o jsonpath='{.status.phase}' -  register: result -  until: result.stdout.find("Succeeded") != -1 -  retries: 5 -  delay: 10 -  changed_when: no    when: not metrics_keystore.stat.exists or          not metrics_truststore.stat.exists or          not cassandra_keystore.stat.exists or diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml index b0e4bec80..cc5acc6e5 100644 --- a/roles/openshift_metrics/tasks/install_support.yaml +++ b/roles/openshift_metrics/tasks/install_support.yaml @@ -1,4 +1,22 @@  --- +- name: Check control node to see if htpasswd is installed +  local_action: command which htpasswd +  register: htpasswd_check +  failed_when: no +  changed_when: no + +- fail: msg="'htpasswd' 
is unavailable. Please install httpd-tools on the control node" +  when: htpasswd_check.rc  == 1 + +- name: Check control node to see if keytool is installed +  local_action: command which keytool +  register: keytool_check +  failed_when: no +  changed_when: no + +- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node" +  when: keytool_check.rc  == 1 +  - include: generate_certificates.yaml  - include: generate_serviceaccounts.yaml  - include: generate_services.yaml diff --git a/roles/openshift_metrics/templates/jks_pod.j2 b/roles/openshift_metrics/templates/jks_pod.j2 deleted file mode 100644 index e86fe38a4..000000000 --- a/roles/openshift_metrics/templates/jks_pod.j2 +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: -  labels: -    metrics-infra: support -  generateName: jks-cert-gen- -spec: -  containers: -  - name: jks-cert-gen -    image: {{openshift_metrics_image_prefix}}metrics-deployer:{{openshift_metrics_image_version}} -    imagePullPolicy: Always -    command: ["sh",  "{{openshift_metrics_certs_dir}}/import_jks_certs.sh"] -    securityContext: -      runAsUser: 0 -    volumeMounts: -    - mountPath: {{openshift_metrics_certs_dir}} -      name: certmount -    env: -    - name: CERT_DIR -      value: {{openshift_metrics_certs_dir}} -    - name: METRICS_KEYSTORE_PASSWD -      value: {{metrics_keystore_passwd}} -    - name: CASSANDRA_KEYSTORE_PASSWD -      value: {{cassandra_keystore_passwd}} -    - name: METRICS_TRUSTSTORE_PASSWD -      value: {{metrics_truststore_passwd}} -    - name: CASSANDRA_TRUSTSTORE_PASSWD -      value: {{cassandra_truststore_passwd}} -    - name: hawkular_cassandra_alias -      value: {{cassandra_keystore_passwd}} -    - name: JGROUPS_PASSWD -      value: {{jgroups_passwd}} -  restartPolicy: Never -  serviceAccount: jks-generator -  volumes: -  - hostPath: -      path: "{{openshift_metrics_certs_dir}}" -    name: certmount diff --git 
a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 8ba650994..4ba38b721 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -223,7 +223,9 @@ class IpTablesManager(object):  # pylint: disable=too-many-instance-attributes      def gen_cmd(self):          cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables' -        return ["/usr/sbin/%s" % cmd] +        # Include -w (wait for xtables lock) in default arguments. +        default_args = '-w' +        return ["/usr/sbin/%s" % cmd, default_args]      def gen_save_cmd(self):  # pylint: disable=no-self-use          return ['/usr/libexec/iptables/iptables.init', 'save'] diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt index f6a7bde10..aebfe7c39 100644 --- a/utils/test-requirements.txt +++ b/utils/test-requirements.txt @@ -13,3 +13,5 @@ pyOpenSSL  yamllint  tox  detox +# Temporary work-around for flake8 vs mccabe version conflict +mccabe==0.5.3  | 
