diff options
41 files changed, 2161 insertions, 120 deletions
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py index a55e72725..35accfb78 100644 --- a/roles/openshift_logging/library/openshift_logging_facts.py +++ b/roles/openshift_logging/library/openshift_logging_facts.py @@ -318,7 +318,7 @@ def main():      ''' The main method '''      module = AnsibleModule(   # noqa: F405          argument_spec=dict( -            admin_kubeconfig={"required": True, "type": "str"}, +            admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},              oc_bin={"required": True, "type": "str"},              openshift_logging_namespace={"required": True, "type": "str"}          ), diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 2f5b68b4d..46c035f22 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -4,9 +4,11 @@  # delete the deployment objects that we had created  - name: delete logging api objects -  command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig -    delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true +  oc_obj: +    state: absent +    kind: "{{ item }}" +    namespace: "{{ openshift_logging_namespace }}" +    selector: "logging-infra"    with_items:      - dc      - rc @@ -15,7 +17,6 @@      - templates      - daemonset    register: delete_result -  changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0  # delete the oauthclient diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index aec455c22..b5b266f2d 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -2,30 +2,12 @@  - name: Gather OpenShift Logging Facts  
  openshift_logging_facts:      oc_bin: "{{openshift.common.client_binary}}" -    admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"      openshift_logging_namespace: "{{openshift_logging_namespace}}" -  tags: logging_facts -  check_mode: no -- name: Validate Elasticsearch cluster size -  fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this." -  when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int - -- name: Validate Elasticsearch Ops cluster size -  fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this." -  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int - -- name: Install logging -  include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml" -  when: openshift_hosted_logging_install | default(true) | bool -  with_items: -    - support -    - elasticsearch -    - kibana -    - curator -    - fluentd -  loop_control: -    loop_var: install_component +- name: Set logging project +  oc_project: +    state: present +    name: "{{ openshift_logging_namespace }}"  - name: Install logging mux    include: "{{ role_path }}/tasks/install_mux.yaml" @@ -35,56 +17,162 @@    register: object_def_files    changed_when: no -- slurp: src={{item}} -  register: object_defs -  with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}" -  changed_when: no +- name: Create logging cert directory +  file: +    path: "{{ openshift.common.config_base }}/logging" +    state: directory +    mode: 0755 +  changed_when: False +  check_mode: no -- name: Create objects -  include: oc_apply.yaml +- include: generate_certs.yaml    vars: -    - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" -    - namespace: "{{ openshift_logging_namespace }}" -    - file_name: "{{ 
file.source }}" -    - file_content: "{{ file.content | b64decode | from_yaml }}" -  with_items: "{{ object_defs.results }}" -  loop_control: -    loop_var: file -  when: not ansible_check_mode +    generated_certs_dir: "{{openshift.common.config_base}}/logging" -- include: update_master_config.yaml +## Elasticsearch + +# TODO: add more vars +# We don't allow scaling down of ES nodes currently +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" +    openshift_logging_elasticsearch_pvc_name: "{{ item.1 }}" +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" + +    #openshift_logging_elasticsearch_storage_type: "{{ }}" +    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" +    openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" +    openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + +  with_together: +  - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}" +  - "{{ openshift_logging_facts.elasticsearch.pvcs }}" + +# Create any new DC that may be required +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count }}" +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" + +  with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} + +# TODO: add more vars +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: 
"{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}" +    openshift_logging_elasticsearch_pvc_name: "{{ item.1 }}" +    openshift_logging_elasticsearch_ops_deployment: true +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" + +    #openshift_logging_elasticsearch_storage_type: "{{ }}" +    openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" +    openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" +    openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + +  with_together: +  - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}" +  - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}" +  when: +  - openshift_logging_use_ops | bool + +# Create any new DC that may be required +- include_role: +    name: openshift_logging_elasticsearch +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}" +    openshift_logging_elasticsearch_ops_deployment: true +    openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" + +  with_sequence: count={{ ( ( openshift_logging_es_ops_cluster_size | int ) - ( openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count ) ) }} +  when: +  - openshift_logging_use_ops | bool -- name: Printing out objects to create -  debug: msg={{file.content | b64decode }} -  with_items: "{{ object_defs.results }}" -  loop_control: -    loop_var: file -  when: ansible_check_mode - -  # TODO replace task with oc_secret module that supports -  # linking when available -- name: Link Pull Secrets With Service Accounts -  include: oc_secret.yaml + +## Kibana +- include_role: +    name: 
openshift_logging_kibana    vars: -    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" -    subcommand: link -    service_account: "{{sa_account}}" -    secret_name: "{{openshift_logging_image_pull_secret}}" -    add_args: "--for=pull" -  with_items: -    - default -    - aggregated-logging-elasticsearch -    - aggregated-logging-kibana -    - aggregated-logging-fluentd -    - aggregated-logging-curator -  register: link_pull_secret -  loop_control: -    loop_var: sa_account +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" +    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}" +    openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}" +    openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}" +    openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" + + +- include_role: +    name: openshift_logging_kibana +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_kibana_ops_deployment: true +    openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}" +    openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" +    
openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}" +    openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}" +    openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}" +    openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}" +    openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}" +    openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}" +    openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}" +    openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}" +    openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}" +    openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}" +    openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}" +    openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}" +    openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"    when: -    - openshift_logging_image_pull_secret is defined -    - openshift_logging_image_pull_secret != '' -  failed_when: link_pull_secret.rc != 0 +  - openshift_logging_use_ops | bool -- name: Scaling up cluster -  include: start_cluster.yaml -  when: start_cluster | default(true) | bool + +## Curator +- include_role: +    name: openshift_logging_curator +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" + +- 
include_role: +    name: openshift_logging_curator +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_curator_ops_deployment: true +    openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}" +    openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}" +    openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}" +    openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}" +    openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}" +    openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}" +    openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}" +    openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}" +  when: +  - openshift_logging_use_ops | bool + + +## Fluentd +- include_role: +    name: openshift_logging_fluentd +  vars: +    generated_certs_dir: "{{openshift.common.config_base}}/logging" +    openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}" +    openshift_logging_fluentd_use_journal: "{{ openshift.docker.options | search('journald') }}" + +- include: update_master_config.yaml diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml index 877ce3149..d26352e96 100644 --- a/roles/openshift_logging/tasks/install_support.yaml +++ b/roles/openshift_logging/tasks/install_support.yaml @@ -45,29 +45,3 @@    file: path={{mktemp.stdout}}/templates state=directory mode=0755    changed_when: False    check_mode: no - -- include: generate_secrets.yaml -  vars: -    generated_certs_dir: "{{openshift.common.config_base}}/logging" - -- include: generate_configmaps.yaml - -- include: generate_services.yaml - -- name: Generate kibana-proxy oauth client -  template: 
src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml -  vars: -    secret: "{{oauth_secret}}" -  when: oauth_secret is defined -  check_mode: no -  changed_when: no - -- include: generate_clusterroles.yaml - -- include: generate_rolebindings.yaml - -- include: generate_clusterrolebindings.yaml - -- include: generate_serviceaccounts.yaml - -- include: generate_routes.yaml diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 3d8cd3410..f475024dd 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -30,33 +30,12 @@    check_mode: no    become: no -- debug: msg="Created local temp dir {{local_tmp.stdout}}" - -- name: Copy the admin client config(s) -  command: > -    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig -  changed_when: False -  check_mode: no -  tags: logging_init -  - include: "{{ role_path }}/tasks/install_logging.yaml"    when: openshift_logging_install_logging | default(false) | bool -- include: "{{ role_path }}/tasks/upgrade_logging.yaml" -  when: openshift_logging_upgrade_logging | default(false) | bool -  - include: "{{ role_path }}/tasks/delete_logging.yaml"    when:      - not openshift_logging_install_logging | default(false) | bool -    - not openshift_logging_upgrade_logging | default(false) | bool - -- name: Delete temp directory -  file: -    name: "{{ mktemp.stdout }}" -    state: absent -  tags: logging_cleanup -  changed_when: False -  check_mode: no  - name: Cleaning up local temp dir    local_action: file path="{{local_tmp.stdout}}" state=absent diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml new file mode 100644 index 000000000..82ffb2f93 --- /dev/null +++ b/roles/openshift_logging_curator/defaults/main.yml @@ -0,0 +1,33 @@ +--- +### General logging settings +openshift_logging_curator_image_prefix: "{{ 
openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local" + +openshift_logging_curator_namespace: logging + +### Common settings +openshift_logging_curator_nodeselector: "" +openshift_logging_curator_cpu_limit: 100m +openshift_logging_curator_memory_limit: null + +openshift_logging_curator_es_host: "logging-es" +openshift_logging_curator_es_port: 9200 + +# This should not exceed 1, should check for this +openshift_logging_curator_replicas: 1 + +# this is used to determine if this is an operations deployment or a non-ops deployment +# simply used for naming purposes +openshift_logging_curator_ops_deployment: false + +openshift_logging_curator_default_days: 30 +openshift_logging_curator_run_hour: 0 +openshift_logging_curator_run_minute: 0 +openshift_logging_curator_run_timezone: UTC +openshift_logging_curator_script_log_level: INFO +openshift_logging_curator_log_level: ERROR + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#curator_config_contents: diff --git a/roles/openshift_logging_curator/files/curator.yml b/roles/openshift_logging_curator/files/curator.yml new file mode 100644 index 000000000..8d62d8e7d --- /dev/null +++ b/roles/openshift_logging_curator/files/curator.yml @@ -0,0 +1,18 @@ +# Logging example curator config file + +# uncomment and use this to override the defaults from env vars +#.defaults: +#  delete: +#    days: 30 +#  runhour: 0 +#  runminute: 0 + +# to keep ops logs for a different duration: +#.operations: +#  delete: +#    weeks: 8 + +# example for a normal project +#myapp: +#  delete: +#    weeks: 
1 diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml new file mode 100644 index 000000000..6752fb7f9 --- /dev/null +++ b/roles/openshift_logging_curator/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Curator Component +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_curator/tasks/determine_version.yaml b/roles/openshift_logging_curator/tasks/determine_version.yaml new file mode 100644 index 000000000..94f8b4a97 --- /dev/null +++ b/roles/openshift_logging_curator/tasks/determine_version.yaml @@ -0,0 +1,17 @@ +--- +# debating making this a module instead? +- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    curator_version: "{{ __latest_curator_version }}" +  when: openshift_logging_image_version == 'latest' + +# should we just assume that we will have the correct major version? 
+- set_fact: curator_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Curator +  when: curator_version not in __allowed_curator_versions diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml new file mode 100644 index 000000000..1ee380610 --- /dev/null +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -0,0 +1,122 @@ +--- +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +# This may not be necessary in this role +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# service account +- name: Create Curator service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Curator service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# configmap +- copy: +    src: curator.yml +    dest: "{{ tempdir }}/curator.yml" +  when: curator_config_contents is undefined +  changed_when: no + +- copy: +    content: "{{ curator_config_contents }}" +    dest: "{{ tempdir }}/curator.yml" +  when: curator_config_contents is defined +  changed_when: no + +- name: Set Curator configmap +  oc_configmap: +   
 state: present +    name: "logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +    from_file: +      config.yaml: "{{ tempdir }}/curator.yml" + +# secret +- name: Set Curator secret +  oc_secret: +    state: present +    name: "logging-curator" +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - name: ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: key +      path: "{{ generated_certs_dir }}/system.logging.curator.key" +    - name: cert +      path: "{{ generated_certs_dir }}/system.logging.curator.crt" + +- set_fact: +    curator_name: "{{ 'logging-curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" +    curator_component: "{{ 'curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" + +# DC +# TODO: scale should not exceed 1 +- name: Generate Curator deploymentconfig +  template: +    src: curator.j2 +    dest: "{{ tempdir }}/templates/curator-dc.yaml" +  vars: +    component: "{{ curator_component }}" +    logging_component: curator +    deploy_name: "{{ curator_name }}" +    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" +    es_host: "{{ openshift_logging_curator_es_host }}" +    es_port: "{{ openshift_logging_curator_es_port }}" +    curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}" +    curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}" +    replicas: "{{ openshift_logging_curator_replicas | default (0)}}" +    curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}" +  check_mode: no +  changed_when: no + +- name: Set Curator DC +  oc_obj: +    state: present +    name: "{{ curator_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    kind: dc +    files: +    - "{{ tempdir }}/templates/curator-dc.yaml" +    delete_after: true + +# scale up +- name: Start Curator +  oc_scale: +    
kind: dc +    name: "{{ curator_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    replicas: "{{ openshift_logging_curator_replicas | default (1) }}" + + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2 new file mode 100644 index 000000000..db991e4a9 --- /dev/null +++ b/roles/openshift_logging_curator/templates/curator.j2 @@ -0,0 +1,103 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: +  name: "{{deploy_name}}" +  labels: +    provider: openshift +    component: "{{component}}" +    logging-infra: "{{logging_component}}" +spec: +  replicas: {{replicas|default(0)}} +  selector: +    provider: openshift +    component: "{{component}}" +    logging-infra: "{{logging_component}}" +  strategy: +    rollingParams: +      intervalSeconds: 1 +      timeoutSeconds: 600 +      updatePeriodSeconds: 1 +    type: Recreate +  template: +    metadata: +      name: "{{deploy_name}}" +      labels: +        logging-infra: "{{logging_component}}" +        provider: openshift +        component: "{{component}}" +    spec: +      terminationGracePeriod: 600 +      serviceAccountName: aggregated-logging-curator +{% if curator_node_selector is iterable and curator_node_selector | length > 0 %} +      nodeSelector: +{% for key, value in curator_node_selector.iteritems() %} +        {{key}}: "{{value}}" +{% endfor %} +{% endif %} +      containers: +        - +          name: "curator" +          image: {{image}} +          imagePullPolicy: Always +          resources: +            limits: +              cpu: "{{curator_cpu_limit}}" +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} +              memory: "{{curator_memory_limit}}" +{% endif %} +          env: +            - +              name: "K8S_HOST_URL" +              
value: "{{openshift_logging_curator_master_url}}" +            - +              name: "ES_HOST" +              value: "{{es_host}}" +            - +              name: "ES_PORT" +              value: "{{es_port}}" +            - +              name: "ES_CLIENT_CERT" +              value: "/etc/curator/keys/cert" +            - +              name: "ES_CLIENT_KEY" +              value: "/etc/curator/keys/key" +            - +              name: "ES_CA" +              value: "/etc/curator/keys/ca" +            - +              name: "CURATOR_DEFAULT_DAYS" +              value: "{{openshift_logging_curator_default_days}}" +            - +              name: "CURATOR_RUN_HOUR" +              value: "{{openshift_logging_curator_run_hour}}" +            - +              name: "CURATOR_RUN_MINUTE" +              value: "{{openshift_logging_curator_run_minute}}" +            - +              name: "CURATOR_RUN_TIMEZONE" +              value: "{{openshift_logging_curator_run_timezone}}" +            - +              name: "CURATOR_SCRIPT_LOG_LEVEL" +              value: "{{openshift_logging_curator_script_log_level}}" +            - +              name: "CURATOR_LOG_LEVEL" +              value: "{{openshift_logging_curator_log_level}}" +          volumeMounts: +            - name: certs +              mountPath: /etc/curator/keys +              readOnly: true +            - name: config +              mountPath: /etc/curator/settings +              readOnly: true +            - name: elasticsearch-storage +              mountPath: /elasticsearch/persistent +              readOnly: true +      volumes: +        - name: certs +          secret: +            secretName: logging-curator +        - name: config +          configMap: +            name: logging-curator +        - name: elasticsearch-storage +          emptyDir: {} diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml new file mode 100644 index 
000000000..97525479e --- /dev/null +++ b/roles/openshift_logging_curator/vars/main.yml @@ -0,0 +1,3 @@ +--- +__latest_curator_version: "3_5" +__allowed_curator_versions: ["3_5", "3_6"] diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml new file mode 100644 index 000000000..7923059da --- /dev/null +++ b/roles/openshift_logging_elasticsearch/defaults/main.yml @@ -0,0 +1,57 @@ +--- +### Common settings +openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" +openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_elasticsearch_namespace: logging + +openshift_logging_elasticsearch_nodeselector: "" +openshift_logging_elasticsearch_cpu_limit: 100m +openshift_logging_elasticsearch_memory_limit: 512Mi +openshift_logging_elasticsearch_recover_after_time: 5m + +openshift_logging_elasticsearch_replica_count: 1 + +# ES deployment type +openshift_logging_elasticsearch_deployment_type: "data-master" + +# ES deployment name +openshift_logging_elasticsearch_deployment_name: "" + +# One of ['emptydir', 'pvc', 'hostmount'] +openshift_logging_elasticsearch_storage_type: "emptydir" + +# hostmount options +openshift_logging_elasticsearch_hostmount_path: "" + +# pvc options +# the name of the PVC we will bind to -- create it if it does not exist +openshift_logging_elasticsearch_pvc_name: "" + +# required if the PVC does not already exist +openshift_logging_elasticsearch_pvc_size: "" +openshift_logging_elasticsearch_pvc_dynamic: false +openshift_logging_elasticsearch_pvc_pv_selector: {} +openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce'] +openshift_logging_elasticsearch_storage_group: '65534' + 
+openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" + +# this is used to determine if this is an operations deployment or a non-ops deployment +# simply used for naming purposes +openshift_logging_elasticsearch_ops_deployment: false + +openshift_logging_elasticsearch_ops_allow_cluster_reader: false + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#es_logging_contents: +#es_config_contents: + + +openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_es_host: logging-es +openshift_logging_es_port: 9200 +openshift_logging_es_ca: /etc/fluent/keys/ca +openshift_logging_es_client_cert: /etc/fluent/keys/cert +openshift_logging_es_client_key: /etc/fluent/keys/key diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh new file mode 100644 index 000000000..339b5a1b2 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/files/es_migration.sh @@ -0,0 +1,79 @@ +CA=${1:-/etc/openshift/logging/ca.crt} +KEY=${2:-/etc/openshift/logging/system.admin.key} +CERT=${3:-/etc/openshift/logging/system.admin.crt} +openshift_logging_es_host=${4:-logging-es} +openshift_logging_es_port=${5:-9200} +namespace=${6:-logging} + +# for each index in _cat/indices +# skip indices that begin with . - .kibana, .operations, etc. 
+# skip indices that contain a uuid +# get a list of unique project +# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices +# we are interested in - the awk will strip that part off +function get_list_of_indices() { +    curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ +        awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ +        '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ +    sort -u +} + +# for each index in _cat/indices +# skip indices that begin with . - .kibana, .operations, etc. +# get a list of unique project.uuid +# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices +# we are interested in - the awk will strip that part off +function get_list_of_proj_uuid_indices() { +    curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \ +        awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \ +            '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \ +        sort -u +} + +if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then +  echo "No Elasticsearch pods found running.  Cannot update common data model." +  exit 1 +fi + +count=$(get_list_of_indices | wc -l) +if [ $count -eq 0 ]; then +  echo No matching indices found - skipping update_for_uuid +else +  echo Creating aliases for $count index patterns . . . +  { +    echo '{"actions":[' +    get_list_of_indices | \ +      while IFS=. read proj ; do +        # e.g. 
make test.uuid.* an alias of test.* so we can search for
+        # /test.uuid.*/_search and get both the test.uuid.* and
+        # the test.* indices
+        uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+        [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uid.*\"}}"
+      done
+    echo ']}'
+  } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+    echo No matching indexes found - skipping update_for_common_data_model
+    exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+  echo '{"actions":['
+  get_list_of_proj_uuid_indices | \
+    while IFS=. read proj uuid ; do
+      # e.g. 
make project.test.uuid.* and alias of test.uuid.* so we can search for +      # /project.test.uuid.*/_search and get both the test.uuid.* and +      # the project.test.uuid.* indices +      echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}" +    done +  echo ']}' +} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases" diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml new file mode 100644 index 000000000..567c9f289 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ClusterRole +metadata: +  name: rolebinding-reader +rules: +- resources: +    - clusterrolebindings +  verbs: +    - get diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml new file mode 100644 index 000000000..097270772 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Elasticsearch Component +  company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml new file mode 100644 index 000000000..1a952b5cf --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -0,0 +1,19 @@ +--- +# debating making this a module instead? 
+- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    es_version: "{{ __latest_es_version }}" +  when: openshift_logging_image_version == 'latest' + +- debug: var=openshift_logging_image_version + +# should we just assume that we will have the correct major version? +- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Elasticsearch +  when: es_version not in __allowed_es_versions diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml new file mode 100644 index 000000000..0d4c7a013 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -0,0 +1,231 @@ +--- +- name: Validate Elasticsearch cluster size +  fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this." +  when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int + +- name: Validate Elasticsearch Ops cluster size +  fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this." 
+  when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int + +- fail: +    msg: Invalid deployment type, one of ['data-master', 'data-client', 'master', 'client'] allowed +  when: not openshift_logging_elasticsearch_deployment_type in __allowed_es_types + +- set_fact: elasticsearch_name="{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" + +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +# This may not be necessary in this role +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# service account +- name: Create ES service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-elasticsearch" +    namespace: "{{ openshift_logging_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create ES service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-elasticsearch" +    namespace: "{{ openshift_logging_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# rolebinding reader +- copy: +    src: rolebinding-reader.yml +    dest: "{{ tempdir }}/rolebinding-reader.yml" + +- name: Create rolebinding-reader role +  oc_obj: +    state: present +    name: "rolebinding-reader" +    kind: clusterrole +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - "{{ tempdir }}/rolebinding-reader.yml" +    delete_after: true + +# SA roles +- name: Set rolebinding-reader permissions 
for ES +  oc_adm_policy_user: +    state: present +    namespace: "{{ openshift_logging_namespace }}" +    resource_kind: cluster-role +    resource_name: rolebinding-reader +    user: "system:serviceaccount:{{ openshift_logging_namespace }}:aggregated-logging-elasticsearch" + +# configmap +- template: +    src: elasticsearch-logging.yml.j2 +    dest: "{{ tempdir }}/elasticsearch-logging.yml" +  when: es_logging_contents is undefined +  changed_when: no + +- template: +    src: elasticsearch.yml.j2 +    dest: "{{ tempdir }}/elasticsearch.yml" +  vars: +    allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" +    deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" +  when: es_config_contents is undefined +  changed_when: no + +- copy: +    content: "{{ es_logging_contents }}" +    dest: "{{ tempdir }}/elasticsearch-logging.yml" +  when: es_logging_contents is defined +  changed_when: no + +- copy: +    content: "{{ es_config_contents }}" +    dest: "{{ tempdir }}/elasticsearch.yml" +  when: es_config_contents is defined +  changed_when: no + +- name: Set ES configmap +  oc_configmap: +    state: present +    name: "{{ elasticsearch_name }}-{{ openshift_logging_elasticsearch_deployment_type }}" +    namespace: "{{ openshift_logging_namespace }}" +    from_file: +      elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" +      logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" +#  when: + + +# secret +- name: Set ES secret +  oc_secret: +    state: present +    name: "logging-elasticsearch" +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - name: key +      path: "{{ generated_certs_dir }}/logging-es.jks" +    - name: truststore +      path: "{{ generated_certs_dir }}/truststore.jks" +    - name: searchguard.key +      path: "{{ generated_certs_dir }}/elasticsearch.jks" +    - name: searchguard.truststore +      path: "{{ generated_certs_dir }}/truststore.jks" +    
- name: admin-key +      path: "{{ generated_certs_dir }}/system.admin.key" +    - name: admin-cert +      path: "{{ generated_certs_dir }}/system.admin.crt" +    - name: admin-ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: admin.jks +      path: "{{ generated_certs_dir }}/system.admin.jks" + +- name: Creating ES storage template +  template: +    src: pvc.j2 +    dest: "{{ tempdir }}/templates/logging-es-pvc.yml" +  vars: +    obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" +    size: "{{ openshift_logging_elasticsearch_pvc_size }}" +    access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" +    pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" +  when: +  - openshift_logging_elasticsearch_storage_type == "pvc" +  - not openshift_logging_elasticsearch_pvc_dynamic + +- name: Creating ES storage template +  template: +    src: pvc.j2 +    dest: "{{ tempdir }}/templates/logging-es-pvc.yml" +  vars: +    obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" +    size: "{{ openshift_logging_elasticsearch_pvc_size }}" +    access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" +    pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" +    annotations: +      volume.alpha.kubernetes.io/storage-class: "dynamic" +  when: +  - openshift_logging_elasticsearch_storage_type == "pvc" +  - openshift_logging_elasticsearch_pvc_dynamic + +- name: Set ES storage +  oc_obj: +    state: present +    kind: pvc +    name: "{{ openshift_logging_elasticsearch_pvc_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - "{{ tempdir }}/templates/logging-es-pvc.yml" +    delete_after: true +  when: +  - openshift_logging_elasticsearch_storage_type == "pvc" + +- set_fact: +    es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" + +- set_fact: +    es_deploy_name: "logging-{{ 
es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}" +  when: openshift_logging_elasticsearch_deployment_name == "" + +- set_fact: +    es_deploy_name: "{{ openshift_logging_elasticsearch_deployment_name }}" +  when: openshift_logging_elasticsearch_deployment_name != "" + +# DC +- name: Set ES dc templates +  template: +    src: es.j2 +    dest: "{{ tempdir }}/templates/logging-es-dc.yml" +  vars: +    es_configmap: "{{ elasticsearch_name }}-{{ openshift_logging_elasticsearch_deployment_type }}" +    es_cluster_name: "{{ es_component }}" +    logging_component: "{{ es_component }}" +    deploy_name: "{{ es_deploy_name }}" +    image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}" +    es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}" +    es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}" +    es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}" + +- name: Set ES dc +  oc_obj: +    state: present +    name: "{{ es_deploy_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    kind: dc +    files: +    - "{{ tempdir }}/templates/logging-es-dc.yml" +    delete_after: true + +# scale up +- name: Start Elasticsearch +  oc_scale: +    kind: dc +    name: "{{ es_deploy_name }}" +    namespace: "{{ openshift_logging_namespace }}" +    replicas: 1 + +## Placeholder for migration when necessary ## + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 new file mode 100644 index 000000000..377abe21f --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 @@ -0,0 +1,72 @@ +# you can override this using by setting 
a system property, for example -Des.logger.level=DEBUG +es.logger.level: INFO +rootLogger: ${es.logger.level}, console, file +logger: +  # log action execution errors for easier debugging +  action: WARN +  # reduce the logging for aws, too much is logged under the default INFO +  com.amazonaws: WARN +  io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL} +  io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL} + +  # gateway +  #gateway: DEBUG +  #index.gateway: DEBUG + +  # peer shard recovery +  #indices.recovery: DEBUG + +  # discovery +  #discovery: TRACE + +  index.search.slowlog: TRACE, index_search_slow_log_file +  index.indexing.slowlog: TRACE, index_indexing_slow_log_file + +  # search-guard +  com.floragunn.searchguard: WARN + +additivity: +  index.search.slowlog: false +  index.indexing.slowlog: false + +appender: +  console: +    type: console +    layout: +      type: consolePattern +      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + +  file: +    type: dailyRollingFile +    file: ${path.logs}/${cluster.name}.log +    datePattern: "'.'yyyy-MM-dd" +    layout: +      type: pattern +      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + +  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. 
+  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html +  #file: +    #type: extrasRollingFile +    #file: ${path.logs}/${cluster.name}.log +    #rollingPolicy: timeBased +    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz +    #layout: +      #type: pattern +      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + +  index_search_slow_log_file: +    type: dailyRollingFile +    file: ${path.logs}/${cluster.name}_index_search_slowlog.log +    datePattern: "'.'yyyy-MM-dd" +    layout: +      type: pattern +      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + +  index_indexing_slow_log_file: +    type: dailyRollingFile +    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log +    datePattern: "'.'yyyy-MM-dd" +    layout: +      type: pattern +      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 new file mode 100644 index 000000000..cd4bde98b --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 @@ -0,0 +1,80 @@ +cluster: +  name: ${CLUSTER_NAME} + +script: +  inline: on +  indexed: on + +index: +  number_of_shards: 1 +  number_of_replicas: 0 +  auto_expand_replicas: 0-2 +  unassigned.node_left.delayed_timeout: 2m +  translog: +    flush_threshold_size: 256mb +    flush_threshold_period: 5m + +node: +  master: {% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %} +  data: {% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %} + +network: +  host: 0.0.0.0 + +cloud: +  kubernetes: +    service: ${SERVICE_DNS} +    namespace: ${NAMESPACE} + +discovery: +  type: kubernetes +  zen.ping.multicast.enabled: false + +gateway: +  expected_master_nodes: ${NODE_QUORUM} +  recover_after_nodes: 
${RECOVER_AFTER_NODES} +  expected_nodes: ${RECOVER_EXPECTED_NODES} +  recover_after_time: ${RECOVER_AFTER_TIME} + +io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] + +openshift.config: +  use_common_data_model: true +  project_index_prefix: "project" +  time_field_name: "@timestamp" + +openshift.searchguard: +  keystore.path: /etc/elasticsearch/secret/admin.jks +  truststore.path: /etc/elasticsearch/secret/searchguard.truststore + +openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default ('false')}} + +path: +  data: /elasticsearch/persistent/${CLUSTER_NAME}/data +  logs: /elasticsearch/${CLUSTER_NAME}/logs +  work: /elasticsearch/${CLUSTER_NAME}/work +  scripts: /elasticsearch/${CLUSTER_NAME}/scripts + +searchguard: +  authcz.admin_dn: +  - CN=system.admin,OU=OpenShift,O=Logging +  config_index_name: ".searchguard.${HOSTNAME}" +  ssl: +    transport: +      enabled: true +      enforce_hostname_verification: false +      keystore_type: JKS +      keystore_filepath: /etc/elasticsearch/secret/searchguard.key +      keystore_password: kspass +      truststore_type: JKS +      truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore +      truststore_password: tspass +    http: +      enabled: true +      keystore_type: JKS +      keystore_filepath: /etc/elasticsearch/secret/key +      keystore_password: kspass +      clientauth_mode: OPTIONAL +      truststore_type: JKS +      truststore_filepath: /etc/elasticsearch/secret/truststore +      truststore_password: tspass diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 new file mode 100644 index 000000000..295e58981 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -0,0 +1,114 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: +  name: "{{deploy_name}}" +  labels: +    provider: openshift +  
  component: elasticsearch +    deployment: "{{deploy_name}}" +    logging-infra: "{{logging_component}}" +spec: +  replicas: {{replicas|default(0)}} +  selector: +    provider: openshift +    component: elasticsearch +    deployment: "{{deploy_name}}" +    logging-infra: "{{logging_component}}" +  strategy: +    type: Recreate +  template: +    metadata: +      name: "{{deploy_name}}" +      labels: +        logging-infra: "{{logging_component}}" +        provider: openshift +        component: elasticsearch +        deployment: "{{deploy_name}}" +    spec: +      terminationGracePeriod: 600 +      serviceAccountName: aggregated-logging-elasticsearch +      securityContext: +        supplementalGroups: +        - {{openshift_logging_elasticsearch_storage_group}} +{% if es_node_selector is iterable and es_node_selector | length > 0 %} +      nodeSelector: +{% for key, value in es_node_selector.iteritems() %} +        {{key}}: "{{value}}" +{% endfor %} +{% endif %} +      containers: +        - +          name: "elasticsearch" +          image: {{image}} +          imagePullPolicy: Always +          resources: +            limits: +              memory: "{{es_memory_limit}}" +{% if es_cpu_limit is defined and es_cpu_limit is not none %} +              cpu: "{{es_cpu_limit}}" +{% endif %} +            requests: +              memory: "512Mi" +          ports: +            - +              containerPort: 9200 +              name: "restapi" +            - +              containerPort: 9300 +              name: "cluster" +          env: +            - +              name: "NAMESPACE" +              valueFrom: +                fieldRef: +                  fieldPath: metadata.namespace +            - +              name: "KUBERNETES_TRUST_CERT" +              value: "true" +            - +              name: "SERVICE_DNS" +              value: "logging-{{es_cluster_name}}-cluster" +            - +              name: "CLUSTER_NAME" +              value: 
"logging-{{es_cluster_name}}" +            - +              name: "INSTANCE_RAM" +              value: "{{openshift_logging_elasticsearch_memory_limit}}" +            - +              name: "NODE_QUORUM" +              value: "{{es_node_quorum | int}}" +            - +              name: "RECOVER_AFTER_NODES" +              value: "{{es_recover_after_nodes}}" +            - +              name: "RECOVER_EXPECTED_NODES" +              value: "{{es_recover_expected_nodes}}" +            - +              name: "RECOVER_AFTER_TIME" +              value: "{{openshift_logging_elasticsearch_recover_after_time}}" +          volumeMounts: +            - name: elasticsearch +              mountPath: /etc/elasticsearch/secret +              readOnly: true +            - name: elasticsearch-config +              mountPath: /usr/share/java/elasticsearch/config +              readOnly: true +            - name: elasticsearch-storage +              mountPath: /elasticsearch/persistent +      volumes: +        - name: elasticsearch +          secret: +            secretName: logging-elasticsearch +        - name: elasticsearch-config +          configMap: +            name: {{ es_configmap }} +        - name: elasticsearch-storage +{% if openshift_logging_elasticsearch_storage_type == 'pvc' %} +          persistentVolumeClaim: +            claimName: {{ openshift_logging_elasticsearch_pvc_name }} +{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %} +          hostPath: +            path: {{ openshift_logging_elasticsearch_hostmount_path }} +{% else %} +          emptydir: {} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2 new file mode 100644 index 000000000..f19a3a750 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2 @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: +  name: {{obj_name}} +  labels: +    logging-infra: support 
+{% if annotations is defined %} +  annotations: +{% for key,value in annotations.iteritems() %} +    {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +{% if pv_selector is defined and pv_selector is mapping %} +  selector: +    matchLabels: +{% for key,value in pv_selector.iteritems() %} +      {{key}}: {{value}} +{% endfor %} +{% endif %} +  accessModes: +{% for mode in access_modes %} +    - {{ mode }} +{% endfor %} +  resources: +    requests: +      storage: {{size}} diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml new file mode 100644 index 000000000..7a1f5048b --- /dev/null +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -0,0 +1,12 @@ +--- +__latest_es_version: "3_5" +__allowed_es_versions: ["3_5", "3_6"] +__allowed_es_types: ["data-master", "data-client", "master", "client"] + +# TODO: integrate these +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}" +es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int / 2 | round(0,'floor') + 1) | int }}" +es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}" +es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" +es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}" diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml new file mode 100644 index 000000000..713962c2e --- /dev/null +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -0,0 +1,43 @@ +--- +### General logging settings +openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" 
+openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" +openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" +openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" +openshift_logging_namespace: logging + +### Common settings +openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}" +openshift_logging_fluentd_cpu_limit: 100m +openshift_logging_fluentd_memory_limit: 512Mi +openshift_logging_fluentd_hosts: ['--all'] + +# float time in seconds to wait between node labelling +openshift_logging_fluentd_label_delay: '0.5' + +# Fluentd deployment type +openshift_logging_fluentd_deployment_type: "hosted" + +### Used by "hosted" and "secure-host" deployments + +# Destination for the application based logs +openshift_logging_fluentd_app_host: "logging-es" +openshift_logging_fluentd_app_port: 9200 +# Destination for the operations based logs +openshift_logging_fluentd_ops_host: "{{ openshift_logging_fluentd_app_host }}" +openshift_logging_fluentd_ops_port: "{{ openshift_logging_fluentd_app_port }}" + +### Used by "hosted" and "secure-aggregator" deployments +openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}" +openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" +openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" + + +### Deprecating in 3.6 +openshift_logging_fluentd_es_copy: false + +# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly +#fluentd_config_contents: 
+#fluentd_throttle_contents: +#fluentd_secureforward_contents: diff --git a/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml new file mode 100644 index 000000000..375621ff1 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml @@ -0,0 +1,7 @@ +# Logging example fluentd throttling config file + +#example-project: +#  read_lines_limit: 10 +# +#.operations: +#  read_lines_limit: 100 diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf new file mode 100644 index 000000000..f4483df79 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/secure-forward.conf @@ -0,0 +1,24 @@ +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key <SECRET_STRING> + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key +  # for private CA secret key +# ca_private_key_passphrase passphrase + +# <server> +  # or IP +#   host server.fqdn.example.com +#   port 24284 +# </server> +# <server> +  # ip address to connect +#   host 203.0.113.8 +  # specify hostlabel for FQDN verification if ipaddress is used for host +#   hostlabel server.fqdn.example.com +# </server> diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml new file mode 100644 index 000000000..2003aacb2 --- /dev/null +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -0,0 +1,15 @@ +--- +galaxy_info: +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Fluentd Component +  company: Red Hat, Inc. 
+  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +dependencies: +- role: lib_openshift diff --git a/roles/openshift_logging_fluentd/tasks/determine_version.yaml b/roles/openshift_logging_fluentd/tasks/determine_version.yaml new file mode 100644 index 000000000..a1ba71b1b --- /dev/null +++ b/roles/openshift_logging_fluentd/tasks/determine_version.yaml @@ -0,0 +1,17 @@ +--- +# debating making this a module instead? +- fail: +    msg: Missing version to install provided by 'openshift_logging_image_version' +  when: not openshift_logging_image_version or openshift_logging_image_version == '' + +- set_fact: +    fluentd_version: "{{ __latest_fluentd_version }}" +  when: openshift_logging_image_version == 'latest' + +# should we just assume that we will have the correct major version? +- set_fact: fluentd_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" +  when: openshift_logging_image_version != 'latest' + +- fail: +    msg: Invalid version specified for Fluentd +  when: fluentd_version not in __allowed_fluentd_versions diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml new file mode 100644 index 000000000..e92a35f27 --- /dev/null +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -0,0 +1,10 @@ +--- +- name: Label {{ node }} for Fluentd deployment +  oc_label: +    name: "{{ node }}" +    kind: node +    state: add +    labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" + +# wait half a second between labels +- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml new file mode 100644 index 000000000..0e14328c0 --- /dev/null +++ 
b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -0,0 +1,194 @@ +--- +- fail: +    msg: Only one Fluentd nodeselector key pair should be provided +  when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" + +- fail: +    msg: Application logs destination is required +  when: not openshift_logging_fluentd_app_host or openshift_logging_fluentd_app_host == '' + +- fail: +    msg: Operations logs destination is required +  when: not openshift_logging_fluentd_ops_host or openshift_logging_fluentd_ops_host == '' + +- fail: +    msg: Invalid deployment type, one of ['hosted', 'secure-aggregator', 'secure-host'] allowed +  when: not openshift_logging_fluentd_deployment_type in __allowed_fluentd_types + +- include: determine_version.yaml + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# create service account +- name: Create Fluentd service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-fluentd" +    namespace: "{{ openshift_logging_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Fluentd service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-fluentd" +    namespace: "{{ openshift_logging_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +# set service account scc +- name: Set privileged permissions for Fluentd +  oc_adm_policy_user: +    namespace: "{{ openshift_logging_namespace }}" +    resource_kind: scc +    resource_name: privileged +    state: present +    
user: "system:serviceaccount:{{ openshift_logging_namespace }}:aggregated-logging-fluentd"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Fluentd
+  oc_adm_policy_user:
+    namespace: "{{ openshift_logging_namespace }}"
+    resource_kind: cluster-role
+    resource_name: cluster-reader
+    state: present
+    user: "system:serviceaccount:{{ openshift_logging_namespace }}:aggregated-logging-fluentd"
+
+# create Fluentd configmap
+- template:
+    src: fluent.conf.j2
+    dest: "{{ tempdir }}/fluent.conf"
+  vars:
+    deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
+  when: fluentd_config_contents is undefined
+  changed_when: no
+
+- copy:
+    src: fluentd-throttle-config.yaml
+    dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+  when: fluentd_throttle_contents is undefined
+  changed_when: no
+
+- copy:
+    src: secure-forward.conf
+    dest: "{{ tempdir }}/secure-forward.conf"
+  when: fluentd_secureforward_contents is undefined
+
+  changed_when: no
+
+- copy:
+    content: "{{ fluentd_config_contents }}"
+    dest: "{{ tempdir }}/fluent.conf"
+  when: fluentd_config_contents is defined
+  changed_when: no
+
+- copy:
+    content: "{{ fluentd_throttle_contents }}"
+    dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+  when: fluentd_throttle_contents is defined
+  changed_when: no
+
+- copy:
+    content: "{{ fluentd_secureforward_contents }}"
+    dest: "{{ tempdir }}/secure-forward.conf"
+  when: fluentd_secureforward_contents is defined
+  changed_when: no
+
+- name: Set Fluentd configmap
+  oc_configmap:
+    state: present
+    name: "logging-fluentd"
+    namespace: "{{ openshift_logging_namespace }}"
+    from_file:
+      fluentd.conf: "{{ tempdir }}/fluent.conf"
+      throttle-config.yaml: "{{ tempdir }}/fluentd-throttle-config.yaml"
+      secure-forward.conf: "{{ tempdir }}/secure-forward.conf"
+
+# create Fluentd secret
+# TODO: add aggregation secrets if necessary
+- name: Set 
logging-fluentd secret +  oc_secret: +    state: present +    name: logging-fluentd +    namespace: "{{ openshift_logging_namespace }}" +    files: +    - name: ca +      path: "{{ generated_certs_dir }}/ca.crt" +    - name: key +      path: "{{ generated_certs_dir }}/system.logging.fluentd.key" +    - name: cert +      path: "{{ generated_certs_dir }}/system.logging.fluentd.crt" + +# create Fluentd daemonset + +# this should change based on the type of fluentd deployment to be done... +# TODO: pass in aggregation configurations +- name: Generate logging-fluentd daemonset definition +  template: +    src: fluentd.j2 +    dest: "{{ tempdir }}/templates/logging-fluentd.yaml" +  vars: +    daemonset_name: logging-fluentd +    daemonset_component: fluentd +    daemonset_container_name: fluentd-elasticsearch +    daemonset_serviceAccount: aggregated-logging-fluentd +    app_host: "{{ openshift_logging_fluentd_app_host }}" +    app_port: "{{ openshift_logging_fluentd_app_port }}" +    ops_host: "{{ openshift_logging_fluentd_ops_host }}" +    ops_port: "{{ openshift_logging_fluentd_ops_port }}" +    fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}" +    fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}" +  check_mode: no +  changed_when: no + +- name: Set logging-fluentd daemonset +  oc_obj: +    state: present +    name: logging-fluentd +    namespace: "{{ openshift_logging_namespace }}" +    kind: daemonset +    files: +    - "{{ tempdir }}/templates/logging-fluentd.yaml" +    delete_after: true + +# Scale up Fluentd +- name: Retrieve list of Fluentd hosts +  oc_obj: +    state: list +    kind: node +  when: "'--all' in openshift_logging_fluentd_hosts" +  register: fluentd_hosts + +- name: Set openshift_logging_fluentd_hosts +  set_fact: +    openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}" +  when: "'--all' in 
openshift_logging_fluentd_hosts" + +- include: label_and_wait.yaml +  vars: +    node: "{{ fluentd_host }}" +  with_items: "{{ openshift_logging_fluentd_hosts }}" +  loop_control: +    loop_var: fluentd_host + +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 new file mode 100644 index 000000000..46de94d60 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2 @@ -0,0 +1,78 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +{% if deploy_type in ['hosted', 'secure-aggregator'] %} +## ordered so that syslog always runs last... +@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## +{% else %} +<source> +  @type secure_forward +  @label @INGRESS + +  self_hostname ${HOSTNAME} +  bind 0.0.0.0 +  port {{openshift_logging_fluentd_aggregating_port}} + +  shared_key {{openshift_logging_fluentd_shared_key}} + +  secure {{openshift_logging_fluentd_aggregating_secure}} +  enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} +  ca_cert_path        {{openshift_logging_fluentd_aggregating_cert_path}} +  ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} +  ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + +  <client> +    host {{openshift_logging_fluentd_aggregating_host}} +  </client> +</source> +{% endif %} + +<label @INGRESS> +{% if deploy_type in ['hosted', 'secure-host'] %} +## filters +  @include 
configs.d/openshift/filter-pre-*.conf +  @include configs.d/openshift/filter-retag-journal.conf +  @include configs.d/openshift/filter-k8s-meta.conf +  @include configs.d/openshift/filter-kibana-transform.conf +  @include configs.d/openshift/filter-k8s-flatten-hash.conf +  @include configs.d/openshift/filter-k8s-record-transform.conf +  @include configs.d/openshift/filter-syslog-record-transform.conf +  @include configs.d/openshift/filter-viaq-data-model.conf +  @include configs.d/openshift/filter-post-*.conf +## + +## matches +  @include configs.d/openshift/output-pre-*.conf +  @include configs.d/openshift/output-operations.conf +  @include configs.d/openshift/output-applications.conf +  # no post - applications.conf matches everything left +## +{% else %} +  <match **> +    @type secure_forward + +    self_hostname ${HOSTNAME} +    shared_key {{openshift_logging_fluentd_shared_key}} + +    secure {{openshift_logging_fluentd_aggregating_secure}} +    enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} +    ca_cert_path        {{openshift_logging_fluentd_aggregating_cert_path}} +    ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} +    ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + +    <server> +      host {{openshift_logging_fluentd_aggregating_host}} +      port {{openshift_logging_fluentd_aggregating_port}} +    </server> +  </match> +{% endif %} +</label> diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 new file mode 100644 index 000000000..336d657d5 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -0,0 +1,117 @@ +apiVersion: extensions/v1beta1 +kind: "DaemonSet" +metadata: +  name: "{{ daemonset_name }}" +  labels: +    provider: openshift +    component: "{{ daemonset_component }}" +    logging-infra: "{{ daemonset_component }}" +spec: +  selector: +    matchLabels: +      provider: 
openshift +      component: "{{ daemonset_component }}" +  updateStrategy: +    type: RollingUpdate +    rollingUpdate: +      minReadySeconds: 600 +  template: +    metadata: +      name: "{{ daemonset_container_name }}" +      labels: +        logging-infra: "{{ daemonset_component }}" +        provider: openshift +        component: "{{ daemonset_component }}" +    spec: +      serviceAccountName: "{{ daemonset_serviceAccount }}" +      nodeSelector: +        {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}" +      containers: +      - name: "{{ daemonset_container_name }}" +        image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}" +        imagePullPolicy: Always +        securityContext: +          privileged: true +        resources: +          limits: +            cpu: {{ openshift_logging_fluentd_cpu_limit }} +            memory: {{ openshift_logging_fluentd_memory_limit }} +        volumeMounts: +        - name: runlogjournal +          mountPath: /run/log/journal +        - name: varlog +          mountPath: /var/log +        - name: varlibdockercontainers +          mountPath: /var/lib/docker/containers +          readOnly: true +        - name: config +          mountPath: /etc/fluent/configs.d/user +          readOnly: true +        - name: certs +          mountPath: /etc/fluent/keys +          readOnly: true +        - name: dockerhostname +          mountPath: /etc/docker-hostname +          readOnly: true +        - name: localtime +          mountPath: /etc/localtime +          readOnly: true +        - name: dockercfg +          mountPath: /etc/sysconfig/docker +          readOnly: true +        env: +        - name: "K8S_HOST_URL" +          value: "{{ openshift_logging_master_url }}" +        - name: "ES_HOST" +          value: "{{ app_host }}" +        - name: "ES_PORT" +          value: "{{ app_port }}" +        - name: "ES_CLIENT_CERT" +          value: "{{ 
openshift_logging_es_client_cert }}" +        - name: "ES_CLIENT_KEY" +          value: "{{ openshift_logging_es_client_key }}" +        - name: "ES_CA" +          value: "{{ openshift_logging_es_ca }}" +        - name: "OPS_HOST" +          value: "{{ ops_host }}" +        - name: "OPS_PORT" +          value: "{{ ops_port }}" +        - name: "OPS_CLIENT_CERT" +          value: "{{ openshift_logging_es_ops_client_cert }}" +        - name: "OPS_CLIENT_KEY" +          value: "{{ openshift_logging_es_ops_client_key }}" +        - name: "OPS_CA" +          value: "{{ openshift_logging_es_ops_ca }}" +        - name: "ES_COPY" +          value: "false" +        - name: "USE_JOURNAL" +          value: "{{ openshift_logging_fluentd_use_journal | lower }}" +        - name: "JOURNAL_SOURCE" +          value: "{{ openshift_logging_fluentd_journal_source | default('') }}" +        - name: "JOURNAL_READ_FROM_HEAD" +          value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" +      volumes: +      - name: runlogjournal +        hostPath: +          path: /run/log/journal +      - name: varlog +        hostPath: +          path: /var/log +      - name: varlibdockercontainers +        hostPath: +          path: /var/lib/docker/containers +      - name: config +        configMap: +          name: logging-fluentd +      - name: certs +        secret: +          secretName: logging-fluentd +      - name: dockerhostname +        hostPath: +          path: /etc/hostname +      - name: localtime +        hostPath: +          path: /etc/localtime +      - name: dockercfg +        hostPath: +          path: /etc/sysconfig/docker diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml new file mode 100644 index 000000000..ad3fb0bdd --- /dev/null +++ b/roles/openshift_logging_fluentd/vars/main.yml @@ -0,0 +1,4 @@ +--- +__latest_fluentd_version: "3_5" +__allowed_fluentd_versions: ["3_5", "3_6"] 
+__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
new file mode 100644
index 000000000..6474cd504
--- /dev/null
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+### Common settings
+openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_kibana_namespace: logging
+
+openshift_logging_kibana_nodeselector: ""
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: null
+
+openshift_logging_kibana_hostname: "kibana.router.default.svc.cluster.local"
+
+openshift_logging_kibana_es_host: "logging-es"
+openshift_logging_kibana_es_port: 9200
+
+openshift_logging_kibana_replicas: 1
+openshift_logging_kibana_edge_term_policy: Redirect
+
+# this is used to determine if this is an operations deployment or a non-ops deployment
+# simply used for naming purposes
+openshift_logging_kibana_ops_deployment: false
+
+# Proxy settings
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: null
+
+# The absolute path on the control node to the cert file to use
+# for the public facing kibana certs
+openshift_logging_kibana_cert: ""
+
+# The absolute path on the control node to the key file to use
+# for the public facing kibana certs
+openshift_logging_kibana_key: ""
+
+# The absolute path on the control node to the CA file to use
+# for the public facing kibana certs
+openshift_logging_kibana_ca: ""
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
new file mode 100644
index 000000000..89e08abc0
--- /dev/null
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: OpenShift Red Hat
+  description: OpenShift Aggregated Logging Kibana Component
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_kibana/tasks/determine_version.yaml b/roles/openshift_logging_kibana/tasks/determine_version.yaml
new file mode 100644
index 000000000..53e15af5f
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# debating making this a module instead?
+- fail:
+    msg: Missing version to install provided by 'openshift_logging_image_version'
+  when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+    kibana_version: "{{ __latest_kibana_version }}"
+  when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: kibana_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+  when: openshift_logging_image_version != 'latest'
+
+- fail:
+    msg: Invalid version specified for Kibana
+  when: kibana_version not in __allowed_kibana_versions
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
new file mode 100644
index 000000000..a8ab3f4ef
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -0,0 +1,222 @@
+---
+# fail if we don't have an endpoint for ES to connect to?
+ +- include: determine_version.yaml + +- debug: var=tempdir + +# allow passing in a tempdir +- name: Create temp directory for doing work in +  command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX +  register: mktemp +  changed_when: False + +- set_fact: +    tempdir: "{{ mktemp.stdout }}" + +# This may not be necessary in this role +- name: Create templates subdirectory +  file: +    state: directory +    path: "{{ tempdir }}/templates" +    mode: 0755 +  changed_when: False + +# we want to make sure we have all the necessary components here + +# create service account +- name: Create Kibana service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-kibana" +    namespace: "{{ openshift_logging_namespace }}" +    image_pull_secrets: "{{ openshift_logging_image_pull_secret }}" +  when: openshift_logging_image_pull_secret != '' + +- name: Create Kibana service account +  oc_serviceaccount: +    state: present +    name: "aggregated-logging-kibana" +    namespace: "{{ openshift_logging_namespace }}" +  when: +  - openshift_logging_image_pull_secret == '' + +- set_fact: kibana_name="{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}" + +- name: Retrieving the cert to use when generating secrets for the logging components +  slurp: +    src: "{{ generated_certs_dir }}/{{ item.file }}" +  register: key_pairs +  with_items: +    - { name: "ca_file", file: "ca.crt" } +    - { name: "kibana_internal_key", file: "kibana-internal.key"} +    - { name: "kibana_internal_cert", file: "kibana-internal.crt"} +    - { name: "server_tls", file: "server-tls.json"} + +# create routes +# TODO: set up these certs differently? 
+- set_fact:
+    kibana_key: "{{ lookup('file', openshift_logging_kibana_key) | b64encode }}"
+  when: openshift_logging_kibana_key | trim | length > 0
+  changed_when: false
+
+- set_fact:
+    kibana_cert: "{{ lookup('file', openshift_logging_kibana_cert) | b64encode }}"
+  when: openshift_logging_kibana_cert | trim | length > 0
+  changed_when: false
+
+- set_fact:
+    kibana_ca: "{{ lookup('file', openshift_logging_kibana_ca) | b64encode }}"
+  when: openshift_logging_kibana_ca | trim | length > 0
+  changed_when: false
+
+- set_fact:
+    kibana_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
+  when: kibana_ca is not defined
+  changed_when: false
+
+- name: Generating Kibana route template
+  template:
+    src: route_reencrypt.j2
+    dest: "{{ tempdir }}/templates/kibana-route.yaml"
+  vars:
+    obj_name: "{{ kibana_name }}"
+    route_host: "{{ openshift_logging_kibana_hostname }}"
+    service_name: "{{ kibana_name }}"
+    tls_key: "{{ kibana_key | default('') | b64decode }}"
+    tls_cert: "{{ kibana_cert | default('') | b64decode }}"
+    tls_ca_cert: "{{ kibana_ca | b64decode }}"
+    tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"
+    edge_term_policy: "{{ openshift_logging_kibana_edge_term_policy | default('') }}"
+    labels:
+      component: support
+      logging-infra: support
+      provider: openshift
+  changed_when: no
+
+- name: Setting Kibana route
+  oc_obj:
+    state: present
+    name: "{{ kibana_name }}"
+    namespace: "{{ openshift_logging_namespace }}"
+    kind: route
+    files:
+    - "{{ tempdir }}/templates/kibana-route.yaml"
+
+# gen session_secret -- if necessary
+# TODO: make idempotent
+- name: Generate proxy session
+  set_fact:
+    session_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(200) }}"
+  check_mode: no
+
+# gen oauth_secret -- if necessary
+# TODO: make idempotent
+- name: Generate oauth client secret
+  set_fact:
+    oauth_secret: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(64) }}"
+  check_mode: no
+
+# create oauth client
+- name: Create oauth-client template
+  template:
+    src: oauth-client.j2
+    dest: "{{ tempdir }}/templates/oauth-client.yml"
+  vars:
+    kibana_hostname: "{{ openshift_logging_kibana_hostname }}"
+    secret: "{{ oauth_secret }}"
+
+- name: Set kibana-proxy oauth-client
+  oc_obj:
+    state: present
+    name: "kibana-proxy"
+    namespace: "{{ openshift_logging_namespace }}"
+    kind: oauthclient
+    files:
+    - "{{ tempdir }}/templates/oauth-client.yml"
+    delete_after: true
+
+# create Kibana secret
+- name: Set Kibana secret
+  oc_secret:
+    state: present
+    name: "logging-kibana"
+    namespace: "{{ openshift_logging_namespace }}"
+    files:
+    - name: ca
+      path: "{{ generated_certs_dir }}/ca.crt"
+    - name: key
+      path: "{{ generated_certs_dir }}/system.logging.kibana.key"
+    - name: cert
+      path: "{{ generated_certs_dir }}/system.logging.kibana.crt"
+
+# create Kibana-proxy secret
+- name: Set Kibana Proxy secret
+  oc_secret:
+    state: present
+    name: "logging-kibana-proxy"
+    namespace: "{{ openshift_logging_namespace }}"
+#    files:
+#    - name: server-key
+#      path: "{{ generated_certs_dir }}/kibana-internal.key"
+#    - name: server-cert
+#      path: "{{ generated_certs_dir }}/kibana-internal.crt"
+#    - name: server-tls
+#      path: "{{ generated_certs_dir }}/server-tls.json"
+    contents:
+    - path: oauth-secret
+      data: "{{ oauth_secret }}"
+    - path: session-secret
+      data: "{{ session_secret }}"
+    - path: server-key
+      data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}"
+    - path: server-cert
+      data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}"
+    - path: server-tls
+      data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}"
+
+# create Kibana DC
+- name: Generate Kibana DC template
+  template:
+    src: kibana.j2
+    dest: "{{ tempdir }}/templates/kibana-dc.yaml"
+  vars:
+    component: kibana
+    logging_component: kibana
+    deploy_name: "{{ kibana_name }}"
+    image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}"
+    proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}"
+    es_host: "{{ openshift_logging_kibana_es_host }}"
+    es_port: "{{ openshift_logging_kibana_es_port }}"
+    kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+    kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
+    kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+    kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
+    replicas: "{{ openshift_logging_kibana_replicas | default (0) }}"
+    kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+
+- name: Set Kibana DC
+  oc_obj:
+    state: present
+    name: "{{ kibana_name }}"
+    namespace: "{{ openshift_logging_namespace }}"
+    kind: dc
+    files:
+    - "{{ tempdir }}/templates/kibana-dc.yaml"
+    delete_after: true
+
+# Scale up Kibana -- is this really necessary?
+- name: Start Kibana
+  oc_scale:
+    kind: dc
+    name: "{{ kibana_name }}"
+    namespace: "{{ openshift_logging_namespace }}"
+    replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
+
+# update master configs?
+ +- name: Delete temp directory +  file: +    name: "{{ tempdir }}" +    state: absent +  changed_when: False diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 new file mode 100644 index 000000000..9fefef9b7 --- /dev/null +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -0,0 +1,116 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: +  name: "{{ deploy_name }}" +  labels: +    provider: openshift +    component: "{{ component }}" +    logging-infra: "{{ logging_component }}" +spec: +  replicas: {{ replicas | default(0) }} +  selector: +    provider: openshift +    component: "{{ component }}" +    logging-infra: "{{ logging_component }}" +  strategy: +    rollingParams: +      intervalSeconds: 1 +      timeoutSeconds: 600 +      updatePeriodSeconds: 1 +    type: Rolling +  template: +    metadata: +      name: "{{ deploy_name }}" +      labels: +        logging-infra: "{{ logging_component }}" +        provider: openshift +        component: "{{ component }}" +    spec: +      serviceAccountName: aggregated-logging-kibana +{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} +      nodeSelector: +{% for key, value in kibana_node_selector.iteritems() %} +        {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} +      containers: +        - +          name: "kibana" +          image: {{ image }} +          imagePullPolicy: Always +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} +          resources: +            limits: +{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} +              cpu: "{{ kibana_cpu_limit }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} +              memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +          env: 
+            - name: "ES_HOST" +              value: "{{ es_host }}" +            - name: "ES_PORT" +              value: "{{ es_port }}" +          volumeMounts: +            - name: kibana +              mountPath: /etc/kibana/keys +              readOnly: true +        - +          name: "kibana-proxy" +          image: {{ proxy_image }} +          imagePullPolicy: Always +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} +          resources: +            limits: +{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} +              cpu: "{{ kibana_proxy_cpu_limit }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} +              memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +          ports: +            - +              name: "oaproxy" +              containerPort: 3000 +          env: +            - +             name: "OAP_BACKEND_URL" +             value: "http://localhost:5601" +            - +             name: "OAP_AUTH_MODE" +             value: "oauth2" +            - +             name: "OAP_TRANSFORM" +             value: "user_header,token_header" +            - +             name: "OAP_OAUTH_ID" +             value: kibana-proxy +            - +             name: "OAP_MASTER_URL" +             value: {{ openshift_logging_kibana_master_url }} +            - +             name: "OAP_PUBLIC_MASTER_URL" +             value: {{ openshift_logging_kibana_master_public_url }} +            - +             name: "OAP_LOGOUT_REDIRECT" +             value: {{ openshift_logging_kibana_master_public_url }}/console/logout +            - +             name: "OAP_MASTER_CA_FILE" +             value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +            - +             name: 
"OAP_DEBUG" +             value: "{{ openshift_logging_kibana_proxy_debug }}" +          volumeMounts: +            - name: kibana-proxy +              mountPath: /secret +              readOnly: true +      volumes: +        - name: kibana +          secret: +            secretName: logging-kibana +        - name: kibana-proxy +          secret: +            secretName: logging-kibana-proxy diff --git a/roles/openshift_logging_kibana/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2 new file mode 100644 index 000000000..6767f6d89 --- /dev/null +++ b/roles/openshift_logging_kibana/templates/oauth-client.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: OAuthClient +metadata: +  name: kibana-proxy +  labels: +    logging-infra: support +secret: {{secret}} +redirectURIs: +- https://{{kibana_hostname}} +scopeRestrictions: +- literals: +  - user:info +  - user:check-access +  - user:list-projects diff --git a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 new file mode 100644 index 000000000..cf8a9e65f --- /dev/null +++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: +  name: "{{obj_name}}" +{% if labels is defined%} +  labels: +{% for key, value in labels.iteritems() %} +    {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +  host: {{ route_host }} +  tls: +{% if tls_key is defined and tls_key | length > 0 %} +    key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} +    certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} +    caCertificate: | +{% for line in tls_ca_cert.split('\n') %} +      {{ line }} +{% endfor %} +    destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} +      {{ line }} +{% endfor %} +    termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} +  
  insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} +  to: +    kind: Service +    name: {{ service_name }} diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml new file mode 100644 index 000000000..87b281c4b --- /dev/null +++ b/roles/openshift_logging_kibana/vars/main.yml @@ -0,0 +1,3 @@ +--- +__latest_kibana_version: "3_5" +__allowed_kibana_versions: ["3_5", "3_6"]  | 
