diff options
38 files changed, 664 insertions, 104 deletions
| diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index dfd9a111e..f494c0ae5 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -243,6 +243,21 @@ class FilterModule(object):          return string.split(separator)      @staticmethod +    def oo_haproxy_backend_masters(hosts): +        ''' This takes an array of dicts and returns an array of dicts +            to be used as a backend for the haproxy role +        ''' +        servers = [] +        for idx, host_info in enumerate(hosts): +            server = dict(name="master%s" % idx) +            server_ip = host_info['openshift']['common']['ip'] +            server_port = host_info['openshift']['master']['api_port'] +            server['address'] = "%s:%s" % (server_ip, server_port) +            server['opts'] = 'check' +            servers.append(server) +        return servers + +    @staticmethod      def oo_filter_list(data, filter_attr=None):          ''' This returns a list, which contains all items where filter_attr              evaluates to true @@ -407,5 +422,6 @@ class FilterModule(object):              "oo_split": self.oo_split,              "oo_filter_list": self.oo_filter_list,              "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs, -            "oo_parse_certificate_names": self.oo_parse_certificate_names +            "oo_parse_certificate_names": self.oo_parse_certificate_names, +            "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters          } diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index f60918e6d..11f076a8a 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -5,6 +5,7 @@  masters  nodes  etcd +lb  # Set variables common for all OSEv3 hosts  [OSEv3:vars] @@ -57,21 +58,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # Set cockpit plugins  #osm_cockpit_plugins=['cockpit-kubernetes'] -# master cluster ha 
variables using pacemaker or RHEL HA +# Native high availability cluster method with optional load balancer. +# If no lb group is defined installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. +#openshift_master_cluster_method=pacemaker  #openshift_master_cluster_password=openshift_cluster  #openshift_master_cluster_vip=192.168.133.25  #openshift_master_cluster_public_vip=192.168.133.25  #openshift_master_cluster_hostname=openshift-ansible.test.example.com  #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com -# master cluster ha variables when using a different HA solution -# For installation the value of openshift_master_cluster_hostname must resolve -# to the first master defined in the inventory. -# The HA solution must be manually configured after installation and must ensure -# that the master is running on a single master host. 
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_defer_ha=True +# Override the default controller lease ttl +#osm_controller_lease_ttl=30  # default subdomain to use for exposed routes  #osm_default_subdomain=apps.test.example.com @@ -104,6 +113,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # Detected names may be overridden by specifying the "names" key  #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_session_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +  # host group for masters  [masters]  ose3-master[1:3]-ansible.test.example.com @@ -111,6 +136,9 @@ ose3-master[1:3]-ansible.test.example.com  [etcd]  ose3-etcd[1:3]-ansible.test.example.com +[lb] +ose3-lb-ansible.test.example.com +  # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes  # However, in order to ensure that your masters are not burdened with running pods you should  # make them unschedulable by adding openshift_scheduleable=False any node that's also a master. 
diff --git a/openshift-ansible.spec b/openshift-ansible.spec index df3418278..10a53d921 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -104,6 +104,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada  %files bin  %{_bindir}/* +%exclude %{_bindir}/atomic-openshift-installer  %{python_sitelib}/openshift_ansible/  /etc/bash_completion.d/*  %config(noreplace) /etc/openshift_ansible/ diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 5b10f856c..e05ab43f8 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -140,10 +140,11 @@      - file: path={{ item }} state=absent        with_items: +        - "~{{ ansible_ssh_user }}/.kube"          - /etc/ansible/facts.d/openshift.fact          - /etc/atomic-enterprise -        - /etc/etcd          - /etc/corosync +        - /etc/etcd          - /etc/openshift          - /etc/openshift-sdn          - /etc/origin @@ -156,10 +157,13 @@          - /etc/sysconfig/origin-master          - /etc/sysconfig/origin-node          - /root/.kube -        - "~{{ ansible_ssh_user }}/.kube" +        - /run/openshift-sdn          - /usr/share/openshift/examples          - /var/lib/atomic-enterprise          - /var/lib/etcd          - /var/lib/openshift          - /var/lib/origin          - /var/lib/pacemaker + +    - name: restart docker +      service: name=docker state=restarted diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index a8e3e27bb..5aa6b0f9b 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -11,6 +11,7 @@  - include: ../../common/openshift-cluster/config.yml    vars:      g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" +    g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"      g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"      
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"      g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 9e50a4a18..411c7e660 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -4,6 +4,7 @@      g_etcd_group: "{{ 'etcd' }}"      g_masters_group: "{{ 'masters' }}"      g_nodes_group: "{{ 'nodes' }}" +    g_lb_group: "{{ 'lb' }}"      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_debug_level: 2      openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 57de7130b..a8bd634d3 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -8,4 +8,4 @@  - include: ../openshift-node/config.yml    vars:      osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" -    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" +    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}" diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 1919660dd..2bb69614f 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -4,17 +4,21 @@    gather_facts: no    tasks:    - fail: -      msg: This playbook rquires g_etcd_group to be set +      msg: This playbook requires g_etcd_group to be set      when: g_etcd_group is not defined    - fail: -      msg: This playbook rquires g_masters_group to be set +      msg: This playbook requires g_masters_group to be set      when: g_masters_group is not defined    - fail: -      msg: This playbook rquires g_nodes_group to be set +      msg: 
This playbook requires g_nodes_group to be set      when: g_nodes_group is not defined +  - fail: +      msg: This playbook requires g_lb_group to be set +    when: g_lb_group is not defined +    - name: Evaluate oo_etcd_to_config      add_host:        name: "{{ item }}" @@ -62,3 +66,11 @@        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_sudo: "{{ g_sudo | default(omit) }}"      when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 + +  - name: Evaluate oo_lb_to_config +    add_host: +      name: "{{ item }}" +      groups: oo_lb_to_config +      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" +      ansible_sudo: "{{ g_sudo | default(omit) }}" +    with_items: groups[g_lb_group] | default([]) diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 59c4b2370..64376040f 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -34,7 +34,9 @@        - role: common          local_facts:            hostname: "{{ openshift_hostname | default(None) }}" +          ip: "{{ openshift_ip | default(None) }}"            public_hostname: "{{ openshift_public_hostname | default(None) }}" +          public_ip: "{{ openshift_public_ip | default(None) }}"            deployment_type: "{{ openshift_deployment_type }}"        - role: master          local_facts: @@ -44,7 +46,6 @@            public_api_url: "{{ openshift_master_public_api_url | default(None) }}"            cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"            cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" -          cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"            console_path: "{{ openshift_master_console_path | default(None) }}"            console_port: "{{ openshift_master_console_port | default(None) }}"            console_url: "{{ 
openshift_master_console_url | default(None) }}" @@ -168,6 +169,10 @@      masters_needing_certs: "{{ hostvars                                 | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))                                 | oo_filter_list(filter_attr='master_certs_missing') }}" +    master_hostnames: "{{ hostvars +                               | oo_select_keys(groups['oo_masters_to_config']) +                               | oo_collect('openshift.common.all_hostnames') +                               | oo_flatten | unique }}"      sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"    roles:    - openshift_master_certificates @@ -207,13 +212,76 @@        parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"      when: openshift_master_named_certificates is defined +- name: Compute haproxy_backend_servers +  hosts: localhost +  connection: local +  sudo: false +  gather_facts: no +  tasks: +  - set_fact: +      haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}" + +- name: Configure load balancers +  hosts: oo_lb_to_config +  vars: +    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" +    haproxy_frontends: +    - name: atomic-openshift-api +      mode: tcp +      options: +      - tcplog +      binds: +      - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" +      default_backend: atomic-openshift-api +    haproxy_backends: +    - name: atomic-openshift-api +      mode: tcp +      option: tcplog +      balance: source +      servers: "{{ hostvars.localhost.haproxy_backend_servers }}" +  roles: +  - role: haproxy +    when: groups.oo_masters_to_config | length > 1 + +- name: Generate master session keys +  hosts: oo_first_master +  tasks: +  - fail: +      msg: "Both openshift_master_session_auth_secrets 
and openshift_master_session_encryption_secrets must be provided if either variable is set" +    when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined) +  - fail: +      msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length" +    when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length) +  - name: Generate session authentication key +    command: /usr/bin/openssl rand -base64 24 +    register: session_auth_output +    with_sequence: count=1 +    when: openshift_master_session_auth_secrets is undefined +  - name: Generate session encryption key +    command: /usr/bin/openssl rand -base64 24 +    register: session_encryption_output +    with_sequence: count=1 +    when: openshift_master_session_encryption_secrets is undefined +  - set_fact: +      session_auth_secret: "{{ openshift_master_session_auth_secrets +                                | default(session_auth_output.results +                                | map(attribute='stdout') +                                | list) }}" +      session_encryption_secret: "{{ openshift_master_session_encryption_secrets +                                      | default(session_encryption_output.results +                                      | map(attribute='stdout') +                                      | list) }}" +  - name: Configure master instances    hosts: oo_masters_to_config +  serial: 1    vars:      named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"      sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"      openshift_master_ha: "{{ 
groups.oo_masters_to_config | length > 1 }}" -    embedded_etcd: "{{ openshift.master.embedded_etcd }}" +    openshift_master_count: "{{ groups.oo_masters_to_config | length }}" +    openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}" +    openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"    pre_tasks:    - name: Ensure certificate directory exists      file: @@ -242,11 +310,25 @@      omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"    roles:    - role: openshift_master_cluster -    when: openshift_master_ha | bool +    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"    - openshift_examples    - role: openshift_cluster_metrics      when: openshift.common.use_cluster_metrics | bool +- name: Determine cluster dns ip +  hosts: oo_first_master +  tasks: +  - name: Get master service ip +    command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}" +    register: master_service_ip_output +    when: openshift.common.version_greater_than_3_1_or_1_1 | bool +  - set_fact: +      cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" +    when: not openshift.common.version_greater_than_3_1_or_1_1 | bool +  - set_fact: +      cluster_dns_ip: "{{ master_service_ip_output.stdout }}" +    when: openshift.common.version_greater_than_3_1_or_1_1 | bool +  - name: Enable cockpit    hosts: oo_first_master    vars: diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 6ca4f7395..745161bcb 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -16,6 +16,7 @@  - include: ../../common/openshift-cluster/config.yml    vars:      g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" +    g_lb_group: "{{ 
'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"      g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"      g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"      g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml index 0dfa3e9d7..c8f6065cd 100644 --- a/playbooks/gce/openshift-cluster/join_node.yml +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -46,4 +46,4 @@      openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "      os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"      osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" -    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" +    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index c208eee81..4d1ae22ff 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -15,6 +15,7 @@  - include: ../../common/openshift-cluster/config.yml    vars:      g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" +    g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"      g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"      g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"      g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index a5ee2d6a5..888804e28 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -10,6 +10,7 @@  - include: ../../common/openshift-cluster/config.yml    
vars:      g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" +    g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"      g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"      g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"      g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md new file mode 100644 index 000000000..5bc415066 --- /dev/null +++ b/roles/haproxy/README.md @@ -0,0 +1,34 @@ +HAProxy +======= + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml new file mode 100644 index 000000000..7ba5bd485 --- /dev/null +++ b/roles/haproxy/defaults/main.yml @@ -0,0 +1,21 @@ +--- +haproxy_frontends: +- name: main +  binds: +  - "*:80" +  default_backend: default + +haproxy_backends: +- name: default +  balance: roundrobin +  servers: +  - name: web01 +    address: 127.0.0.1:9000 +    opts: check + +os_firewall_use_firewalld: False +os_firewall_allow: +- service: haproxy stats +  port: "9000/tcp" +- service: haproxy balance +  port: "8443/tcp" diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml new file mode 100644 index 000000000..ee60adcab --- /dev/null +++ b/roles/haproxy/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart haproxy +  service: +    name: haproxy +    state: restarted diff --git a/roles/haproxy/meta/main.yml b/roles/haproxy/meta/main.yml new file mode 100644 index 000000000..0fad106a9 --- /dev/null +++ b/roles/haproxy/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: +  author: Jason DeTiberus +  description: HAProxy +  
company: Red Hat, Inc. +  license: Apache License, Version 2.0 +  min_ansible_version: 1.9 +  platforms: +  - name: EL +    versions: +    - 7 +dependencies: +- { role: os_firewall } +- { role: openshift_repos } diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml new file mode 100644 index 000000000..5638b7313 --- /dev/null +++ b/roles/haproxy/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Install haproxy +  yum: +    pkg: haproxy +    state: present + +- name: Configure haproxy +  template: +    src: haproxy.cfg.j2 +    dest: /etc/haproxy/haproxy.cfg +    owner: root +    group: root +    mode: 0644 +  notify: restart haproxy + +- name: Enable and start haproxy +  service: +    name: haproxy +    state: started +    enabled: yes +  register: start_result + +- name: Pause 30 seconds if haproxy was just started +  pause: seconds=30 +  when: start_result | changed diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 000000000..c932af72f --- /dev/null +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,76 @@ +# Global settings +#--------------------------------------------------------------------- +global +    chroot      /var/lib/haproxy +    pidfile     /var/run/haproxy.pid +    maxconn     4000 +    user        haproxy +    group       haproxy +    daemon + +    # turn on stats unix socket +    stats socket /var/lib/haproxy/stats + +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults +    mode                    http +    log                     global +    option                  httplog +    option                  dontlognull +    option http-server-close +    option forwardfor       except 127.0.0.0/8 +    option                  redispatch +    retries              
   3 +    timeout http-request    10s +    timeout queue           1m +    timeout connect         10s +    timeout client          300s +    timeout server          300s +    timeout http-keep-alive 10s +    timeout check           10s +    maxconn                 3000 + +listen stats :9000 +    mode http +    stats enable +    stats uri / + +{% for frontend in haproxy_frontends %} +frontend  {{ frontend.name }} +{% for bind in frontend.binds %} +    bind {{ bind }} +{% endfor %} +    default_backend {{ frontend.default_backend }} +{% if 'mode' in frontend %} +    mode {{ frontend.mode }} +{% endif %} +{% if 'options' in frontend %} +{% for option in frontend.options %} +    option {{ option }} +{% endfor %} +{% endif %} +{% if 'redirects' in frontend %} +{% for redirect in frontend.redirects %} +    redirect {{ redirect }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for backend in haproxy_backends %} +backend {{ backend.name }} +    balance {{ backend.balance }} +{% if 'mode' in backend %} +    mode {{ backend.mode }} +{% endif %} +{% if 'options' in backend %} +{% for option in backend.options %} +    option {{ option }} +{% endfor %} +{% endif %} +{% for server in backend.servers %} +    server      {{ server.name }} {{ server.address }} {{ server.opts }} +{% endfor %} +{% endfor %} diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 1b2ba6be3..1ba5fc13b 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -407,7 +407,7 @@ def set_identity_providers_if_unset(facts):                  name='allow_all', challenge=True, login=True,                  kind='AllowAllPasswordIdentityProvider'              ) -            if deployment_type == 'enterprise': +            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:                  identity_provider = dict(                      name='deny_all', 
challenge=True, login=True,                      kind='DenyAllPasswordIdentityProvider' @@ -554,15 +554,6 @@ def set_deployment_facts_if_unset(facts):              if deployment_type in ['enterprise', 'online']:                  data_dir = '/var/lib/openshift'              facts['common']['data_dir'] = data_dir -        facts['common']['version'] = version = get_openshift_version() -        if version is not None: -            if deployment_type == 'origin': -                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') -            else: -                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') -        else: -            version_gt_3_1_or_1_1 = True -        facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1      for role in ('master', 'node'):          if role in facts: @@ -596,12 +587,34 @@ def set_deployment_facts_if_unset(facts):      return facts +def set_version_facts_if_unset(facts): +    """ Set version facts. This currently includes common.version and +        common.version_greater_than_3_1_or_1_1. + +        Args: +            facts (dict): existing facts +        Returns: +            dict: the facts dict updated with version facts. 
+    """ +    if 'common' in facts: +        deployment_type = facts['common']['deployment_type'] +        facts['common']['version'] = version = get_openshift_version() +        if version is not None: +            if deployment_type == 'origin': +                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') +            else: +                version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') +        else: +            version_gt_3_1_or_1_1 = True +        facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 +    return facts -def set_sdn_facts_if_unset(facts): +def set_sdn_facts_if_unset(facts, system_facts):      """ Set sdn facts if not already present in facts dict          Args:              facts (dict): existing facts +            system_facts (dict): ansible_facts          Returns:              dict: the facts dict updated with the generated sdn facts if they                    were not already present @@ -620,9 +633,18 @@ def set_sdn_facts_if_unset(facts):          if 'sdn_host_subnet_length' not in facts['master']:              facts['master']['sdn_host_subnet_length'] = '8' -    if 'node' in facts: -        if 'sdn_mtu' not in facts['node']: -            facts['node']['sdn_mtu'] = '1450' +    if 'node' in facts and 'sdn_mtu' not in facts['node']: +        node_ip = facts['common']['ip'] + +        # default MTU if interface MTU cannot be detected +        facts['node']['sdn_mtu'] = '1450' + +        for val in system_facts.itervalues(): +            if isinstance(val, dict) and 'mtu' in val: +                mtu = val['mtu'] + +                if 'ipv4' in val and val['ipv4'].get('address') == node_ip: +                    facts['node']['sdn_mtu'] = str(mtu - 50)      return facts @@ -893,8 +915,9 @@ class OpenShiftFacts(object):          facts = set_master_selectors(facts)          facts = set_metrics_facts_if_unset(facts)          facts = set_identity_providers_if_unset(facts) -    
    facts = set_sdn_facts_if_unset(facts) +        facts = set_sdn_facts_if_unset(facts, self.system_facts)          facts = set_deployment_facts_if_unset(facts) +        facts = set_version_facts_if_unset(facts)          facts = set_aggregate_facts(facts)          return dict(openshift=facts) @@ -934,7 +957,7 @@ class OpenShiftFacts(object):                            session_name='ssn', session_secrets_file='',                            access_token_max_seconds=86400,                            auth_token_max_seconds=500, -                          oauth_grant_method='auto', cluster_defer_ha=False) +                          oauth_grant_method='auto')              defaults['master'] = master          if 'node' in roles: diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 37028e0f6..4b9500cbd 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -2,3 +2,13 @@  - name: restart master    service: name={{ openshift.common.service_type }}-master state=restarted    when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false)) + +- name: restart master api +  service: name={{ openshift.common.service_type }}-master-api state=restarted +  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' + +# TODO: need to fix up ignore_errors here +- name: restart master controllers +  service: name={{ openshift.common.service_type }}-master-controllers state=restarted +  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' +  ignore_errors: yes diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 3a886935f..be77fce4a 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -9,16 
+9,22 @@    when: openshift_master_oauth_grant_method is defined  - fail: +    msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations" +  when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"])) +- fail: +    msg: "'native' high availability is not supported for the requested OpenShift version" +  when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool +- fail:      msg: "openshift_master_cluster_password must be set for multi-master installations" -  when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined +  when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)  - name: Set master facts    openshift_facts:      role: master      local_facts: +      cluster_method: "{{ openshift_master_cluster_method | default(None) }}"        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"        cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" -      cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"        debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"        api_port: "{{ openshift_master_api_port | default(None) }}"        api_url: "{{ openshift_master_api_url | default(None) }}" @@ -41,6 +47,8 @@        portal_net: "{{ openshift_master_portal_net | default(None) }}"        session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"        session_name: "{{ openshift_master_session_name | default(None) }}" +      
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}" +      session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}"        session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"        access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"        auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}" @@ -63,6 +71,8 @@        controller_args: "{{ osm_controller_args | default(None) }}"        infra_nodes: "{{ num_infra | default(None) }}"        disabled_features: "{{ osm_disabled_features | default(None) }}" +      master_count: "{{ openshift_master_count | default(None) }}" +      controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"  - name: Install Master package    yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present @@ -77,7 +87,7 @@        domain: cluster.local    when: openshift.master.embedded_dns -- name: Create config parent directory if it doesn't exist +- name: Create config parent directory if it does not exist    file:      path: "{{ openshift_master_config_dir }}"      state: directory @@ -90,6 +100,8 @@      creates: "{{ openshift_master_policy }}"    notify:    - restart master +  - restart master api +  - restart master controllers  - name: Create the scheduler config    template: @@ -98,6 +110,8 @@      backup: true    notify:    - restart master +  - restart master api +  - restart master controllers  - name: Install httpd-tools if needed    yum: pkg=httpd-tools state=present @@ -120,6 +134,39 @@    when: item.kind == 'HTPasswdPasswordIdentityProvider'    with_items: openshift.master.identity_providers +# workaround for missing systemd unit files for controllers/api +- name: Create the api service file +  template: +    src: atomic-openshift-master-api.service.j2 +    dest: /usr/lib/systemd/system/{{ 
openshift.common.service_type }}-master-api.service +    force: no +- name: Create the controllers service file +  template: +    src: atomic-openshift-master-controllers.service.j2 +    dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service +    force: no +- name: Create the api env file +  template: +    src: atomic-openshift-master-api.j2 +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api +    force: no +- name: Create the controllers env file +  template: +    src: atomic-openshift-master-controllers.j2 +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +    force: no +- command: systemctl daemon-reload +# end workaround for missing systemd unit files + +- name: Create session secrets file +  template: +    dest: "{{ openshift.master.session_secrets_file }}" +    src: sessionSecretsFile.yaml.v1.j2 +    force: no +  notify: +  - restart master +  - restart master api +  # TODO: add the validate parameter when there is a validation command to run  - name: Create master config    template: @@ -128,12 +175,15 @@      backup: true    notify:    - restart master +  - restart master api +  - restart master controllers  - name: Configure master settings    lineinfile:      dest: /etc/sysconfig/{{ openshift.common.service_type }}-master      regexp: "{{ item.regex }}"      line: "{{ item.line }}" +    create: yes    with_items:      - regex: '^OPTIONS='        line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}" @@ -142,6 +192,32 @@    notify:    - restart master +- name: Configure master api settings +  lineinfile: +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api +    regexp: "{{ item.regex }}" +    line: "{{ item.line }}" +  with_items: +    - regex: '^OPTIONS=' +      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443" +    - regex: '^CONFIG_FILE=' +      
line: "CONFIG_FILE={{ openshift_master_config_file }}" +  notify: +  - restart master api + +- name: Configure master controller settings +  lineinfile: +    dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +    regexp: "{{ item.regex }}" +    line: "{{ item.line }}" +  with_items: +    - regex: '^OPTIONS=' +      line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444" +    - regex: '^CONFIG_FILE=' +      line: "CONFIG_FILE={{ openshift_master_config_file }}" +  notify: +  - restart master controllers +  - name: Start and enable master    service: name={{ openshift.common.service_type }}-master enabled=yes state=started    when: not openshift_master_ha | bool @@ -149,15 +225,37 @@  - set_fact:      master_service_status_changed = start_result | changed +  when: not openshift_master_ha | bool + +- name: Start and enable master api +  service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started +  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' +  register: start_result + +- set_fact: +    master_api_service_status_changed = start_result | changed +  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' + +# TODO: fix the ugly workaround of setting ignore_errors +#       the controllers service tries to start even if it is already started +- name: Start and enable master controller +  service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started +  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' +  register: start_result +  ignore_errors: yes + +- set_fact: +    master_controllers_service_status_changed = start_result | changed +  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'  - name: Install cluster packages    yum: pkg=pcs state=present -  when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool 
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'    register: install_result  - name: Start and enable cluster service    service: name=pcsd enabled=yes state=started -  when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool +  when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'  - name: Set the cluster user password    shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.j2 new file mode 100644 index 000000000..205934248 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-api.j2 @@ -0,0 +1,9 @@ +OPTIONS= +CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml + +# Proxy configuration +# Origin uses standard HTTP_PROXY environment variables. Be sure to set +# NO_PROXY for your master +#NO_PROXY=master.example.com +#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT +#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 new file mode 100644 index 000000000..ba19fb348 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=Atomic OpenShift Master API +Documentation=https://github.com/openshift/origin +After=network.target +After=etcd.service +Before={{ openshift.common.service_type }}-node.service +Requires=network.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory={{ openshift.common.data_dir }} 
+SyslogIdentifier=atomic-openshift-master-api + +[Install] +WantedBy=multi-user.target +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 new file mode 100644 index 000000000..205934248 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 @@ -0,0 +1,9 @@ +OPTIONS= +CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml + +# Proxy configuration +# Origin uses standard HTTP_PROXY environment variables. Be sure to set +# NO_PROXY for your master +#NO_PROXY=master.example.com +#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT +#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 new file mode 100644 index 000000000..8952c86ef --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 @@ -0,0 +1,22 @@ +[Unit] +Description=Atomic OpenShift Master Controllers +Documentation=https://github.com/openshift/origin +After=network.target +After={{ openshift.common.service_type }}-master-api.service +Before={{ openshift.common.service_type }}-node.service +Requires=network.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory={{ openshift.common.data_dir }} +SyslogIdentifier={{ openshift.common.service_type }}-master-controllers +Restart=on-failure + +[Install] +WantedBy=multi-user.target +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 
b/roles/openshift_master/templates/master.yaml.v1.j2 index 9547a6945..d4a6590ea 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -10,13 +10,18 @@ assetConfig:    publicURL: {{ openshift.master.public_console_url }}/    servingInfo:      bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }} +    bindNetwork: tcp4      certFile: master.server.crt      clientCA: ""      keyFile: master.server.key      maxRequestsInFlight: 0      requestTimeoutSeconds: 0 +{% if openshift_master_ha | bool %} +controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }} +{% endif %} +controllers: '*'  corsAllowedOrigins: -{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] | unique %} +{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}    - {{ origin }}  {% endfor %}  {% for custom_origin in openshift.master.custom_cors_origins | default("") %} @@ -29,8 +34,10 @@ corsAllowedOrigins:  disabledFeatures: {{ openshift.master.disabled_features | to_json }}  {% endif %}  {% if openshift.master.embedded_dns | bool %} +disabledFeatures: null  dnsConfig:    bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }} +  bindNetwork: tcp4  {% endif %}  etcdClientInfo:    ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} @@ -80,9 +87,8 @@ kubernetesMasterConfig:    - v1    apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}    controllerArguments: {{ controller_args if controller_args is defined else 'null' }} -{# TODO: support overriding masterCount #} -  masterCount: 1 -  masterIP: "" +  masterCount: {{ openshift.master.master_count }} +  masterIP: {{ openshift.common.ip }}    podEvictionTimeout: "" 
   proxyClientInfo:      certFile: master.proxy-client.crt @@ -106,6 +112,7 @@ networkConfig:  # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet    serviceNetworkCIDR: {{ openshift.master.portal_net }}  {% include 'v1_partials/oauthConfig.j2' %} +pauseControllers: false  policyConfig:    bootstrapPolicyFile: {{ openshift_master_policy }}    openshiftInfrastructureNamespace: openshift-infra @@ -121,6 +128,7 @@ projectConfig:  routingConfig:    subdomain:  "{{ openshift.master.default_subdomain | default("") }}"  serviceAccountConfig: +  limitSecretReferences: false    managedNames:    - default    - builder @@ -131,6 +139,7 @@ serviceAccountConfig:    - serviceaccounts.public.key  servingInfo:    bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} +  bindNetwork: tcp4    certFile: master.server.crt    clientCA: ca.crt    keyFile: master.server.key diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 new file mode 100644 index 000000000..d12d9db90 --- /dev/null +++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: SessionSecrets +secrets: +{% for secret in openshift_master_session_auth_secrets %} +- authentication: "{{ openshift_master_session_auth_secrets[loop.index0] }}" +  encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}" +{% endfor %} diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index ecdb4f883..534465451 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -2,6 +2,7 @@  openshift_master_config_dir: "{{ openshift.common.config_base }}/master"  openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"  openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json" +openshift_master_session_secrets_file: "{{ 
openshift_master_config_dir }}/session-secrets.yaml"  openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"  openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index cfd1ceabf..0738048d3 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -1,6 +1,6 @@  ---  - name: Install the base package for admin tooling -  yum: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=present +  yum: pkg={{ openshift.common.service_type }} state=present    register: install_result  - name: Reload generated facts @@ -14,7 +14,7 @@  - name: Create the master certificates if they do not already exist    command: >      {{ openshift.common.admin_binary }} create-master-certs -      --hostnames={{ openshift.common.all_hostnames | join(',') }} +      --hostnames={{ master_hostnames | join(',') }}        --master={{ openshift.master.api_url }}        --public-master={{ openshift.master.public_api_url }}        --cert-dir={{ openshift_master_config_dir }} --overwrite=false diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml deleted file mode 100644 index 3b416005b..000000000 --- a/roles/openshift_master_cluster/tasks/configure_deferred.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- debug: msg="Deferring config" - -- name: Start and enable the master -  service: -    name: "{{ openshift.common.service_type }}-master" -    state: started -    enabled: yes diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml index 315947183..6303a6e46 100644 --- a/roles/openshift_master_cluster/tasks/main.yml +++ b/roles/openshift_master_cluster/tasks/main.yml @@ -4,10 +4,7 @@    register: pcs_status    changed_when: false    failed_when: false -  when: not 
openshift.master.cluster_defer_ha | bool +  when: openshift.master.cluster_method == "pacemaker"  - include: configure.yml    when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr" - -- include: configure_deferred.yml -  when: openshift.master.cluster_defer_ha | bool diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index c92008a77..9d40ae3b3 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - { role: openshift_common } +- { role: docker } diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index aea60b75c..c455a09f1 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -8,7 +8,7 @@    when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip  - fail:      msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." 
-  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online'] +  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']  - name: Set node facts    openshift_facts: @@ -45,6 +45,15 @@    register: sdn_install_result    when: openshift.common.use_openshift_sdn +- name: Install Node package +  yum: pkg={{ openshift.common.service_type }}-node state=present +  register: node_install_result + +- name: Install sdn-ovs package +  yum: pkg={{ openshift.common.service_type }}-sdn-ovs state=present +  register: sdn_install_result +  when: openshift.common.use_openshift_sdn +  # TODO: add the validate parameter when there is a validation command to run  - name: Create the Node config    template: diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 4931d127e..509cce2e0 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -22,6 +22,7 @@ networkConfig:  {% if openshift.common.use_openshift_sdn %}     networkPluginName: {{ openshift.common.sdn_network_plugin_name }}  {% endif %} +nodeIP: {{ openshift.common.ip }}  nodeName: {{ openshift.common.hostname | lower }}  podManifestConfig:  servingInfo: diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 12e98b7a1..aa696ae12 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -8,7 +8,7 @@  #       proper repos correctly.  
- assert: -    that: openshift_deployment_type in known_openshift_deployment_types +    that: openshift.common.deployment_type in known_openshift_deployment_types  - name: Ensure libselinux-python is installed    yum: diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index e4fda2813..8bee99f90 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -331,7 +331,22 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):      # Check if master or nodes already have something installed      installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)      if len(installed_hosts) > 0: -        # present a message listing already installed hosts +        click.echo('Installed environment detected.') +        # This check has to happen before we start removing hosts later in this method +        if not force: +            if not unattended: +                click.echo('By default the installer only adds new nodes to an installed environment.') +                response = click.prompt('Do you want to (1) only add additional nodes or ' \ +                                        '(2) perform a clean install?', type=int) +                # TODO: this should be reworked with error handling. +                # Click can certainly do this for us. +                # This should be refactored as soon as we add a 3rd option. +                if response == 1: +                    force = False +                if response == 2: +                    force = True + +        # present a message listing already installed hosts and remove hosts if needed          for host in installed_hosts:              if host.master:                  click.echo("{} is already an OpenShift Master".format(host)) @@ -339,32 +354,42 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):                  # new nodes.              
elif host.node:                  click.echo("{} is already an OpenShift Node".format(host)) -                hosts_to_run_on.remove(host) -        # for unattended either continue if they force install or exit if they didn't -        if unattended: -            if not force: -                click.echo('Installed environment detected and no additional nodes specified: ' \ -                           'aborting. If you want a fresh install, use --force') -                sys.exit(1) -        # for attended ask the user what to do +                # force is only used for reinstalls so we don't want to remove +                # anything. +                if not force: +                    hosts_to_run_on.remove(host) + +        # Handle the cases where we know about uninstalled systems +        new_hosts = set(hosts_to_run_on) - set(installed_hosts) +        if len(new_hosts) > 0: +            for new_host in new_hosts: +                click.echo("{} is currently uninstalled".format(new_host)) + +            # Fall through +            click.echo('Adding additional nodes...')          else: -            click.echo('Installed environment detected and no additional nodes specified. ') -            response = click.prompt('Do you want to (1) add more nodes or ' \ -                                    '(2) perform a clean install?', type=int) -            if response == 1: # add more nodes -                new_nodes = collect_new_nodes() - -                hosts_to_run_on.extend(new_nodes) -                oo_cfg.hosts.extend(new_nodes) - -                openshift_ansible.set_config(oo_cfg) -                callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts) -                if error: -                    click.echo("There was a problem fetching the required information. 
" \ -                               "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) +            if unattended: +                if not force: +                    click.echo('Installed environment detected and no additional nodes specified: ' \ +                               'aborting. If you want a fresh install, use ' \ +                               '`atomic-openshift-installer install --force`')                      sys.exit(1)              else: -                pass # proceeding as normal should do a clean install +                if not force: +                    new_nodes = collect_new_nodes() + +                    hosts_to_run_on.extend(new_nodes) +                    oo_cfg.hosts.extend(new_nodes) + +                    openshift_ansible.set_config(oo_cfg) +                    click.echo('Gathering information from hosts...') +                    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts) +                    if error: +                        click.echo("There was a problem fetching the required information. 
" \ +                                   "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) +                        sys.exit(1) +                else: +                    pass # proceeding as normal should do a clean install      return hosts_to_run_on, callback_facts diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index e33330102..0648df0fa 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -18,7 +18,6 @@ def set_config(cfg):  def generate_inventory(hosts):      global CFG -    installer_host = socket.gethostname()      base_inventory_path = CFG.settings['ansible_inventory_path']      base_inventory = open(base_inventory_path, 'w')      base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') @@ -32,25 +31,18 @@ def generate_inventory(hosts):          version=CFG.settings.get('variant_version', None))[1]      base_inventory.write('deployment_type={}\n'.format(ver.ansible_key)) -    if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ: -        base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:' -            '5001/openshift3/ose-${component}:${version}\n') -    if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ: -        base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', " +    if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ: +        base_inventory.write('cli_docker_additional_registries={}\n' +          .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES'])) +    if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ: +        base_inventory.write('cli_docker_insecure_registries={}\n' +          .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES'])) +    if 'OO_INSTALL_PUDDLE_REPO' in os.environ: +        # We have to double the '{' here for literals +        base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "              "'name': 'ose-devel', " -            "'baseurl': 
'http://buildvm-devops.usersys.redhat.com" -            "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', " -            "'enabled': 1, 'gpgcheck': 0}]\n") -    if 'OO_INSTALL_STAGE_REGISTRY' in os.environ: -        base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n') - -    if any(host.hostname == installer_host or host.public_hostname == installer_host -            for host in hosts): -        no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive']) -        if no_pwd_sudo == 1: -            print 'The atomic-openshift-installer requires sudo access without a password.' -            sys.exit(1) -        base_inventory.write("ansible_connection=local\n") +            "'baseurl': '{}', " +            "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))      base_inventory.write('\n[masters]\n')      masters = (host for host in hosts if host.master) @@ -72,6 +64,7 @@ def generate_inventory(hosts):  def write_host(host, inventory, scheduleable=True):      global CFG +      facts = ''      if host.ip:          facts += ' openshift_ip={}'.format(host.ip) @@ -85,6 +78,16 @@ def write_host(host, inventory, scheduleable=True):      # Technically only nodes will ever need this.      if not scheduleable:          facts += ' openshift_scheduleable=False' +    installer_host = socket.gethostname() +    if host.hostname == installer_host or host.public_hostname == installer_host: +        facts += ' ansible_connection=local' +        if os.geteuid() != 0: +            no_pwd_sudo = subprocess.call(['sudo', '-v', '-n']) +            if no_pwd_sudo == 1: +                print 'The atomic-openshift-installer requires sudo access without a password.' +                sys.exit(1) +            facts += ' ansible_become=true' +      inventory.write('{} {}\n'.format(host, facts)) | 
