25 files changed, 327 insertions, 70 deletions
@@ -6,7 +6,7 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
 - Install base dependencies:
   - Fedora:
   ```
-    yum install -y ansible rubygem-thor rubygem-parseconfig util-linux
+    dnf install -y ansible rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
   ```
   - OSX:
   ```
diff --git a/README_AWS.md b/README_AWS.md
index 6757e2892..d9e2ac5a9 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -105,7 +105,7 @@ Install Dependencies
 1. Ansible requires python-boto for aws operations:
 RHEL/CentOS/Fedora
 ```
-  yum install -y ansible python-boto
+  yum install -y ansible python-boto pyOpenSSL
 ```
 OSX:
 ```
diff --git a/README_vagrant.md b/README_vagrant.md
index f3e4cfc18..73fd31476 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -3,7 +3,6 @@ Requirements
 - ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient).
 - vagrant (tested against version 1.7.2)
 - vagrant-hostmanager plugin (tested against version 1.5.0)
-- vagrant-registration plugin (only required for enterprise deployment type)
 - vagrant-libvirt (tested against version 0.0.26)
   - Only required if using libvirt instead of virtualbox
@@ -44,7 +43,8 @@ The following environment variables can be overriden:
 - ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
 - ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
 
-For ``enterprise`` deployment types these env variables should also be specified:
+Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role:
+
 - ``rhel_subscription_user``: rhsm user
 - ``rhel_subscription_pass``: rhsm password
 - (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects
diff --git a/Vagrantfile b/Vagrantfile
index 33532cd63..362e1ff48 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -16,27 +16,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.hostmanager.include_offline = true
   config.ssh.insert_key = false
 
-  if deployment_type === 'enterprise'
-    unless Vagrant.has_plugin?('vagrant-registration')
-      raise 'vagrant-registration-plugin is required for enterprise deployment'
-    end
-    username = ENV['rhel_subscription_user']
-    password = ENV['rhel_subscription_pass']
-    unless username and password
-      raise 'rhel_subscription_user and rhel_subscription_pass are required'
-    end
-    config.registration.username = username
-    config.registration.password = password
-    # FIXME this is temporary until vagrant/ansible registration modules
-    # are capable of handling specific subscription pools
-    if not ENV['rhel_subscription_pool'].nil?
-      config.vm.provision "shell" do |s|
-        s.inline = "subscription-manager attach --pool=$1 || true"
-        s.args = "#{ENV['rhel_subscription_pool']}"
-      end
-    end
-  end
-
   config.vm.provider "virtualbox" do |vbox, override|
     override.vm.box = "centos/7"
     vbox.memory = 1024
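Since enterprise registration now happens through the `rhel_subscribe` role rather than inside the Vagrantfile, the only guard left is remembering to export the subscription variables before `vagrant up`. A hypothetical pre-flight check (not part of this change) that mirrors the validation the removed Ruby block used to perform:

```python
# Hypothetical helper, not part of this repo: fail fast if the subscription
# variables the rhel_subscribe role expects are missing from the environment.
import os
import sys

REQUIRED = ['rhel_subscription_user', 'rhel_subscription_pass']

def check_subscription_env():
    missing = [var for var in REQUIRED if not os.environ.get(var)]
    if missing:
        sys.stderr.write('Missing environment variables: %s\n' % ', '.join(missing))
        sys.exit(1)
    # rhel_subscription_pool remains optional, as in the removed Vagrantfile block.

if __name__ == '__main__':
    check_subscription_env()
```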
- config.vm.provision "shell" do |s| - s.inline = "subscription-manager attach --pool=$1 || true" - s.args = "#{ENV['rhel_subscription_pool']}" - end - end - end - config.vm.provider "virtualbox" do |vbox, override| override.vm.box = "centos/7" vbox.memory = 1024 diff --git a/bin/cluster b/bin/cluster index 59a6755d3..220f11d49 100755 --- a/bin/cluster +++ b/bin/cluster @@ -57,7 +57,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) env['num_masters'] = args.masters @@ -74,7 +74,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -86,7 +86,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -98,7 +98,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -110,7 +110,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -124,7 +124,7 @@ class Cluster(object): 'deployment_type': self.get_deployment_type(args), 'new_cluster_state': args.state} - playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) diff --git a/git/pylint.sh b/git/pylint.sh index 55e8b6131..f29c055dc 100755 --- a/git/pylint.sh +++ b/git/pylint.sh @@ -40,6 +40,8 @@ for PY_FILE in $PY_DIFF; do fi done +export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/ + if [ "${FILES_TO_TEST}" != "" ]; then echo "Testing files: ${FILES_TO_TEST}" exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST} diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check index ed4ab6d1b..b5459f312 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check +++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check @@ -83,7 +83,7 @@ def get(obj, *paths): # pylint: disable=too-many-arguments -def pretty_print_errors(namespace, kind, item_name, container_name, port_name, 
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
index ed4ab6d1b..b5459f312 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -83,7 +83,7 @@ def get(obj, *paths):
 
 
 # pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
+def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
     """
     Prints out results in human friendly way.
 
@@ -93,15 +93,16 @@ def pretty_print_errors(namespace, kind, item_name, container_name, port_name, v
       - `item_name`: Name of the resource
       - `container_name`: Name of the container. May be "" when kind=Service.
       - `port_name`: Name of the port
+      - `invalid_label`: The label of the invalid port. Port.name/targetPort
       - `valid`: True if the port is valid
     """
     if not valid:
         if len(container_name) > 0:
-            print('%s/%s -n %s (Container="%s" Port="%s")' % (
-                kind, item_name, namespace, container_name, port_name))
+            print('%s/%s -n %s (Container="%s" %s="%s")' % (
+                kind, item_name, namespace, container_name, invalid_label, port_name))
         else:
-            print('%s/%s -n %s (Port="%s")' % (
-                kind, item_name, namespace, port_name))
+            print('%s/%s -n %s (%s="%s")' % (
+                kind, item_name, namespace, invalid_label, port_name))
 
 
 def print_validation_header():
@@ -160,7 +161,7 @@ def main():
                     print_validation_header()
                 pretty_print_errors(
                     namespace, kind, item_name,
-                    container_name, port_name, valid)
+                    container_name, "Port.name", port_name, valid)
 
     # Services follow a different flow
     for item in list_items('services'):
@@ -176,7 +177,8 @@ def main():
                 first_error = False
                 print_validation_header()
             pretty_print_errors(
-                namespace, "services", item_name, "", port_name, valid)
+                namespace, "services", item_name, "",
+                "targetPort", port_name, valid)
 
     # If we had at least 1 error then exit with 1
     if not first_error:
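With the extra `invalid_label` argument, the same printer can say whether a deployment's `Port.name` or a service's `targetPort` failed validation. A standalone sketch of the updated call sites, using the function body from the hunk above and invented sample values:

```python
def pretty_print_errors(namespace, kind, item_name, container_name,
                        invalid_label, port_name, valid):
    # Copied from the diff above: prints one line per invalid port.
    if not valid:
        if len(container_name) > 0:
            print('%s/%s -n %s (Container="%s" %s="%s")' % (
                kind, item_name, namespace, container_name, invalid_label, port_name))
        else:
            print('%s/%s -n %s (%s="%s")' % (
                kind, item_name, namespace, invalid_label, port_name))

# Container-backed resources report the offending Port.name ...
pretty_print_errors('default', 'deploymentconfigs', 'router', 'router',
                    'Port.name', 'bad_port', False)
# ... while services report the offending targetPort.
pretty_print_errors('default', 'services', 'router', '',
                    'targetPort', 'bad_port', False)
```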
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 78797f8b8..eea147229 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -10,7 +10,7 @@
   roles:
   - openshift_facts
 
-- name: Evaluate etcd_hosts_to_backup
+- name: Evaluate additional groups for upgrade
   hosts: localhost
   tasks:
   - name: Evaluate etcd_hosts_to_backup
@@ -52,7 +52,7 @@
 
 
 - name: Verify upgrade can proceed
-  hosts: masters:nodes
+  hosts: oo_masters_to_config:oo_nodes_to_config
   tasks:
   - name: Clean yum cache
     command: yum clean all
@@ -78,6 +78,29 @@
         msg: Atomic OpenShift 3.1 packages not found
       when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
 
+  - set_fact:
+      pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+  hosts: localhost
+  vars:
+    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+  tasks:
+  - set_fact:
+      pre_upgrade_completed: "{{ hostvars
+                                 | oo_select_keys(pre_upgrade_hosts)
+                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+  - set_fact:
+      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+    when: pre_upgrade_failed | length > 0
+
+
 ###############################################################################
 # Backup etcd
@@ -90,6 +113,7 @@
   roles:
   - openshift_facts
   tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
   - openshift_facts:
       role: etcd
       local_facts: {}
@@ -134,11 +158,32 @@
       etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
 
+  - set_fact:
+      etcd_backup_complete: True
+
   - name: Display location of etcd backup
     debug:
       msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
 
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+  hosts: localhost
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                                 | oo_select_keys(groups.etcd_hosts_to_backup)
+                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0
+
+
+
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
@@ -152,7 +197,7 @@
     changed_when: False
 
 - name: Update deployment type
-  hosts: OSEv3
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   roles:
   - openshift_facts
   post_tasks:
@@ -161,6 +206,16 @@
       local_facts:
         deployment_type: "{{ deployment_type }}"
 
+- name: Update master facts
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
 - name: Upgrade master packages and configuration
   hosts: oo_masters_to_config
   vars:
@@ -290,6 +345,30 @@
     changed_when: False
 
 
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                   | oo_select_keys(groups.oo_masters_to_config)
+                                   | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
+
+
 ###############################################################################
 # Upgrade Nodes
 ###############################################################################
@@ -309,6 +388,26 @@
   - name: Ensure node service enabled
     service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
 
+  - set_fact:
+      node_update_complete: True
+
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+  hosts: localhost
+  tasks:
+  - set_fact:
+      node_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_nodes_to_config)
+                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+  - set_fact:
+      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+    when: node_update_failed | length > 0
+
 ###############################################################################
 # Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
@@ -356,6 +455,28 @@
     when: openshift_master_ha | bool
     run_once: true
 
+  - set_fact:
+      reconcile_complete: True
+
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                               | oo_select_keys(groups.oo_masters_to_config)
+                               | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0
+
+
+
 ###############################################################################
 # Post upgrade - Upgrade default router, default registry and examples
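Each upgrade phase now ends in the same gate: collect the hosts that set the `*_complete` fact, diff them against the expected group, and abort if anyone is missing. The `oo_select_keys`/`oo_collect` filters come from this repo's filter plugins; the Python below only paraphrases how they are used here, not their actual implementation:

```python
# Rough, assumed semantics of the gating pattern above (not the repo's code).
hostvars = {
    'master1': {'inventory_hostname': 'master1', 'master_update_complete': True},
    'master2': {'inventory_hostname': 'master2'},  # fact never set -> failed host
}
masters = ['master1', 'master2']

def oo_select_keys(data, keys):
    """Subset of hostvars limited to the given hosts."""
    return [data[key] for key in keys if key in data]

def oo_collect(results, attribute, filters=None):
    """Collect `attribute` from entries whose keys match `filters`."""
    filters = filters or {}
    return [entry[attribute] for entry in results
            if all(entry.get(k) == v for k, v in filters.items())]

completed = oo_collect(oo_select_keys(hostvars, masters),
                       'inventory_hostname', {'master_update_complete': True})
failed = [host for host in masters if host not in completed]
if failed:
    raise SystemExit("Upgrade cannot continue. The following masters did not "
                     "finish updating: %s" % ','.join(failed))
```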
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1b3fba3aa..b1da85d5d 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -51,9 +51,6 @@
         console_url: "{{ openshift_master_console_url | default(None) }}"
         console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
         public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-    - role: etcd
-      local_facts: {}
-      when: openshift.master.embedded_etcd | bool
   - name: Check status of external etcd certificatees
     stat:
       path: "{{ openshift.common.config_base }}/master/{{ item }}"
diff --git a/roles/copr_cli/README.md b/roles/copr_cli/README.md
new file mode 100644
index 000000000..edc68454e
--- /dev/null
+++ b/roles/copr_cli/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Copr CLI.
+
+https://apps.fedoraproject.org/packages/copr-cli/
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+    - hosts: servers
+      roles:
+         - role: copr_cli
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest
diff --git a/roles/copr_cli/defaults/main.yml b/roles/copr_cli/defaults/main.yml
new file mode 100644
index 000000000..3b8adf910
--- /dev/null
+++ b/roles/copr_cli/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for copr_cli
diff --git a/roles/copr_cli/handlers/main.yml b/roles/copr_cli/handlers/main.yml
new file mode 100644
index 000000000..c3dec5a4c
--- /dev/null
+++ b/roles/copr_cli/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for copr_cli
diff --git a/roles/copr_cli/meta/main.yml b/roles/copr_cli/meta/main.yml
new file mode 100644
index 000000000..f050281fd
--- /dev/null
+++ b/roles/copr_cli/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Manages Copr CLI
+  company: Red Hat
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - packaging
+dependencies: []
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
new file mode 100644
index 000000000..f7ef1c26e
--- /dev/null
+++ b/roles/copr_cli/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- yum:
+    name: copr-cli
+    state: present
diff --git a/roles/copr_cli/vars/main.yml b/roles/copr_cli/vars/main.yml
new file mode 100644
index 000000000..1522c94d9
--- /dev/null
+++ b/roles/copr_cli/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for copr_cli
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2e1075aca..091ba4e2b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -24,8 +24,23 @@ import StringIO
 import yaml
 from distutils.util import strtobool
 from distutils.version import LooseVersion
-from netaddr import IPNetwork
+import struct
+import socket
 
+def first_ip(network):
+    """ Return the first IPv4 address in network
+
+        Args:
+            network (str): network in CIDR format
+        Returns:
+            str: first IPv4 address
+    """
+    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
+    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+
+    (address, netmask) = network.split('/')
+    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
+    return itoa((atoi(address) & netmask_i) + 1)
 
 def hostname_valid(hostname):
     """ Test if specified hostname should be considered valid
@@ -525,7 +540,7 @@ def set_aggregate_facts(facts):
                      'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
        all_hostnames.update(svc_names)
        internal_hostnames.update(svc_names)
 
-        first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
+        first_svc_ip = first_ip(facts['master']['portal_net'])
         all_hostnames.add(first_svc_ip)
         internal_hostnames.add(first_svc_ip)
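The new `first_ip()` helper drops the `python-netaddr` dependency while keeping the behaviour `set_aggregate_facts()` relies on: the first address of `portal_net` becomes the internal service IP. A standalone check (the function body is copied from the hunk above; the sample networks are illustrative, 172.30.0.0/16 being the usual `portal_net` default):

```python
import socket
import struct

def first_ip(network):
    """ Return the first IPv4 address in `network` (CIDR notation). """
    # inet_aton also accepts a bare integer string, which is what makes
    # atoi(netmask) work for a prefix length such as "16".
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))

    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)

assert first_ip('172.30.0.0/16') == '172.30.0.1'
assert first_ip('10.0.0.0/8') == '10.0.0.1'
```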
""" - if 'etcd' in facts: - if 'master' in facts and facts['master']['embedded_etcd']: + if 'master' in facts and facts['master']['embedded_etcd']: + etcd_facts = facts['etcd'] if 'etcd' in facts else dict() + + if 'etcd_data_dir' not in etcd_facts: try: # Parse master config to find actual etcd data dir: master_cfg_path = os.path.join(facts['common']['config_base'], @@ -553,28 +570,37 @@ def set_etcd_facts_if_unset(facts): config = yaml.safe_load(master_cfg_f.read()) master_cfg_f.close() - facts['etcd']['etcd_data_dir'] = \ + etcd_facts['etcd_data_dir'] = \ config['etcdConfig']['storageDirectory'] + + facts['etcd'] = etcd_facts + # We don't want exceptions bubbling up here: # pylint: disable=broad-except except Exception: pass - else: - # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: - try: - # Add a fake section for parsing: - ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read() - ini_fp = StringIO.StringIO(ini_str) - config = ConfigParser.RawConfigParser() - config.readfp(ini_fp) - etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') - if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): - etcd_data_dir = etcd_data_dir[1:-1] - facts['etcd']['etcd_data_dir'] = etcd_data_dir - # We don't want exceptions bubbling up here: - # pylint: disable=broad-except - except Exception: - pass + else: + etcd_facts = facts['etcd'] if 'etcd' in facts else dict() + + # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: + try: + # Add a fake section for parsing: + ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read() + ini_fp = StringIO.StringIO(ini_str) + config = ConfigParser.RawConfigParser() + config.readfp(ini_fp) + etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') + if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): + etcd_data_dir = etcd_data_dir[1:-1] + + etcd_facts['etcd_data_dir'] = etcd_data_dir + facts['etcd'] = etcd_facts + + # We don't want exceptions bubbling up here: + # pylint: disable=broad-except + except Exception: + pass + return facts def set_deployment_facts_if_unset(facts): diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index a28aa7ba2..913f0dc78 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -6,10 +6,9 @@ - ansible_version | version_compare('1.9.0', 'ne') - ansible_version | version_compare('1.9.0.1', 'ne') -- name: Ensure python-netaddr and PyYaml are installed +- name: Ensure PyYaml is installed yum: pkg={{ item }} state=installed with_items: - - python-netaddr - PyYAML - name: Gather Cluster facts diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index 8fb2fc042..30c0920a1 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -6,19 +6,26 @@ - set_fact: rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}" rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}" + rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}" - fail: msg: "This role is only supported for Red Hat hosts" when: ansible_distribution != 'RedHat' - fail: - msg: Either rsub_user or the rhel_subscription_user env variable are required for this role. + msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role. 
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 8fb2fc042..30c0920a1 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -6,19 +6,26 @@
 - set_fact:
     rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
     rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
+    rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
 
 - fail:
     msg: "This role is only supported for Red Hat hosts"
   when: ansible_distribution != 'RedHat'
 
 - fail:
-    msg: Either rsub_user or the rhel_subscription_user env variable are required for this role.
+    msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
   when: rhel_subscription_user is not defined
 
 - fail:
-    msg: Either rsub_pass or the rhel_subscription_pass env variable are required for this role.
+    msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
   when: rhel_subscription_pass is not defined
 
+- name: Satellite preparation
+  command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+  args:
+    creates: /etc/rhsm/ca/katello-server-ca.pem
+  when: rhel_subscription_server is defined and rhel_subscription_server
+
 - name: RedHat subscriptions
   redhat_subscription:
     username: "{{ rhel_subscription_user }}"
diff --git a/roles/tito/README.md b/roles/tito/README.md
new file mode 100644
index 000000000..c4e2856dc
--- /dev/null
+++ b/roles/tito/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Tito.
+
+https://github.com/dgoodwin/tito
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+    - hosts: servers
+      roles:
+         - role: tito
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest
diff --git a/roles/tito/defaults/main.yml b/roles/tito/defaults/main.yml
new file mode 100644
index 000000000..dd7cd269e
--- /dev/null
+++ b/roles/tito/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for tito
diff --git a/roles/tito/handlers/main.yml b/roles/tito/handlers/main.yml
new file mode 100644
index 000000000..e9ce609d5
--- /dev/null
+++ b/roles/tito/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for tito
diff --git a/roles/tito/meta/main.yml b/roles/tito/meta/main.yml
new file mode 100644
index 000000000..fb121c08e
--- /dev/null
+++ b/roles/tito/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Manages Tito
+  company: Red Hat
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - packaging
+dependencies: []
diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml
new file mode 100644
index 000000000..f7b4ef363
--- /dev/null
+++ b/roles/tito/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- yum:
+    name: tito
+    state: present
diff --git a/roles/tito/vars/main.yml b/roles/tito/vars/main.yml
new file mode 100644
index 000000000..8a1aafc41
--- /dev/null
+++ b/roles/tito/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for tito
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index fdd0c1168..372f27bda 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -82,7 +82,7 @@ def write_host(host, inventory, scheduleable=True):
     if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
         facts += ' ansible_connection=local'
         if os.geteuid() != 0:
-            no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo openshift'])
+            no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
             if no_pwd_sudo == 1:
                 print 'The atomic-openshift-installer requires sudo access without a password.'
                 sys.exit(1)
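The installer fix is about argv splitting: with `['sudo', '-n', 'echo openshift']` the whole string "echo openshift" is passed to sudo as a single command name, so the check fails even when passwordless sudo is configured. Splitting the command and its argument makes the probe meaningful. A minimal illustration (requires sudo to be present):

```python
import subprocess

# Corrected form from the diff: command and argument as separate argv entries,
# so `sudo -n` actually runs `echo openshift` and its exit status reflects
# whether passwordless sudo is available.
no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
if no_pwd_sudo == 1:
    print('passwordless sudo is not available')
```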