From 236b30d9b5816c18325c698081bb102afaae8d17 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Mon, 28 Sep 2015 08:54:40 +0900 Subject: Remove images options in oadm command --- README_OSE.md | 3 --- inventory/byo/hosts.example | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/README_OSE.md b/README_OSE.md index 79ad07044..524950d51 100644 --- a/README_OSE.md +++ b/README_OSE.md @@ -79,9 +79,6 @@ ansible_ssh_user=root # To deploy origin, change deployment_type to origin deployment_type=enterprise -# Pre-release registry URL -oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version} - # Pre-release additional repo openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index df1bae49f..c2c5090f9 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -24,7 +24,7 @@ deployment_type=atomic-enterprise #use_cluster_metrics=true # Pre-release registry URL -#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version} +#oreg_url=example.com/openshift3/ose-${component}:${version} # Pre-release Dev puddle repo #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] -- cgit v1.2.3 From efba9eb00b9bb8451cb73744c4a21f5c583e3d58 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Mon, 26 Oct 2015 14:55:43 -0300 Subject: Add subcommands to CLI. --- utils/setup.py | 2 +- utils/src/ooinstall/cli_installer.py | 108 ++++++++++++++++++---------- utils/src/ooinstall/install_transactions.py | 1 + utils/test/cli_installer_tests.py | 30 ++++---- 4 files changed, 91 insertions(+), 50 deletions(-) diff --git a/utils/setup.py b/utils/setup.py index 6e2fdd9c0..eac1b4b2e 100644 --- a/utils/setup.py +++ b/utils/setup.py @@ -79,7 +79,7 @@ setup( # pip to create the appropriate form of executable for the target platform. 
entry_points={ 'console_scripts': [ - 'oo-install=ooinstall.cli_installer:main', + 'oo-install=ooinstall.cli_installer:cli', ], }, ) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index c2ae00bd1..32b968794 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -367,46 +367,59 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): return hosts_to_run_on, callback_facts -@click.command() + +@click.group() +@click.pass_context +@click.option('--unattended', '-u', is_flag=True, default=False) @click.option('--configuration', '-c', - type=click.Path(file_okay=True, - dir_okay=False, - writable=True, - readable=True), - default=None) + type=click.Path(file_okay=True, + dir_okay=False, + writable=True, + readable=True), + default=None) @click.option('--ansible-playbook-directory', - '-a', - type=click.Path(exists=True, - file_okay=False, - dir_okay=True, - writable=True, - readable=True), - # callback=validate_ansible_dir, - envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') + '-a', + type=click.Path(exists=True, + file_okay=False, + dir_okay=True, + writable=True, + readable=True), + # callback=validate_ansible_dir, + envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') @click.option('--ansible-config', - type=click.Path(file_okay=True, - dir_okay=False, - writable=True, - readable=True), - default=None) + type=click.Path(file_okay=True, + dir_okay=False, + writable=True, + readable=True), + default=None) @click.option('--ansible-log-path', - type=click.Path(file_okay=True, - dir_okay=False, - writable=True, - readable=True), - default="/tmp/ansible.log") -@click.option('--unattended', '-u', is_flag=True, default=False) -@click.option('--force', '-f', is_flag=True, default=False) + type=click.Path(file_okay=True, + dir_okay=False, + writable=True, + readable=True), + default="/tmp/ansible.log") #pylint: disable=too-many-arguments # Main CLI entrypoint, not much we can do about too many arguments. -def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_path, unattended, force): - oo_cfg = OOConfig(configuration) +def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path): + """ + The main click CLI module. Responsible for handling most common CLI options, + assigning any defaults and adding to the context for the sub-commands. 
+ """ + ctx.obj = {} + ctx.obj['unattended'] = unattended + ctx.obj['configuration'] = configuration + ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory + ctx.obj['ansible_config'] = ansible_config + ctx.obj['ansible_log_path'] = ansible_log_path + oo_cfg = OOConfig(ctx.obj['configuration']) + + ansible_playbook_directory = ctx.obj['ansible_playbook_directory'] if not ansible_playbook_directory: ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '') - if ansible_config: - oo_cfg.settings['ansible_config'] = ansible_config + if ctx.obj['ansible_config']: + oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config'] elif os.path.exists(DEFAULT_ANSIBLE_CONFIG): # If we're installed by RPM this file should exist and we can use it as our default: oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG @@ -415,10 +428,29 @@ def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_ oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory oo_cfg.ansible_playbook_directory = ansible_playbook_directory - oo_cfg.settings['ansible_log_path'] = ansible_log_path + oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path'] + + ctx.obj['oo_cfg'] = oo_cfg + + +@click.command() +@click.pass_context +def uninstall(ctx): + #oo_cfg = ctx.obj['oo_cfg'] + click.echo("Running uninstall command.") + if not ctx.obj['unattended']: + # Prompt interactively to confirm: + pass + + +@click.command() +@click.option('--force', '-f', is_flag=True, default=False) +@click.pass_context +def install(ctx, force): + oo_cfg = ctx.obj['oo_cfg'] install_transactions.set_config(oo_cfg) - if unattended: + if ctx.obj['unattended']: error_if_missing_info(oo_cfg) else: oo_cfg = get_missing_info_from_user(oo_cfg) @@ -430,8 +462,7 @@ def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) - hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force) - + hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force) click.echo('Writing config to: %s' % oo_cfg.config_path) @@ -449,7 +480,7 @@ def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_ message = """ If changes are needed to the values recorded by the installer please update {}. 
""".format(oo_cfg.config_path) - if not unattended: + if not ctx.obj['unattended']: confirm_continue(message) error = install_transactions.run_main_playbook(oo_cfg.hosts, @@ -475,5 +506,10 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html click.echo(message) click.pause() +cli.add_command(install) +cli.add_command(uninstall) + if __name__ == '__main__': - main() + # This is expected behaviour for context passing with click library: + # pylint: disable=unexpected-keyword-arg + cli(obj={}) diff --git a/utils/src/ooinstall/install_transactions.py b/utils/src/ooinstall/install_transactions.py index cef6662d7..de53dc3c0 100644 --- a/utils/src/ooinstall/install_transactions.py +++ b/utils/src/ooinstall/install_transactions.py @@ -131,3 +131,4 @@ def run_ansible(playbook, inventory, env_vars): '--inventory-file={}'.format(inventory), playbook], env=env_vars) + diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 076fe5dc9..1f74b5b15 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -76,7 +76,7 @@ class OOCliFixture(OOInstallFixture): self.cli_args = ["-a", self.work_dir] def run_cli(self): - return self.runner.invoke(cli.main, self.cli_args) + return self.runner.invoke(cli.cli, self.cli_args) def assert_result(self, result, exit_code): if result.exception is not None or result.exit_code != exit_code: @@ -111,8 +111,8 @@ class UnattendedCliTests(OOCliFixture): config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') - self.cli_args.extend(["-c", config_file]) - result = self.runner.invoke(cli.main, self.cli_args) + self.cli_args.extend(["-c", config_file, "install"]) + result = self.runner.invoke(cli.cli, self.cli_args) self.assert_result(result, 0) load_facts_args = load_facts_mock.call_args[0] @@ -146,8 +146,8 @@ class UnattendedCliTests(OOCliFixture): config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), merged_config) - self.cli_args.extend(["-c", config_file]) - result = self.runner.invoke(cli.main, self.cli_args) + self.cli_args.extend(["-c", config_file, "install"]) + result = self.runner.invoke(cli.cli, self.cli_args) self.assert_result(result, 0) # Check the inventory file looks as we would expect: @@ -182,8 +182,8 @@ class UnattendedCliTests(OOCliFixture): config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') - self.cli_args.extend(["-c", config_file]) - result = self.runner.invoke(cli.main, self.cli_args) + self.cli_args.extend(["-c", config_file, "install"]) + result = self.runner.invoke(cli.cli, self.cli_args) self.assert_result(result, 0) written_config = self._read_yaml(config_file) @@ -211,8 +211,8 @@ class UnattendedCliTests(OOCliFixture): config_file = self.write_config(os.path.join(self.work_dir, 'ooinstall.conf'), config) - self.cli_args.extend(["-c", config_file]) - result = self.runner.invoke(cli.main, self.cli_args) + self.cli_args.extend(["-c", config_file, "install"]) + result = self.runner.invoke(cli.cli, self.cli_args) self.assert_result(result, 0) written_config = self._read_yaml(config_file) @@ -282,7 +282,8 @@ class UnattendedCliTests(OOCliFixture): self.cli_args.extend(["-c", config_file]) if ansible_config_cli: self.cli_args.extend(["--ansible-config", ansible_config_cli]) - result = self.runner.invoke(cli.main, self.cli_args) + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, self.cli_args) 
self.assert_result(result, 0) # Test the env vars for facts playbook: @@ -401,7 +402,8 @@ class AttendedCliTests(OOCliFixture): ssh_user='root', variant_num=1, confirm_facts='y') - result = self.runner.invoke(cli.main, self.cli_args, + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) @@ -432,7 +434,8 @@ class AttendedCliTests(OOCliFixture): ssh_user='root', variant_num=1, confirm_facts='y') - result = self.runner.invoke(cli.main, + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) @@ -454,7 +457,8 @@ class AttendedCliTests(OOCliFixture): SAMPLE_CONFIG % 'openshift-enterprise') cli_input = self._build_input(confirm_facts='y') self.cli_args.extend(["-c", config_file]) - result = self.runner.invoke(cli.main, + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input) self.assert_result(result, 0) -- cgit v1.2.3 From 201fd2f37d43419513b448cd99dd202a96bb8542 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Mon, 26 Oct 2015 15:37:30 -0300 Subject: Add uninstall subcommand. --- utils/src/ooinstall/cli_installer.py | 21 +++++++++++++++++---- utils/src/ooinstall/install_transactions.py | 11 +++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 32b968794..91d501952 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -431,16 +431,30 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path'] ctx.obj['oo_cfg'] = oo_cfg + install_transactions.set_config(oo_cfg) @click.command() @click.pass_context def uninstall(ctx): - #oo_cfg = ctx.obj['oo_cfg'] - click.echo("Running uninstall command.") + oo_cfg = ctx.obj['oo_cfg'] + + if len(oo_cfg.hosts) == 0: + click.echo("No hosts defined in: %s" % oo_cfg['configuration']) + sys.exit(1) + + click.echo("OpenShift will be uninstalled from the following hosts:\n") if not ctx.obj['unattended']: # Prompt interactively to confirm: - pass + for host in oo_cfg.hosts: + click.echo(" * %s" % host.name) + proceed = click.confirm("\nDo you wish to proceed?") + if not proceed: + click.echo("Uninstall cancelled.") + sys.exit(0) + + install_transactions.run_uninstall_playbook() + @click.command() @@ -448,7 +462,6 @@ def uninstall(ctx): @click.pass_context def install(ctx, force): oo_cfg = ctx.obj['oo_cfg'] - install_transactions.set_config(oo_cfg) if ctx.obj['unattended']: error_if_missing_info(oo_cfg) diff --git a/utils/src/ooinstall/install_transactions.py b/utils/src/ooinstall/install_transactions.py index de53dc3c0..3306271c8 100644 --- a/utils/src/ooinstall/install_transactions.py +++ b/utils/src/ooinstall/install_transactions.py @@ -126,9 +126,20 @@ def run_main_playbook(hosts, hosts_to_run_on): facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] return run_ansible(main_playbook_path, inventory_file, facts_env) + def run_ansible(playbook, inventory, env_vars): return subprocess.call(['ansible-playbook', '--inventory-file={}'.format(inventory), playbook], env=env_vars) +def run_uninstall_playbook(): + playbook = os.path.join(CFG.settings['ansible_playbook_directory'], + 'playbooks/adhoc/uninstall.yml') + inventory_file = generate_inventory(CFG.hosts) + facts_env = os.environ.copy() + if 'ansible_log_path' in CFG.settings: 
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] + if 'ansible_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + return run_ansible(playbook, inventory_file, facts_env) -- cgit v1.2.3 From 93b2a9bd2f4a3b93be3cb51ecf034000a652496e Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Tue, 27 Oct 2015 13:26:57 -0300 Subject: Use default playbooks if available. Because we're now installing from an rpm, we have a good idea where to find the default playbooks and shouldn't require the user to tell us. --- openshift-ansible.spec | 1 + utils/src/ooinstall/cli_installer.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/openshift-ansible.spec b/openshift-ansible.spec index a24ca0c8a..3b69da825 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -208,6 +208,7 @@ BuildArch: noarch %package -n atomic-openshift-utils Summary: Atomic OpenShift Utilities BuildRequires: python-setuptools +Requires: openshift-ansible-playbooks Requires: ansible Requires: python-click Requires: python-setuptools diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 91d501952..03f86a166 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -12,6 +12,7 @@ from ooinstall.oo_config import Host from ooinstall.variants import find_variant, get_variant_version_combos DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-util/ansible.cfg' +DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/' def validate_ansible_dir(path): if not path: @@ -382,7 +383,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): type=click.Path(exists=True, file_okay=False, dir_okay=True, - writable=True, + writable=False, readable=True), # callback=validate_ansible_dir, envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') @@ -408,15 +409,21 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf ctx.obj = {} ctx.obj['unattended'] = unattended ctx.obj['configuration'] = configuration - ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory ctx.obj['ansible_config'] = ansible_config ctx.obj['ansible_log_path'] = ansible_log_path oo_cfg = OOConfig(ctx.obj['configuration']) - ansible_playbook_directory = ctx.obj['ansible_playbook_directory'] + # If no playbook dir on the CLI, check the config: if not ansible_playbook_directory: ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '') + # If still no playbook dir, check for the default location: + if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR): + ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR + validate_ansible_dir(ansible_playbook_directory) + oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory + oo_cfg.ansible_playbook_directory = ansible_playbook_directory + ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory if ctx.obj['ansible_config']: oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config'] @@ -424,10 +431,6 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf # If we're installed by RPM this file should exist and we can use it as our default: oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG - validate_ansible_dir(ansible_playbook_directory) - oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory - oo_cfg.ansible_playbook_directory = ansible_playbook_directory - oo_cfg.settings['ansible_log_path'] = 
ctx.obj['ansible_log_path'] ctx.obj['oo_cfg'] = oo_cfg -- cgit v1.2.3 From a9143d5d0e7245e12e0597fa5105fdcbb85e0846 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 29 Oct 2015 23:42:31 -0400 Subject: Disable OpenShift features if installing Atomic Enterprise --- roles/openshift_facts/library/openshift_facts.py | 16 ++++++++++++++-- roles/openshift_master/tasks/main.yml | 1 + roles/openshift_master/templates/master.yaml.v1.j2 | 3 +++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 795f38341..748cc59cf 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -508,8 +508,9 @@ def set_deployment_facts_if_unset(facts): dict: the facts dict updated with the generated deployment_type facts """ - # Perhaps re-factor this as a map? - # pylint: disable=too-many-branches + # disabled to avoid breaking up facts related to deployment type into + # multiple methods for now. + # pylint: disable=too-many-statements, too-many-branches if 'common' in facts: deployment_type = facts['common']['deployment_type'] if 'service_type' not in facts['common']: @@ -550,6 +551,17 @@ def set_deployment_facts_if_unset(facts): registry_url = 'aep3/aep-${component}:${version}' facts[role]['registry_url'] = registry_url + if 'master' in facts: + deployment_type = facts['common']['deployment_type'] + openshift_features = ['Builder', 'S2IBuilder', 'WebConsole'] + if 'disabled_features' in facts['master']: + if deployment_type == 'atomic-enterprise': + curr_disabled_features = set(facts['master']['disabled_features']) + facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features)) + else: + if deployment_type == 'atomic-enterprise': + facts['master']['disabled_features'] = openshift_features + if 'node' in facts: deployment_type = facts['common']['deployment_type'] if 'storage_plugin_deps' not in facts['node']: diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 94eb73346..3a886935f 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -62,6 +62,7 @@ api_server_args: "{{ osm_api_server_args | default(None) }}" controller_args: "{{ osm_controller_args | default(None) }}" infra_nodes: "{{ num_infra | default(None) }}" + disabled_features: "{{ osm_disabled_features | default(None) }}" - name: Install Master package yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 527c5231a..73a0bc6cc 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -22,6 +22,9 @@ corsAllowedOrigins: {% for custom_origin in openshift.master.custom_cors_origins | default("") %} - {{ custom_origin }} {% endfor %} +{% if 'disabled_features' in openshift.master %} +disabledFeatures: {{ openshift.master.disabled_features | to_json }} +{% endif %} {% if openshift.master.embedded_dns | bool %} dnsConfig: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }} -- cgit v1.2.3 From 79297c1c1f98e67b531c5235798bdd508fd60624 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Thu, 29 Oct 2015 16:57:34 -0400 Subject: Pulling latest gce.py module from ansible --- inventory/gce/hosts/gce.py | 32 
+++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index 6ed12e011..99746cdbf 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -66,12 +66,22 @@ Examples: $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance + $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson Version: 0.0.1 ''' +__requires__ = ['pycrypto>=2.6'] +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. + pass + USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_VERSION="v1" @@ -102,9 +112,9 @@ class GceInventory(object): # Just display data for specific host if self.args.host: - print self.json_format_dict(self.node_to_dict( + print(self.json_format_dict(self.node_to_dict( self.get_instance(self.args.host)), - pretty=self.args.pretty) + pretty=self.args.pretty)) sys.exit(0) # Otherwise, assume user wants all instances grouped @@ -120,7 +130,6 @@ class GceInventory(object): os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able @@ -174,7 +183,6 @@ class GceInventory(object): args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) - # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( @@ -213,8 +221,7 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - # Hosts don't always have a public IP name - #'gce_public_ip': inst.public_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -222,15 +229,15 @@ class GceInventory(object): 'gce_tags': inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, - # Hosts don't always have a public IP name - #'ansible_ssh_host': inst.public_ips[0] + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] } def get_instance(self, instance_name): '''Gets details about a specific instance ''' try: return self.driver.ex_get_node(instance_name) - except Exception, e: + except Exception as e: return None def group_instances(self): @@ -250,7 +257,10 @@ class GceInventory(object): tags = node.extra['tags'] for t in tags: - tag = 'tag_%s' % t + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] -- cgit v1.2.3 From 9c5cec32545510875de3ca2e149e1aff2909102e Mon Sep 17 00:00:00 2001 From: error10 Date: Fri, 30 Oct 2015 20:41:05 -0400 Subject: Don't require tty to run sudo Set Defaults !requiretty so that ansible can run sudo without a terminal. 
Fixes #773 --- playbooks/libvirt/openshift-cluster/templates/user-data | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data index eacae7c7e..cf57e6489 100644 --- a/playbooks/libvirt/openshift-cluster/templates/user-data +++ b/playbooks/libvirt/openshift-cluster/templates/user-data @@ -21,3 +21,4 @@ ssh_authorized_keys: runcmd: - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart + - echo "Defaults !requiretty" >> /etc/sudoers.d/00-no-requiretty -- cgit v1.2.3 -- cgit v1.2.3 From d75f1b5879dce664f86eea25dff66417618e379e Mon Sep 17 00:00:00 2001 From: error10 Date: Sun, 1 Nov 2015 14:49:09 -0500 Subject: Disable requiretty for only the openshift user Use write_files to disable requiretty for the openshift user as suggested by @detiberm, fixes #773 --- playbooks/libvirt/openshift-cluster/templates/user-data | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data index cf57e6489..e0c966e45 100644 --- a/playbooks/libvirt/openshift-cluster/templates/user-data +++ b/playbooks/libvirt/openshift-cluster/templates/user-data @@ -19,6 +19,11 @@ system_info: ssh_authorized_keys: - {{ lookup('file', '~/.ssh/id_rsa.pub') }} +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty + runcmd: - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart - - echo "Defaults !requiretty" >> /etc/sudoers.d/00-no-requiretty -- cgit v1.2.3 From e2c30afd5af2fa952a3ef576f50c8dd304dfdd7d Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Fri, 30 Oct 2015 10:51:40 -0300 Subject: Add utils subpackage missing dep on openshift-ansible-roles. --- openshift-ansible.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 3b69da825..d3be338a7 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -209,6 +209,7 @@ BuildArch: noarch Summary: Atomic OpenShift Utilities BuildRequires: python-setuptools Requires: openshift-ansible-playbooks +Requires: openshift-ansible-roles Requires: ansible Requires: python-click Requires: python-setuptools -- cgit v1.2.3 From 3b58662809c5aab0974043006ad6b320dadfea14 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Mon, 2 Nov 2015 10:54:20 -0500 Subject: atomic-openshift-installer: Text improvements Improvements to some of the installer text based on suggestions from the doc team. --- utils/src/ooinstall/cli_installer.py | 2 +- utils/src/ooinstall/variants.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index c2ae00bd1..fc758a6ac 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -94,7 +94,7 @@ The OpenShift Node provides the runtime environments for containers. It will host the required services to be managed by the Master. 
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master -http://docs.openshift.com/enterprise/3.0/architecture/infrastructure_components/kubernetes_infrastructure.html#node +http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node """ click.echo(message) diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index ed98429fc..05281d654 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -38,7 +38,7 @@ OSE = Variant('openshift-enterprise', 'OpenShift Enterprise', ] ) -AEP = Variant('atomic-enterprise', 'Atomic OpenShift Enterprise', +AEP = Variant('atomic-enterprise', 'Atomic Enterprise Platform', [ Version('3.1', 'atomic-enterprise') ] -- cgit v1.2.3 From 2855bc043f8970136b72774b15c5306dccdaa040 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Mon, 2 Nov 2015 12:06:34 -0400 Subject: Rename install_transactions module to openshift_ansible. --- utils/src/ooinstall/cli_installer.py | 14 +-- utils/src/ooinstall/install_transactions.py | 145 ---------------------------- utils/src/ooinstall/openshift_ansible.py | 145 ++++++++++++++++++++++++++++ utils/test/cli_installer_tests.py | 40 ++++---- 4 files changed, 172 insertions(+), 172 deletions(-) delete mode 100644 utils/src/ooinstall/install_transactions.py create mode 100644 utils/src/ooinstall/openshift_ansible.py diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 03f86a166..bfaaea6aa 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -6,7 +6,7 @@ import click import os import re import sys -from ooinstall import install_transactions +from ooinstall import openshift_ansible from ooinstall import OOConfig from ooinstall.oo_config import Host from ooinstall.variants import find_variant, get_variant_version_combos @@ -357,8 +357,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): hosts_to_run_on.extend(new_nodes) oo_cfg.hosts.extend(new_nodes) - install_transactions.set_config(oo_cfg) - callback_facts, error = install_transactions.default_facts(oo_cfg.hosts) + openshift_ansible.set_config(oo_cfg) + callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts) if error: click.echo("There was a problem fetching the required information. " \ "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) @@ -434,7 +434,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path'] ctx.obj['oo_cfg'] = oo_cfg - install_transactions.set_config(oo_cfg) + openshift_ansible.set_config(oo_cfg) @click.command() @@ -456,7 +456,7 @@ def uninstall(ctx): click.echo("Uninstall cancelled.") sys.exit(0) - install_transactions.run_uninstall_playbook() + openshift_ansible.run_uninstall_playbook() @@ -472,7 +472,7 @@ def install(ctx, force): oo_cfg = get_missing_info_from_user(oo_cfg) click.echo('Gathering information from hosts...') - callback_facts, error = install_transactions.default_facts(oo_cfg.hosts) + callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts) if error: click.echo("There was a problem fetching the required information. " \ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) @@ -499,7 +499,7 @@ If changes are needed to the values recorded by the installer please update {}. 
if not ctx.obj['unattended']: confirm_continue(message) - error = install_transactions.run_main_playbook(oo_cfg.hosts, + error = openshift_ansible.run_main_playbook(oo_cfg.hosts, hosts_to_run_on) if error: # The bootstrap script will print out the log location. diff --git a/utils/src/ooinstall/install_transactions.py b/utils/src/ooinstall/install_transactions.py deleted file mode 100644 index 3306271c8..000000000 --- a/utils/src/ooinstall/install_transactions.py +++ /dev/null @@ -1,145 +0,0 @@ -# TODO: Temporarily disabled due to importing old code into openshift-ansible -# repo. We will work on these over time. -# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned - -import subprocess -import os -import yaml -from ooinstall.variants import find_variant - -CFG = None - -def set_config(cfg): - global CFG - CFG = cfg - -def generate_inventory(hosts): - print hosts - global CFG - base_inventory_path = CFG.settings['ansible_inventory_path'] - base_inventory = open(base_inventory_path, 'w') - base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') - base_inventory.write('\n[OSEv3:vars]\n') - base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user'])) - if CFG.settings['ansible_ssh_user'] != 'root': - base_inventory.write('ansible_sudo=true\n') - - # Find the correct deployment type for ansible: - ver = find_variant(CFG.settings['variant'], - version=CFG.settings.get('variant_version', None))[1] - base_inventory.write('deployment_type={}\n'.format(ver.ansible_key)) - - if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ: - base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:' - '5001/openshift3/ose-${component}:${version}\n') - if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ: - base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', " - "'name': 'ose-devel', " - "'baseurl': 'http://buildvm-devops.usersys.redhat.com" - "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', " - "'enabled': 1, 'gpgcheck': 0}]\n") - if 'OO_INSTALL_STAGE_REGISTRY' in os.environ: - base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n') - - base_inventory.write('\n[masters]\n') - masters = (host for host in hosts if host.master) - for master in masters: - write_host(master, base_inventory) - base_inventory.write('\n[nodes]\n') - nodes = (host for host in hosts if host.node) - for node in nodes: - # TODO: Until the Master can run the SDN itself we have to configure the Masters - # as Nodes too. - scheduleable = True - # If there's only one Node and it's also a Master we want it to be scheduleable: - if node in masters and len(masters) != 1: - scheduleable = False - write_host(node, base_inventory, scheduleable) - base_inventory.close() - return base_inventory_path - - -def write_host(host, inventory, scheduleable=True): - global CFG - facts = '' - if host.ip: - facts += ' openshift_ip={}'.format(host.ip) - if host.public_ip: - facts += ' openshift_public_ip={}'.format(host.public_ip) - if host.hostname: - facts += ' openshift_hostname={}'.format(host.hostname) - if host.public_hostname: - facts += ' openshift_public_hostname={}'.format(host.public_hostname) - # TODO: For not write_host is handles both master and nodes. - # Technically only nodes will ever need this. 
- if not scheduleable: - facts += ' openshift_scheduleable=False' - inventory.write('{} {}\n'.format(host, facts)) - - -def load_system_facts(inventory_file, os_facts_path, env_vars): - """ - Retrieves system facts from the remote systems. - """ - FNULL = open(os.devnull, 'w') - status = subprocess.call(['ansible-playbook', - '--inventory-file={}'.format(inventory_file), - os_facts_path], - env=env_vars, - stdout=FNULL) - if not status == 0: - return [], 1 - callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r') - callback_facts = yaml.load(callback_facts_file) - callback_facts_file.close() - return callback_facts, 0 - - -def default_facts(hosts): - global CFG - inventory_file = generate_inventory(hosts) - os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory) - - facts_env = os.environ.copy() - facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml'] - facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory'] - if 'ansible_log_path' in CFG.settings: - facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path'] - if 'ansible_config' in CFG.settings: - facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] - return load_system_facts(inventory_file, os_facts_path, facts_env) - - -def run_main_playbook(hosts, hosts_to_run_on): - global CFG - inventory_file = generate_inventory(hosts) - if len(hosts_to_run_on) != len(hosts): - main_playbook_path = os.path.join(CFG.ansible_playbook_directory, - 'playbooks/common/openshift-cluster/scaleup.yml') - else: - main_playbook_path = os.path.join(CFG.ansible_playbook_directory, - 'playbooks/byo/config.yml') - facts_env = os.environ.copy() - if 'ansible_log_path' in CFG.settings: - facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] - if 'ansible_config' in CFG.settings: - facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] - return run_ansible(main_playbook_path, inventory_file, facts_env) - - -def run_ansible(playbook, inventory, env_vars): - return subprocess.call(['ansible-playbook', - '--inventory-file={}'.format(inventory), - playbook], - env=env_vars) - -def run_uninstall_playbook(): - playbook = os.path.join(CFG.settings['ansible_playbook_directory'], - 'playbooks/adhoc/uninstall.yml') - inventory_file = generate_inventory(CFG.hosts) - facts_env = os.environ.copy() - if 'ansible_log_path' in CFG.settings: - facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] - if 'ansible_config' in CFG.settings: - facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] - return run_ansible(playbook, inventory_file, facts_env) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py new file mode 100644 index 000000000..3306271c8 --- /dev/null +++ b/utils/src/ooinstall/openshift_ansible.py @@ -0,0 +1,145 @@ +# TODO: Temporarily disabled due to importing old code into openshift-ansible +# repo. We will work on these over time. 
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned + +import subprocess +import os +import yaml +from ooinstall.variants import find_variant + +CFG = None + +def set_config(cfg): + global CFG + CFG = cfg + +def generate_inventory(hosts): + print hosts + global CFG + base_inventory_path = CFG.settings['ansible_inventory_path'] + base_inventory = open(base_inventory_path, 'w') + base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') + base_inventory.write('\n[OSEv3:vars]\n') + base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user'])) + if CFG.settings['ansible_ssh_user'] != 'root': + base_inventory.write('ansible_sudo=true\n') + + # Find the correct deployment type for ansible: + ver = find_variant(CFG.settings['variant'], + version=CFG.settings.get('variant_version', None))[1] + base_inventory.write('deployment_type={}\n'.format(ver.ansible_key)) + + if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ: + base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:' + '5001/openshift3/ose-${component}:${version}\n') + if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ: + base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', " + "'name': 'ose-devel', " + "'baseurl': 'http://buildvm-devops.usersys.redhat.com" + "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', " + "'enabled': 1, 'gpgcheck': 0}]\n") + if 'OO_INSTALL_STAGE_REGISTRY' in os.environ: + base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n') + + base_inventory.write('\n[masters]\n') + masters = (host for host in hosts if host.master) + for master in masters: + write_host(master, base_inventory) + base_inventory.write('\n[nodes]\n') + nodes = (host for host in hosts if host.node) + for node in nodes: + # TODO: Until the Master can run the SDN itself we have to configure the Masters + # as Nodes too. + scheduleable = True + # If there's only one Node and it's also a Master we want it to be scheduleable: + if node in masters and len(masters) != 1: + scheduleable = False + write_host(node, base_inventory, scheduleable) + base_inventory.close() + return base_inventory_path + + +def write_host(host, inventory, scheduleable=True): + global CFG + facts = '' + if host.ip: + facts += ' openshift_ip={}'.format(host.ip) + if host.public_ip: + facts += ' openshift_public_ip={}'.format(host.public_ip) + if host.hostname: + facts += ' openshift_hostname={}'.format(host.hostname) + if host.public_hostname: + facts += ' openshift_public_hostname={}'.format(host.public_hostname) + # TODO: For not write_host is handles both master and nodes. + # Technically only nodes will ever need this. + if not scheduleable: + facts += ' openshift_scheduleable=False' + inventory.write('{} {}\n'.format(host, facts)) + + +def load_system_facts(inventory_file, os_facts_path, env_vars): + """ + Retrieves system facts from the remote systems. 
+ """ + FNULL = open(os.devnull, 'w') + status = subprocess.call(['ansible-playbook', + '--inventory-file={}'.format(inventory_file), + os_facts_path], + env=env_vars, + stdout=FNULL) + if not status == 0: + return [], 1 + callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r') + callback_facts = yaml.load(callback_facts_file) + callback_facts_file.close() + return callback_facts, 0 + + +def default_facts(hosts): + global CFG + inventory_file = generate_inventory(hosts) + os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory) + + facts_env = os.environ.copy() + facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml'] + facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory'] + if 'ansible_log_path' in CFG.settings: + facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path'] + if 'ansible_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + return load_system_facts(inventory_file, os_facts_path, facts_env) + + +def run_main_playbook(hosts, hosts_to_run_on): + global CFG + inventory_file = generate_inventory(hosts) + if len(hosts_to_run_on) != len(hosts): + main_playbook_path = os.path.join(CFG.ansible_playbook_directory, + 'playbooks/common/openshift-cluster/scaleup.yml') + else: + main_playbook_path = os.path.join(CFG.ansible_playbook_directory, + 'playbooks/byo/config.yml') + facts_env = os.environ.copy() + if 'ansible_log_path' in CFG.settings: + facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] + if 'ansible_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + return run_ansible(main_playbook_path, inventory_file, facts_env) + + +def run_ansible(playbook, inventory, env_vars): + return subprocess.call(['ansible-playbook', + '--inventory-file={}'.format(inventory), + playbook], + env=env_vars) + +def run_uninstall_playbook(): + playbook = os.path.join(CFG.settings['ansible_playbook_directory'], + 'playbooks/adhoc/uninstall.yml') + inventory_file = generate_inventory(CFG.hosts) + facts_env = os.environ.copy() + if 'ansible_log_path' in CFG.settings: + facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] + if 'ansible_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + return run_ansible(playbook, inventory_file, facts_env) diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 1f74b5b15..b183f0acb 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -102,8 +102,8 @@ class UnattendedCliTests(OOCliFixture): OOCliFixture.setUp(self) self.cli_args.append("-u") - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_cfg_full_run(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 @@ -133,8 +133,8 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals(3, len(hosts)) self.assertEquals(3, len(hosts_to_run_on)) - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_inventory_write(self, load_facts_mock, 
run_playbook_mock): # Add an ssh user so we can verify it makes it to the inventory file: @@ -172,8 +172,8 @@ class UnattendedCliTests(OOCliFixture): self.assertTrue('openshift_hostname' in master_line) self.assertTrue('openshift_public_hostname' in master_line) - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_variant_version_latest_assumed(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) @@ -199,8 +199,8 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('openshift-enterprise', inventory.get('OSEv3:vars', 'deployment_type')) - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_variant_version_preserved(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) @@ -227,8 +227,8 @@ class UnattendedCliTests(OOCliFixture): self.assertEquals('enterprise', inventory.get('OSEv3:vars', 'deployment_type')) - @patch('ooinstall.install_transactions.run_ansible') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_ansible') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_ansible_mock.return_value = 0 @@ -238,8 +238,8 @@ class UnattendedCliTests(OOCliFixture): self._ansible_config_test(load_facts_mock, run_ansible_mock, config, None, None) - @patch('ooinstall.install_transactions.run_ansible') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_ansible') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_ansible_mock.return_value = 0 @@ -250,8 +250,8 @@ class UnattendedCliTests(OOCliFixture): self._ansible_config_test(load_facts_mock, run_ansible_mock, config, ansible_config, ansible_config) - @patch('ooinstall.install_transactions.run_ansible') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_ansible') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_ansible_config_specified_in_installer_config(self, load_facts_mock, run_ansible_mock): @@ -389,8 +389,8 @@ class AttendedCliTests(OOCliFixture): self.assertTrue('public_ip' in h) self.assertTrue('public_hostname' in h) - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_full_run(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 @@ -413,8 +413,8 @@ class AttendedCliTests(OOCliFixture): written_config = self._read_yaml(self.config_file) self._verify_config_hosts(written_config, 3) - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + 
@patch('ooinstall.openshift_ansible.load_system_facts') def test_add_nodes(self, load_facts_mock, run_playbook_mock): # Modify the mock facts to return a version indicating OpenShift @@ -446,8 +446,8 @@ class AttendedCliTests(OOCliFixture): written_config = self._read_yaml(self.config_file) self._verify_config_hosts(written_config, 3) - @patch('ooinstall.install_transactions.run_main_playbook') - @patch('ooinstall.install_transactions.load_system_facts') + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock): load_facts_mock.return_value = (MOCK_FACTS, 0) run_playbook_mock.return_value = 0 -- cgit v1.2.3 From 44ea44a3738f961222d5656e965923ef847b1bec Mon Sep 17 00:00:00 2001 From: Joel Diaz Date: Mon, 2 Nov 2015 11:06:39 -0500 Subject: get zabbix ready to start tracking status of pcp --- roles/os_zabbix/tasks/main.yml | 9 +++++++++ roles/os_zabbix/vars/template_performance_copilot.yml | 14 ++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 roles/os_zabbix/vars/template_performance_copilot.yml diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml index a503b24d7..82bf78b57 100644 --- a/roles/os_zabbix/tasks/main.yml +++ b/roles/os_zabbix/tasks/main.yml @@ -15,6 +15,7 @@ - include_vars: template_ops_tools.yml - include_vars: template_app_zabbix_server.yml - include_vars: template_app_zabbix_agent.yml +- include_vars: template_performance_copilot.yml - name: Include Template Heartbeat include: ../../lib_zabbix/tasks/create_template.yml @@ -79,3 +80,11 @@ server: "{{ ozb_server }}" user: "{{ ozb_user }}" password: "{{ ozb_password }}" + +- name: Include Template Performance Copilot + include: ../../lib_zabbix/tasks/create_template.yml + vars: + template: "{{ g_template_performance_copilot }}" + server: "{{ ozb_server }}" + user: "{{ ozb_user }}" + password: "{{ ozb_password }}" diff --git a/roles/os_zabbix/vars/template_performance_copilot.yml b/roles/os_zabbix/vars/template_performance_copilot.yml new file mode 100644 index 000000000..b62fa0228 --- /dev/null +++ b/roles/os_zabbix/vars/template_performance_copilot.yml @@ -0,0 +1,14 @@ +--- +g_template_performance_copilot: + name: Template Performance Copilot + zitems: + - key: pcp.ping + applications: + - Performance Copilot + value_type: int + + ztriggers: + - name: 'pcp.ping failed on {HOST.NAME}' + expression: '{Template Performance Copilot:pcp.ping.max(#3)}<1' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_pcp_ping.asciidoc' + priority: average -- cgit v1.2.3 From 02a6d993509ac395165c504dba7b92c4f2eb907c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Fri, 16 Oct 2015 11:28:42 -0400 Subject: Fix etcd cert generation when etcd_interface is defined - Refactor certificate generation to properly accept overrides of etcd_interface per host and set the certificate SANS and peer URLs properly. 
- Add sanity checking to user-set values of etcd_interface to provide a better error message --- roles/etcd/README.md | 2 +- roles/etcd/defaults/main.yaml | 8 ------- roles/etcd/handlers/main.yml | 1 + roles/etcd/meta/main.yml | 2 +- roles/etcd/tasks/main.yml | 12 ++++++++-- roles/etcd/templates/etcd.conf.j2 | 4 ++-- roles/etcd_ca/meta/main.yml | 2 +- roles/etcd_ca/tasks/main.yml | 30 ++++++++++++------------- roles/etcd_ca/templates/openssl_append.j2 | 30 ++++++++++++------------- roles/etcd_ca/vars/main.yml | 3 --- roles/etcd_certificates/tasks/client.yml | 2 +- roles/etcd_certificates/tasks/main.yml | 3 --- roles/etcd_certificates/tasks/server.yml | 10 ++++----- roles/etcd_certificates/vars/main.yml | 11 ---------- roles/etcd_common/README.md | 34 +++++++++++++++++++++++++++++ roles/etcd_common/defaults/main.yml | 30 +++++++++++++++++++++++++ roles/etcd_common/meta/main.yml | 16 ++++++++++++++ roles/etcd_common/tasks/main.yml | 13 +++++++++++ roles/etcd_common/templates/host_int_map.j2 | 13 +++++++++++ 19 files changed, 157 insertions(+), 69 deletions(-) delete mode 100644 roles/etcd_ca/vars/main.yml delete mode 100644 roles/etcd_certificates/vars/main.yml create mode 100644 roles/etcd_common/README.md create mode 100644 roles/etcd_common/defaults/main.yml create mode 100644 roles/etcd_common/meta/main.yml create mode 100644 roles/etcd_common/tasks/main.yml create mode 100644 roles/etcd_common/templates/host_int_map.j2 diff --git a/roles/etcd/README.md b/roles/etcd/README.md index 49207c428..88e4ff874 100644 --- a/roles/etcd/README.md +++ b/roles/etcd/README.md @@ -17,7 +17,7 @@ TODO Dependencies ------------ -None +etcd-common Example Playbook ---------------- diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 0f216b84e..0fd3de585 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -2,16 +2,8 @@ etcd_interface: "{{ ansible_default_ipv4.interface }}" etcd_client_port: 2379 etcd_peer_port: 2380 -etcd_peers_group: etcd etcd_url_scheme: http etcd_peer_url_scheme: http -etcd_conf_dir: /etc/etcd -etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" -etcd_key_file: "{{ etcd_conf_dir }}/server.key" -etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" -etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" -etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" etcd_initial_cluster_state: new etcd_initial_cluster_token: etcd-cluster-1 diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index b897913f9..4c0efb97b 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -1,3 +1,4 @@ --- - name: restart etcd service: name=etcd state=restarted + when: not etcd_service_status_changed | default(false) diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 92d44ef4d..a71b36237 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -17,4 +17,4 @@ galaxy_info: - system dependencies: - { role: os_firewall } -- { role: openshift_repos } +- { role: etcd_common } diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 656901409..fcbdecd37 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,4 +1,12 @@ --- +- fail: + msg: Interface {{ etcd_interface }} not found + when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]" + +- fail: + msg: IPv4 address not found for {{ etcd_interface }} + when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in 
hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4" + - name: Install etcd yum: pkg=etcd-2.* state=present @@ -49,5 +57,5 @@ enabled: yes register: start_result -- pause: seconds=30 - when: start_result | changed +- set_fact: + etcd_service_status_changed = start_result | changed diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2 index 9ac23b1dd..32577c96c 100644 --- a/roles/etcd/templates/etcd.conf.j2 +++ b/roles/etcd/templates/etcd.conf.j2 @@ -1,9 +1,9 @@ {% macro initial_cluster() -%} {% for host in groups[etcd_peers_group] -%} {% if loop.last -%} -{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }} +{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }} {%- else -%} -{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}, +{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}, {%- endif -%} {% endfor -%} {% endmacro -%} diff --git a/roles/etcd_ca/meta/main.yml b/roles/etcd_ca/meta/main.yml index fb9280c9e..d02456ca3 100644 --- a/roles/etcd_ca/meta/main.yml +++ b/roles/etcd_ca/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info: - cloud - system dependencies: -- { role: openshift_repos } +- { role: etcd_common } diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml index 625756867..d32f5e48c 100644 --- a/roles/etcd_ca/tasks/main.yml +++ b/roles/etcd_ca/tasks/main.yml @@ -1,14 +1,14 @@ --- - file: - path: "{{ etcd_ca_dir }}/{{ item }}" + path: "{{ item }}" state: directory mode: 0700 owner: root group: root with_items: - - certs - - crl - - fragments + - "{{ etcd_ca_new_certs_dir }}" + - "{{ etcd_ca_crl_dir }}" + - "{{ etcd_ca_dir }}/fragments" - command: cp /etc/pki/tls/openssl.cnf ./ args: @@ -22,25 +22,25 @@ - assemble: src: "{{ etcd_ca_dir }}/fragments" - dest: "{{ etcd_ca_dir }}/openssl.cnf" + dest: "{{ etcd_openssl_conf }}" -- command: touch index.txt +- command: touch {{ etcd_ca_db }} args: - chdir: "{{ etcd_ca_dir }}" - creates: "{{ etcd_ca_dir }}/index.txt" + creates: "{{ etcd_ca_db }}" - copy: - dest: "{{ etcd_ca_dir }}/serial" + dest: "{{ etcd_ca_serial }}" content: "01" force: no - command: > - openssl req -config openssl.cnf -newkey rsa:4096 - -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self - -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }} - -days 365 + openssl req -config {{ etcd_openssl_conf }} -newkey rsa:4096 + -keyout {{ etcd_ca_key }} -new -out {{ etcd_ca_cert }} + -x509 -extensions {{ etcd_ca_exts_self }} -batch -nodes + -days {{ etcd_ca_default_days }} + -subj /CN=etcd-signer@{{ ansible_date_time.epoch }} args: chdir: "{{ etcd_ca_dir }}" - creates: "{{ etcd_ca_dir }}/ca.crt" + creates: "{{ etcd_ca_cert }}" environment: - SAN: '' + SAN: 'etcd-signer' diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd_ca/templates/openssl_append.j2 index de2adaead..f28316fc2 100644 --- a/roles/etcd_ca/templates/openssl_append.j2 +++ b/roles/etcd_ca/templates/openssl_append.j2 @@ -1,20 +1,20 @@ -[ etcd_v3_req ] +[ {{ etcd_req_ext }} ] basicConstraints = critical,CA:FALSE keyUsage = digitalSignature,keyEncipherment subjectAltName = ${ENV::SAN} -[ etcd_ca ] +[ {{ etcd_ca_name }} ] dir = {{ etcd_ca_dir }} -crl_dir = $dir/crl -database = $dir/index.txt -new_certs_dir = $dir/certs -certificate = $dir/ca.crt -serial = 
$dir/serial -private_key = $dir/ca.key -crl_number = $dir/crlnumber -x509_extensions = etcd_v3_ca_client -default_days = 365 +crl_dir = {{ etcd_ca_crl_dir }} +database = {{ etcd_ca_db }} +new_certs_dir = {{ etcd_ca_new_certs_dir }} +certificate = {{ etcd_ca_cert }} +serial = {{ etcd_ca_serial }} +private_key = {{ etcd_ca_key }} +crl_number = {{ etcd_ca_crl_number }} +x509_extensions = {{ etcd_ca_exts_client }} +default_days = {{ etcd_ca_default_days }} default_md = sha256 preserve = no name_opt = ca_default @@ -23,27 +23,27 @@ policy = policy_anything unique_subject = no copy_extensions = copy -[ etcd_v3_ca_self ] +[ {{ etcd_ca_exts_self }} ] authorityKeyIdentifier = keyid,issuer basicConstraints = critical,CA:TRUE,pathlen:0 keyUsage = critical,digitalSignature,keyEncipherment,keyCertSign subjectKeyIdentifier = hash -[ etcd_v3_ca_peer ] +[ {{ etcd_ca_exts_peer }} ] authorityKeyIdentifier = keyid,issuer:always basicConstraints = critical,CA:FALSE extendedKeyUsage = clientAuth,serverAuth keyUsage = digitalSignature,keyEncipherment subjectKeyIdentifier = hash -[ etcd_v3_ca_server ] +[ {{ etcd_ca_exts_server }} ] authorityKeyIdentifier = keyid,issuer:always basicConstraints = critical,CA:FALSE extendedKeyUsage = serverAuth keyUsage = digitalSignature,keyEncipherment subjectKeyIdentifier = hash -[ etcd_v3_ca_client ] +[ {{ etcd_ca_exts_client }} ] authorityKeyIdentifier = keyid,issuer:always basicConstraints = critical,CA:FALSE extendedKeyUsage = clientAuth diff --git a/roles/etcd_ca/vars/main.yml b/roles/etcd_ca/vars/main.yml deleted file mode 100644 index 901e95027..000000000 --- a/roles/etcd_ca/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -etcd_conf_dir: /etc/etcd -etcd_ca_dir: /etc/etcd/ca diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml index 28f33f442..6aa4883e0 100644 --- a/roles/etcd_certificates/tasks/client.yml +++ b/roles/etcd_certificates/tasks/client.yml @@ -32,7 +32,7 @@ creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' ~ item.etcd_cert_prefix ~ 'client.crt' }}" environment: - SAN: '' + SAN: "IP:{{ item.openshift.common.ip }}" with_items: etcd_needing_client_certs - file: diff --git a/roles/etcd_certificates/tasks/main.yml b/roles/etcd_certificates/tasks/main.yml index da875e8ea..3bb715943 100644 --- a/roles/etcd_certificates/tasks/main.yml +++ b/roles/etcd_certificates/tasks/main.yml @@ -4,6 +4,3 @@ - include: server.yml when: etcd_needing_server_certs is defined and etcd_needing_server_certs - - - diff --git a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml index 727b7fa2c..3499dcbef 100644 --- a/roles/etcd_certificates/tasks/server.yml +++ b/roles/etcd_certificates/tasks/server.yml @@ -18,7 +18,7 @@ creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' ~ item.etcd_cert_prefix ~ 'server.csr' }}" environment: - SAN: "IP:{{ item.openshift.common.ip }}" + SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}" with_items: etcd_needing_server_certs - name: Sign and create the server crt @@ -32,7 +32,7 @@ creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' ~ item.etcd_cert_prefix ~ 'server.crt' }}" environment: - SAN: '' + SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}" with_items: etcd_needing_server_certs - name: Create the peer csr @@ -47,7 +47,7 @@ creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' ~ item.etcd_cert_prefix ~ 'peer.csr' }}" 
environment: - SAN: "IP:{{ item.openshift.common.ip }}" + SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}" with_items: etcd_needing_server_certs - name: Sign and create the peer crt @@ -61,7 +61,7 @@ creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/' ~ item.etcd_cert_prefix ~ 'peer.crt' }}" environment: - SAN: '' + SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}" with_items: etcd_needing_server_certs - file: @@ -69,5 +69,3 @@ dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt" state: hard with_items: etcd_needing_server_certs - - diff --git a/roles/etcd_certificates/vars/main.yml b/roles/etcd_certificates/vars/main.yml deleted file mode 100644 index 0eaeeb82b..000000000 --- a/roles/etcd_certificates/vars/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -etcd_conf_dir: /etc/etcd -etcd_ca_dir: /etc/etcd/ca -etcd_generated_certs_dir: /etc/etcd/generated_certs -etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" -etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" -etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" -etcd_ca_name: etcd_ca -etcd_req_ext: etcd_v3_req -etcd_ca_exts_peer: etcd_v3_ca_peer -etcd_ca_exts_server: etcd_v3_ca_server diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md new file mode 100644 index 000000000..131a01490 --- /dev/null +++ b/roles/etcd_common/README.md @@ -0,0 +1,34 @@ +etcd_common +======================== + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +openshift-repos + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml new file mode 100644 index 000000000..96f4b63af --- /dev/null +++ b/roles/etcd_common/defaults/main.yml @@ -0,0 +1,30 @@ +--- +etcd_peers_group: etcd + +# etcd server vars +etcd_conf_dir: /etc/etcd +etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_cert_file: "{{ etcd_conf_dir }}/server.crt" +etcd_key_file: "{{ etcd_conf_dir }}/server.key" +etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt" +etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt" +etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key" + +# etcd ca vars +etcd_ca_dir: "{{ etcd_conf_dir}}/ca" +etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs" +etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt" +etcd_ca_key: "{{ etcd_ca_dir }}/ca.key" +etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf" +etcd_ca_name: etcd_ca +etcd_req_ext: etcd_v3_req +etcd_ca_exts_peer: etcd_v3_ca_peer +etcd_ca_exts_server: etcd_v3_ca_server +etcd_ca_exts_self: etcd_v3_ca_self +etcd_ca_exts_client: etcd_v3_ca_client +etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl" +etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs" +etcd_ca_db: "{{ etcd_ca_dir }}/index.txt" +etcd_ca_serial: "{{ etcd_ca_dir }}/serial" +etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber" +etcd_ca_default_days: 365 diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml new file mode 100644 index 000000000..fb9280c9e --- /dev/null +++ b/roles/etcd_common/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_repos } diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml new file mode 100644 index 000000000..cd108495d --- /dev/null +++ b/roles/etcd_common/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- set_fact: + etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}" + +- fail: + msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}" + when: "'etcd_interface' in item.value and 'interface' not in item.value" + with_dict: etcd_host_int_map + +- fail: + msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }} + when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4" + with_dict: etcd_host_int_map diff --git a/roles/etcd_common/templates/host_int_map.j2 b/roles/etcd_common/templates/host_int_map.j2 new file mode 100644 index 000000000..9c9c76413 --- /dev/null +++ b/roles/etcd_common/templates/host_int_map.j2 @@ -0,0 +1,13 @@ +--- +{% for host in groups[etcd_peers_group] %} +{% set entry=hostvars[host] %} +{{ entry.inventory_hostname }}: +{% if 'etcd_interface' in entry %} + etcd_interface: {{ entry.etcd_interface }} +{% if entry.etcd_interface in entry.ansible_interfaces %} + interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }} +{% endif %} +{% else %} + interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }} +{% endif %} +{% endfor %} -- cgit v1.2.3 From e35c5778604eb46be4079b47f786318c97a3f8b8 Mon Sep 17 00:00:00 2001 From: Marek Mahut Date: Wed, 14 Oct 2015 14:44:58 +0200 Subject: Adding openshift.node.etcd items --- roles/os_zabbix/vars/template_openshift_master.yml | 82 ++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml index 1de4fefbb..cd702a814 100644 --- a/roles/os_zabbix/vars/template_openshift_master.yml +++ b/roles/os_zabbix/vars/template_openshift_master.yml @@ -31,6 +31,78 @@ g_template_openshift_master: applications: - Openshift Master + - key: openshift.master.etcd.create.success + description: Show number of successful create actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.create.fail + description: Show number of failed create actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.delete.success + description: Show number of successful delete actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.delete.fail + description: Show number of failed delete actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.get.success + description: Show number of successful get actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.get.fail + description: Show number of failed get actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.set.success + description: Show number of successful set actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.set.fail + description: Show number of failed set actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.update.success + description: Show number of successful update actions + type: int + 
applications: + - Openshift Master + + - key: openshift.master.etcd.update.fail + description: Show number of failed update actions + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.watchers + description: Show number of etcd watchers + type: int + applications: + - Openshift Master + + - key: openshift.master.etcd.ping + description: etcd ping + type: int + applications: + - Openshift Master + ztriggers: - name: 'Application creation has failed on {HOST.NAME}' expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1' @@ -56,3 +128,13 @@ g_template_openshift_master: expression: '{Template Openshift Master:openshift.project.counter.last()}=0' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' priority: info + + - name: 'Low number of etcd watchers on {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc' + priority: avg + + - name: 'Etcd ping failed on {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc' + priority: high -- cgit v1.2.3 From 5f9d5f11ddb2285d75281025d3fe039536d43bd9 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Wed, 23 Sep 2015 18:13:19 -0400 Subject: Add all the possible servicenames to openshift_all_hostnames for masters --- roles/openshift_facts/library/openshift_facts.py | 10 ++++++++++ roles/openshift_facts/tasks/main.yml | 3 +++ roles/openshift_master_certificates/tasks/main.yml | 2 ++ 3 files changed, 15 insertions(+) diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index e5aeb9244..163e67f62 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -22,6 +22,7 @@ import copy import os from distutils.util import strtobool from distutils.version import LooseVersion +from netaddr import IPNetwork def hostname_valid(hostname): @@ -486,12 +487,21 @@ def set_aggregate_facts(facts): if 'common' in facts: all_hostnames.add(facts['common']['hostname']) all_hostnames.add(facts['common']['public_hostname']) + all_hostnames.add(facts['common']['ip']) + all_hostnames.add(facts['common']['public_ip']) if 'master' in facts: + # FIXME: not sure why but facts['dns']['domain'] fails + cluster_domain = 'cluster.local' if 'cluster_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_hostname']) if 'cluster_public_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_public_hostname']) + all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc', + 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default', + 'kubernetes.default.svc', 'kubernetes.default.svc.' 
+ cluster_domain]) + first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1]) + all_hostnames.add(first_svc_ip) facts['common']['all_hostnames'] = list(all_hostnames) diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 6301d4fc0..a46b45b8c 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -6,5 +6,8 @@ - ansible_version | version_compare('1.9.0', 'ne') - ansible_version | version_compare('1.9.0.1', 'ne') +- name: Ensure python-netaddr is installed + yum: pkg=python-netaddr state=installed + - name: Gather Cluster facts openshift_facts: diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index e4602337e..4b39b043a 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -34,6 +34,8 @@ - serviceaccounts.private.key - serviceaccounts.public.key +- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}" + with_items: masters_needing_certs - name: Create the master certificates if they do not already exist command: > -- cgit v1.2.3 From 0f58ddd79113f00e9fab62b8938ba02dbe0bef12 Mon Sep 17 00:00:00 2001 From: Marek Mahut Date: Tue, 3 Nov 2015 17:09:27 +0100 Subject: Moving to Openshift Etcd application --- roles/os_zabbix/vars/template_openshift_master.yml | 24 +++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml index cd702a814..6defc4989 100644 --- a/roles/os_zabbix/vars/template_openshift_master.yml +++ b/roles/os_zabbix/vars/template_openshift_master.yml @@ -35,73 +35,73 @@ g_template_openshift_master: description: Show number of successful create actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.create.fail description: Show number of failed create actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.delete.success description: Show number of successful delete actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.delete.fail description: Show number of failed delete actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.get.success description: Show number of successful get actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.get.fail description: Show number of failed get actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.set.success description: Show number of successful set actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.set.fail description: Show number of failed set actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.update.success description: Show number of successful update actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.update.fail description: Show number of failed update actions type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.watchers description: Show number of etcd watchers type: int applications: - - Openshift Master + - Openshift Etcd - key: openshift.master.etcd.ping description: etcd ping type: int applications: - - Openshift 
Master + - Openshift Etcd ztriggers: - name: 'Application creation has failed on {HOST.NAME}' -- cgit v1.2.3 From 8da7c1f5bc68110469bedceb0ddad4fdfc8b7e4d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Wed, 28 Oct 2015 10:39:41 -0400 Subject: Add custom certificates to serving info in master configuration. --- filter_plugins/oo_filters.py | 57 +++++++++++++++++++++- inventory/byo/hosts.example | 3 ++ playbooks/common/openshift-master/config.yml | 9 ++++ roles/openshift_master/templates/master.yaml.v1.j2 | 14 ++++++ 4 files changed, 82 insertions(+), 1 deletion(-) diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index a57b0f895..d653b9217 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible from ansible import errors from operator import itemgetter +import OpenSSL.crypto +import os.path import pdb import re import json @@ -327,6 +329,58 @@ class FilterModule(object): return revamped_outputs + @staticmethod + def oo_parse_certificate_names(certificates, data_dir): + ''' Parses names from list of certificate hashes. + + Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt", + "keyfile": "/etc/origin/master/custom1.key" }, + { "certfile": "custom2.crt", + "keyfile": "custom2.key" }] + + returns [{ "certfile": "/etc/origin/master/custom1.crt", + "keyfile": "/etc/origin/master/custom1.key", + "names": [ "public-master-host.com", + "other-master-host.com" ] }, + { "certfile": "/etc/origin/master/custom2.crt", + "keyfile": "/etc/origin/master/custom2.key", + "names": [ "some-hostname.com" ] }] + ''' + if not issubclass(type(certificates), list): + raise errors.AnsibleFilterError("|failed expects certificates is a list") + + if not issubclass(type(data_dir), unicode): + raise errors.AnsibleFilterError("|failed expects data_dir is unicode") + + for certificate in certificates: + if 'names' in certificate.keys(): + continue + else: + certificate['names'] = [] + + if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']): + # Unable to find cert/key, try to prepend data_dir to paths + certificate['certfile'] = os.path.join(data_dir, certificate['certfile']) + certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile']) + if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']): + # Unable to find cert/key in data_dir + raise errors.AnsibleFilterError("|certificate and/or key does not exist %s, %s" % + (certificate['certfile'], certificate['keyfile'])) + + try: + st_cert = open(certificate['certfile'], 'rt').read() + cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert) + certificate['names'].append(str(cert.get_subject().commonName.decode())) + for i in range(cert.get_extension_count()): + if cert.get_extension(i).get_short_name() == 'subjectAltName': + for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): + certificate['names'].append(name) + except: + raise errors.AnsibleFilterError("|failed to parse certificate %s" % certificate['certfile']) + + certificate['names'] = list(set(certificate['names'])) + return certificates + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -342,5 +396,6 @@ class FilterModule(object): "oo_combine_dict": self.oo_combine_dict, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, - "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs + "oo_parse_heat_stack_outputs": 
self.oo_parse_heat_stack_outputs, + "oo_parse_certificate_names": self.oo_parse_certificate_names } diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index ad19fe116..c6733567a 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -99,6 +99,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # set RPM version for debugging purposes #openshift_pkg_version=-3.0.0.0 +# Configure custom master certificates +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 1dec923fc..4662c179a 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -199,9 +199,18 @@ validate_checksum: yes with_items: masters_needing_certs +- name: Inspect named certificates + hosts: oo_first_master + tasks: + - name: Collect certificate names + set_fact: + parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir) }}" + when: openshift_master_named_certificates is defined + - name: Configure master instances hosts: oo_masters_to_config vars: + named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}" sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" embedded_etcd: "{{ openshift.master.embedded_etcd }}" diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 73a0bc6cc..b429be596 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -22,6 +22,9 @@ corsAllowedOrigins: {% for custom_origin in openshift.master.custom_cors_origins | default("") %} - {{ custom_origin }} {% endfor %} +{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %} + - {{ name }} +{% endfor %} {% if 'disabled_features' in openshift.master %} disabledFeatures: {{ openshift.master.disabled_features | to_json }} {% endif %} @@ -133,3 +136,14 @@ servingInfo: keyFile: master.server.key maxRequestsInFlight: 500 requestTimeoutSeconds: 3600 +{% if named_certificates %} + namedCertificates: +{% for named_certificate in named_certificates %} + - certFile: {{ named_certificate['certfile'] }} + keyFile: {{ named_certificate['keyfile'] }} + names: +{% for name in named_certificate['names'] %} + - "{{ name }}" +{% endfor %} +{% endfor %} +{% endif %} -- cgit v1.2.3 From b41bca63682e11e2522540567c2bcf1d146e5d03 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Fri, 30 Oct 2015 09:50:11 -0400 Subject: oo-install: Support running on the host to be deployed This adds a check to see if the host the installer is running on is one of the hosts to be installed and sets i ansible_connection=local ansible_sudo=no in the inventory file. --- utils/src/ooinstall/openshift_ansible.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 3306271c8..bb1003ad6 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -2,6 +2,7 @@ # repo. We will work on these over time. 
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned +import socket import subprocess import os import yaml @@ -16,6 +17,8 @@ def set_config(cfg): def generate_inventory(hosts): print hosts global CFG + + installer_host = socket.gethostname() base_inventory_path = CFG.settings['ansible_inventory_path'] base_inventory = open(base_inventory_path, 'w') base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') @@ -41,6 +44,10 @@ def generate_inventory(hosts): if 'OO_INSTALL_STAGE_REGISTRY' in os.environ: base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n') + if any(host.hostname == installer_host for host in hosts): + base_inventory.write("ansible_connection=local\n") + base_inventory.write("ansible_sudo=no\n") + base_inventory.write('\n[masters]\n') masters = (host for host in hosts if host.master) for master in masters: -- cgit v1.2.3 From ef6df7220673de40d1c9854e105d7f134232e733 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Fri, 30 Oct 2015 13:01:09 -0400 Subject: ooinstall: Update local install check Update to check both hostname and public_hostname. Remove ansible_sudo=no as I failed to notice we were already checking if ansible_ssh_user == 'root' and setting it there. --- utils/src/ooinstall/openshift_ansible.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index bb1003ad6..4c9d30718 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -44,9 +44,9 @@ def generate_inventory(hosts): if 'OO_INSTALL_STAGE_REGISTRY' in os.environ: base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n') - if any(host.hostname == installer_host for host in hosts): + if any(host.hostname == installer_host or host.public_hostname == installer_host + for host in hosts): base_inventory.write("ansible_connection=local\n") - base_inventory.write("ansible_sudo=no\n") base_inventory.write('\n[masters]\n') masters = (host for host in hosts if host.master) -- cgit v1.2.3 From 3574beed2b43d5fafbf0b833c1f39bb09cdf947f Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Mon, 2 Nov 2015 08:32:17 -0500 Subject: ooinstall: Add check for nopwd sudo --- utils/src/ooinstall/openshift_ansible.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 4c9d30718..0def72cfd 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -4,6 +4,7 @@ import socket import subprocess +import sys import os import yaml from ooinstall.variants import find_variant @@ -25,7 +26,7 @@ def generate_inventory(hosts): base_inventory.write('\n[OSEv3:vars]\n') base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user'])) if CFG.settings['ansible_ssh_user'] != 'root': - base_inventory.write('ansible_sudo=true\n') + base_inventory.write('ansible_become=true\n') # Find the correct deployment type for ansible: ver = find_variant(CFG.settings['variant'], @@ -46,6 +47,10 @@ def generate_inventory(hosts): if any(host.hostname == installer_host or host.public_hostname == installer_host for host in hosts): + no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive']) + if no_pwd_sudo == 1: + print 'The atomic-openshift-installer requires sudo 
access without a password.' + sys.exit(1) base_inventory.write("ansible_connection=local\n") base_inventory.write('\n[masters]\n') -- cgit v1.2.3 From 4e0a939abb578fc205fab940a22f40ef5a73abe6 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Mon, 2 Nov 2015 14:03:34 -0500 Subject: atomic-openshift-installer: Add default openshift-ansible-playbook This adds a default value to the openshift-ansible-playbook directory and also removes the requirement that it be writable. --- utils/src/ooinstall/cli_installer.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index b3b1acebb..88afcee36 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -379,14 +379,14 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): readable=True), default=None) @click.option('--ansible-playbook-directory', - '-a', - type=click.Path(exists=True, - file_okay=False, - dir_okay=True, - writable=False, - readable=True), - # callback=validate_ansible_dir, - envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') + '-a', + type=click.Path(exists=True, + file_okay=False, + dir_okay=True, + readable=True), + # callback=validate_ansible_dir, + default='/usr/share/openshift-ansible/', + envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') @click.option('--ansible-config', type=click.Path(file_okay=True, dir_okay=False, -- cgit v1.2.3 From c03a8150cccb28b362d0552b97789f47b702c1e3 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Tue, 3 Nov 2015 08:18:17 -0500 Subject: atomic-openshift-installer: Correct inaccurate prompt --- utils/src/ooinstall/cli_installer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index b3b1acebb..63b794f03 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -191,7 +191,7 @@ Notes: facts_confirmed = click.confirm("Do the above facts look correct?") if not facts_confirmed: message = """ -Edit %s with the desired values and rerun oo-install with --unattended . +Edit %s with the desired values and rerun atomic-openshift-installer with --unattended . """ % oo_cfg.config_path click.echo(message) # Make sure we actually write out the config file. -- cgit v1.2.3 From 3a8b4f1315e28f35e16ace77560f040f08588722 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Tue, 3 Nov 2015 11:26:33 -0500 Subject: Filter internal hostnames from the list of parsed names. --- filter_plugins/oo_filters.py | 16 +++++++++++++--- inventory/byo/hosts.example | 2 ++ playbooks/common/openshift-master/config.yml | 2 +- roles/openshift_facts/library/openshift_facts.py | 14 +++++++++++--- roles/openshift_master/templates/master.yaml.v1.j2 | 2 +- 5 files changed, 28 insertions(+), 8 deletions(-) diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index d653b9217..dfd9a111e 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -330,7 +330,8 @@ class FilterModule(object): return revamped_outputs @staticmethod - def oo_parse_certificate_names(certificates, data_dir): + # pylint: disable=too-many-branches + def oo_parse_certificate_names(certificates, data_dir, internal_hostnames): ''' Parses names from list of certificate hashes. 
Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt", @@ -352,6 +353,9 @@ class FilterModule(object): if not issubclass(type(data_dir), unicode): raise errors.AnsibleFilterError("|failed expects data_dir is unicode") + if not issubclass(type(internal_hostnames), list): + raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") + for certificate in certificates: if 'names' in certificate.keys(): continue @@ -364,7 +368,7 @@ class FilterModule(object): certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile']) if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']): # Unable to find cert/key in data_dir - raise errors.AnsibleFilterError("|certificate and/or key does not exist %s, %s" % + raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" % (certificate['certfile'], certificate['keyfile'])) try: @@ -376,9 +380,15 @@ class FilterModule(object): for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): certificate['names'].append(name) except: - raise errors.AnsibleFilterError("|failed to parse certificate %s" % certificate['certfile']) + raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + + "please specify certificate names in host inventory")) + certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames] certificate['names'] = list(set(certificate['names'])) + if not certificate['names']: + raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] + + "detected a collision with internal hostname, please specify " + + "certificate names in host inventory")) return certificates def filters(self): diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index c6733567a..f60918e6d 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -101,6 +101,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Configure custom master certificates #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] # host group for masters [masters] diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 4662c179a..59c4b2370 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -204,7 +204,7 @@ tasks: - name: Collect certificate names set_fact: - parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir) }}" + parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}" when: openshift_master_named_certificates is defined - name: Configure master instances diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 163e67f62..28866bd48 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -484,12 +484,16 @@ def set_aggregate_facts(facts): dict: the facts dict updated with aggregated facts """ all_hostnames = set() + internal_hostnames = set() 
if 'common' in facts: all_hostnames.add(facts['common']['hostname']) all_hostnames.add(facts['common']['public_hostname']) all_hostnames.add(facts['common']['ip']) all_hostnames.add(facts['common']['public_ip']) + internal_hostnames.add(facts['common']['hostname']) + internal_hostnames.add(facts['common']['ip']) + if 'master' in facts: # FIXME: not sure why but facts['dns']['domain'] fails cluster_domain = 'cluster.local' @@ -497,13 +501,17 @@ def set_aggregate_facts(facts): all_hostnames.add(facts['master']['cluster_hostname']) if 'cluster_public_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_public_hostname']) - all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc', - 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default', - 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]) + svc_names = ['openshift', 'openshift.default', 'openshift.default.svc', + 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default', + 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain] + all_hostnames.update(svc_names) + internal_hostnames.update(svc_names) first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1]) all_hostnames.add(first_svc_ip) + internal_hostnames.add(first_svc_ip) facts['common']['all_hostnames'] = list(all_hostnames) + facts['common']['internal_hostnames'] = list(all_hostnames) return facts diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index b429be596..9547a6945 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -16,7 +16,7 @@ assetConfig: maxRequestsInFlight: 0 requestTimeoutSeconds: 0 corsAllowedOrigins: -{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %} +{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] | unique %} - {{ origin }} {% endfor %} {% for custom_origin in openshift.master.custom_cors_origins | default("") %} -- cgit v1.2.3 From 97dda64d5458dc7bd3edad720eb2a1e821b8b947 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Tue, 3 Nov 2015 14:02:40 -0500 Subject: added disk tps checks to zabbix --- roles/os_zabbix/vars/template_os_linux.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml index aeeec4b8d..68de2dde5 100644 --- a/roles/os_zabbix/vars/template_os_linux.yml +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -194,6 +194,11 @@ g_template_os_linux: lifetime: 1 description: "Dynamically register the filesystems" + - name: disc.disk + key: disc.disk + lifetime: 1 + description: "Dynamically register disks on a node" + zitemprototypes: - discoveryrule_key: disc.filesys name: "disc.filesys.full.{#OSO_FILESYS}" @@ -211,6 +216,14 @@ g_template_os_linux: applications: - Disk + - discoveryrule_key: disc.disk + name: "TPS (IOPS) for disk {#OSO_DISK}" + key: "disc.disk.tps[{#OSO_FILESYS}]" + value_type: int + description: "PCP disk.dev.totals metric measured over a period of time. 
This shows how many disk transactions per second the disk is using" + applications: + - Disk + ztriggerprototypes: - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}' expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85' -- cgit v1.2.3 From 4e04a3719e867ca0bd6b54440f4066fdd6f6751a Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Tue, 3 Nov 2015 15:31:36 -0500 Subject: fixed a dumb naming mistake --- roles/os_zabbix/vars/template_os_linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml index 68de2dde5..d494f1bad 100644 --- a/roles/os_zabbix/vars/template_os_linux.yml +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -218,7 +218,7 @@ g_template_os_linux: - discoveryrule_key: disc.disk name: "TPS (IOPS) for disk {#OSO_DISK}" - key: "disc.disk.tps[{#OSO_FILESYS}]" + key: "disc.disk.tps[{#OSO_DISK}]" value_type: int description: "PCP disk.dev.totals metric measured over a period of time. This shows how many disk transactions per second the disk is using" applications: -- cgit v1.2.3 From 4b85e4fae04a71cf2cea17018b9240b369f145d5 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Mon, 2 Nov 2015 16:03:33 -0500 Subject: Support for gce --- bin/README_SHELL_COMPLETION | 2 +- bin/openshift_ansible.conf.example | 2 +- bin/openshift_ansible/awsutil.py | 11 +- bin/ossh_bash_completion | 20 +- bin/ossh_zsh_completion | 10 +- bin/zsh_functions/_ossh | 4 +- inventory/multi_ec2.py | 375 -------------------- inventory/multi_ec2.yaml.example | 32 -- inventory/multi_inventory.py | 415 +++++++++++++++++++++++ inventory/multi_inventory.yaml.example | 51 +++ openshift-ansible.spec | 10 +- roles/openshift_ansible_inventory/tasks/main.yml | 10 +- test/units/README.md | 2 +- test/units/multi_inventory_test.py | 114 +++++++ test/units/mutli_ec2_test.py | 95 ------ 15 files changed, 617 insertions(+), 536 deletions(-) delete mode 100755 inventory/multi_ec2.py delete mode 100644 inventory/multi_ec2.yaml.example create mode 100755 inventory/multi_inventory.py create mode 100644 inventory/multi_inventory.yaml.example create mode 100755 test/units/multi_inventory_test.py delete mode 100755 test/units/mutli_ec2_test.py diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION index 5f05df7fc..49bba3acc 100644 --- a/bin/README_SHELL_COMPLETION +++ b/bin/README_SHELL_COMPLETION @@ -14,7 +14,7 @@ will populate the cache file and the completions should become available. This script will look at the cached version of your -multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache. +multi_inventory results in ~/.ansible/tmp/multi_inventory.cache. It will then parse a few {host}.{env} out of the json and return them to be completable. 
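For readability, here is a minimal, illustrative sketch (not the shipped helper code) of what that completion lookup amounts to, assuming the default cache path and the _meta.hostvars layout with ec2_tag_Name / ec2_tag_environment keys that the one-liners in bin/ossh_bash_completion below rely on:

    # Sketch: read the multi_inventory cache and print "<name>.<env>" completions.
    # Assumes the cache JSON exposes _meta.hostvars keyed by DNS name, as in the
    # completion one-liners in this patch; hosts missing either tag are skipped.
    import json
    import os

    cache = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')

    with open(cache) as handle:
        inventory = json.load(handle)

    hostvars = inventory.get('_meta', {}).get('hostvars', {})
    for dns, host in hostvars.items():
        # Only hosts carrying both tags are completable as "<name>.<env>".
        if 'ec2_tag_Name' in host and 'ec2_tag_environment' in host:
            print('%s.%s' % (host['ec2_tag_Name'], host['ec2_tag_environment']))
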
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example index e891b855a..8786dfc13 100644 --- a/bin/openshift_ansible.conf.example +++ b/bin/openshift_ansible.conf.example @@ -1,5 +1,5 @@ #[main] -#inventory = /usr/share/ansible/inventory/multi_ec2.py +#inventory = /usr/share/ansible/inventory/multi_inventory.py #[host_type_aliases] #host-type-one = aliasa,aliasb diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py index 9df034f57..45345007c 100644 --- a/bin/openshift_ansible/awsutil.py +++ b/bin/openshift_ansible/awsutil.py @@ -4,7 +4,10 @@ import os import re -from openshift_ansible import multi_ec2 + +# Buildbot does not have multi_inventory installed +#pylint: disable=no-name-in-module +from openshift_ansible import multi_inventory class ArgumentError(Exception): """This class is raised when improper arguments are passed.""" @@ -49,9 +52,9 @@ class AwsUtil(object): Keyword arguments: args -- optional arguments to pass to the inventory script """ - mec2 = multi_ec2.MultiEc2(args) - mec2.run() - return mec2.result + minv = multi_inventory.MultiInventory(args) + minv.run() + return minv.result def get_environments(self): """Searches for env tags in the inventory and returns all of the envs found.""" diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion index 5072161f0..997ff0f9c 100755 --- a/bin/ossh_bash_completion +++ b/bin/ossh_bash_completion @@ -1,12 +1,12 @@ __ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print 
"\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' fi } @@ -26,13 +26,13 @@ complete -F _ossh ossh oscp __opssh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' fi } diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion index 44500c618..3c4018636 100644 --- a/bin/ossh_zsh_completion +++ b/bin/ossh_zsh_completion @@ -2,13 +2,13 @@ _ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import 
json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') fi diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh index 7c6cb7b0b..d205e1055 100644 --- a/bin/zsh_functions/_ossh +++ b/bin/zsh_functions/_ossh @@ -1,8 +1,8 @@ #compdef ossh oscp _ossh_known_hosts(){ - if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') + if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') fi } diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py deleted file mode 100755 index 98dde3f3c..000000000 --- a/inventory/multi_ec2.py +++ /dev/null @@ -1,375 +0,0 @@ -#!/usr/bin/env python2 -''' - Fetch and combine multiple ec2 account settings into a single - json hash. -''' -# vim: expandtab:tabstop=4:shiftwidth=4 - -from time import time -import argparse -import yaml -import os -import subprocess -import json -import errno -import fcntl -import tempfile -import copy - -CONFIG_FILE_NAME = 'multi_ec2.yaml' -DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache') - -class MultiEc2(object): - ''' - MultiEc2 class: - Opens a yaml config file and reads aws credentials. - Stores a json hash of resources in result. 
- ''' - - def __init__(self, args=None): - # Allow args to be passed when called as a library - if not args: - self.args = {} - else: - self.args = args - - self.cache_path = DEFAULT_CACHE_PATH - self.config = None - self.all_ec2_results = {} - self.result = {} - self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__))) - - same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME) - etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME) - - # Prefer a file in the same directory, fall back to a file in etc - if os.path.isfile(same_dir_config_file): - self.config_file = same_dir_config_file - elif os.path.isfile(etc_dir_config_file): - self.config_file = etc_dir_config_file - else: - self.config_file = None # expect env vars - - - def run(self): - '''This method checks to see if the local - cache is valid for the inventory. - - if the cache is valid; return cache - else the credentials are loaded from multi_ec2.yaml or from the env - and we attempt to get the inventory from the provider specified. - ''' - # load yaml - if self.config_file and os.path.isfile(self.config_file): - self.config = self.load_yaml_config() - elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \ - os.environ.has_key("AWS_SECRET_ACCESS_KEY"): - # Build a default config - self.config = {} - self.config['accounts'] = [ - { - 'name': 'default', - 'cache_location': DEFAULT_CACHE_PATH, - 'provider': 'aws/hosts/ec2.py', - 'env_vars': { - 'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"], - 'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"], - } - }, - ] - - self.config['cache_max_age'] = 300 - else: - raise RuntimeError("Could not find valid ec2 credentials in the environment.") - - if self.config.has_key('cache_location'): - self.cache_path = self.config['cache_location'] - - if self.args.get('refresh_cache', None): - self.get_inventory() - self.write_to_cache() - # if its a host query, fetch and do not cache - elif self.args.get('host', None): - self.get_inventory() - elif not self.is_cache_valid(): - # go fetch the inventories and cache them if cache is expired - self.get_inventory() - self.write_to_cache() - else: - # get data from disk - self.get_inventory_from_cache() - - def load_yaml_config(self, conf_file=None): - """Load a yaml config file with credentials to query the - respective cloud for inventory. - """ - config = None - - if not conf_file: - conf_file = self.config_file - - with open(conf_file) as conf: - config = yaml.safe_load(conf) - - return config - - def get_provider_tags(self, provider, env=None): - """Call and query all of the tags that are usuable - by ansible. If environment is empty use the default env. - """ - if not env: - env = os.environ - - # Allow relatively path'd providers in config file - if os.path.isfile(os.path.join(self.file_path, provider)): - provider = os.path.join(self.file_path, provider) - - # check to see if provider exists - if not os.path.isfile(provider) or not os.access(provider, os.X_OK): - raise RuntimeError("Problem with the provider. Please check path " \ - "and that it is executable. (%s)" % provider) - - cmds = [provider] - if self.args.get('host', None): - cmds.append("--host") - cmds.append(self.args.get('host', None)) - else: - cmds.append('--list') - - cmds.append('--refresh-cache') - - return subprocess.Popen(cmds, stderr=subprocess.PIPE, \ - stdout=subprocess.PIPE, env=env) - - @staticmethod - def generate_config(config_data): - """Generate the ec2.ini file in as a secure temp file. 
- Once generated, pass it to the ec2.py as an environment variable. - """ - fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.') - for section, values in config_data.items(): - os.write(fildes, "[%s]\n" % section) - for option, value in values.items(): - os.write(fildes, "%s = %s\n" % (option, value)) - os.close(fildes) - return tmp_file_path - - def run_provider(self): - '''Setup the provider call with proper variables - and call self.get_provider_tags. - ''' - try: - all_results = [] - tmp_file_paths = [] - processes = {} - for account in self.config['accounts']: - env = account['env_vars'] - if account.has_key('provider_config'): - tmp_file_paths.append(MultiEc2.generate_config(account['provider_config'])) - env['EC2_INI_PATH'] = tmp_file_paths[-1] - name = account['name'] - provider = account['provider'] - processes[name] = self.get_provider_tags(provider, env) - - # for each process collect stdout when its available - for name, process in processes.items(): - out, err = process.communicate() - all_results.append({ - "name": name, - "out": out.strip(), - "err": err.strip(), - "code": process.returncode - }) - - finally: - # Clean up the mkstemp file - for tmp_file in tmp_file_paths: - os.unlink(tmp_file) - - return all_results - - def get_inventory(self): - """Create the subprocess to fetch tags from a provider. - Host query: - Query to return a specific host. If > 1 queries have - results then fail. - - List query: - Query all of the different accounts for their tags. Once completed - store all of their results into one merged updated hash. - """ - provider_results = self.run_provider() - - # process --host results - # For any 0 result, return it - if self.args.get('host', None): - count = 0 - for results in provider_results: - if results['code'] == 0 and results['err'] == '' and results['out'] != '{}': - self.result = json.loads(results['out']) - count += 1 - if count > 1: - raise RuntimeError("Found > 1 results for --host %s. \ - This is an invalid state." 
% self.args.get('host', None)) - # process --list results - else: - # For any non-zero, raise an error on it - for result in provider_results: - if result['code'] != 0: - err_msg = ['\nProblem fetching account: {name}', - 'Error Code: {code}', - 'StdErr: {err}', - 'Stdout: {out}', - ] - raise RuntimeError('\n'.join(err_msg).format(**result)) - else: - self.all_ec2_results[result['name']] = json.loads(result['out']) - - # Check if user wants extra vars in yaml by - # having hostvars and all_group defined - for acc_config in self.config['accounts']: - self.apply_account_config(acc_config) - - # Build results by merging all dictionaries - values = self.all_ec2_results.values() - values.insert(0, self.result) - for result in values: - MultiEc2.merge_destructively(self.result, result) - - def apply_account_config(self, acc_config): - ''' Apply account config settings - ''' - results = self.all_ec2_results[acc_config['name']] - - # Update each hostvar with the newly desired key: value from extra_* - for _extra in ['extra_groups', 'extra_vars']: - for new_var, value in acc_config.get(_extra, {}).items(): - # Verify the account results look sane - # by checking for these keys ('_meta' and 'hostvars' exist) - if results.has_key('_meta') and results['_meta'].has_key('hostvars'): - for data in results['_meta']['hostvars'].values(): - data[str(new_var)] = str(value) - - # Add this group - if _extra == 'extra_groups' and results.has_key(acc_config['all_group']): - results["%s_%s" % (new_var, value)] = \ - copy.copy(results[acc_config['all_group']]) - - # Clone groups goes here - for to_name, from_name in acc_config.get('clone_groups', {}).items(): - if results.has_key(from_name): - results[to_name] = copy.copy(results[from_name]) - - # Clone vars goes here - for to_name, from_name in acc_config.get('clone_vars', {}).items(): - # Verify the account results look sane - # by checking for these keys ('_meta' and 'hostvars' exist) - if results.has_key('_meta') and results['_meta'].has_key('hostvars'): - for data in results['_meta']['hostvars'].values(): - data[str(to_name)] = data.get(str(from_name), 'nil') - - # store the results back into all_ec2_results - self.all_ec2_results[acc_config['name']] = results - - @staticmethod - def merge_destructively(input_a, input_b): - "merges b into input_a" - for key in input_b: - if key in input_a: - if isinstance(input_a[key], dict) and isinstance(input_b[key], dict): - MultiEc2.merge_destructively(input_a[key], input_b[key]) - elif input_a[key] == input_b[key]: - pass # same leaf value - # both lists so add each element in b to a if it does ! 
exist - elif isinstance(input_a[key], list) and isinstance(input_b[key], list): - for result in input_b[key]: - if result not in input_a[key]: - input_a[key].append(result) - # a is a list and not b - elif isinstance(input_a[key], list): - if input_b[key] not in input_a[key]: - input_a[key].append(input_b[key]) - elif isinstance(input_b[key], list): - input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]] - else: - input_a[key] = [input_a[key], input_b[key]] - else: - input_a[key] = input_b[key] - return input_a - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path): - mod_time = os.path.getmtime(self.cache_path) - current_time = time() - if (mod_time + self.config['cache_max_age']) > current_time: - return True - - return False - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser( - description='Produce an Ansible Inventory file based on a provider') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Fetch cached only instances (default: False)') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', default=False, - help='Get all the variables about a specific instance') - self.args = parser.parse_args().__dict__ - - def write_to_cache(self): - ''' Writes data in JSON format to a file ''' - - # if it does not exist, try and create it. - if not os.path.isfile(self.cache_path): - path = os.path.dirname(self.cache_path) - try: - os.makedirs(path) - except OSError as exc: - if exc.errno != errno.EEXIST or not os.path.isdir(path): - raise - - json_data = MultiEc2.json_format_dict(self.result, True) - with open(self.cache_path, 'w') as cache: - try: - fcntl.flock(cache, fcntl.LOCK_EX) - cache.write(json_data) - finally: - fcntl.flock(cache, fcntl.LOCK_UN) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - if not os.path.isfile(self.cache_path): - return None - - with open(self.cache_path, 'r') as cache: - self.result = json.loads(cache.read()) - - return True - - @classmethod - def json_format_dict(cls, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2) - else: - return json.dumps(data) - - def result_str(self): - '''Return cache string stored in self.result''' - return self.json_format_dict(self.result, True) - - -if __name__ == "__main__": - MEC2 = MultiEc2() - MEC2.parse_cli_args() - MEC2.run() - print MEC2.result_str() diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example deleted file mode 100644 index bbd81ad20..000000000 --- a/inventory/multi_ec2.yaml.example +++ /dev/null @@ -1,32 +0,0 @@ -# multi ec2 inventory configs -# -cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache - -accounts: - - name: aws1 - provider: aws/hosts/ec2.py - provider_config: - ec2: - regions: all - regions_exclude: us-gov-west-1,cn-north-1 - destination_variable: public_dns_name - route53: False - cache_path: ~/.ansible/tmp - cache_max_age: 300 - vpc_destination_variable: ip_address - env_vars: - AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX - AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - all_group: ec2 - extra_vars: - cloud: aws - account: aws1 - -- name: aws2 - provider: 
aws/hosts/ec2.py - env_vars: - AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX - AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - EC2_INI_PATH: /etc/ansible/ec2.ini - -cache_max_age: 60 diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py new file mode 100755 index 000000000..354a8c10c --- /dev/null +++ b/inventory/multi_inventory.py @@ -0,0 +1,415 @@ +#!/usr/bin/env python2 +''' + Fetch and combine multiple inventory account settings into a single + json hash. +''' +# vim: expandtab:tabstop=4:shiftwidth=4 + +from time import time +import argparse +import yaml +import os +import subprocess +import json +import errno +import fcntl +import tempfile +import copy +from string import Template +import shutil + +CONFIG_FILE_NAME = 'multi_inventory.yaml' +DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache') + +class MultiInventoryException(Exception): + '''Exceptions for MultiInventory class''' + pass + +class MultiInventory(object): + ''' + MultiInventory class: + Opens a yaml config file and reads aws credentials. + Stores a json hash of resources in result. + ''' + + def __init__(self, args=None): + # Allow args to be passed when called as a library + if not args: + self.args = {} + else: + self.args = args + + self.cache_path = DEFAULT_CACHE_PATH + self.config = None + self.all_inventory_results = {} + self.result = {} + self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__))) + + same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME) + etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME) + + # Prefer a file in the same directory, fall back to a file in etc + if os.path.isfile(same_dir_config_file): + self.config_file = same_dir_config_file + elif os.path.isfile(etc_dir_config_file): + self.config_file = etc_dir_config_file + else: + self.config_file = None # expect env vars + + + def run(self): + '''This method checks to see if the local + cache is valid for the inventory. + + if the cache is valid; return cache + else the credentials are loaded from multi_inventory.yaml or from the env + and we attempt to get the inventory from the provider specified. + ''' + # load yaml + if self.config_file and os.path.isfile(self.config_file): + self.config = self.load_yaml_config() + elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \ + os.environ.has_key("AWS_SECRET_ACCESS_KEY"): + # Build a default config + self.config = {} + self.config['accounts'] = [ + { + 'name': 'default', + 'cache_location': DEFAULT_CACHE_PATH, + 'provider': 'aws/hosts/ec2.py', + 'env_vars': { + 'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"], + 'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"], + } + }, + ] + + self.config['cache_max_age'] = 300 + else: + raise RuntimeError("Could not find valid ec2 credentials in the environment.") + + if self.config.has_key('cache_location'): + self.cache_path = self.config['cache_location'] + + if self.args.get('refresh_cache', None): + self.get_inventory() + self.write_to_cache() + # if its a host query, fetch and do not cache + elif self.args.get('host', None): + self.get_inventory() + elif not self.is_cache_valid(): + # go fetch the inventories and cache them if cache is expired + self.get_inventory() + self.write_to_cache() + else: + # get data from disk + self.get_inventory_from_cache() + + def load_yaml_config(self, conf_file=None): + """Load a yaml config file with credentials to query the + respective cloud for inventory. 
+ """ + config = None + + if not conf_file: + conf_file = self.config_file + + with open(conf_file) as conf: + config = yaml.safe_load(conf) + + # Provide a check for unique account names + if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']): + raise MultiInventoryException('Duplicate account names in config file') + + return config + + def get_provider_tags(self, provider, env=None): + """Call and query all of the tags that are usuable + by ansible. If environment is empty use the default env. + """ + if not env: + env = os.environ + + # Allow relatively path'd providers in config file + if os.path.isfile(os.path.join(self.file_path, provider)): + provider = os.path.join(self.file_path, provider) + + # check to see if provider exists + if not os.path.isfile(provider) or not os.access(provider, os.X_OK): + raise RuntimeError("Problem with the provider. Please check path " \ + "and that it is executable. (%s)" % provider) + + cmds = [provider] + if self.args.get('host', None): + cmds.append("--host") + cmds.append(self.args.get('host', None)) + else: + cmds.append('--list') + + if 'aws' in provider.lower(): + cmds.append('--refresh-cache') + + return subprocess.Popen(cmds, stderr=subprocess.PIPE, \ + stdout=subprocess.PIPE, env=env) + + @staticmethod + def generate_config(provider_files): + """Generate the provider_files in a temporary directory. + """ + prefix = 'multi_inventory.' + tmp_dir_path = tempfile.mkdtemp(prefix=prefix) + for provider_file in provider_files: + filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+') + content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path) + filedes.write(content) + filedes.close() + + return tmp_dir_path + + def run_provider(self): + '''Setup the provider call with proper variables + and call self.get_provider_tags. + ''' + try: + all_results = [] + tmp_dir_paths = [] + processes = {} + for account in self.config['accounts']: + tmp_dir = None + if account.has_key('provider_files'): + tmp_dir = MultiInventory.generate_config(account['provider_files']) + tmp_dir_paths.append(tmp_dir) + + # Update env vars after creating provider_config_files + # so that we can grab the tmp_dir if it exists + env = account.get('env_vars', {}) + if env and tmp_dir: + for key, value in env.items(): + env[key] = Template(value).substitute(tmpdir=tmp_dir) + + name = account['name'] + provider = account['provider'] + processes[name] = self.get_provider_tags(provider, env) + + # for each process collect stdout when its available + for name, process in processes.items(): + out, err = process.communicate() + all_results.append({ + "name": name, + "out": out.strip(), + "err": err.strip(), + "code": process.returncode + }) + + finally: + # Clean up the mkdtemp dirs + for tmp_dir in tmp_dir_paths: + shutil.rmtree(tmp_dir) + + return all_results + + def get_inventory(self): + """Create the subprocess to fetch tags from a provider. + Host query: + Query to return a specific host. If > 1 queries have + results then fail. + + List query: + Query all of the different accounts for their tags. Once completed + store all of their results into one merged updated hash. 
+ """ + provider_results = self.run_provider() + + # process --host results + # For any 0 result, return it + if self.args.get('host', None): + count = 0 + for results in provider_results: + if results['code'] == 0 and results['err'] == '' and results['out'] != '{}': + self.result = json.loads(results['out']) + count += 1 + if count > 1: + raise RuntimeError("Found > 1 results for --host %s. \ + This is an invalid state." % self.args.get('host', None)) + # process --list results + else: + # For any non-zero, raise an error on it + for result in provider_results: + if result['code'] != 0: + err_msg = ['\nProblem fetching account: {name}', + 'Error Code: {code}', + 'StdErr: {err}', + 'Stdout: {out}', + ] + raise RuntimeError('\n'.join(err_msg).format(**result)) + else: + self.all_inventory_results[result['name']] = json.loads(result['out']) + + # Check if user wants extra vars in yaml by + # having hostvars and all_group defined + for acc_config in self.config['accounts']: + self.apply_account_config(acc_config) + + # Build results by merging all dictionaries + values = self.all_inventory_results.values() + values.insert(0, self.result) + for result in values: + MultiInventory.merge_destructively(self.result, result) + + def add_entry(self, data, keys, item): + ''' Add an item to a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + item = c + ''' + if "." in keys: + key, rest = keys.split(".", 1) + if key not in data: + data[key] = {} + self.add_entry(data[key], rest, item) + else: + data[keys] = item + + def get_entry(self, data, keys): + ''' Get an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + return c + ''' + if keys and "." in keys: + key, rest = keys.split(".", 1) + return self.get_entry(data[key], rest) + else: + return data.get(keys, None) + + def apply_account_config(self, acc_config): + ''' Apply account config settings + ''' + results = self.all_inventory_results[acc_config['name']] + results['all_hosts'] = results['_meta']['hostvars'].keys() + + # Update each hostvar with the newly desired key: value from extra_* + for _extra in ['extra_vars', 'extra_groups']: + for new_var, value in acc_config.get(_extra, {}).items(): + for data in results['_meta']['hostvars'].values(): + self.add_entry(data, new_var, value) + + # Add this group + if _extra == 'extra_groups': + results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts']) + + # Clone groups goes here + for to_name, from_name in acc_config.get('clone_groups', {}).items(): + if results.has_key(from_name): + results[to_name] = copy.copy(results[from_name]) + + # Clone vars goes here + for to_name, from_name in acc_config.get('clone_vars', {}).items(): + for data in results['_meta']['hostvars'].values(): + self.add_entry(data, to_name, self.get_entry(data, from_name)) + + # store the results back into all_inventory_results + self.all_inventory_results[acc_config['name']] = results + + @staticmethod + def merge_destructively(input_a, input_b): + "merges b into input_a" + for key in input_b: + if key in input_a: + if isinstance(input_a[key], dict) and isinstance(input_b[key], dict): + MultiInventory.merge_destructively(input_a[key], input_b[key]) + elif input_a[key] == input_b[key]: + pass # same leaf value + # both lists so add each element in b to a if it does ! 
exist + elif isinstance(input_a[key], list) and isinstance(input_b[key], list): + for result in input_b[key]: + if result not in input_a[key]: + input_a[key].append(result) + # a is a list and not b + elif isinstance(input_a[key], list): + if input_b[key] not in input_a[key]: + input_a[key].append(input_b[key]) + elif isinstance(input_b[key], list): + input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]] + else: + input_a[key] = [input_a[key], input_b[key]] + else: + input_a[key] = input_b[key] + return input_a + + def is_cache_valid(self): + ''' Determines if the cache file has expired or if it is still valid ''' + + if os.path.isfile(self.cache_path): + mod_time = os.path.getmtime(self.cache_path) + current_time = time() + if (mod_time + self.config['cache_max_age']) > current_time: + return True + + return False + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on a provider') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force a refresh of the cached inventory (default: False)') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', default=False, + help='Get all the variables about a specific instance') + self.args = parser.parse_args().__dict__ + + def write_to_cache(self): + ''' Writes data in JSON format to a file ''' + + # if it does not exist, try to create it. + if not os.path.isfile(self.cache_path): + path = os.path.dirname(self.cache_path) + try: + os.makedirs(path) + except OSError as exc: + if exc.errno != errno.EEXIST or not os.path.isdir(path): + raise + + json_data = MultiInventory.json_format_dict(self.result, True) + with open(self.cache_path, 'w') as cache: + try: + fcntl.flock(cache, fcntl.LOCK_EX) + cache.write(json_data) + finally: + fcntl.flock(cache, fcntl.LOCK_UN) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + if not os.path.isfile(self.cache_path): + return None + + with open(self.cache_path, 'r') as cache: + self.result = json.loads(cache.read()) + + return True + + @classmethod + def json_format_dict(cls, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + def result_str(self): + '''Return cache string stored in self.result''' + return self.json_format_dict(self.result, True) + + +if __name__ == "__main__": + MI2 = MultiInventory() + MI2.parse_cli_args() + MI2.run() + print MI2.result_str() diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example new file mode 100644 index 000000000..0f0788d18 --- /dev/null +++ b/inventory/multi_inventory.yaml.example @@ -0,0 +1,51 @@ +# multi ec2 inventory configs +# +cache_location: ~/.ansible/tmp/multi_inventory.cache + +accounts: + - name: aws1 + provider: aws/ec2.py + provider_files: + - name: ec2.ini + contents: |- + [ec2] + regions = all + regions_exclude = us-gov-west-1,cn-north-1 + destination_variable = public_dns_name + route53 = False + cache_path = ~/.ansible/tmp + cache_max_age = 300 + vpc_destination_variable = ip_address + env_vars: + AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX + AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + EC2_INI_PATH: ${tmpdir}/ec2.ini # we 
replace ${tmpdir} with the temporary directory that we've created for the provider. + extra_vars: + cloud: aws + account: aws1 + +- name: mygce + extra_vars: + cloud: gce + account: gce1 + env_vars: + GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider. + provider: gce/gce.py + provider_files: + - name: priv_key.pem + contents: |- + -----BEGIN PRIVATE KEY----- + yourprivatekeydatahere + -----END PRIVATE KEY----- + - name: gce.ini + contents: |- + [gce] + gce_service_account_email_address = @developer.gserviceaccount.com + gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider. + gce_project_id = gce-project + zone = us-central1-a + network = default + gce_machine_type = n1-standard-2 + gce_machine_image = rhel7 + +cache_max_age: 600 diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 8d79c80c6..623b2938f 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf # Fix links -rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py +rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws -ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py +ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws # openshift-ansible-docs install @@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible mkdir -p %{buildroot}%{_datadir}/ansible/inventory mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce -cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory -cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml +cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory +cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce @@ -137,7 +137,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks. 
%files inventory %config(noreplace) /etc/ansible/* %dir %{_datadir}/ansible/inventory -%{_datadir}/ansible/inventory/multi_ec2.py* +%{_datadir}/ansible/inventory/multi_inventory.py* %package inventory-aws Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index 9cc15c0a8..bce6a8745 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -6,7 +6,7 @@ - name: copy: content: "{{ oo_inventory_accounts | to_nice_yaml }}" - dest: /etc/ansible/multi_ec2.yaml + dest: /etc/ansible/multi_inventory.yaml group: "{{ oo_inventory_group }}" owner: "{{ oo_inventory_owner }}" mode: "0640" @@ -20,17 +20,17 @@ - file: state: link - src: /usr/share/ansible/inventory/multi_ec2.py - dest: /etc/ansible/inventory/multi_ec2.py + src: /usr/share/ansible/inventory/multi_inventory.py + dest: /etc/ansible/inventory/multi_inventory.py owner: root group: libra_ops # This cron uses the above location to call its job - name: Cron to keep cache fresh cron: - name: 'multi_ec2_inventory' + name: 'multi_inventory' minute: '*/10' - job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null' + job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null' when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache - name: Set cache location diff --git a/test/units/README.md b/test/units/README.md index 3bed227eb..78a02c3ea 100644 --- a/test/units/README.md +++ b/test/units/README.md @@ -4,4 +4,4 @@ These should be run by sourcing the env-setup: $ source test/env-setup Then navigate to the test/units/ directory. -$ python -m unittest multi_ec2_test +$ python -m unittest multi_inventory_test diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py new file mode 100755 index 000000000..168cd82b7 --- /dev/null +++ b/test/units/multi_inventory_test.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python2 +''' + Unit tests for MultiInventory +''' + +import unittest +import multi_inventory + +# Short variable names are used in these tests to keep them brief, +# so disable the pylint invalid-name check +# pylint: disable=invalid-name +class MultiInventoryTest(unittest.TestCase): + ''' + Test class for MultiInventory + ''' + +# def setUp(self): +# '''setup method''' +# pass + + def test_merge_simple_1(self): + '''Testing a simple merge of 2 dictionaries''' + a = {"key1" : 1} + b = {"key1" : 2} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [1, 2]}) + + def test_merge_b_empty(self): + '''Testing a merge where b is an empty dictionary''' + a = {"key1" : 1} + b = {} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": 1}) + + def test_merge_a_empty(self): + '''Testing a merge where a is an empty dictionary''' + b = {"key1" : 1} + a = {} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": 1}) + + def test_merge_hash_array(self): + '''Testing a merge of a dictionary and a dictionary with an array''' + a = {"key1" : {"hasha": 1}} + b = {"key1" : [1, 2]} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]}) + + def test_merge_array_hash(self): + '''Testing a merge of a dictionary with an 
array and a dictionary with a hash''' + a = {"key1" : [1, 2]} + b = {"key1" : {"hasha": 1}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]}) + + def test_merge_keys_1(self): + '''Testing a merge on a dictionary for keys''' + a = {"key1" : [1, 2], "key2" : {"hasha": 2}} + b = {"key2" : {"hashb": 1}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}}) + + def test_merge_recursive_1(self): + '''Testing a recursive merge''' + a = {"a" : {"b": {"c": 1}}} + b = {"a" : {"b": {"c": 2}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}}) + + def test_merge_recursive_array_item(self): + '''Testing a recursive merge for an array''' + a = {"a" : {"b": {"c": [1]}}} + b = {"a" : {"b": {"c": 2}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}}) + + def test_merge_recursive_hash_item(self): + '''Testing a recursive merge for a hash''' + a = {"a" : {"b": {"c": {"d": 1}}}} + b = {"a" : {"b": {"c": 2}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}}) + + def test_merge_recursive_array_hash(self): + '''Testing a recursive merge for an array and a hash''' + a = {"a" : [{"b": {"c": 1}}]} + b = {"a" : {"b": {"c": 1}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) + + def test_merge_recursive_hash_array(self): + '''Testing a recursive merge for an array and a hash''' + a = {"a" : {"b": {"c": 1}}} + b = {"a" : [{"b": {"c": 1}}]} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) + +# def tearDown(self): +# '''TearDown method''' +# pass + +if __name__ == "__main__": + unittest.main() diff --git a/test/units/mutli_ec2_test.py b/test/units/mutli_ec2_test.py deleted file mode 100755 index 95df93cd2..000000000 --- a/test/units/mutli_ec2_test.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python2 - -import unittest -import sys -import os -import sys -import multi_ec2 - -class MultiEc2Test(unittest.TestCase): - - def setUp(self): - pass - - def test_merge_simple_1(self): - a = {"key1" : 1} - b = {"key1" : 2} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [1,2]}) - - def test_merge_b_empty(self): - a = {"key1" : 1} - b = {} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": 1}) - - def test_merge_a_empty(self): - b = {"key1" : 1} - a = {} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": 1}) - - def test_merge_hash_array(self): - a = {"key1" : {"hasha": 1}} - b = {"key1" : [1,2]} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]}) - - def test_merge_array_hash(self): - a = {"key1" : [1,2]} - b = {"key1" : {"hasha": 1}} - result = {} - 
[multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]}) - - def test_merge_keys_1(self): - a = {"key1" : [1,2], "key2" : {"hasha": 2}} - b = {"key2" : {"hashb": 1}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}}) - - def test_merge_recursive_1(self): - a = {"a" : {"b": {"c": 1}}} - b = {"a" : {"b": {"c": 2}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": {"b": {"c": [1,2]}}}) - - def test_merge_recursive_array_item(self): - a = {"a" : {"b": {"c": [1]}}} - b = {"a" : {"b": {"c": 2}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": {"b": {"c": [1,2]}}}) - - def test_merge_recursive_hash_item(self): - a = {"a" : {"b": {"c": {"d": 1}}}} - b = {"a" : {"b": {"c": 2}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}}) - - def test_merge_recursive_array_hash(self): - a = {"a" : [{"b": {"c": 1}}]} - b = {"a" : {"b": {"c": 1}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) - - def test_merge_recursive_hash_array(self): - a = {"a" : {"b": {"c": 1}}} - b = {"a" : [{"b": {"c": 1}}]} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) - - def tearDown(self): - pass - -if __name__ == "__main__": - unittest.main() -- cgit v1.2.3 From eaa5e17d222229537a42b4bf17d2193d25fa67f8 Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Wed, 28 Oct 2015 10:39:52 -0400 Subject: common/openshift-cluster: Scaleup playbook New playbook to add nodes to a cluster without altering the masters --- playbooks/common/openshift-cluster/scaleup.yml | 70 ++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 playbooks/common/openshift-cluster/scaleup.yml diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml new file mode 100644 index 000000000..3f00e0020 --- /dev/null +++ b/playbooks/common/openshift-cluster/scaleup.yml @@ -0,0 +1,70 @@ +--- +- name: Populate config host groups + hosts: localhost + gather_facts: no + tasks: + - fail: + msg: This playbook rquires g_etcd_group to be set + when: g_etcd_group is not defined + + - fail: + msg: This playbook rquires g_masters_group to be set + when: g_masters_group is not defined + + - fail: + msg: This playbook rquires g_nodes_group to be set + when: g_nodes_group is not defined + + - name: Evaluate oo_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_etcd_group] | default([]) + + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_nodes_group] | default([]) 
+ + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + when: g_nodeonmaster is defined and g_nodeonmaster == true + + - name: Evaluate oo_first_etcd + add_host: + name: "{{ groups[g_etcd_group][0] }}" + groups: oo_first_etcd + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 + + - name: Evaluate oo_first_master + add_host: + name: "{{ groups[g_masters_group][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 + +- include: ../openshift-node/config.yml + vars: + osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" -- cgit v1.2.3 From da95bc1be774413448d894f4bd330555f251aec0 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 28 Oct 2015 11:11:15 -0400 Subject: Refactor common group evaluation to avoid duplication --- playbooks/aws/openshift-cluster/launch.yml | 8 +-- playbooks/common/openshift-cluster/config.yml | 65 +--------------------- .../common/openshift-cluster/evaluate_groups.yml | 64 +++++++++++++++++++++ playbooks/common/openshift-cluster/scaleup.yml | 65 +--------------------- .../set_etcd_launch_facts_tasks.yml | 13 ----- .../set_master_launch_facts_tasks.yml | 13 ----- .../set_node_launch_facts_tasks.yml | 15 ----- .../tasks/set_etcd_launch_facts.yml | 13 +++++ .../tasks/set_master_launch_facts.yml | 13 +++++ .../tasks/set_node_launch_facts.yml | 15 +++++ playbooks/gce/openshift-cluster/launch.yml | 6 +- playbooks/libvirt/openshift-cluster/launch.yml | 8 +-- 12 files changed, 118 insertions(+), 180 deletions(-) create mode 100644 playbooks/common/openshift-cluster/evaluate_groups.yml delete mode 100644 playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml delete mode 100644 playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml delete mode 100644 playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml create mode 100644 playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml create mode 100644 playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml create mode 100644 playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 786918929..09bf34666 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -11,7 +11,7 @@ msg: Deployment type not supported for aws provider yet when: deployment_type == 'enterprise' - - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ etcd_names }}" @@ -19,7 +19,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ master_names }}" @@ -27,7 +27,7 @@ type: "{{ 
k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "compute" count: "{{ num_nodes }}" @@ -38,7 +38,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "infra" count: "{{ num_infra }}" diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 4c74f96db..57de7130b 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,68 +1,5 @@ --- -- name: Populate config host groups - hosts: localhost - gather_facts: no - tasks: - - fail: - msg: This playbook rquires g_etcd_group to be set - when: g_etcd_group is not defined - - - fail: - msg: This playbook rquires g_masters_group to be set - when: g_masters_group is not defined - - - fail: - msg: This playbook rquires g_nodes_group to be set - when: g_nodes_group is not defined - - - name: Evaluate oo_etcd_to_config - add_host: - name: "{{ item }}" - groups: oo_etcd_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_etcd_group] | default([]) - - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_nodes_group] | default([]) - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - when: g_nodeonmaster is defined and g_nodeonmaster == true - - - name: Evaluate oo_first_etcd - add_host: - name: "{{ groups[g_etcd_group][0] }}" - groups: oo_first_etcd - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 - - - name: Evaluate oo_first_master - add_host: - name: "{{ groups[g_masters_group][0] }}" - groups: oo_first_master - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 +- include: evaluate_groups.yml - include: ../openshift-etcd/config.yml diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml new file mode 100644 index 000000000..1919660dd --- /dev/null +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -0,0 +1,64 @@ +--- +- name: Populate config host groups + hosts: localhost + gather_facts: no + tasks: + - fail: + msg: This playbook rquires g_etcd_group to be set + when: g_etcd_group is not defined + + - fail: + msg: This playbook rquires g_masters_group to be set + when: g_masters_group is not defined + + - fail: + msg: This playbook rquires g_nodes_group to be set + when: g_nodes_group is 
not defined + + - name: Evaluate oo_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_etcd_group] | default([]) + + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_nodes_group] | default([]) + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + when: g_nodeonmaster is defined and g_nodeonmaster == true + + - name: Evaluate oo_first_etcd + add_host: + name: "{{ groups[g_etcd_group][0] }}" + groups: oo_first_etcd + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 + + - name: Evaluate oo_first_master + add_host: + name: "{{ groups[g_masters_group][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml index 3f00e0020..201320de8 100644 --- a/playbooks/common/openshift-cluster/scaleup.yml +++ b/playbooks/common/openshift-cluster/scaleup.yml @@ -1,68 +1,5 @@ --- -- name: Populate config host groups - hosts: localhost - gather_facts: no - tasks: - - fail: - msg: This playbook rquires g_etcd_group to be set - when: g_etcd_group is not defined - - - fail: - msg: This playbook rquires g_masters_group to be set - when: g_masters_group is not defined - - - fail: - msg: This playbook rquires g_nodes_group to be set - when: g_nodes_group is not defined - - - name: Evaluate oo_etcd_to_config - add_host: - name: "{{ item }}" - groups: oo_etcd_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_etcd_group] | default([]) - - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_nodes_group] | default([]) - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - when: g_nodeonmaster is defined and g_nodeonmaster == true - - - name: Evaluate oo_first_etcd - add_host: - name: "{{ groups[g_etcd_group][0] }}" - groups: oo_first_etcd - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: 
"{{ g_sudo | default(omit) }}" - when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 - - - name: Evaluate oo_first_master - add_host: - name: "{{ groups[g_masters_group][0] }}" - groups: oo_first_master - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 +- include: evaluate_groups.yml - include: ../openshift-node/config.yml vars: diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml deleted file mode 100644 index 1a6580795..000000000 --- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- set_fact: k8s_type="etcd" - -- name: Generate etcd instance names(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" - register: etcd_names_output - with_sequence: count={{ num_etcd }} - -- set_fact: - etcd_names: "{{ etcd_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml deleted file mode 100644 index 36d7b7870..000000000 --- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- set_fact: k8s_type="master" - -- name: Generate master instance names(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" - register: master_names_output - with_sequence: count={{ num_masters }} - -- set_fact: - master_names: "{{ master_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml deleted file mode 100644 index 278942f8b..000000000 --- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- set_fact: k8s_type=node -- set_fact: sub_host_type="{{ type }}" -- set_fact: number_nodes="{{ count }}" - -- name: Generate node instance names(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" - register: node_names_output - with_sequence: count={{ number_nodes }} - -- set_fact: - node_names: "{{ node_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml new file mode 100644 index 000000000..1a6580795 --- /dev/null +++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml @@ -0,0 +1,13 @@ +--- +- set_fact: k8s_type="etcd" + +- name: Generate etcd instance names(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: etcd_names_output + with_sequence: count={{ num_etcd }} + +- set_fact: + etcd_names: "{{ etcd_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml new file mode 100644 index 
000000000..36d7b7870 --- /dev/null +++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml @@ -0,0 +1,13 @@ +--- +- set_fact: k8s_type="master" + +- name: Generate master instance names(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: master_names_output + with_sequence: count={{ num_masters }} + +- set_fact: + master_names: "{{ master_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml new file mode 100644 index 000000000..278942f8b --- /dev/null +++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml @@ -0,0 +1,15 @@ +--- +- set_fact: k8s_type=node +- set_fact: sub_host_type="{{ type }}" +- set_fact: number_nodes="{{ count }}" + +- name: Generate node instance names(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" + register: node_names_output + with_sequence: count={{ number_nodes }} + +- set_fact: + node_names: "{{ node_names_output.results | default([]) + | oo_collect('ansible_facts') + | oo_collect('scratch_name') }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index c22b897d5..8be5d53e7 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -9,7 +9,7 @@ - fail: msg="Deployment type not supported for gce provider yet" when: deployment_type == 'enterprise' - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ master_names }}" @@ -17,7 +17,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "compute" count: "{{ num_nodes }}" @@ -28,7 +28,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "infra" count: "{{ num_infra }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index d3e768de5..8d7949dd1 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -17,7 +17,7 @@ - include: tasks/configure_libvirt.yml - - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ etcd_names }}" @@ -25,7 +25,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ master_names }}" @@ -33,7 +33,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "compute" count: "{{ num_nodes }}" @@ -44,7 
+44,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "infra" count: "{{ num_infra }}" -- cgit v1.2.3 From 452cb00fd7c6684614c3abdba52d8e103c136fbe Mon Sep 17 00:00:00 2001 From: Samuel Munilla Date: Wed, 4 Nov 2015 10:09:15 -0500 Subject: atomic-openshift-installer: Correct default playbook directory --- utils/src/ooinstall/cli_installer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index 88afcee36..b6b1244c3 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -385,7 +385,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): dir_okay=True, readable=True), # callback=validate_ansible_dir, - default='/usr/share/openshift-ansible/', + default='/usr/share/ansible/openshift-ansible/', envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') @click.option('--ansible-config', type=click.Path(file_okay=True, -- cgit v1.2.3 From f1db40374cc98205118ff1f6320b8bffe9bf3dc1 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Wed, 4 Nov 2015 10:16:33 -0500 Subject: added the %util in zabbix --- roles/os_zabbix/vars/template_os_linux.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml index d494f1bad..fbc20cd63 100644 --- a/roles/os_zabbix/vars/template_os_linux.yml +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -224,6 +224,14 @@ g_template_os_linux: applications: - Disk + - discoveryrule_key: disc.disk + name: "Percent Utilized for disk {#OSO_DISK}" + key: "disc.disk.putil[{#OSO_DISK}]" + value_type: float + description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command" + applications: + - Disk + ztriggerprototypes: - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}' expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85' -- cgit v1.2.3 From c792e1d19863e063a9544967f5b892030791bbd1 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 4 Nov 2015 11:00:17 -0500 Subject: Automatic commit of package [openshift-ansible] release [3.0.7-1]. 
--- .tito/packages/openshift-ansible | 2 +- openshift-ansible.spec | 41 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 92f545b25..6046a1a86 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.6-1 ./ +3.0.7-1 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 8ea9120f2..f819624b3 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.6 +Version: 3.0.7 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -249,6 +249,45 @@ Atomic OpenShift Utilities includes %changelog +* Wed Nov 04 2015 Kenny Woodson 3.0.7-1 +- added the %%util in zabbix (mwoodson@redhat.com) +- atomic-openshift-installer: Correct default playbook directory + (smunilla@redhat.com) +- Support for gce (kwoodson@redhat.com) +- fixed a dumb naming mistake (mwoodson@redhat.com) +- added disk tps checks to zabbix (mwoodson@redhat.com) +- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com) +- atomic-openshift-installer: Add default openshift-ansible-playbook + (smunilla@redhat.com) +- ooinstall: Add check for nopwd sudo (smunilla@redhat.com) +- ooinstall: Update local install check (smunilla@redhat.com) +- oo-install: Support running on the host to be deployed (smunilla@redhat.com) +- Moving to Openshift Etcd application (mmahut@redhat.com) +- Add all the possible servicenames to openshift_all_hostnames for masters + (sdodson@redhat.com) +- Adding openshift.node.etcd items (mmahut@redhat.com) +- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com) +- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com) +- split inventory into subpackages (tdawson@redhat.com) +- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to + warning (mwoodson@redhat.com) +- Rename install_transactions module to openshift_ansible. + (dgoodwin@redhat.com) +- atomic-openshift-installer: Text improvements (smunilla@redhat.com) +- Add utils subpackage missing dep on openshift-ansible-roles. + (dgoodwin@redhat.com) +- Disable requiretty for only the openshift user (error@ioerror.us) +- Don't require tty to run sudo (error@ioerror.us) +- Attempt to remove the various interfaces left over from an install + (bleanhar@redhat.com) +- Pulling latest gce.py module from ansible (kwoodson@redhat.com) +- Disable OpenShift features if installing Atomic Enterprise + (jdetiber@redhat.com) +- Use default playbooks if available. (dgoodwin@redhat.com) +- Add uninstall subcommand. (dgoodwin@redhat.com) +- Add subcommands to CLI. 
(dgoodwin@redhat.com) +- Remove images options in oadm command (nakayamakenjiro@gmail.com) + * Fri Oct 30 2015 Kenny Woodson 3.0.6-1 - Adding python-boto and python-libcloud to openshift-ansible-inventory dependency (kwoodson@redhat.com) -- cgit v1.2.3 From 7c9755a1732b97df15655709ce333ce79d558128 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 4 Nov 2015 11:32:57 -0500 Subject: Adding aws and gce packages to ansible-inventory --- roles/openshift_ansible_inventory/tasks/main.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index bce6a8745..f6919dada 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -1,7 +1,11 @@ --- - yum: - name: openshift-ansible-inventory + name: "{{ item }}" state: present + with_items: + - openshift-ansible-inventory + - openshift-ansible-inventory-aws + - openshift-ansible-inventory-gce - name: copy: -- cgit v1.2.3