Diffstat (limited to 'utils/test')
-rw-r--r--   utils/test/__init__.py                 0
-rw-r--r--   utils/test/cli_installer_tests.py   1132
-rw-r--r--   utils/test/fixture.py                254
-rw-r--r--   utils/test/oo_config_tests.py        306
-rw-r--r--   utils/test/test_utils.py             100
5 files changed, 1792 insertions, 0 deletions
diff --git a/utils/test/__init__.py b/utils/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/__init__.py
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
new file mode 100644
index 000000000..36dc18034
--- /dev/null
+++ b/utils/test/cli_installer_tests.py
@@ -0,0 +1,1132 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-lines
+
+import copy
+import os
+import ConfigParser
+
+import ooinstall.cli_installer as cli
+
+from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
+from mock import patch
+
+
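+# The MOCK_FACTS dicts below mimic, in simplified form, the per-host facts the
+# installer receives back from the openshift_facts playbook: keyed by the
+# connect_to address, each with a 'common' dict of resolved IPs and hostnames.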
+MOCK_FACTS = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+ '10.1.0.1': {
+ 'common': {
+ 'ip': '10.1.0.1',
+ 'public_ip': '10.1.0.1',
+ 'hostname': 'storage-private.example.com',
+ 'public_hostname': 'storage.example.com'
+ }
+ },
+}
+
+MOCK_FACTS_QUICKHA = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+ '10.0.0.4': {
+ 'common': {
+ 'ip': '10.0.0.4',
+ 'public_ip': '10.0.0.4',
+ 'hostname': 'node3-private.example.com',
+ 'public_hostname': 'node3.example.com'
+ }
+ },
+ '10.0.0.5': {
+ 'common': {
+ 'ip': '10.0.0.5',
+ 'public_ip': '10.0.0.5',
+ 'hostname': 'proxy-private.example.com',
+ 'public_hostname': 'proxy.example.com'
+ }
+ },
+ '10.1.0.1': {
+ 'common': {
+ 'ip': '10.1.0.1',
+ 'public_ip': '10.1.0.1',
+ 'hostname': 'storage-private.example.com',
+ 'public_hostname': 'storage.example.com'
+ }
+ },
+}
+
+# Missing connect_to on some hosts:
+BAD_CONFIG = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
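+# Three masters (doubling as nodes), one dedicated node, an haproxy load
+# balancer, and a storage host; a valid HA configuration: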
+QUICKHA_CONFIG = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
+"""
+
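+# Only two masters: an invalid HA configuration (three are required):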
+QUICKHA_2_MASTER_CONFIG = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
+"""
+
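+# One of the masters doubles as the load balancer, which is not allowed: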
+QUICKHA_CONFIG_REUSED_LB = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - master_lb
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ node:
+ storage:
+"""
+
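+# Three masters but no master_lb host, so no load balancer is specified: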
+QUICKHA_CONFIG_NO_LB = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ node:
+ storage:
+"""
+
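+# The load balancer is marked preconfigured, so no ip/public_ip facts are
+# required for it: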
+QUICKHA_CONFIG_PRECONFIGURED_LB = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: proxy-private.example.com
+ hostname: proxy-private.example.com
+ public_hostname: proxy.example.com
+ preconfigured: true
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
+"""
+
+class UnattendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ self.cli_args.append("-u")
+
+ # unattended with config file and all installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+
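+        # All three hosts report an installed version, so an unattended run
+        # without --force should abort (exit code 1 is asserted below).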
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ if result.exception is None or result.exit_code != 1:
+ print "Exit code: %s" % result.exit_code
+ self.fail("Unexpected CLI return")
+
+ # unattended with config file and all installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=False)
+
+ # unattended with config file and no installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and some installed some uninstalled hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
+ # unattended with config file and some installed some uninstalled hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, "hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+ # If user running test has rpm installed, this might be set to default:
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(3, len(hosts))
+ self.assertEquals(3, len(hosts_to_run_on))
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_inventory_write(self, load_facts_mock, run_playbook_mock):
+ merged_config = SAMPLE_CONFIG % 'openshift-enterprise'
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), merged_config)
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Check the inventory file looks as we would expect:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assertEquals('root',
+ inventory.get('OSEv3:vars', 'ansible_ssh_user'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+        # Check the master and node counts:
+ self.assertEquals(1, len(inventory.items('masters')))
+ self.assertEquals(3, len(inventory.items('nodes')))
+
+ for item in inventory.items('masters'):
+ # ansible host lines do NOT parse nicely:
+ master_line = item[0]
+ if item[1] is not None:
+ master_line = "%s=%s" % (master_line, item[1])
+ self.assertTrue('openshift_ip' in master_line)
+ self.assertTrue('openshift_public_ip' in master_line)
+ self.assertTrue('openshift_hostname' in master_line)
+ self.assertTrue('openshift_public_hostname' in master_line)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_variant_version_latest_assumed(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # We didn't specify a version so the latest should have been assumed,
+ # and written to disk:
+ self.assertEquals('3.3', written_config['variant_version'])
+
+ # Make sure the correct value was passed to ansible:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_variant_version_preserved(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ config = '%s\n%s' % (config, 'variant_version: 3.3')
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), config)
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+        # Make sure the version we specified was preserved and written to disk:
+ self.assertEquals('3.3', written_config['variant_version'])
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ # 2016-09-26 - tbielawa - COMMENTING OUT these tests FOR NOW while
+ # we wait to see if anyone notices that we took away their ability
+ # to set the ansible_config parameter in the command line options
+ # and in the installer config file.
+ #
+ # We have removed the ability to set the ansible config file
+ # manually so that our new quieter output mode is the default and
+ # only output mode.
+ #
+ # RE: https://trello.com/c/DSwwizwP - atomic-openshift-install
+ # should only output relevant information.
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, None, None)
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+ # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, ansible_config, ansible_config)
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_ansible_config_specified_in_installer_config(self,
+ # load_facts_mock, run_ansible_mock):
+
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+ # config = "%s\nansible_config: %s" % (config, ansible_config)
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, None, ansible_config)
+
+ # #pylint: disable=too-many-arguments
+ # # This method allows for drastically simpler tests to write, and the args
+ # # are all useful.
+ # def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
+ # installer_config, ansible_config_cli=None, expected_result=None):
+ # """
+ # Utility method for testing the ways you can specify the ansible config.
+ # """
+
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config_file = self.write_config(os.path.join(self.work_dir,
+ # 'ooinstall.conf'), installer_config)
+
+ # self.cli_args.extend(["-c", config_file])
+ # if ansible_config_cli:
+ # self.cli_args.extend(["--ansible-config", ansible_config_cli])
+ # self.cli_args.append("install")
+ # result = self.runner.invoke(cli.cli, self.cli_args)
+ # self.assert_result(result, 0)
+
+ # # Test the env vars for facts playbook:
+ # facts_env_vars = load_facts_mock.call_args[0][2]
+ # if expected_result:
+ # self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
+ # else:
+ # # If user running test has rpm installed, this might be set to default:
+ # self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
+ # facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # # Test the env vars for main playbook:
+ # env_vars = run_ansible_mock.call_args[0][2]
+ # if expected_result:
+ # self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
+ # else:
+ # # If user running test has rpm installed, this might be set to default:
+ # #
+ # # By default we will use the quiet config
+ # self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ # env_vars['ANSIBLE_CONFIG'] == cli.QUIET_ANSIBLE_CONFIG)
+
+ # unattended with bad config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_bad_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ self.assertEquals(1, result.exit_code)
+ self.assertTrue("You must specify either an ip or hostname"
+ in result.output)
+
+ #unattended with three masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(6, len(hosts))
+ self.assertEquals(6, len(hosts_to_run_on))
+
+ #unattended with two masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is an invalid config:
+ self.assert_result(result, 1)
+ self.assertTrue("A minimum of 3 masters are required" in result.output)
+
+ #unattended with three masters, one node, but no load balancer specified:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid input:
+ self.assert_result(result, 1)
+ self.assertTrue('No master load balancer specified in config' in result.output)
+
+ #unattended with three masters, one node, and one of the masters reused as load balancer:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid configuration:
+ self.assert_result(result, 1)
+
+ #unattended with preconfigured lb
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(6, len(hosts))
+ self.assertEquals(6, len(hosts_to_run_on))
+
+class AttendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+        # Doesn't exist but keeps us from reading the local user's config:
+ self.config_file = os.path.join(self.work_dir, 'config.yml')
+ self.cli_args.extend(["-c", self.config_file])
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 4, 4)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 4)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=True')
+
+ # interactive with config file and some installed some uninstalled hosts
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_scaleup_hint(self, load_facts_mock, run_playbook_mock):
+
+        # Modify the mock facts to return a version indicating OpenShift
+        # is already installed on our master and the first node.
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ],
+ add_nodes=[('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+
+ # This is testing the install workflow so we want to make sure we
+ # exit with the appropriate hint.
+ self.assertTrue('scaleup' in result.output)
+ self.assert_result(result, 1)
+
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
+ cli_input = build_input(confirm_facts='y')
+ self.cli_args.extend(["-c", config_file])
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 3)
+
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, 3)
+
+# #interactive with config file and all installed hosts
+# @patch('ooinstall.openshift_ansible.run_main_playbook')
+# @patch('ooinstall.openshift_ansible.load_system_facts')
+# def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
+# mock_facts = copy.deepcopy(MOCK_FACTS)
+# mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+# mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+#
+# cli_input = build_input(hosts=[
+# ('10.0.0.1', True, False),
+# ],
+# add_nodes=[('10.0.0.2', False, False)],
+# ssh_user='root',
+# variant_num=1,
+# schedulable_masters_ok=True,
+# confirm_facts='y',
+# storage='10.0.0.1',)
+#
+# self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
+# run_playbook_mock,
+# cli_input,
+# exp_hosts_len=2,
+# exp_hosts_to_run_on_len=2,
+# force=False)
+
+ #interactive multimaster: one more node than master
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 6, 6)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 6)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.4',
+ 'openshift_schedulable=True')
+
+ self.assertTrue(inventory.has_section('etcd'))
+ self.assertEquals(3, len(inventory.items('etcd')))
+
+ #interactive multimaster: identical masters and nodes
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 5, 5)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 5)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=True')
+
+ # Checks the inventory (as a ConfigParser) for the given host, host
+ # variable, and expected value.
+ def assert_inventory_host_var(self, inventory, section, host, variable):
+ # Config parser splits on the first "=", so we end up with:
+ # 'hostname key1' -> 'val1 key2=val2 key3=val3'
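+        # (the original line 'hostname key1=val1 key2=val2 key3=val3' is
+        # restored below by rejoining the pair with "=")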
+ #
+ # Convert to something easier to test:
+ for (a, b) in inventory.items(section):
+ full_line = "%s=%s" % (a, b)
+ tokens = full_line.split()
+ if tokens[0] == host:
+                found = False
+                for token in tokens:
+                    if token == variable:
+                        found = True
+                        break
+                self.assertTrue(found, "Unable to find %s in line: %s" %
+                                (variable, full_line))
+ return
+ self.fail("unable to find host %s in inventory" % host)
+
+ def assert_inventory_host_var_unset(self, inventory, section, host, variable):
+ # Config parser splits on the first "=", so we end up with:
+ # 'hostname key1' -> 'val1 key2=val2 key3=val3'
+ #
+ # Convert to something easier to test:
+ for (a, b) in inventory.items(section):
+ full_line = "%s=%s" % (a, b)
+ tokens = full_line.split()
+ if tokens[0] == host:
+ self.assertFalse(("%s=" % variable) in full_line,
+ msg='%s host variable was set: %s' %
+ (variable, full_line))
+ return
+ self.fail("unable to find host %s in inventory" % host)
+
+
+ #interactive multimaster: attempting to use a master as the load balancer should fail:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False),
+ storage='10.1.0.1')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ #interactive all-in-one
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 1, 1)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 1)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=True')
+
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_gen_inventory(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ self.cli_args.append("--gen-inventory")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+
+ # Make sure run playbook wasn't called:
+ self.assertEquals(0, len(run_playbook_mock.mock_calls))
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 4)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=True')
+
+
+# TODO: test with config file, attended add node
+# TODO: test with config file, attended new node already in config file
+# TODO: test with config file, attended new node already in config file, plus manually added nodes
+# TODO: test with config file, attended reject facts
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
new file mode 100644
index 000000000..62135c761
--- /dev/null
+++ b/utils/test/fixture.py
@@ -0,0 +1,254 @@
+# pylint: disable=missing-docstring
+import os
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from test.oo_config_tests import OOInstallFixture
+from click.testing import CliRunner
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+variant_version: 3.3
+master_routingconfig_subdomain: example.com
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+def read_yaml(config_file_path):
+    with open(config_file_path, 'r') as cfg_f:
+        return yaml.safe_load(cfg_f.read())
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+        # Add any arguments you would like to test here; the defaults ensure
+        # we only do unattended invocations here, using temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.cli, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, "hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ self.assertEquals(host_count, len(written_config['deployment']['hosts']))
+ for host in written_config['deployment']['hosts']:
+ self.assertTrue('hostname' in host)
+ self.assertTrue('public_hostname' in host)
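+            # Preconfigured hosts (e.g. an externally managed load balancer)
+            # are never probed for facts, so ip/public_ip may be absent there: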
+ if 'preconfigured' not in host:
+ if 'roles' in host:
+ self.assertTrue('node' in host['roles'] or 'storage' in host['roles'])
+ self.assertTrue('ip' in host)
+ self.assertTrue('public_ip' in host)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(
+ os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ if "If you want to force reinstall" in result.output:
+ # verify we exited on seeing installed hosts
+ self.assertEqual(result.exit_code, 1)
+ else:
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+
+#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
+def build_input(ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
+ master_lb=('', False), storage=None):
+ """
+ Build an input string simulating a user entering values in an interactive
+ attended install.
+
+ This is intended to give us one place to update when the CLI prompts change.
+ We should aim to keep this dependent on optional keyword arguments with
+ sensible defaults to keep things from getting too fragile.
+ """
+
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ num_masters = 0
+ if hosts:
+ i = 0
+ for (host, is_master, is_containerized) in hosts:
+ inputs.append(host)
+ if is_master:
+ inputs.append('y')
+ num_masters += 1
+ else:
+ inputs.append('n')
+
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+
+ # We should not be prompted to add more hosts if we're currently at
+ # 2 masters, this is an invalid HA configuration, so this question
+ # will not be asked, and the user must enter the next host:
+ if num_masters != 2:
+ if i < len(hosts) - 1:
+ if num_masters >= 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ # You can pass a single master_lb or a list if you intend for one to get rejected:
+    if isinstance(master_lb[0], (list, tuple)):
+ inputs.extend(master_lb[0])
+ else:
+ inputs.append(master_lb[0])
+ if master_lb[0]:
+ inputs.append('y' if master_lb[1] else 'n')
+
+ if storage:
+ inputs.append(storage)
+
+ inputs.append('subdomain.example.com')
+ inputs.append('proxy.example.com')
+ inputs.append('proxy-private.example.com')
+ inputs.append('exclude.example.com')
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ if schedulable_masters_ok:
+ inputs.append('y')
+ inputs.append('1') # Add more nodes
+ i = 0
+ for (host, is_master, is_containerized) in add_nodes:
+ inputs.append(host)
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+ if i < len(add_nodes) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ if add_nodes is None:
+ total_hosts = hosts
+ else:
+ total_hosts = hosts + add_nodes
+ if total_hosts is not None and num_masters == len(total_hosts):
+ inputs.append('y')
+
+ inputs.extend([
+ confirm_facts,
+ 'y', # lets do this
+ 'y',
+ ])
+
+ return '\n'.join(inputs)
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
new file mode 100644
index 000000000..56fd82408
--- /dev/null
+++ b/utils/test/oo_config_tests.py
@@ -0,0 +1,306 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+
+import cStringIO
+import os
+import unittest
+import tempfile
+import shutil
+import yaml
+
+from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
+import ooinstall.openshift_ansible
+
+SAMPLE_CONFIG = """
+variant: openshift-enterprise
+variant_version: 3.3
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+
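+# 10.0.0.2 is missing public_hostname and 10.0.0.3 is missing hostname,
+# public_ip, and public_hostname (see test_load_host_incomplete_facts):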
+CONFIG_INCOMPLETE_FACTS = """
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: 24.222.0.2
+ public_ip: 24.222.0.2
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
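+# The second host is missing connect_to, which makes the config invalid: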
+CONFIG_BAD = """
+variant: openshift-enterprise
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+class OOInstallFixture(unittest.TestCase):
+
+ def setUp(self):
+ self.tempfiles = []
+ self.work_dir = tempfile.mkdtemp(prefix='ooconfigtests')
+ self.tempfiles.append(self.work_dir)
+
+ def tearDown(self):
+ for path in self.tempfiles:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+
+ def write_config(self, path, config_str):
+ """
+ Write given config to a temporary file which will be cleaned
+ up in teardown.
+ Returns full path to the file.
+ """
+ cfg_file = open(path, 'w')
+ cfg_file.write(config_str)
+ cfg_file.close()
+ return path
+
+
+
+class OOConfigTests(OOInstallFixture):
+
+ def test_load_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+
+ self.assertEquals(3, len(ooconfig.deployment.hosts))
+ self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].connect_to)
+ self.assertEquals("10.0.0.1", ooconfig.deployment.hosts[0].ip)
+ self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].hostname)
+
+ self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
+ [host.ip for host in ooconfig.deployment.hosts])
+
+ self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+ self.assertEquals('v2', ooconfig.settings['version'])
+
+ def test_load_bad_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_BAD)
+        self.assertRaises(OOConfigInvalidHostError, OOConfig, cfg_path)
+
+
+ def test_load_complete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(0, len(missing_host_facts))
+
+ # Test missing optional facts the user must confirm:
+ def test_load_host_incomplete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(2, len(missing_host_facts))
+ self.assertEquals(1, len(missing_host_facts['10.0.0.2']))
+ self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
+
+ def test_write_config(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ ooconfig.save_to_disk()
+
+ f = open(cfg_path, 'r')
+ written_config = yaml.safe_load(f.read())
+ f.close()
+
+ self.assertEquals(3, len(written_config['deployment']['hosts']))
+ for h in written_config['deployment']['hosts']:
+ self.assertTrue('ip' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_hostname' in h)
+
+ self.assertTrue('ansible_ssh_user' in written_config['deployment'])
+ self.assertTrue('variant' in written_config)
+ self.assertEquals('v2', written_config['version'])
+
+ # Some advanced settings should not get written out if they
+ # were not specified by the user:
+ self.assertFalse('ansible_inventory_directory' in written_config)
+
+
+class HostTests(OOInstallFixture):
+
+ def test_load_host_no_ip_or_hostname(self):
+ yaml_props = {
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'master': True
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_load_host_no_master_or_node_specified(self):
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_inventory_file_quotes_node_labels(self):
+ """Verify a host entry wraps openshift_node_labels value in double quotes"""
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'connect_to': 'a-private.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'new_host': True,
+ 'roles': ['node'],
+ 'node_labels': {
+ 'region': 'infra'
+ },
+ }
+
+ new_node = Host(**yaml_props)
+ inventory = cStringIO.StringIO()
+ # This is what the 'write_host' function generates. write_host
+ # has no return value, it just writes directly to the file
+ # 'inventory' which in this test-case is a StringIO object
+ ooinstall.openshift_ansible.write_host(
+ new_node,
+ 'node',
+ inventory,
+ schedulable=True)
+ # read the value of what was written to the inventory "file"
+ legacy_inventory_line = inventory.getvalue()
+
+ # Given the `yaml_props` above we should see a line like this:
+ # openshift_node_labels="{'region': 'infra'}"
+ node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash
+ node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash
+
+ # The good line is present in the written inventory line
+ self.assertIn(node_labels_expected, legacy_inventory_line)
+ # An unquoted version is not present
+ self.assertNotIn(node_labels_bad, legacy_inventory_line)
+
+
+ # def test_new_write_inventory_same_as_legacy(self):
+ # """Verify the original write_host function produces the same output as the new method"""
+ # yaml_props = {
+ # 'ip': '192.168.0.1',
+ # 'hostname': 'a.example.com',
+ # 'connect_to': 'a-private.example.com',
+ # 'public_ip': '192.168.0.1',
+ # 'public_hostname': 'a.example.com',
+ # 'new_host': True,
+ # 'roles': ['node'],
+ # 'other_variables': {
+ # 'zzz': 'last',
+ # 'foo': 'bar',
+ # 'aaa': 'first',
+ # },
+ # }
+
+ # new_node = Host(**yaml_props)
+ # inventory = cStringIO.StringIO()
+
+ # # This is what the original 'write_host' function will
+ # # generate. write_host has no return value, it just writes
+ # # directly to the file 'inventory' which in this test-case is
+ # # a StringIO object
+ # ooinstall.openshift_ansible.write_host(
+ # new_node,
+ # 'node',
+ # inventory,
+ # schedulable=True)
+ # legacy_inventory_line = inventory.getvalue()
+
+ # # This is what the new method in the Host class generates
+ # new_inventory_line = new_node.inventory_string('node', schedulable=True)
+
+ # self.assertEqual(
+ # legacy_inventory_line,
+ # new_inventory_line)
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
new file mode 100644
index 000000000..2e59d86f2
--- /dev/null
+++ b/utils/test/test_utils.py
@@ -0,0 +1,100 @@
+"""
+Unittests for ooinstall utils.
+"""
+
+import unittest
+import logging
+import sys
+import copy
+from ooinstall.utils import debug_env, is_valid_hostname
+import mock
+
+
+class TestUtils(unittest.TestCase):
+ """
+ Parent unittest TestCase.
+ """
+
+ def setUp(self):
+ self.debug_all_params = {
+ 'OPENSHIFT_FOO': 'bar',
+ 'ANSIBLE_FOO': 'bar',
+ 'OO_FOO': 'bar'
+ }
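+        # All three keys carry prefixes (OPENSHIFT_/ANSIBLE_/OO_) that
+        # debug_env logs; a non-matching prefix is skipped (see
+        # test_utils_debug_env_some_debugged below).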
+
+ self.expected = [
+ mock.call('ANSIBLE_FOO: bar'),
+ mock.call('OPENSHIFT_FOO: bar'),
+ mock.call('OO_FOO: bar'),
+ ]
+
+ # python 2.x has assertItemsEqual, python 3.x has assertCountEqual
+        if sys.version_info.major >= 3:
+ self.assertItemsEqual = self.assertCountEqual
+
+ ######################################################################
+ # Validate ooinstall.utils.debug_env functionality
+
+ def test_utils_debug_env_all_debugged(self):
+ """Verify debug_env debugs specific env variables"""
+
+ with mock.patch('ooinstall.utils.installer_log') as _il:
+ debug_env(self.debug_all_params)
+ print _il.debug.call_args_list
+
+ # Debug was called for each item we expect
+ self.assertEqual(
+ len(self.debug_all_params),
+ _il.debug.call_count)
+
+ # Each item we expect was logged
+ self.assertItemsEqual(
+ self.expected,
+ _il.debug.call_args_list)
+
+ def test_utils_debug_env_some_debugged(self):
+ """Verify debug_env skips non-wanted env variables"""
+ debug_some_params = copy.deepcopy(self.debug_all_params)
+ # This will not be logged by debug_env
+ debug_some_params['MG_FRBBR'] = "SKIPPED"
+
+ with mock.patch('ooinstall.utils.installer_log') as _il:
+ debug_env(debug_some_params)
+
+ # The actual number of debug calls was less than the
+ # number of items passed to debug_env
+ self.assertLess(
+ _il.debug.call_count,
+ len(debug_some_params))
+
+ self.assertItemsEqual(
+ self.expected,
+ _il.debug.call_args_list)
+
+ ######################################################################
+ def test_utils_is_valid_hostname_invalid(self):
+ """Verify is_valid_hostname can detect None or too-long hostnames"""
+ # A hostname that's empty, None, or more than 255 chars is invalid
+ empty_hostname = ''
+ res = is_valid_hostname(empty_hostname)
+ self.assertFalse(res)
+
+ none_hostname = None
+ res = is_valid_hostname(none_hostname)
+ self.assertFalse(res)
+
+ too_long_hostname = "a" * 256
+ res = is_valid_hostname(too_long_hostname)
+ self.assertFalse(res)
+
+ def test_utils_is_valid_hostname_ends_with_dot(self):
+ """Verify is_valid_hostname can parse hostnames with trailing periods"""
+ hostname = "foo.example.com."
+ res = is_valid_hostname(hostname)
+ self.assertTrue(res)
+
+ def test_utils_is_valid_hostname_normal_hostname(self):
+ """Verify is_valid_hostname can parse regular hostnames"""
+ hostname = "foo.example.com"
+ res = is_valid_hostname(hostname)
+ self.assertTrue(res)