Unit tests for the Molecule project. The tests below exercise playbook resolution, the Testinfra verifier's file discovery, logging output, environment merging, dictionary-merge utilities, exit handling, and platform schema validation.
LOG.warn(msg)
return
if self._testinfra_command is None:
self.bake()
msg = 'Executing Testinfra tests found in {}/...'.format(self.directory)
LOG.info(msg)
try:
util.run_command(self._testinfra_command, debug=self._config.debug)
msg = 'Verifier completed successfully.'
LOG.success(msg)
except sh.ErrorReturnCode as e:
util.sysexit(e.exit_code)
_patched_execute_scenario,
_patched_prune,
_patched_execute_subcommand,
_patched_sysexit,
):
# Ensure execute_cmdline_scenarios handles errors correctly when 'destroy'
# is 'always':
# - cleanup and destroy subcommands are run when execute_scenario
# raises SystemExit
# - scenario is pruned
scenario_name = 'default'
args = {}
command_args = {'destroy': 'always', 'subcommand': 'test'}
_patched_execute_scenario.side_effect = SystemExit()
base.execute_cmdline_scenarios(scenario_name, args, command_args)
assert _patched_execute_subcommand.call_count == 2
# pull out the second positional call argument for each call,
# which is the called subcommand. 'cleanup' and 'destroy' should be called.
assert _patched_execute_subcommand.call_args_list[0][0][1] == 'cleanup'
assert _patched_execute_subcommand.call_args_list[1][0][1] == 'destroy'
assert _patched_prune.called
assert _patched_sysexit.called
def test_get_playbook(tmpdir, _instance):
    """An empty playbook file in the scenario directory is resolved by name."""
    playbook_path = os.path.join(_instance._config.scenario.directory, 'create.yml')
    util.write_file(playbook_path, '')

    assert _instance._get_playbook('create') == playbook_path
def test_additional_files_or_dirs_property(_instance):
    """Every test file, including those inside a subdirectory, is discovered."""
    base_dir = _instance._config.verifier.directory
    sub_dir = os.path.join(base_dir, 'dir')
    expected = [
        os.path.join(base_dir, 'file1.py'),
        os.path.join(base_dir, 'file2.py'),
        os.path.join(base_dir, 'match1.py'),
        os.path.join(base_dir, 'match2.py'),
        os.path.join(sub_dir, 'test_subdir_file.py'),
    ]

    os.mkdir(base_dir)
    os.mkdir(sub_dir)
    for path in expected:
        util.write_file(path, '')

    assert sorted(expected) == sorted(_instance.additional_files_or_dirs)
def test_get_ansible_playbook_with_driver_key_when_playbook_key_missing(
    tmpdir, _instance
):
    """The side_effect playbook is resolved from the scenario directory."""
    playbook = os.path.join(_instance._config.scenario.directory, 'side_effect.yml')
    util.write_file(playbook, '')

    assert _instance._get_playbook('side_effect') == playbook
def test_error(capsys):
    """log.error writes the message wrapped in red ANSI codes to stderr."""
    log = logger.get_logger(__name__)
    log.error('foo')
    _, stderr = capsys.readouterr()

    # Build the expected colorized line, print it to stderr, and re-capture
    # it so the comparison uses exactly what print() would emit.
    expected = '{}{}{}'.format(
        colorama.Fore.RED, 'foo'.rstrip(), colorama.Style.RESET_ALL
    )
    print(expected, file=sys.stderr)
    _, x = capsys.readouterr()

    assert x in stderr
def default_env(self):
    """Return os.environ layered with the config env, then the provisioner env.

    Later sources win on key collisions, so provisioner settings take
    precedence over config settings, which take precedence over the process
    environment.
    """
    merged = util.merge_dicts(os.environ, self._config.env)
    return util.merge_dicts(merged, self._config.provisioner.env)
def test_merge_dicts():
    """Nested dicts merge recursively; lists are replaced, not concatenated."""
    # example taken from python-anyconfig/anyconfig/__init__.py
    left = {'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}}
    right = {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}}
    expected = {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb', 'f': 3}}

    assert util.merge_dicts(left, right) == expected
def test_sysexit_with_message_and_custom_code(patched_logger_critical):
    """sysexit_with_message logs the message critically and exits with the code."""
    with pytest.raises(SystemExit) as e:
        util.sysexit_with_message('foo', 2)

    assert e.value.code == 2
    patched_logger_critical.assert_called_once_with('foo')
def test_platforms_driver_name_required(_config):
    """Deleting a platform's name produces a 'required field' schema error."""
    del _config['platforms'][0]['name']
    expected = {'platforms': [{0: [{'name': ['required field']}]}]}

    assert schema_v2.validate(_config) == expected