Dive into secure and efficient coding practices with this curated list of the top 10 examples showcasing 'bandit', the Python security linter, in real-world code. Bandit parses each source file into an abstract syntax tree, runs a set of plugin checks against every node, and reports its findings ranked by severity and confidence. The snippets below, drawn from bandit's own test suite, cover configuration loading, running checks, filtering and scoring issues, report formatters, and blacklist plugin definitions.
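Before digging into bandit's internals, it helps to see what it reports. Below is a minimal, hypothetical target file; both calls in it are patterns bandit's bundled plugins are designed to flag (unsafe yaml.load and shell=True subprocess calls), though the exact plugin IDs and message wording depend on your bandit version.

# example_target.py -- a hypothetical file to scan with bandit; both calls
# below match checks that ship with bandit.
import subprocess
import yaml

def load_settings(path):
    with open(path) as f:
        return yaml.load(f.read())           # unsafe YAML load, flagged by bandit

def run(cmd):
    return subprocess.call(cmd, shell=True)  # shell=True call, flagged by bandit

Scanning it from the command line with `bandit example_target.py` (or a whole tree with `bandit -r project/`) prints each finding with its severity and confidence, the same two rankings exercised by the tests below.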
def test_yaml_invalid(self):
    # When the config yaml file isn't valid, a ConfigError naming the
    # offending file is raised. The following is invalid because it
    # starts a sequence and doesn't end it.
    invalid_yaml = '- [ something'
    f = self.useFixture(TempFile(invalid_yaml))

    self.assertRaisesRegex(
        utils.ConfigError, f.name, config.BanditConfig, f.name)
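For the happy path, here is a minimal sketch of the same API, assuming a well-formed YAML file on disk. The 'skips' key is illustrative only; check your bandit version's documentation for the options it actually honors. BanditConfig parses the file and raises utils.ConfigError, as asserted above, only when the YAML cannot be loaded.

# Minimal sketch: loading a valid config file with BanditConfig.
from bandit.core import config, utils

with open('bandit.yaml', 'w') as cfg:
    cfg.write('skips: [B101]\n')   # illustrative content only

try:
    conf = config.BanditConfig('bandit.yaml')
except utils.ConfigError as err:
    print('could not parse config: %s' % err)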
:param checktype: The type of checks to run
:param nosec_lines: Lines which should be skipped because of nosec
:return: a score based on the number and type of test results
'''
scores = {
    'SEVERITY': [0] * len(constants.RANKING),
    'CONFIDENCE': [0] * len(constants.RANKING)
}

tests = self.testset.get_tests(checktype)
for test in tests:
    name = test.__name__
    # execute test with an instance of the context class
    temp_context = copy.copy(raw_context)
    context = b_context.Context(temp_context)
    try:
        if hasattr(test, '_config'):
            result = test(context, test._config)
        else:
            result = test(context)

        # if we have a result, record it and update scores, unless the
        # result or context line carries a nosec annotation
        if (result is not None and
                result.lineno not in self.nosec_lines and
                temp_context['lineno'] not in self.nosec_lines):
            if isinstance(temp_context['filename'], bytes):
                result.fname = temp_context['filename'].decode('utf-8')
            else:
                result.fname = temp_context['filename']
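Each `test` invoked in the loop above is an ordinary plugin function that receives a Context and returns an Issue (or None). A minimal sketch of such a plugin, using bandit's test_properties decorators and a hypothetical ID B999 chosen to avoid clashing with real plugins:

import bandit
from bandit.core import test_properties as test

@test.checks('Call')        # run this plugin for Call nodes
@test.test_id('B999')       # hypothetical ID, for illustration only
def hypothetical_pickle_loads(context):
    # context wraps the raw AST data handled by run_tests() above
    if context.call_function_name_qual == 'pickle.loads':
        return bandit.Issue(
            severity=bandit.MEDIUM,
            confidence=bandit.HIGH,
            text='Use of pickle.loads() on possibly untrusted data.'
        )

Once registered through bandit's plugin entry points, the tester calls it with a fresh Context per matching node and folds any returned Issue into the severity and confidence scores shown above.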
def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
    new_issue = issue.Issue(severity, confidence, 'Test issue')
    new_issue.fname = 'code.py'
    new_issue.test = 'bandit_plugin'
    new_issue.lineno = 1
    return new_issue
def test_issue_filter_severity(self):
    levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
    issues = [_get_issue_instance(lvl, bandit.HIGH) for lvl in levels]

    for level in levels:
        rank = constants.RANKING.index(level)
        for i in issues:
            test = constants.RANKING.index(i.severity)
            result = i.filter(level, bandit.UNDEFINED)
            self.assertTrue((test >= rank) == result)
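In plain terms, Issue.filter(severity, confidence) answers "should this issue be reported at these thresholds?": it is true only when the issue's own severity and confidence rank at least as high as the requested levels. A small sketch of that behaviour, using the same constants as the test:

import bandit
from bandit.core import issue

i = issue.Issue(bandit.MEDIUM, bandit.HIGH, 'Example issue')

print(i.filter(bandit.LOW, bandit.LOW))    # True: MEDIUM/HIGH clear a LOW bar
print(i.filter(bandit.HIGH, bandit.LOW))   # False: severity MEDIUM is below HIGH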
def test_report_contents(self, get_issue_list, get_code):
    self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}

    issue_a = _get_issue_instance(severity=bandit.LOW)
    issue_a.fname = 'abc.py'
    issue_a.test = 'AAAAAAA'
    issue_a.text = 'BBBBBBB'
    issue_a.confidence = 'CCCCCCC'
    # don't need to test severity, it determines the color which we're
    # testing separately
    issue_b = _get_issue_instance(severity=bandit.MEDIUM)
    issue_c = _get_issue_instance(severity=bandit.HIGH)

    issue_x = _get_issue_instance()
    get_code.return_value = 'some code'

    issue_y = _get_issue_instance()

    get_issue_list.return_value = collections.OrderedDict(
        [(issue_a, [issue_x, issue_y]),
         (issue_b, [issue_x]), (issue_c, [issue_y])])

    with open(self.tmp_fname, 'w') as tmp_file:
        b_html.report(
            self.manager, tmp_file, bandit.LOW, bandit.LOW)

    with open(self.tmp_fname) as f:
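The HTML formatter exercised here is the same one behind bandit's -f html output. Outside the tests you would normally produce that report through the CLI (for example, `bandit -r project/ -f html -o bandit_report.html`) rather than by calling b_html.report directly; a sketch of driving that from Python, with an illustrative project path:

# Sketch: produce an HTML report the same way the CLI's -f html option does,
# by running bandit as a subprocess (the -r/-f/-o flags are standard options).
import subprocess

subprocess.run(
    ['bandit', '-r', 'project/', '-f', 'html', '-o', 'bandit_report.html'],
    check=False,   # bandit exits non-zero when it finds issues
)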
def test_report(self, get_issue_list):
    self.manager.files_list = ['binding.py']
    self.manager.scores = [{'SEVERITY': [0] * len(constants.RANKING),
                            'CONFIDENCE': [0] * len(constants.RANKING)}]

    get_issue_list.return_value = collections.OrderedDict(
        [(self.issue, self.candidates)])

    with open(self.tmp_fname, 'w') as tmp_file:
        b_json.report(self.manager, tmp_file, self.issue.severity,
                      self.issue.confidence)

    with open(self.tmp_fname) as f:
        data = json.loads(f.read())
        self.assertIsNotNone(data['generated_at'])
        self.assertEqual(self.tmp_fname, data['results'][0]['filename'])
        self.assertEqual(self.issue.severity,
                         data['results'][0]['issue_severity'])
        self.assertEqual(self.issue.confidence,
                         data['results'][0]['issue_confidence'])
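The keys asserted here ('generated_at', 'results', 'filename', 'issue_severity', 'issue_confidence') are the same ones bandit writes when you ask for JSON from the command line. A short sketch of consuming such a file, assuming it was produced with something like `bandit -f json -o out.json target.py` (the 'line_number' and 'issue_text' keys are not asserted above but follow the same report format):

# Sketch: read back a bandit JSON report and summarize its findings.
import json

with open('out.json') as f:
    data = json.load(f)

print('generated at:', data['generated_at'])
for result in data['results']:
    print('{0}:{1} [{2}/{3}] {4}'.format(
        result['filename'], result['line_number'],
        result['issue_severity'], result['issue_confidence'],
        result['issue_text']))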
def test_issue_filter_confidence(self):
    levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
    issues = [_get_issue_instance(bandit.HIGH, lvl) for lvl in levels]

    for level in levels:
        rank = constants.RANKING.index(level)
        for i in issues:
            test = constants.RANKING.index(i.confidence)
            result = i.filter(bandit.UNDEFINED, level)
            self.assertTrue((test >= rank) == result)
# two low-ranked candidate issues are attached to the main issue in the report
self.candidates = [issue.Issue(bandit.LOW, bandit.LOW, 'Candidate A',
                               lineno=1),
                   issue.Issue(bandit.LOW, bandit.LOW, 'Candidate B',
                               lineno=2)]
self.manager.out_file = self.tmp_fname

self.issue.fname = self.context['filename']
self.issue.lineno = self.context['lineno']
self.issue.linerange = self.context['linerange']
self.issue.test = self.check_name

self.manager.results.append(self.issue)

self.manager.metrics = metrics.Metrics()

# mock up the metrics
for key in ['_totals', 'binding.py']:
    self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
    for (criteria, default) in constants.CRITERIA:
        for rank in constants.RANKING:
            self.manager.metrics.data[key]['{0}.{1}'.format(
                criteria, rank
            )] = 0
:param example_script: Filename of an example script to test
:param expect: dict with expected values of metrics
'''
self.b_mgr.metrics = metrics.Metrics()
self.b_mgr.scores = []
self.run_example(example_script)

# test general metrics (excludes issue counts)
m = self.b_mgr.metrics.data
for k in expect:
    if k != 'issues':
        self.assertEqual(expect[k], m['_totals'][k])

# test issue counts
if 'issues' in expect:
    for (criteria, default) in C.CRITERIA:
        for rank in C.RANKING:
            label = '{0}.{1}'.format(criteria, rank)
            expected = 0
            if expect['issues'].get(criteria).get(rank):
                expected = expect['issues'][criteria][rank]
            self.assertEqual(expected, m['_totals'][label])
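A call site for this helper, assuming it is named check_metrics (the name is not visible in the excerpt) and using a hypothetical example script, would pass an expect dict shaped the way the loops above read it: 'loc'/'nosec' totals plus per-criteria, per-rank issue counts.

# Sketch: 'skip_sample.py' is a hypothetical example script; the keys mirror
# the metrics checked above, with criteria SEVERITY/CONFIDENCE as in the
# earlier score dictionaries.
expect = {
    'loc': 12,          # lines of code in the example script
    'nosec': 2,         # number of '# nosec' annotations
    'issues': {
        'SEVERITY': {'LOW': 1, 'MEDIUM': 1},
        'CONFIDENCE': {'HIGH': 2},
    },
}
self.check_metrics('skip_sample.py', expect)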
def test_plugin():
    sets = []
    sets.append(utils.build_conf_dict(
        'telnet', 'B401', ['telnetlib'],
        'A telnet-related module is being imported. Telnet is '
        'considered insecure. Use SSH or some other encrypted protocol.',
        'HIGH'
    ))

    sets.append(utils.build_conf_dict(
        'marshal', 'B302', ['marshal.load', 'marshal.loads'],
        'Deserialization with the marshal module is possibly dangerous.'
    ))

    return {'Import': sets, 'ImportFrom': sets, 'Call': sets}
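build_conf_dict packages each blacklist entry (name, ID, qualified names to match, message, and optionally a severity level), and the returned mapping keys the same entries by the AST node types they apply to. A quick sketch of inspecting the result; the individual dict keys such as 'id' and 'qualnames' follow bandit's blacklist data format and are worth verifying against your version:

# Sketch: the telnet entry is consulted for Import, ImportFrom and Call nodes alike.
data = test_plugin()

print(sorted(data.keys()))   # ['Call', 'Import', 'ImportFrom']
telnet = data['Import'][0]
print(telnet['id'])          # 'B401' (assumes build_conf_dict stores the ID under 'id')
print(telnet['qualnames'])   # ['telnetlib']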