Dive into secure and efficient coding practices with our curated list of the top 10 examples showcasing 'identify' in Python. Our advanced machine learning engine meticulously scans each line of code, cross-referencing millions of open source libraries to ensure your implementation is not just functional, but also robust and secure. Most of the snippets below use the identify file-classification library (tags_from_filename, tags_from_path, license_id, is_text) to decide how a file should be handled, while the first two come from projects that define their own identify helpers for movie titles and optical discs. Master the art of classifying files and routing them to the right tooling with confidence and precision.
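As a quick orientation, here is a minimal sketch of the identify library calls the later snippets rely on. The filenames and the tag values shown in the comments are illustrative assumptions; the actual results depend on the files present and the library version.

from identify import identify

# Tags inferred from the name alone (extension and well-known filenames).
print(identify.tags_from_filename('setup.py'))    # e.g. {'text', 'python'}

# Tags inferred from a path on disk; also considers mode, shebang and contents.
print(identify.tags_from_path('scripts/run.sh'))  # e.g. includes 'file', 'text', 'shell'

# Best-effort license identification from a license file's text.
print(identify.license_id('LICENSE'))             # e.g. 'MIT', or None if unrecognized

# Heuristic text-vs-binary check on a file object opened in binary mode.
with open('README.md', 'rb') as f:
    print(identify.is_text(f))                    # True if the contents look like text
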
def movienamer(movie):
    directory = '/'.join(movie.split('/')[:-1])
    filename, extension = os.path.splitext(os.path.basename(movie))

    results = identify(filename, directory)
    if len(results) == 0:
        print('No results found. Skipping movie file\n')
        return False

    action = confirm(results, filename, extension)
    if action == 'SKIP':
        print('Skipping movie file\n')
        return False
    elif action == 'QUIT':
        print('Exiting movienamer')
        sys.exit()
    else:
        i = int(action)
        result = results[i-1]

def main(logfile, disc):
    """main dvd processing function"""
    logging.info("Starting Disc identification")
    identify.identify(disc, logfile)
    log_arm_params(disc)

    if disc.disctype in ["dvd", "bluray"]:
        utils.notify("ARM notification", "Found disc: " + str(disc.videotitle) + ". Video type is "
                     + str(disc.videotype) + ". Main Feature is " + str(cfg['MAINFEATURE']) + ".")
    elif disc.disctype == "music":
        utils.notify("ARM notification", "Found music CD: " + disc.label + ". Ripping all tracks")
    elif disc.disctype == "data":
        utils.notify("ARM notification", "Found data disc. Copying data.")
    else:
        utils.notify("ARM Notification", "Could not identify disc. Exiting.")
        sys.exit()

    if cfg['HASHEDKEYS']:
        logging.info("Getting MakeMKV hashed keys for UHD rips")

def apply_fix(
        *,
        ls_files_cmd: Sequence[str],
        sed_cmd: Sequence[str],
) -> None:
    filenames_b = zsplit(subprocess.check_output(ls_files_cmd))
    filenames = [f.decode() for f in filenames_b]
    filenames = [f for f in filenames if tags_from_path(f) & {'file', 'text'}]
    autofix_lib.run(*sed_cmd, *filenames)

def get_metrics_from_stat(self, _, file_diff_stats):
    total_lines = 0
    lines_by_file_type = collections.defaultdict(int)

    for file_diff_stat in file_diff_stats:
        lines_changed = (
            len(file_diff_stat.lines_added) -
            len(file_diff_stat.lines_removed)
        )
        # Track total overall
        total_lines += lines_changed

        filename = file_diff_stat.filename.decode('UTF-8')
        tags = identify.tags_from_filename(filename) or {UNKNOWN}
        for tag in tags:
            lines_by_file_type[tag] += lines_changed

    # Yield overall metric and one per type of expected mapping types
    yield Metric('TotalLinesOfCode', total_lines)
    for tag, val in lines_by_file_type.items():
        if tag in ALL_TAGS and val:
            yield Metric('TotalLinesOfCode_{}'.format(tag), val)

contents = f.read()

cfg = configparser.ConfigParser()
cfg.read_string(contents)
_clean_sections(cfg)

# normalize names to underscores so sdist / wheel have the same prefix
cfg['metadata']['name'] = cfg['metadata']['name'].replace('-', '_')

# if README.md exists, set `long_description` + content type
readme = _first_file(filename, 'readme')
if readme is not None:
    long_description = f'file: {os.path.basename(readme)}'
    cfg['metadata']['long_description'] = long_description

    tags = identify.tags_from_filename(readme)
    if 'markdown' in tags:
        cfg['metadata']['long_description_content_type'] = 'text/markdown'
    elif 'rst' in tags:
        cfg['metadata']['long_description_content_type'] = 'text/x-rst'
    else:
        cfg['metadata']['long_description_content_type'] = 'text/plain'

# set license fields if a license exists
license_filename = _first_file(filename, 'licen[sc]e')
if license_filename is not None:
    cfg['metadata']['license_file'] = os.path.basename(license_filename)

    license_id = identify.license_id(license_filename)
    if license_id is not None:
        cfg['metadata']['license'] = license_id

def get_metrics_from_stat(self, _, file_diff_stats):
    total_curses = 0
    curses_by_file_type = collections.defaultdict(int)

    for file_diff_stat in file_diff_stats:
        curses_added = count_curse_words(file_diff_stat.lines_added)
        curses_removed = count_curse_words(file_diff_stat.lines_removed)
        curses_changed = curses_added - curses_removed

        # Track total overall
        total_curses = total_curses + curses_changed

        # Track by file extension -> type mapping
        filename = file_diff_stat.filename.decode('UTF-8')
        tags = identify.tags_from_filename(filename) or {UNKNOWN}
        for tag in tags:
            curses_by_file_type[tag] += curses_changed

    # Yield overall metric and one per type of expected mapping types
    yield Metric('TotalCurseWords', total_curses)
    for tag, value in curses_by_file_type.items():
        if tag in ALL_TAGS and value:
            yield Metric('TotalCurseWords_{}'.format(tag), value)

has_errors = False
for err in app.style_errors:
    has_errors = True
    yield NitpickApp.as_flake8_warning(err)
if has_errors:
    return []

# Get all root keys from the style TOML.
for path, config_dict in app.config.style_dict.items():
    # All except "nitpick" are file names.
    if path == PROJECT_NAME:
        continue

    # For each file name, find the plugin that can handle the file.
    tags = identify.tags_from_filename(path)
    for base_file in app.plugin_manager.hook.handle_config_file(  # pylint: disable=no-member
        config=config_dict, file_name=path, tags=tags
    ):
        yield from base_file.check_exists()

return []

def _find_subclasses(self, data, handled_tags, new_files_found):
    for possible_file in data.keys():
        found_subclasses = []
        for file_tag in identify.tags_from_filename(possible_file):
            handler_subclass = handled_tags.get(file_tag)
            if handler_subclass:
                found_subclasses.append(handler_subclass)

        for found_subclass in found_subclasses:
            new_files_found.update(self.file_field_pair(possible_file, found_subclass))

cfg['metadata']['long_description'] = long_description

tags = identify.tags_from_filename(readme)
if 'markdown' in tags:
    cfg['metadata']['long_description_content_type'] = 'text/markdown'
elif 'rst' in tags:
    cfg['metadata']['long_description_content_type'] = 'text/x-rst'
else:
    cfg['metadata']['long_description_content_type'] = 'text/plain'

# set license fields if a license exists
license_filename = _first_file(filename, 'licen[sc]e')
if license_filename is not None:
    cfg['metadata']['license_file'] = os.path.basename(license_filename)

    license_id = identify.license_id(license_filename)
    if license_id is not None:
        cfg['metadata']['license'] = license_id

        if license_id in LICENSE_TO_CLASSIFIER:
            cfg['metadata']['classifiers'] = (
                cfg['metadata'].get('classifiers', '').rstrip() +
                f'\n{LICENSE_TO_CLASSIFIER[license_id]}'
            )

requires = _python_requires(filename, min_py3_version=min_py3_version)
if requires is not None:
    if not cfg.has_section('options'):
        cfg.add_section('options')
    cfg['options']['python_requires'] = requires

install_requires = _requires(cfg, 'install_requires')

def probably_binary(self):
    # identify.is_text reads from the file object to make its guess, so rewind afterwards
    p = not identify.is_text(self.open_file)
    self.open_file.seek(0)
    return p