This page collects ten real-world examples of the Python filelock library, taken from open source projects. Together they cover the common patterns: guarding a cached file, serializing an application self-update, acquiring several locks atomically, protecting a database write, enforcing a single running instance, and keeping concurrent workflow runs from clobbering a shared cache directory.
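As a baseline, here is a minimal sketch of the core API; the file names are hypothetical. A FileLock wraps a lock-file path, entering it blocks until the lock is acquired, and filelock.Timeout is raised if the configured timeout expires first.

from filelock import FileLock, Timeout

lock = FileLock("shared.txt.lock", timeout=10)  # hypothetical lock file
try:
    with lock:  # blocks until acquired; raises Timeout after 10 seconds
        with open("shared.txt", "a") as f:
            f.write("only one process appends at a time\n")
except Timeout:
    print("another process is holding shared.txt.lock")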
import json

from filelock import FileLock


def _cached_json_results(results_path, results_factory=None):
    """
    Read results from results_path if it exists;
    otherwise, produce them with results_factory,
    and write them to results_path.
    """
    with FileLock(results_path + '.lock'):
        try:
            with open(results_path, mode='r') as results_f:
                results = json.load(results_f)
        except FileNotFoundError:
            if not results_factory:
                raise
            results = results_factory()
            with open(results_path, mode='w') as results_f:
                json.dump(results, results_f)
    return results
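A hedged usage sketch for the helper above; the factory lambda is a stand-in for any expensive computation with JSON-serializable output:

# The first caller computes and writes results.json; concurrent callers block
# on the .lock file and then read the cached copy instead of recomputing.
results = _cached_json_results('results.json',
                               results_factory=lambda: {"answer": 42})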
if sys.platform == 'darwin' and windowed:
    app_run_command = './{}.app/Contents/MacOS/{}'.format(app_name, app_name)
    app_name = '{}.app'.format(app_name)

if custom_dir:
    # update with custom_dir is multiprocessing-safe
    lock_path = 'pyu.lock'
else:
    if not os.path.exists(appdirs.user_data_dir(APP_NAME)):
        os.makedirs(appdirs.user_data_dir(APP_NAME))
    lock_path = os.path.join(appdirs.user_data_dir(APP_NAME), 'pyu.lock')

update_lock = filelock.FileLock(lock_path, LOCK_TIMEOUT)

version_file = 'version2.txt'
with update_lock.acquire(LOCK_TIMEOUT, 5):
    count = 0
    while count < 5:
        # Call the binary to self update
        subprocess.call(app_run_command, shell=True)
        if os.path.exists(version_file):
            break
        count += 1
        print("Retrying app launch!")
        # Allow enough time for update process to complete.
        time.sleep(AUTO_UPDATE_PAUSE)

    simpleserver.stop()
    # Detect if it was an overwrite error
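The snippet above, apparently from a PyUpdater-based self-update flow, passes the timeout both to the FileLock constructor and to acquire(); acquire() returns a proxy object that works as a context manager, and its second positional argument is the polling interval. A minimal sketch of the per-call override, under those assumptions:

from filelock import FileLock, Timeout

update_lock = FileLock("pyu.lock", timeout=10)  # default timeout for this lock
try:
    with update_lock.acquire(timeout=30):  # per-call override of the timeout
        pass  # run the self-update here
except Timeout:
    print("an update is already in progress elsewhere")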
def test_try_acquire_locks(testing_workdir):
    # Acquiring two unlocked locks should succeed.
    lock1 = filelock.FileLock(os.path.join(testing_workdir, 'lock1'))
    lock2 = filelock.FileLock(os.path.join(testing_workdir, 'lock2'))
    with utils.try_acquire_locks([lock1, lock2], timeout=1):
        pass

    # Acquiring the same lock twice should fail.
    lock1_copy = filelock.FileLock(os.path.join(testing_workdir, 'lock1'))
    # Also verify that the error message contains the word "lock", since we
    # rely on this elsewhere.
    with pytest.raises(BuildLockError, match='Failed to acquire all locks'):
        with utils.try_acquire_locks([lock1, lock1_copy], timeout=1):
            pass
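utils.try_acquire_locks is conda-build project code, but its contract is visible from the test: acquire every lock or fail with a message containing "lock". A hypothetical sketch of such a helper built on contextlib.ExitStack (illustrative only, not conda-build's implementation):

import contextlib

from filelock import Timeout


@contextlib.contextmanager
def try_acquire_locks(locks, timeout):
    # All-or-nothing acquisition: if any acquire times out, ExitStack
    # releases the locks already held before the error propagates.
    with contextlib.ExitStack() as stack:
        try:
            for lock in locks:
                stack.enter_context(lock.acquire(timeout=timeout))
        except Timeout as exc:
            raise RuntimeError("Failed to acquire all locks") from exc
        yield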
def add(tool, mails, extra, result, ts=lmdutils.get_timestamp("now")):
    check(Email.__tablename__)
    with FileLock(lock_path):
        tool = Tool.get_or_create(tool)
        for mail in mails:
            session.add(Email(tool, ts, mail, extra, result))
        session.commit()
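Note the default ts=lmdutils.get_timestamp("now"): like any Python default argument it is evaluated once, when the function is defined, so a long-running process should pass ts explicitly rather than relying on the default to mean "now".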
def __init__(self, path, branch=None, component=None, local=False):
    self.path = path
    if branch is None:
        self.branch = self.default_branch
    else:
        self.branch = branch
    self.component = component
    self.last_output = ''
    self.lock = FileLock(
        self.path.rstrip('/').rstrip('\\') + '.lock',
        timeout=120
    )
    if not local:
        # Create ssh wrapper for possible use
        create_ssh_wrapper()
        if not self.is_valid():
            self.init()
                del instances[k]
                updated = True
        else:
            instances = {}

        instance_data = instances.get(instance_name)
        if not instance_data or force:
            if obj:
                instance_data = instances[instance_name] = obj
                updated = True
            else:
                instance_data = {}

        if updated:
            with open(index_path, 'w') as fp:
                json.dump(instances, fp, sort_keys=True, indent=2,
                          separators=(',', ': '))
        return instance_data
    except Timeout:
        puts_err(colored.red(textwrap.fill("Couldn't access index, it seems locked.")))
        sys.exit(1)
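The fragment above turns filelock's Timeout into a clean, user-facing exit. A minimal sketch of the same shape, assuming a hypothetical index.json:

import json

from filelock import FileLock, Timeout

try:
    with FileLock("index.json.lock", timeout=3):  # all index I/O inside the lock
        with open("index.json", "w") as fp:
            json.dump({"instances": {}}, fp, indent=2)
except Timeout:
    raise SystemExit("Couldn't access index, it seems locked.")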
        sys.exit(ReturnCode.GENERIC_COMMUNICATION_ERROR)
    except EthNodeInterfaceError as e:
        click.secho(str(e), fg="red")
        sys.exit(ReturnCode.ETH_INTERFACE_ERROR)
    except RaidenUnrecoverableError as ex:
        click.secho(f"FATAL: An unrecoverable error happened, Raiden is bailing: {ex}", fg="red")
        write_stack_trace(ex)
        sys.exit(ReturnCode.FATAL)
    except APIServerPortInUseError as ex:
        click.secho(
            f"ERROR: API Address {ex} is in use. Use --api-address "
            f"to specify a different port.",
            fg="red",
        )
        sys.exit(ReturnCode.PORT_ALREADY_IN_USE)
    except filelock.Timeout:
        name_or_id = ID_TO_NETWORKNAME.get(kwargs["network_id"], kwargs["network_id"])
        click.secho(
            f"FATAL: Another Raiden instance is already running for account "
            f"{to_checksum_address(address)} on network id {name_or_id}",
            fg="red",
        )
        sys.exit(1)
    except Exception as ex:
        write_stack_trace(ex)
        sys.exit(1)
    finally:
        # Teardown order is important because of side effects: both the
        # switch_monitor and the profiler could use the tracing API, so the
        # teardown has to run in the reverse order of initialization.
        if switch_monitor is not None:
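Raiden's handler above maps filelock.Timeout to an "instance already running" error; the lock doubles as a single-instance guard. A minimal sketch of that pattern with a hypothetical lock path:

import os

from filelock import FileLock, Timeout

lock_dir = os.path.expanduser("~/.myapp")  # hypothetical per-user data dir
os.makedirs(lock_dir, exist_ok=True)
instance_lock = FileLock(os.path.join(lock_dir, "instance.lock"))
try:
    instance_lock.acquire(timeout=0)  # a single attempt, no polling
except Timeout:
    raise SystemExit("FATAL: another instance is already running")
# ... run the application; the lock is held for the process lifetime ...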
def create_new_location(source, upload, move=False):
    base_dir = current_app.config['BOOKS_BASE_DIR']
    if isinstance(upload, model.Upload):
        new_file = os.path.join(current_app.config['UPLOAD_DIR'], upload.file)
    else:
        new_file = upload
    new_location = os.path.join(source.ebook.base_dir,
                                os.path.basename(norm_file_name(source)))
    # if source.ebook.base_dir else norm_file_name(source)  # TODO: Remove this WA
    ebook_dir = os.path.join(base_dir, os.path.split(new_location)[0])
    if not os.path.exists(ebook_dir):
        os.makedirs(ebook_dir, exist_ok=True)
    lock_file = os.path.join(ebook_dir, '.lock_this_dir')
    index = 1
    # Derive numbered variants from the original name so the suffix does not
    # accumulate across iterations (e.g. 'book(1)(2).epub').
    name, ext = os.path.splitext(new_location)
    with filelock.SoftFileLock(lock_file, timeout=5):
        while os.path.exists(os.path.join(base_dir, new_location)):
            new_location = name + '(%d)' % index + ext
            index += 1
        if move:
            shutil.move(new_file, os.path.join(base_dir, new_location))
        else:
            shutil.copy(new_file, os.path.join(base_dir, new_location))
    return new_location
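SoftFileLock, used above, relies only on the existence of the lock file rather than OS-level locking, so it also works on filesystems where fcntl/flock are unreliable (for example some network mounts); the trade-off is that a crashed process leaves a stale lock file behind that must be removed manually.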
        if result is not None:
            return result
    # creating connections that were defined after adding tasks to the wf
    for task in self.graph.nodes:
        # if workflow has task_rerun=True and propagate_rerun=True,
        # it should be passed to the tasks
        if self.task_rerun and self.propagate_rerun:
            task.task_rerun = self.task_rerun
            # if the task is a wf, then propagate_rerun should also be set
            if is_workflow(task):
                task.propagate_rerun = self.propagate_rerun
        task.cache_locations = task._cache_locations + self.cache_locations
        self.create_connections(task)
    # TODO add signal handler for processes killed after lock acquisition
    self.hooks.pre_run(self)
    with SoftFileLock(lockfile):
        # Let only one equivalent process run
        odir = self.output_dir
        if not self.can_resume and odir.exists():
            shutil.rmtree(odir)
        cwd = os.getcwd()
        odir.mkdir(parents=False, exist_ok=self.can_resume)
        self.audit.start_audit(odir=odir)
        result = Result(output=None, runtime=None, errored=False)
        self.hooks.pre_run_task(self)
        try:
            self.audit.monitor()
            await self._run_task(submitter, rerun=rerun)
            result.output = self._collect_outputs()
        except Exception as e:
            record_error(self.output_dir, e)
            result.errored = True
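The same checksum-keyed locking appears in pydra's synchronous Task._run, shown next.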
def _run(self, rerun=False, **kwargs):
    self.inputs = attr.evolve(self.inputs, **kwargs)
    self.inputs.check_fields_input_spec()
    checksum = self.checksum
    lockfile = self.cache_dir / (checksum + ".lock")
    # Eagerly retrieve cached - see scenarios in __init__()
    self.hooks.pre_run(self)
    # TODO add signal handler for processes killed after lock acquisition
    with SoftFileLock(lockfile):
        if not (rerun or self.task_rerun):
            result = self.result()
            if result is not None:
                return result
        # Let only one equivalent process run
        odir = self.output_dir
        if not self.can_resume and odir.exists():
            shutil.rmtree(odir)
        cwd = os.getcwd()
        odir.mkdir(parents=False, exist_ok=self.can_resume)
        orig_inputs = attr.asdict(self.inputs)
        map_copyfiles = copyfile_input(self.inputs, self.output_dir)
        modified_inputs = template_update(self.inputs, map_copyfiles)
        if modified_inputs:
            self.inputs = attr.evolve(self.inputs, **modified_inputs)
        self.audit.start_audit(odir)
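Finally, a minimal sketch of the checksum-keyed locking both pydra snippets rely on; the paths and hash below are stand-ins. Every process whose task hashes to the same checksum contends on the same lock file, so equivalent runs are serialized while unrelated tasks proceed in parallel:

from pathlib import Path

from filelock import SoftFileLock

cache_dir = Path("/tmp/pydra-cache")  # hypothetical cache directory
cache_dir.mkdir(parents=True, exist_ok=True)
checksum = "abc123"  # stand-in for a real input-based hash
with SoftFileLock(str(cache_dir / (checksum + ".lock"))):
    pass  # compute the task here, or reuse its cached result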