From 07b0db75878bd541730c51c79622101aa09f4a81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Tue, 17 Dec 2024 01:13:51 +0100 Subject: [PATCH 01/14] report crash with shell scheduler --- pytest_parallel/plugin.py | 13 ++++ pytest_parallel/process_worker.py | 55 ++++++++++---- pytest_parallel/send_report.py | 72 ++++++++++++++++++ pytest_parallel/shell_static_scheduler.py | 75 +++++++++++++------ .../test_crash_reporting.py | 49 ++++++++++++ 5 files changed, 229 insertions(+), 35 deletions(-) create mode 100644 pytest_parallel/send_report.py create mode 100644 test/pytest_parallel_tests/test_crash_reporting.py diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py index 3544bbc..11e6c67 100644 --- a/pytest_parallel/plugin.py +++ b/pytest_parallel/plugin.py @@ -10,9 +10,14 @@ import resource import pytest from _pytest.terminal import TerminalReporter +#import signal # -------------------------------------------------------------------------- def pytest_addoption(parser): + #def sig_handler(sig, frame): + # print(f'\n\n\n\nSIGNAL_HANDLER caught {sig}') + #signal.signal(11, sig_handler) + parser.addoption( '--scheduler', dest='scheduler', @@ -98,6 +103,14 @@ def pytest_configure(config): assert not slurm_additional_cmds, 'Option `--slurm-additional-cmds` only available when `--scheduler=slurm`' assert not slurm_file, 'Option `--slurm-file` only available when `--scheduler=slurm`' + if (scheduler == 'shell' or scheduler == 'slurm') and not is_worker: + from mpi4py import MPI + assert MPI.COMM_WORLD.size == 1, 'Do not launch `pytest_parallel` on more that one process\n' \ + 'when `--scheduler=shell` or `--scheduler=slurm`.\n' \ + '`pytest_parallel` spawn mpi processes itself.\n' \ + f'You may want to use --n-workers={MPI.COMM_WORLD.size}.' + + if scheduler == 'slurm' and not is_worker: assert slurm_options or slurm_file, 'You need to specify either `--slurm-options` or `--slurm-file` when `--scheduler=slurm`' diff --git a/pytest_parallel/process_worker.py b/pytest_parallel/process_worker.py index a3c7f22..e09cc3d 100644 --- a/pytest_parallel/process_worker.py +++ b/pytest_parallel/process_worker.py @@ -3,10 +3,12 @@ from mpi4py import MPI import socket +from pathlib import Path import pickle from . 
import socket_utils from .utils import get_n_proc_for_test, run_item_test from .gather_report import gather_report_on_local_rank_0 +import signal class ProcessWorker: def __init__(self, scheduler_ip_address, scheduler_port, test_idx, detach): @@ -14,6 +16,11 @@ def __init__(self, scheduler_ip_address, scheduler_port, test_idx, detach): self.scheduler_port = scheduler_port self.test_idx = test_idx self.detach = detach + self.use_sockets = False + + + def _file_path(self, when): + return Path(f'.pytest_parallel/tmp/{self.test_idx}_{when}') @pytest.hookimpl(tryfirst=True) def pytest_runtestloop(self, session) -> bool: @@ -26,20 +33,35 @@ def pytest_runtestloop(self, session) -> bool: item.test_info = {'test_idx': self.test_idx, 'fatal_error': None} - if comm.Get_size() != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly + # remove previous file if they existed + if comm.rank == 0: + for when in {'fatal_error', 'setup', 'call', 'teardown'}: + path = self._file_path(when) + if path.exists(): + path.unlink() + + if comm.size != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly error_info = f'FATAL ERROR in pytest_parallel with slurm scheduling: test `{item.nodeid}`' \ f' uses a `comm` of size {test_comm_size} but was launched with size {comm.Get_size()}.\n' \ f' This generally indicates that `srun` does not interoperate correctly with MPI.' + if self.use_sockets: + item.test_info['fatal_error'] = error_info + else: + if comm.rank == 0: + file_path = self._file_path('fatal_error') + with open(file_path, "w") as f: + f.write(error_info) + return True - item.test_info['fatal_error'] = error_info - else: # normal case: the test can be run - nextitem = None - run_item_test(item, nextitem, session) + # run the test + nextitem = None + run_item_test(item, nextitem, session) - if not self.detach and comm.Get_rank() == 0: # not detached: proc 0 is expected to send results to scheduling process - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.connect((self.scheduler_ip_address, self.scheduler_port)) - socket_utils.send(s, pickle.dumps(item.test_info)) + if self.use_sockets: + if not self.detach and comm.rank == 0: # not detached: proc 0 is expected to send results to scheduling process + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.connect((self.scheduler_ip_address, self.scheduler_port)) + socket_utils.send(s, pickle.dumps(item.test_info)) if item.test_info['fatal_error'] is not None: assert 0, f'{item.test_info["fatal_error"]}' @@ -61,9 +83,14 @@ def pytest_runtest_makereport(self, item): @pytest.hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): assert report.when in ("setup", "call", "teardown") # only known tags + sub_comm = report.sub_comm # keep `sub_comm` because `gather_report_on_local_rank_0` removes it gather_report_on_local_rank_0(report) - report.test_info.update({report.when: {'outcome' : report.outcome, - 'longrepr': report.longrepr, - 'duration': report.duration, }}) - - + report_info = {'outcome' : report.outcome, + 'longrepr': report.longrepr, + 'duration': report.duration, } + if self.use_sockets: + report.test_info.update({report.when: report_info}) + else: + if sub_comm.rank == 0: + with open(self._file_path(report.when), "wb") as f: + f.write(pickle.dumps(report_info)) diff --git a/pytest_parallel/send_report.py b/pytest_parallel/send_report.py new file mode 100644 index 0000000..412f2c0 --- /dev/null +++ b/pytest_parallel/send_report.py @@ -0,0 +1,72 @@ +import argparse +import 
socket +import pickle +from pathlib import Path +from . import socket_utils +from _pytest._code.code import ( + ExceptionChainRepr, + ReprTraceback, + ReprEntryNative, +) + + +parser = argparse.ArgumentParser(description='Send return the codes of the tests to the master pytest_parallel process') + +parser.add_argument('--_scheduler_ip_address', dest='_scheduler_ip_address', type=str) +parser.add_argument('--_scheduler_port', dest='_scheduler_port', type=int) +parser.add_argument('--_test_idx', dest='_test_idx', type=int) +parser.add_argument('--_test_name', dest='_test_name', type=str) + +args = parser.parse_args() + +def _file_path(when): + return Path(f'.pytest_parallel/tmp/{args._test_idx}_{when}') + +test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absense means no error) + +# 'fatal_error' file +try: + file_path = _file_path('fatal_error') + with open(file_path, 'r') as file: + fatal_error = file.read() + test_info['fatal_error'] = fatal_error +except FileNotFoundError: # There was no fatal error + pass + + +# 'setup/call/teardown' files +already_failed = False +for when in ('setup', 'call', 'teardown'): + try: + file_path = _file_path(when) + with open(file_path, 'rb') as file: + report_info = file.read() + report_info = pickle.loads(report_info) + test_info[when] = report_info + except FileNotFoundError: # Supposedly not found because the test crashed before writing the file + collect_longrepr = [] + msg = f'Error: the test crashed. ' + red = 31 + bold = 1 + msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m' + msg += f'Log file: {args._test_name}\n' + trace_back = ReprTraceback([ReprEntryNative(msg)], None, None) + collect_longrepr.append( + (trace_back, None, None) + ) + longrepr = ExceptionChainRepr(collect_longrepr) + + outcome = 'passed' if already_failed else 'failed' # No need to report the error twice + test_info[when] = {'outcome' : outcome, + 'longrepr': longrepr, + 'duration': 0, } # unable to report accurately + + already_failed = True + except pickle.PickleError: + test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}' + + +with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.connect((args._scheduler_ip_address, args._scheduler_port)) + socket_utils.send(s, pickle.dumps(test_info)) + diff --git a/pytest_parallel/shell_static_scheduler.py b/pytest_parallel/shell_static_scheduler.py index 980c476..e7f97a6 100644 --- a/pytest_parallel/shell_static_scheduler.py +++ b/pytest_parallel/shell_static_scheduler.py @@ -1,5 +1,6 @@ import pytest import os +import shutil import stat import subprocess import socket @@ -39,8 +40,7 @@ def parse_job_id_from_submission_output(s): # https://stackoverflow.com/a/34177358 def command_exists(cmd_name): """Check whether `name` is on PATH and marked as executable.""" - from shutil import which - return which(cmd_name) is not None + return shutil.which(cmd_name) is not None def _get_my_ip_address(): hostname = socket.gethostname() @@ -90,28 +90,56 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, main_invoke_params, n items = sorted(items_to_run, key=lambda item: item.n_proc, reverse=True) # launch `mpiexec` for each item - worker_flags=f"--_worker --_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" + script_prolog = '' + script_prolog += '#!/bin/bash\n\n' + #script_prolog += 'return_codes=(' + ' '.join(['0'*len(items)]) + ')\n\n' # bash array that will contain the return codes of each test TODO DEL + + 
socket_flags=f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" cmds = [] current_proc = 0 - for item in items: + for i,item in enumerate(items): test_idx = item.original_index - test_out_file_base = f'.pytest_parallel/{remove_exotic_chars(item.nodeid)}' - cmd = mpi_command(current_proc, item.n_proc) - cmd += f' python3 -u -m pytest -s {worker_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' - cmd += f' > {test_out_file_base}' + test_out_file = f'.pytest_parallel/{remove_exotic_chars(item.nodeid)}' + cmd = '(' + cmd += mpi_command(current_proc, item.n_proc) + cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' + cmd += f' > {test_out_file} 2>&1' + cmd += f' ; python -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' + cmd += ')' cmds.append(cmd) current_proc += item.n_proc - script = " & \\\n".join(cmds) + '\n' + # create the script + ## 1. prolog + script = script_prolog + + ## 2. join all the commands + ## '&' makes it work in parallel (the following commands does not wait for the previous ones to finish) + ## '\' (escaped with '\\') makes it possible to use multiple lines + script += ' & \\\n'.join(cmds) + '\n' + + ## 3. wait everyone + script += '\nwait\n' + + # TODO DEL + ## 4. send error codes to to the server + #script += f'\npython -m pytest_parallel.send_return_codes {socket_flags} --return_codes=\"' + '${return_codes[@]}' + '\"\n' + + Path('.pytest_parallel').mkdir(exist_ok=True) + shutil.rmtree('.pytest_parallel/tmp', ignore_errors=True) + Path('.pytest_parallel/tmp').mkdir() script_path = f'.pytest_parallel/pytest_static_sched_{i_step+1}.sh' + #print('script_path = ',script_path) + #print('sscript= ',script) with open(script_path,'w') as f: f.write(script) current_permissions = stat.S_IMODE(os.lstat(script_path).st_mode) os.chmod(script_path, current_permissions | stat.S_IXUSR) - p = subprocess.Popen([script_path], shell=True, stdout=subprocess.PIPE) + #p = subprocess.Popen([script_path], shell=True, stdout=subprocess.PIPE) + p = subprocess.Popen([script_path], shell=True) print(f'\nLaunching tests (step {i_step+1}/{n_step})...') return p @@ -125,17 +153,22 @@ def receive_items(items, session, socket, n_item_to_recv): with conn: msg = socket_utils.recv(conn) test_info = pickle.loads(msg) # the worker is supposed to have send a dict with the correct structured information - test_idx = test_info['test_idx'] - if test_info['fatal_error'] is not None: - assert 0, f'{test_info["fatal_error"]}' - item = items[test_idx] # works because of precondition - item.sub_comm = None - item.info = test_info - - # "run" the test (i.e. trigger PyTest pipeline but do not really run the code) - nextitem = None # not known at this point - run_item_test(item, nextitem, session) - n_item_to_recv -= 1 + #print(f"{test_info=}") + if 'signal_info' in test_info: + print('signal_info= ',test_info['signal_info']) + break; + else: + test_idx = test_info['test_idx'] + if test_info['fatal_error'] is not None: + assert 0, f'{test_info["fatal_error"]}' + item = items[test_idx] # works because of precondition + item.sub_comm = None + item.info = test_info + + # "run" the test (i.e. 
trigger PyTest pipeline but do not really run the code) + nextitem = None # not known at this point + run_item_test(item, nextitem, session) + n_item_to_recv -= 1 class ShellStaticScheduler: def __init__(self, main_invoke_params, ntasks, detach): diff --git a/test/pytest_parallel_tests/test_crash_reporting.py b/test/pytest_parallel_tests/test_crash_reporting.py new file mode 100644 index 0000000..4cb81a4 --- /dev/null +++ b/test/pytest_parallel_tests/test_crash_reporting.py @@ -0,0 +1,49 @@ +# TODO These test file was used to develop crash reporting when scheduler=shell or scheduler=slurm, +# but it is not currently integrated to the pytest_parallel test suite +import pytest_parallel +import signal + +def test_seq_pass(): + assert 1 + +def test_seq_fail(): + assert 0 + +def test_seq_crash(): + signal.raise_signal(11) # SIGSEGV + + +@pytest_parallel.mark.parallel(2) +def test_par_pass(comm): + assert 1 + +@pytest_parallel.mark.parallel(2) +def test_par_fail(comm): + assert 0 + +@pytest_parallel.mark.parallel(2) +def test_par_pass_fail(comm): + if comm.rank==0: + assert 1 + if comm.rank==1: + assert 0 + + + +@pytest_parallel.mark.parallel(2) +def test_par_crash(comm): + signal.raise_signal(11) # SIGSEGV + +@pytest_parallel.mark.parallel(2) +def test_par_pass_crash(comm): + if comm.rank==1: + assert 1 + if comm.rank==1: + signal.raise_signal(11) # SIGSEGV + +@pytest_parallel.mark.parallel(2) +def test_par_crash_fail(comm): + if comm.rank==1: + signal.raise_signal(11) # SIGSEGV + if comm.rank==1: + assert 0 From de1eaa85a144241c9afb22e3420750d4042e0a61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Thu, 2 Jan 2025 17:02:03 +0100 Subject: [PATCH 02/14] Minor (cleaning shell_static_scheduler) --- pytest_parallel/shell_static_scheduler.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pytest_parallel/shell_static_scheduler.py b/pytest_parallel/shell_static_scheduler.py index e7f97a6..9dc62eb 100644 --- a/pytest_parallel/shell_static_scheduler.py +++ b/pytest_parallel/shell_static_scheduler.py @@ -92,7 +92,6 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, main_invoke_params, n # launch `mpiexec` for each item script_prolog = '' script_prolog += '#!/bin/bash\n\n' - #script_prolog += 'return_codes=(' + ' '.join(['0'*len(items)]) + ')\n\n' # bash array that will contain the return codes of each test TODO DEL socket_flags=f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" cmds = [] @@ -120,18 +119,11 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, main_invoke_params, n ## 3. wait everyone script += '\nwait\n' - - # TODO DEL - ## 4. 
send error codes to to the server - #script += f'\npython -m pytest_parallel.send_return_codes {socket_flags} --return_codes=\"' + '${return_codes[@]}' + '\"\n' - Path('.pytest_parallel').mkdir(exist_ok=True) shutil.rmtree('.pytest_parallel/tmp', ignore_errors=True) Path('.pytest_parallel/tmp').mkdir() script_path = f'.pytest_parallel/pytest_static_sched_{i_step+1}.sh' - #print('script_path = ',script_path) - #print('sscript= ',script) with open(script_path,'w') as f: f.write(script) From d648ba476289e03167f194ad0e357cf0e8d628eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Fri, 3 Jan 2025 17:14:43 +0100 Subject: [PATCH 03/14] report crash with SLURM scheduler --- pytest_parallel/plugin.py | 15 ++++----- pytest_parallel/process_scheduler.py | 31 +++++++++++++------ .../test_crash_reporting.py | 2 +- 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py index 11e6c67..8b34025 100644 --- a/pytest_parallel/plugin.py +++ b/pytest_parallel/plugin.py @@ -14,10 +14,6 @@ # -------------------------------------------------------------------------- def pytest_addoption(parser): - #def sig_handler(sig, frame): - # print(f'\n\n\n\nSIGNAL_HANDLER caught {sig}') - #signal.signal(11, sig_handler) - parser.addoption( '--scheduler', dest='scheduler', @@ -56,9 +52,14 @@ def pytest_addoption(parser): # because it can mess SLURM `srun` if "--scheduler=slurm" in sys.argv: assert 'mpi4py.MPI' not in sys.modules, 'Internal pytest_parallel error: mpi4py.MPI should not be imported' \ - ' when we are about to register and environment for SLURM' \ - ' (because importing mpi4py.MPI makes the current process look like and MPI process,' \ - ' and SLURM does not like that)' + ' when we are about to register and environment for SLURM' \ + ' (because importing mpi4py.MPI makes the current process look like and MPI process,' \ + ' and SLURM does not like that)' + assert os.getenv('I_MPI_MPIRUN') is None, 'Internal pytest_parallel error: the environment variable I_MPI_MPIRUN is set' \ + f' with value "{os.getenv("I_MPI_MPIRUN")}"' \ + ' while pytest was invoked with "--scheduler=slurm".\n' \ + ' This indicates that pytest was run through MPI, and SLURM generally does not like that.\n' \ + ' With "--scheduler=slurm", just run pytest directly, not through `mpirun/mpiexec/srun`, and let pytest launch MPI itself.' 
r = subprocess.run(['env','--null'], stdout=subprocess.PIPE) # `--null`: end each output line with NUL, required by `sbatch --export-file` diff --git a/pytest_parallel/process_scheduler.py b/pytest_parallel/process_scheduler.py index 8245395..9dfdb61 100644 --- a/pytest_parallel/process_scheduler.py +++ b/pytest_parallel/process_scheduler.py @@ -1,4 +1,5 @@ import pytest +import shutil import subprocess import socket import pickle @@ -35,8 +36,7 @@ def parse_job_id_from_submission_output(s): # https://stackoverflow.com/a/34177358 def command_exists(cmd_name): """Check whether `name` is on PATH and marked as executable.""" - from shutil import which - return which(cmd_name) is not None + return shutil.which(cmd_name) is not None def _get_my_ip_address(): hostname = socket.gethostname() @@ -80,8 +80,8 @@ def submit_items(items_to_run, socket, main_invoke_params, ntasks, slurm_conf): slurm_header = '#!/bin/bash\n' slurm_header += '\n' slurm_header += '#SBATCH --job-name=pytest_parallel\n' - slurm_header += '#SBATCH --output=.pytest_parallel/slurm.%j.out\n' - slurm_header += '#SBATCH --error=.pytest_parallel/slurm.%j.err\n' + slurm_header += '#SBATCH --output=.pytest_parallel/slurm.out\n' + slurm_header += '#SBATCH --error=.pytest_parallel/slurm.err\n' for opt in slurm_conf['options']: slurm_header += f'#SBATCH {opt}\n' slurm_header += f'#SBATCH --ntasks={ntasks}' @@ -93,22 +93,33 @@ def submit_items(items_to_run, socket, main_invoke_params, ntasks, slurm_conf): srun_options = slurm_conf['srun_options'] if srun_options is None: srun_options = '' - worker_flags=f"--_worker --_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" + socket_flags = f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" cmds = '' if slurm_conf['additional_cmds'] is not None: cmds += slurm_conf['additional_cmds'] + '\n' for item in items: test_idx = item.original_index - test_out_file_base = f'.pytest_parallel/{remove_exotic_chars(item.nodeid)}' - cmd = f'srun {srun_options} --exclusive --ntasks={item.n_proc} -l' - cmd += f' python3 -u -m pytest -s {worker_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' - cmd += f' > {test_out_file_base} 2>&1' - cmd += ' &\n' # launch everything in parallel + test_out_file = f'.pytest_parallel/{remove_exotic_chars(item.nodeid)}' + cmd = '(' + cmd += f'srun {srun_options}' + cmd += ' --exclusive' + cmd += ' --kill-on-bad-exit=1' # make fatal errors (e.g. segfault) kill the whole srun step. 
Else, deadlock (at least with Intel MPI) + cmd += f' --ntasks={item.n_proc}' + cmd += ' -l' # + cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' + cmd += f' > {test_out_file} 2>&1' + cmd += f' ; python -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' + cmd += ')' + cmd += ' &\n' # launch everything in parallel cmds += cmd cmds += 'wait\n' job_cmds = f'{slurm_header}\n\n{cmds}' + Path('.pytest_parallel').mkdir(exist_ok=True) + shutil.rmtree('.pytest_parallel/tmp', ignore_errors=True) + Path('.pytest_parallel/tmp').mkdir() + with open('.pytest_parallel/job.sh','w') as f: f.write(job_cmds) diff --git a/test/pytest_parallel_tests/test_crash_reporting.py b/test/pytest_parallel_tests/test_crash_reporting.py index 4cb81a4..a45a561 100644 --- a/test/pytest_parallel_tests/test_crash_reporting.py +++ b/test/pytest_parallel_tests/test_crash_reporting.py @@ -36,7 +36,7 @@ def test_par_crash(comm): @pytest_parallel.mark.parallel(2) def test_par_pass_crash(comm): - if comm.rank==1: + if comm.rank==0: assert 1 if comm.rank==1: signal.raise_signal(11) # SIGSEGV From 01849fdf48d0b409a465856fd13f27af7ac606b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Fri, 3 Jan 2025 17:30:45 +0100 Subject: [PATCH 04/14] process_worker: remove use of socket here. The socket messaging is done by send_report.py that is run after the test (even if it segfaults) --- pytest_parallel/process_worker.py | 36 +++++++++---------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/pytest_parallel/process_worker.py b/pytest_parallel/process_worker.py index e09cc3d..49d3849 100644 --- a/pytest_parallel/process_worker.py +++ b/pytest_parallel/process_worker.py @@ -2,13 +2,10 @@ from mpi4py import MPI -import socket from pathlib import Path import pickle -from . import socket_utils from .utils import get_n_proc_for_test, run_item_test from .gather_report import gather_report_on_local_rank_0 -import signal class ProcessWorker: def __init__(self, scheduler_ip_address, scheduler_port, test_idx, detach): @@ -16,7 +13,6 @@ def __init__(self, scheduler_ip_address, scheduler_port, test_idx, detach): self.scheduler_port = scheduler_port self.test_idx = test_idx self.detach = detach - self.use_sockets = False def _file_path(self, when): @@ -41,28 +37,19 @@ def pytest_runtestloop(self, session) -> bool: path.unlink() if comm.size != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly - error_info = f'FATAL ERROR in pytest_parallel with slurm scheduling: test `{item.nodeid}`' \ - f' uses a `comm` of size {test_comm_size} but was launched with size {comm.Get_size()}.\n' \ - f' This generally indicates that `srun` does not interoperate correctly with MPI.' - if self.use_sockets: - item.test_info['fatal_error'] = error_info - else: - if comm.rank == 0: - file_path = self._file_path('fatal_error') - with open(file_path, "w") as f: - f.write(error_info) + if comm.rank == 0: + error_info = f'FATAL ERROR in pytest_parallel with slurm scheduling: test `{item.nodeid}`' \ + f' uses a `comm` of size {test_comm_size} but was launched with size {comm.Get_size()}.\n' \ + f' This generally indicates that `srun` does not interoperate correctly with MPI.' 
+ file_path = self._file_path('fatal_error') + with open(file_path, "w") as f: + f.write(error_info) return True # run the test nextitem = None run_item_test(item, nextitem, session) - if self.use_sockets: - if not self.detach and comm.rank == 0: # not detached: proc 0 is expected to send results to scheduling process - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.connect((self.scheduler_ip_address, self.scheduler_port)) - socket_utils.send(s, pickle.dumps(item.test_info)) - if item.test_info['fatal_error'] is not None: assert 0, f'{item.test_info["fatal_error"]}' @@ -88,9 +75,6 @@ def pytest_runtest_logreport(self, report): report_info = {'outcome' : report.outcome, 'longrepr': report.longrepr, 'duration': report.duration, } - if self.use_sockets: - report.test_info.update({report.when: report_info}) - else: - if sub_comm.rank == 0: - with open(self._file_path(report.when), "wb") as f: - f.write(pickle.dumps(report_info)) + if sub_comm.rank == 0: + with open(self._file_path(report.when), "wb") as f: + f.write(pickle.dumps(report_info)) From f68be8548306642cc6abd42e6f0c45003978ff29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Tue, 7 Jan 2025 16:10:34 +0100 Subject: [PATCH 05/14] Documentation update + minor improvements --- README.md | 287 ++++++++++++++++++++++++++---- doc/images/test_fail.png | Bin 45134 -> 47905 bytes doc/images/test_skip.png | Bin 8742 -> 24419 bytes pytest_parallel/plugin.py | 9 +- pytest_parallel/process_worker.py | 5 +- 5 files changed, 258 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index e4296a9..0dcffe7 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ pytest_parallel =============== -**pytest_parallel** extends [PyTest](http://pytest.org) to support parallel testing using mpi4py. +**pytest_parallel** extends [PyTest](http://pytest.org) to support parallel testing using [mpi4py](https://mpi4py.readthedocs.io/en/stable/). [![Python 3](https://img.shields.io/static/v1?label=Python&logo=Python&color=3776AB&message=3)](https://www.python.org/) ![CI workflow](https://github.com/onera/pytest_parallel/actions/workflows/test.yml/badge.svg) @@ -10,32 +10,42 @@ pytest_parallel [![Windows OK](https://img.shields.io/static/v1?label=Windows&logo=Windows&color=white&message=%E2%9C%93)](https://en.wikipedia.org/wiki/Windows) ![License-MPL](https://img.shields.io/badge/license-MPL%202.0-blue.svg) -## Introduction ## +## Quick start ## + +**pytest_parallel** automates the execution of MPI parallel tests. Let's say you want to precisely test several algorithms, each with a specific number of processes: ```Python import pytest_parallel @pytest_parallel.mark.parallel(2) -def test_fail_one_rank(comm): - assert comm.Get_size()==2 - if comm.Get_rank()==0: assert True - if comm.Get_rank()==1: assert False +def test_A(comm): + if comm.rank == 1: assert False + +def test_B(): + assert True + +@pytest_parallel.mark.parallel(3) +def test_C(comm): + assert comm.size == 3 + +def test_D(): + assert False ``` -Here a test that should run on two processes is declared. When the test suite is run, the test will execute on two MPI processes. The `comm` fixture is an mpi4py communicator that is private to the test. +Tests decorated with `@pytest_parallel.mark.parallel(N)` are specified to run in parallel on `N` processes. The `comm` fixture is a communicator private to the test, with `comm.size == N`. -The test can be run on two processes, e.g. with: +You can run the tests with MPI, e.g. 
with: ```Bash -mpirun -np 2 pytest --color=yes test_pytest_parallel.py +mpirun -np 4 pytest --color=yes -vv test_pytest_parallel.py ``` And the following output will be produced: ![example failing test](doc/images/test_fail.png) -If there is not enough MPI processes to run the test, it will be skipped. -For instance, the following launching command: +If there is not enough MPI processes to run some tests, they will be skipped. +For instance, the following command: ```Bash mpirun -np 1 pytest --color=yes test_pytest_parallel.py @@ -47,11 +57,11 @@ would lead to: ## The `comm` fixture ## -The `comm` fixture that you get when decorating your test with `pytest_parallel.mark.parallel` is a sub-communicator of `MPI.COMM_WORLD` that is unique to each test. +The `comm` fixture you get when decorating your test with `@pytest_parallel.mark.parallel` is a sub-communicator of `MPI.COMM_WORLD` that is unique to each test. ## The `parallel` decorator ## -The `pytest_parallel.mark.parallel(n_procs)` decorator takes one argument, `n_procs`. +The `@pytest_parallel.mark.parallel(n_procs)` decorator takes one argument, `n_procs`. `n_procs` is generally an integer that specifies the size of the communicator that will be given to the test through the `comm` fixture. @@ -60,87 +70,243 @@ The `pytest_parallel.mark.parallel(n_procs)` decorator takes one argument, `n_pr ```Python @pytest_parallel.mark.parallel([2,4]) def test_param(comm): - print(comm.Get_size()) + print(comm.size) ``` will run two times: once with `comm` being a communicator of size 2, once with `comm` being a communicator of size 4. +## Installation ## + +### Through pip ### + +```Bash +pip install "git+https://github.com/onera/pytest_parallel.git" +``` +Note that you can pass the `--user` option to `pip` if you don't have root permissions or want to install `pytest_parallel` in your home directory. + +### Manually ### + +**pytest_parallel** is a pure PyTest plugin. It depends on mpi4py and Numpy. + +To install manually: +```Bash +(mkdir install_dir && cd install_dir && git clone https://github.com/onera/pytest_parallel.git) +export PYTHONPATH=install_dir/pytest_parallel:$PYTHONPATH +export PYTEST_PLUGINS=pytest_parallel.plugin +``` + ## Schedulers ## -**pytest_parallel** comes with three kind of schedulers. To understand how they work, let's take the following example: +### Process-reuse schedulers ### + +Process-reuse schedulers are mostly useful when you have numerous tests that are very fast, typically unit tests. + +For these schedulers, **pytest_parallel** is always launched through MPI, e.g.: + +```Bash +mpirun -np 4 pytest test_pytest_parallel.py +``` + +Here, 4 MPI processes have been spawn by `mpirun`, and **pytest_parallel** will use these 4 processes to run all the tests. This means that one process (let's say the one on rank 2 of `MPI.COMM_WORLD`) will likely be used by several tests. + +The main advantage is that the Python environment is loaded once and for all, hence if you have 1000 tests that take 1 millisecond each, and the loading of all your Python modules by the interpreter takes 1 second, then running PyTest will take approximately 2 seconds. + +However, the tests are not completely isolated, so if one test crash (e.g. due to a segmentation fault), the segfault message may not point you directly to the faulty test. Deadlocks will also be difficult to pinpoint. 
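+
+To picture how one pool of processes can serve tests of different sizes, here is a minimal sketch (this is *not* the actual **pytest_parallel** implementation, just the underlying MPI idea): each test receives a private sub-communicator carved out of `MPI.COMM_WORLD` with `Comm.Split`, and the ranks the test does not need simply idle. The helper name `sub_comm_for_test` is only illustrative.
+```Python
+# run e.g. with: mpirun -np 4 python this_sketch.py
+from mpi4py import MPI
+
+def sub_comm_for_test(n_proc):
+    world = MPI.COMM_WORLD
+    # ranks [0, n_proc) take part in the test, the other ranks idle
+    color = 0 if world.rank < n_proc else MPI.UNDEFINED
+    return world.Split(color, key=world.rank)  # MPI.COMM_NULL on idle ranks
+
+comm = sub_comm_for_test(2)  # roughly what the `comm` fixture would expose
+if comm != MPI.COMM_NULL:
+    assert comm.size == 2
+    comm.Free()
+```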
+ +There are 3 kinds of process-reuse schedulers: +- the [sequential scheduler](#sequential-scheduler) +- the [static scheduler](#static-scheduler) +- the [dynamic scheduler](#dynamic-scheduler) + +To understand how they work, let's again our previous example: ```Python import pytest_parallel @pytest_parallel.mark.parallel(2) def test_A(comm): - if comm.Get_rank()==1: assert False + if comm.rank == 1: assert False def test_B(): assert True @pytest_parallel.mark.parallel(3) def test_C(comm): - assert comm.Get_size() == 3 + assert comm.size == 3 def test_D(): assert False ``` -Let's also fix the number of workers that we will be using to 4. This means that the test is launched with: +and run it on 4 processes. + +#### Sequential scheduler #### + +The **sequential** scheduler is the default one. To enable it explicitly, you can pass the `--scheduler=sequential` option to PyTest. ```Bash -mpirun -np 4 pytest test_pytest_parallel.py +mpirun -np 4 pytest --scheduler=sequential test_pytest_parallel.py ``` -### Sequential scheduler ### - -The **sequential** scheduler just takes each test in order, one by one, and executes it on as many processes as it needs. The other processes are sleeping. On our example, this would result on the following sequence diagram: +This scheduler just takes each test in order, one by one, and executes on as many processes it needs. The other processes are sleeping. On our example, this would result in the following sequence diagram: ![sequential scheduler sequence diagram](doc/images/seq.png) -While it is not optimized for performance, the sequential scheduler is very useful when you get unrecoverable errors (e.g. segfault) because then your PyTest report may be incomplete. Running tests sequentially allows at least to find which test is at fault. +While it is not optimized for performance, the sequential scheduler is very useful when you get unrecoverable errors (e.g. segfaults) because then your PyTest report may be incomplete. Running tests sequentially allows at least to find which test is at fault. -The sequential scheduler is the default one. To enable it explicitly, you can pass the `--scheduler=sequential` option to PyTest. +#### Static scheduler #### -### Static scheduler ### +The static scheduler can be selected with: +```Bash +mpirun -np 4 pytest --scheduler=static test_pytest_parallel.py +``` The **static** scheduler tries to distribute tests to minimize the number of idle processes. The process is static, that is, after test collection, it determines which process will execute which test, and in which order. On our example, it will result in the following scheduling: ![static scheduler sequence diagram](doc/images/static.png) The scheduler works by steps. Each step has `n_worker` slots (`n_worker` being the number of processes that PyTest was launched with). Each test will try to find a step with enough slots and will consume `n_proc` slots on the step. If no step is found, a new one is created, until each test has a step. -While this scheduler is more optimized, since it gives an a priori scheduling, it is not optimal depending on the duration of the tests. Let's look again at our example, but let's say `test_B` takes much longer than the others. We will then have the following sequence: +While this scheduler is more optimized, it gives an *a priori* scheduling, hence it is not optimal depending on the duration of the tests. Let's look again at our example, but let's say `test_B` takes much longer than the others. 
We will then have the following sequence: ![static scheduler sequence diagram - bad case](doc/images/static_bad.png) -We see that processes 0,1 and 2 wait at the first step for process 3 to finish. +We see that processes 0,1 and 2 wait for process 3 to finish the first step, whereas they could do meaningful work. ### Dynamic scheduler ### -The **dynamic** scheduler spawns a new MPI process which acts as the master scheduler and sends work to the original processes. The scheduler tries to schedule tests requiring the most processes first. The scheduler tries to send work to idle process until all the processes are busy executing one test, or when not enough processes are ready to accept a test. It then waits for a signal that workers have finished their test to schedule further work. +The **dynamic** scheduler can be selected with: +```Bash +mpirun -np 4 pytest --scheduler=dynamic test_pytest_parallel.py +``` + +The scheduler spawns a new MPI process which acts as the master scheduler and sends work to the original processes. The scheduler tries to schedule tests requiring the most processes first. It sends work to idle processes until all the processes are busy executing a test, or when not enough processes are ready to accept a test. It then waits for a signal that workers have finished their test to schedule further work. Example: -![static scheduler sequence diagram - bad case](doc/images/dyn_anim.png) +![dynamic scheduler sequence diagram](doc/images/dyn_anim.png) + +### Process-isolate schedulers ### + +Process-isolate schedulers spawn a new process for each new test. Or more exactly, for a test that is specified to use a communicator of size `N`, **pytest_parallel** will launch `N` MPI processes just for this test, and it will do so for each test of the test suite. + +Or course, these schedulers are more robust: even if a test crashes with an irrecoverable error, the other tests are not impacted and **pytest_parallel** will report errors correctly. But remember that these schedulers need to start a new Python interpreter and load the Python modules for each test: if you have a lot of fast tests, the start-up times add up. + +If you use a process-isolate scheduler, contrary to process-reuse schedulers, you don't launch PyTest through `mpirun -np N`. Rather, you launch PyTest directly and specify the `--n-workers` parameter. + +There are 2 kinds of process-isolate schedulers: +- the shell scheduler +- the SLURM scheduler + +#### Shell scheduler ### + +The **shell** scheduler can be selected with: +```Bash +pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py +``` +The scheduling algorithm is the same as the [static scheduler](#static-scheduler). + +#### SLURM scheduler ### +The **SLURM** scheduler can be selected with: +```Bash +pytest --n-workers=4 --scheduler=slurm test_pytest_parallel.py +``` +SLURM takes care of the scheduling. This scheduler as specific options: +- `--slurm-options`: a list options for `sbatch`. For example: `--slurm-options="--time=00:30:00 --qos=my_queue"`. Do **not** specify `--ntasks` here, **pytest_parallel** will use the value given by `--n-workers`. +- `--slurm-srun-options`: a list options for `srun`. For example: `--slurm-srun-options="--mem-per-cpu=4GBb"` +- `--slurm-export-env`: should the SLURM job use the same environment as the terminal that spawned it? Enabled by default. +- `--slurm-additional-cmds`: commands to pass to the SLURM job that should be executed before the tests. 
Example: `--slurm-additional-cmds="source my_env.sh"` + -#### Design alternative #### +## FAQ ## -Instead of spawning a new MPI **process**, it would have been possible to spawn a new **thread** on process 0. However, it would then require to use `MPI_thread_init` with a value of at least `MPI_THREAD_FUNNELED`, and in practice, `MPI_THREAD_MULTIPLE` to ease the implementation of self-communication on rank 0. Here, no thread level is required (i.e. `MPI_THREAD_SINGLE` is **fine**). +1. **pytest_parallel** gives me a new communicator for each test, but my code only uses `MPI.COMM_WORLD`, how can I use **pytest_parallel**? +The [process-isolate schedulers](#process-isolate-schedulers) can be used with tests using different sizes of `MPI.COMM_WORLD`. The `comm` fixture can then be discarded: + +```Python +import pytest_parallel +from mpi4py import MPI + +@pytest_parallel.mark.parallel(2) +def test_fail_one_rank(comm): + assert comm.size == MPI.COMM_WORLD.size + my_algo_implicitly_using_MPI_COMM_WORLD() + +@pytest_parallel.mark.parallel(3) +def test_fail_one_rank(comm): + # `comm` is unused but you currently need to write it down anyway + my_other_algo_implicitly_using_MPI_COMM_WORLD() +``` + +```Bash +mpirun -np 4 pytest --scheduler=shell test_pytest_parallel.py +``` + +For unit tests, process-isolate schedulers are very slow, and **[process-reuse schedulers](#process-reuse-schedulers) will not work**. We really encourage you to generalize your function with an additional `comm` argument that is used for communication, rather than forcing your users to `MPI.COMM_WORLD`. +It would be possible to develop hybrid process-reuse schedulers where processes are re-used, but only among tests of the same communicator size (and repeat the operation for as many communicator sizes there are on the test suite). If you feel the need, write a feature request and maybe we will implement it. -## Prerequisites ## -**pytest_parallel** is a pure PyTest plugin. It depends on mpi4py and Numpy +2. Can I write an MPI test with no fixed number of processes and let **pytest_parallel** use `MPI.COMM_WORLD`? + +Not currently. **pytest_parallel** is designed to dissociate the parallelism specified for each test and the resources given to execute them. +If the need arizes, we could however: +- implement a mode that would use the number of processes given by the command line instead of the one specified with each test +- add a `@pytest_parallel.mark.parallel_from_context` decorator that would mark the test to be run with the maximum parallelism specified (that is, the number of processes given by the command line) + +3. My test suite deadlocks. How do I pinpoint the test at fault? + +There is no magic technique. Try to narrow it down by using the [sequential scheduler](#sequential-scheduler). + +A solution that we need to implement is to handle timeouts for the [process-isolate schedulers](#process-isolate-schedulers). Feel free to submit a feature request. + +4. Why is the [shell scheduler](#shell-scheduler) using a static scheduling strategy? + +The [shell scheduler](#shell-scheduler) uses the same scheduling algorithm as the [static scheduler](#static-scheduler) because it is easier to implement. We hope to also implement a dynamic scheduling strategy if we feel the need for it. + +5. Is SLURM the only job scheduler available? + +Currently yes. + +6. I want to use the static shell scheduler, but I have the error `MPI_INIT failed` + +On some systems, using `mpi4py` without `mpirun` does not work. 
For example, using: +```Bash +pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py +``` + +produces the following error: +``` +Error obtaining unique transport key from PMIX (OMPI_MCA_orte_precondition_transports not present in +the environment). + +It looks like MPI_INIT failed for some reason; your parallel process is +likely to abort. There are many reasons that a parallel process can +fail during MPI_INIT; some of which are due to configuration or environment +problems. This failure appears to be an internal failure; here's some +additional information (which may only be relevant to an Open MPI +developer): +``` + +In this case, try: +```Bash +mpirun -np 1 pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py +``` + +7. Can I use **pytest_parallel** with MPI and OpenMP/pthreads/TBB? + +We do not use **pytest_parallel** with multi-treading, any feedback is welcomed! Regarding the `shell` scheduler, we explicitly pin one MPI process per core, with is probably wrong with multiple threads by MPI process. + ## Plugin compatibility ## **pytest_parallel** is known to work with the **pytest-html** plugin. It is also compatible with the xml built-in plugin. -No other plugin has been tested. +No other plugin has been tested, feedback is welcomed. + ## Implementation notes ## +### Use of PyTest hooks ### We use PyTest hooks to schedule tests and gather report information from remote processes. That is, mainly: * either `pytest_collection_modifyitems` or `pytest_runtestloop` to schedule tests * `pytest_runtest_logreport` to gather test reports @@ -150,17 +316,64 @@ PyTest expects its complete "pipeline" to be executed for all messages to be rep * make it think that every test was run on the master rank. * de-activate logging on other ranks +### Implementation of process-isolate schedulers ### + +In both cases, we have a master process (the one launched by the user) that will spawn worker processes. The master then waits to receive test reports from the workers. + +#### Information exchange #### + +The master and worker processes exchange information through sockets. Master creates a socket, then spawns workers by giving them the information of the socket (its ip and port), so that they can connect to it to send their report. + +Actually, in order to correctly report crashes, the report of each test is created in two steps: +- First, `pytest --_worker` is launched. It runs the test `t` and writes a report at each of its stages `s` (`setup`/`call`/`teardown`) in the file `.pytest_parallel/tmp/{t}_{s}`. +- Then, when `pytest --_worker` is done for the test (either because it finished or because it crashed), the `pytest_parallel.send_report` module is run. It looks for the files that `pytest --_worker` has supposedly written. If the files are there, they are sent to master through the socket. If one of the file is missing, it means that the process crashed. In this case, a crash report is created and is sent to master through the socket. + +#### Fake-running a test on master #### + +To trick PyTest into thinking the test is run by the master process, it runs `pytest_runtest_protocol` for each test but with the following hooks: +- `pytest_pyfunc_call` does nothing (i.e. 
does not actually execute the test) +- `pytest_runtest_logreport` creates a report by using the one that was received from the worker + +#### Shell scheduler specifics #### + +File: `shell_static_scheduler.py` + +The master process gather tests in "steps" according to the [static scheduling](#static-scheduler) algorithm. Then, for each step `i`: +- The master process writes a bash script in `.pytest_parallel/pytest_static_sched_{i}.sh`. +- It launches the script. All the tests of the step are run in parallel (through `&` and `wait`). The script pins exactly one core per MPI process. +- The master process waits to receive the test reports. +- When it has all the reports of the tests of the step, it reports them ([by fake-running the tests](fake-running-a-test-on-master)). +- It moves to the next step. + +#### SLURM scheduler specifics #### + +File: `process_scheduler.py` + +The master process writes a SLURM job `.pytest_parallel/job.sh` that is submitted through `sbatch`. In the job, to each test corresponds a "job step" launched with `srun --exclusive [...] &`. The `--exclusive` and `&` enables SLURM to schedule the job steps as best as it can. The job then waits for all the tests to finish (this is the `wait` command at the end of the script). (Note: contrary to `sbatch`, the `--exclusive` option of `srun` does not mean that we want the ressource to be exclusive to the job) + +Once submitted, the master process wait to receive test reports. Each time it receives a report, it treats it immediately ([by fake-running the test](fake-running-a-test-on-master)). + ### Performance detail ### For the static and dynamic schedulers, on the master process, we must execute the test protocol, while not really running the test if it was scheduled on a remote process. This can be done by hooking `pytest_runtest_setup/call/teardown`. However, we observed it was greatly degrading performance (x5 !), so instead we just copy-pasted the `_pytest/runner/pytest_runtest_protocol` and hard-coded the execution shortcut. +### Design alternative ### + +Regarding the dynamic scheduler, instead of spawning a new MPI **process**, it would have been possible to spawn a new **thread** on process 0. However, it would then require to use `MPI_thread_init` with a value of at least `MPI_THREAD_FUNNELED`, and in practice, `MPI_THREAD_MULTIPLE` to ease the implementation of self-communication on rank 0. Here, no thread level is required (i.e. `MPI_THREAD_SINGLE` is **fine**). + +Another possibility would have been to use sockets for communications between the scheduling process and the worker processes (instead of using MPI inter-communicators). We used `MPI_Comm_spawn` because at the time we had no experience with sockets. Re-implementing the dynamic scheduler using sockets may be useful to make it more robust. + + +## Contributing ## + +Any contributions are welcome: bug report, feature requests, general feedback, pull requests. If you want to contribute a non-trivial pull-request, please begin by opening an issue explaining what you want to do in order to iterate on the best way to do it. -## TODO ## +## Future work ## -* Binding of MPI processes -* Schedule a test not only if enought procs are available, but also if the procs belong to some common NUMA domain (cluster, CPU, node...) +* More configuration options for the binding of MPI processes +* Dynamic scheduler: schedule a test not only if enought procs are available, but also if the procs belong to some common NUMA domain (cluster, CPU, node...) 
* Reserve more procs than declared in the test. Useful for loading and scaling performance tests. Possible API: ```Python @pytest_parallel.mark.parallel(4, exclusive_numa_domain='cpu') diff --git a/doc/images/test_fail.png b/doc/images/test_fail.png index 745c45fbb48d1e04051416228da04a0e719e9ddf..ecd249037c4414a154250161ecec68921d439040 100644 GIT binary patch literal 47905 zcmc$`byU=C*e=SeuL6RCB3&X1N_U6SjdX)_cee_NNJ|V2(nHtKA=2G7Fbv%dL&tf1 z-|yS!oc(=!t^LPd=df6yO#GhT6Zd`H*LB?!@?7gZ-$_~O)S9Jo3e!z~^X7j78?H5`_ z0DW&s#!|%hD?3a0+7Ivj4e}4O8 zR92E67_@V+X*aA|i*zf_R-5G^w|Cb5c~Hum-(O$QYtYa4v)kEJDl!+Lz{LXF)~vFE zr3NfRYugAF(pVZ}gJ7toiG5OH;@7nL{0GO1ip8#fE<(Om=YLdlouw%E;EQO>t<&;R zj=45#IG5p?4K;Pv6P!6q^S{B;K(PE%>9 zq3plq%;@kgAv8ExjA8BQqL{Hz^-X@?pNTyPV@;IHmC2!_r#G&gF>I4O*_a(teUZ;V zFI(riywrmylk=l@9Z@-z5uIfdBoZW&mYC#r6D&=V3^{XuJh zb64c1LI67(8wm$pg=690@$r3=_QPhR!p@WDiIo^s^*H?}VrImok*R!$8b!)PRVN0T zT$n8vJ2$0}osYho+hJSq=f%aZgf7QNX;+=kc1!kBT251x_CNjVabDBs@yxWBlF}(pViN7I&LSARb!4-1!n zuoko4&&kehuU46rJM@u-sJvxoGRpicUhKwP41<`(g$UU`uKp(F=JFo9GZr3u4DP_^s7N)o!m7wMic_NPhVm&G{T7k&r* zfGdI#&gJy`Z<6r%xOlIH^>l`!VgudU>HawZ4x{c(YR6q@vO-=Yk??0+CxNvE*m|S?vwN#lvYje%I z%t?{D4THUtUuB#e&t3#(BkPCW!kU=mT26g zTH37*DN~2zBbq>tnSxWX>T<%-O_zTrO6v8G&^*PU$E1U64%i|6i$^bM6+O>iFyEH4 z7xT@ySsiLW73r0gl?|nQDpEjC-wxwkf|JNvlai#CC>2uTKgSgbe5q9W>C;?T|M<$v zcy_k=j2{^G#!CwKTRFLb4HvJNm|ss<b+3DnhfDI)upeQRACPCxE{dWVDDOK)yy zgza+ZhE9DKxzR)}-xW^ciV(TfD;GDr9o`#iZ*TFxD_6bZ_y57iO4U9J&vkqvtw#DW(eiz`l~L7@@*p#Yq)7(*I+gM#c0QCam1_ z2L63?G`GO6TCKjElya1^uB191&Vr9Y*Vkw1QXc|MoTp1GS5J=?0{c&_SUsKBYo(u3 zl98cUz{gioA%$bJgZDvQ9lmKX<^wQ*p_Z=5=)F8Jv>h_ChmsL%4xYL9-cJu3j^|r@ zYQ2S(aT}cwF{STsERE8#e8U}G$a|Yl5$8UXCc&V-Y?UO($i$HC6`w_`SYB0aNJ6YN z1AmA^LdNPP7g{{}qYpn2_h;6(){jI)b(ux6Njy9jD{r`h*qO^p7;e$k&Vs0 zwl=6n$%pM%Cz2(Qo*#InD6Cy1_p2&7I0ATuE*^*-Bq9XFoJ^PH5+ z=Dc$AnmE(eJlo)UQ-v)g+g#Y6UPmWRMon4O<8+$U)*TOw_5b&;dOB@$ZnU(Z3t6Po z;Xy|A6odYB!T71z9)n%rjOuz5{Dt#_d|9U$R559V-QbRf9yV2y{&QzTPhu|pty|!|CPMR*LccFOF4z)PWo&1s zXK+ZlOOk_WQx%4K`g!W_&U!1@Y@gF=)EF9P?U0ejJrXz5-U1<-Q%&um#H8`YbbiLc zXY*Sg5-t}aNLSxbPsnUdO;K$E?r)=Vb)SmEt)Sz)r6<U^Nd2yJURNaJ&bIO1 zXi>MFOuNDYw?Ljz_ft-mH2F*jDPVis2a=DE${3V+z}p((^)TRlj#F(Tc!-SG^HFf$ z^5I3({d?eabFozTiFQf&*6#XasTWn1RdeBEtTFj57rRo9Zbqi7N2K#^r#_|}vxv-N zbh@~9HL%Ab<~Y9)qy{lgY0P~3Q4sb+(T|3#SEvVl!k3z}Bd-F%-$<(^TTTbR8!9c! 
zJ-Dax`D6Pzrs`LMz8}?3uC`j#zlzz-POz-pa-p|1gK-bAT1>ldOuzB;y=m1G*3eM@ zZf&*XH*0M~vw@HME(gxbQ^uRJVm+1pqn@vvhnMwmC*9vg^~!<+UY>9tKPLywyX4uO zouTPOQzT`a%gxbicIRWew6t-%A-`L(ENO+t)9W-Pvn@m|@|1v(pg^^t;QMzhTE&D% z{k!6s51*PS_Wk`NZ;2B?22DtSVNW zL6L6_HHubFKa_uFxS1+jdPlBLglMo6L|o}*0ZrOOe)`d_r{d%WVg1`knEWw7Vqz)qp&bxWfexm zq$-|aGdmAERAORCyEw{hNWp6Fo^k9kud0YVag)MJH~h2P^NLpoDZ>@aoq2o%L@yr( zZ%~xJ@fFxE%q_6mcjreBC*uT*6BzYnZjSoOkDji3KkD@Dic_3S4kAwGl@FfE?%n`B zBsh%R+XSiKoS2vu+LtICLGrA!FKy&kbk^_S$Zv=ydUi%Nnm9QBcY+e#9P+dAg5nY@ zvdLc+#S=>~tGql%|G&`C-bKdq`;)-5Y2s+~zYBZ5sjd?M&~TPQDI@8&vPd)VyfN1#4A5f?XeHi{iGV>*-%Y4IPO z_MWc|b9uSep1}kX1-YeFrC$`gVUfEhTwX^hOUzAy18k!k@g~4BUSq)^q#6GyM)PK!zda)b~MQ4DoHDi zfce;b`&Jd&Kdy?$-delKUc9>lPTvW?y8>m1^#^(9XdPAN7*O4KUkudn3h4U{-sp|Y zl45(AnP-PuTYV3GRM^``O6cD_xZz=g9nhLm>46#3(?8G~hN!fjw8p{?vHUm#Fw3-o zo1|C-F_)b{xWtvq>Z1_NIirGGYg4I}YQ4wbZ~uvEcXS{;HJx3U-FWu*4+zhlwov%_ zrpw2`d!pDc_s`D|24D}A>|fk^aubAd8eIo&_4bahWDnY5xd_btdF+Y#u=j0!Zg-E$ z7xX7|Z|UGyHwq|W+LhK8v^{1pFJr^=XTM;O+7uWoJNCUXtzw+=#D>8iAu^POutz8x0KDOMl?2}hm)ADFXTt{ zIagLJDW)^@57{GJ|3dS)Oh`ol|l+Z}Mb_Er=&~gDt z!=0)8nCnB}9kEhVu-}gQcMo3?ZxSk|#*R;fSX)KhHb;6TVW169WE`WT=|!goWF^*h z+?;sxCMFIwA4|TaXQByA!0dnA)6dO`KQ+EVQRlUItzt=l_U`K6S7=01fCI-(zwIaS zH8mh0vF^QKSp+z!(iqT!e3nn;jgTbC~HhL z2K>3$T`~0ULf^kKR6Z2?ChHuut{!XnU20t?j)Ai8naKQ&iLv2r0D?c0)Wq7URWi8f zG)I@${Zz~*OeQDO4LZ?0zWhN z(DWiS&L}uY7-^|f;Z5n$d$SFZPeXH}`euo>##A-r&R)RaQ{Orsk&`_I6@ZtFOiXI` z+Lv`Rvo$lbXCl{M0%!;QSa;oy5IQyR88fs`5kZCq*)Q>-`OqTs>)UmILeR^wH~PZS0j z-EQmG$i=|4U)syt`jDY5XqDkV;WZP{I>tAnhWQs(hozPkD?E~j5n zKfx2R7#Neo$hd5HKw{f-s-45Bxv^jP7ACX<`dqRNCbQgVrdDUG^AeqD$@V`Uh-#Z330KA_^GLN6SJMUM#-!D zcHw#gu1=@hSeJi4K-T(%@Qr?i?SoPXicgE~cl*}Fp}jV-vY(M*yM5_;E+sWtxYWkX z661d-U|uJWXb;{)J1}^*yuy*tMIG3vu6ge5>KZWa`*OrVC>@@5u+Ump&u29o(4h^$ zWpBIKua7JA&x5V3s^cM{6IQmZ7C#-nCKAI0?-ttoTdZHbCA^iDK;YKr+S-BEWpZcd&(BQtGa zpi`aT%ue3K2eePszlR$B`c=7RViLQvSAw{K&)-^mdy90BW@hTCsg1Xe(9qaxX}g(M zs*%Y5kUJIEO58K9h71g>Cnocg5~~%c9zN??H#MDcs{0&`%1S%!n3x>Q9wQG6o6}^V za|+de^JduYt~ApqBJG0(HRm_ryg5O`Lw53?|j$K;-q-qP2}2hLQybluwu& z>?84o9!fL;YnexSQAlp?{kl0ydU_pK zHvvB1tYStg6LVA3IGKrp3Q&uo@%f_-!_J9FXIT;hqflI2lyLZHT4UThe*e7)ISGu- z4l_kA&TxoS#N;Few0JkF;L5p9j)YN#sZ zsll&blaJE<2`+%-bzvmHeh6#LxLMXy-Jxx`(@--VFll~+>tkpxiSbtT&18Hm*~T~S zybs3B6b$M)k&)G`g3~**Y>8AI#DPBR3oPHd8kph+yN4jw9J}_<(B1`$f^)|go(tWJ zcFV6aa67%pCD3`pnG`82EBuzx)v)~Qfd}Ob+>iSCd{mE<3mZ3aow$7Stm%tLbE)-6 ziSDV>QzaT2X||bcbQ~At0Xo`n&c{}CG#8Q(3DG}yOu8wsB3oQq`T>eTMQUq{&}1+s z83^d`8u)D#2jh%0ED!3Sv$nQ3UKR;jve8S`EH)dq03#2LMpc@z_=JUHQr*tXfNc%F zb$a_kz#+M4pLC0V7xgkm-s2$Sp`b9@#_xj_a1R5viT$Y^N%nR^$hi~&PpVLnteFqW z6&>?_MCrmiJ@%HkB5uN3^;92O;s<0QGv^bJ(S#?Qij$N^^M5GkC_`AkbynWo>w^EXGP(DS=$oi95Q_b|GLoib* z+EK!JvY3Yjak& z)O*PsXOaG!$xXHjgMtQf++N-wU5AX58`cn#e-eD0O2e8v`TtH3{6F;^7m`1$2wECn zv}{c@9xXgH{lwR$mk>K&Q4z1xV{Wrb z?ovG!HU0VWf#b~yYt3;FENknB0fzu|j@i-HXo`X0`?33jyMciU`+I;DmvYVv2?wpm zw<2tb&^}T90|7ni8H$S5o6BGE9>PmD?h+sUU8(QwyCr+y!gL)=rur;MnRtz2sB5hdKwD_G+5Q z=LOI{5sdIR{DpSlf{G*p1@f(cM}|HPz3<6Je>62kb92NXQ!1h)Ph45!3=}fYmYFAo zQ}dE@*VY(}m*@Ji+r0e(qjtQ{nrE-Y_1)rinG{=Mfc9c~cYD89V1CSk3hk4h#}WDg z^ZMHI4GXg(^uvdSx*4rNTzAU!FO9k)Qs)Ox1E?kMPH)`Xy25M9D(D!QB4)4X<4iFA z#p!20m2Eu}FncKss#lA1bpVmzi4b9bHE<~N-#{bv_Rdj`j*0eU+xy@tjj2|qhI)vN zLCWr`-M^;?_)WOuYwV@9RY4BAUWJW}58dp0^MVAvul7p$=}r~Wz68hCOZum zcGh;U$*uSyk(HQ`D804CS9O7g#@H4k{K=o4B-PH&QcX?kr=YTqN|rK|!Pcfqk_cO( zd!eC6mAtyUf02Z{3DD#gH0tWAIIqs}%350k7Ds*JWsHnM2YCs!pa5FH=lDh) zs`^ZXO=Z9Hrn;uK>_d(+VC!{xJb$Zxv}1k-E0_mm(MwwSnY*z&M6+Mh<2xOcU> z_`>!+x;f-}jtmteUJC+LAZ~Q1>}gX~M8TK7b;UIHn#`gTz%NI9$3z>Z^LM`$V?yZr zn)AZ+l+^1@5!wi!Kk=o($;x?adwaqPR=qFIYEP*|4?21i 
z=L`QmLBn4*Yt%Gow+EG$28tKn^77;KuhSGZPjm{{F%_90g{pL1!DC8?#Mqtv8v3Th zr2BJ?Fnt>K#*b6i<5c)iDb82;04#eu@76=+H46}{$w_w86!HV3)PaFGl;x*|#c)+~ z?wZH5S>j9xY-~IF=iuSp#qC)h%bp~h%-o^jJ+HGJXtIib69_#38lz)lWQ}c>G8!DN z!BGqZeZOcdgPuf8mkDZrw6yey&Y>(yzSy#c>k)+Jco)ARn4AQ!@kVXTz)x{SXoeE6 z05%s#U^g|Qp();9yZ-CC&lK6PzOd8J!ItT!H#dh#rDaLZ6){6b(5LCHkgT{bR2=>S zidQ(Xbzk_##K?@#t9Sf~t=<^L+Uj@R`jO4Z7NS%H0+WgyHOy*sl8|GFp&pFvqc0l6!Db>E_@Z~o;xY3Df zp4FV!UB*V2UjyCtm#4}ZZFYL=Ds_eY$FqBMg^=oR$ao!yAc$q}SM!vidFcDhjK-SpkdJWo(@Gz6ty!!d^@1kWG*? zvr{+QNz;S7FT>Q0uEk3!1iJ=O64R4aR3_T5e?N@(KM8@pduPk6(*ktD&3;SK!AnE^ z`91Ox(XKGP+YwrspqI;gsmACyZrzb(UXBCSzh|VJHIH z2Nr$dRzbn+6yCC8x^nj_ykCN0azQD&>H5Z#CKu=U2v7<8BW?FYm36}8ERRT~|Jy9N z-8@w$=>8X4JwxNlg(YSSL>g8qAeLIF5frv_P{h4aP(>d;Vf|WHtpf@?o_7M^?aT%3I>|6~*Rs2u+mpgHNd9}|(*|KZ0P}u1p+C#rXts2CzUV7S=AgI0z*XUC)Vrz(rPQlddI5 zxQ%Is+F!X1rDa~DBLtdq2b0Z>%?ZzD=OgM#(1tk|*Aei&;L;k7vuVYTmb@OSD;LXM zX{O$6Dk{F-XEcEg4;GWU6I?$_A3@d@9nj@3eW106aN2%p)R|LZf5Vp^__ zLJ4#@U2If8od06?CwolF7uL7Dy1vrBJFBCvTNGYx=*xkXMx$M)vxwGxmA zWGFGSa!q=n84uL&3l+y;*oApZ^Sh6RbvH4Q*t}iiD>~GjiacH)Tas%OUDZqh0@<8z z>PfMJ)n^vTeKfR_nm)y#6f$yH*+4em*`3!O0xdhIqr=hLf8w@a2VPpumKZ7g87^CJ zwa~8qMz`_uMi9B<;N-eX`b-2vn#X=SjeW>b+iAh=*zpB1+ zrj9q^wE(w9se1aa4B(9LprOGzc8c7&Dh8=L`2@~SPo}4vzuVc?%;k34))~INwkVx9 z=FG+(v3J*0QPdw*II^TSEPAJBp=g3biTsh73E#4^;^x1hNL6q@K9W(8d&L2t6A!(} zHADK`61zTP+S@yhZ^o}Q*BG5BWR$S;ZVJS`UsCy*<8E#jxI%8iPS*VD6#NTryJOdy zqnCzKCK1KZqU;e18Z$E>E@hrs{b6Ldd0w&J8o5dEjc{nuqPggS91Rp5A|is*uKkug zMDqX}*Xbr=7Me`Y`Sp#9lb7Sjk~Jao&iqqbBP%}F`D;Q#hRe0%Ar8e`ZUfaQ|E=!D zoPGaa9BlB@!>=LC{g{-jMo1ha>5t1QO!3T5e=UNrV#xvD-41~-U+S3ln_WG4cyoXv z$Pr<6JjG&W&^$o}Z;vbH{w~sLjKXE}w;UU*^g7vHU)npI2;K&QG-)0ep%xPaS?8Gr zvIqGsjV;R>jc}JY(_y6V%LCs4$>7eoZJD?Xue|a{w^^% zt~5s`g9ZWJ@myS`4BoWc`ta&V?tImPZt0kHCO_ltuQ=drhHA9^Y8ZZXjGs(J*(ecD^7DnM_$^>|6)bBR~u5@;K^V z3ETRjNE}j8Vv|NB#DEq?$03V#*|kd5lq0Wlcj=dtC+D=RF!+EQV|lXRcbbm=Bvycq z?UkCp$>=Ee4<>VWoE&AXi zNc`>_RbFJfQp_))k_0-N!xBZ%C8gSzK%&i`unN)i0hXOmEW!paPjjSf7TcVnVkrZC zAp`Re2sSdW9@Oy)UY?E-Or^t9Xzrq^11$(yH_Y3OP+RMixCCnpE4on-Z-%vxz2Vy^ zw#3iHpakkNdZHQ||4alN6yMDx7~<+YXRdL?sLAFDt<&fV&^E`_HMlR-3Tcl}eUQtb z)BUwgPhr7S+|kN(;SmeqS%7K26&ID#c5*sv_HFV$z4W|NM+*7Ln=L0NNQ6-gaC6=q zFZ^|%x&}R5f}a9UR4}*RX>YE|^LU_tppcFZJMu-0OqHUjBH8yGE`Y|(govj~3i4|m zj@s$%Ta1~Xfw&dLbm7UBt-qzMq&OI{GkZ2!k7~NUpRY1(|39Q&6~*5`=WSSu32(Z0 z&+hfBDi2QhpVN->1VUe0x_)?prBN~{Co6c;M-eX3w&(;^b(9}yMKl08fQ21ZV(_6% zCTCJX#X7pN`F803)&5*PRK3vq3LW@~ls+2%Of4*E(Z89}@;B*iNVi)_PIx8)?!04f zrdV)r_3-lVhX6&^pt2szQD;e48u)-?Tq>k1sg^CQ=D$MF5?z&om}~MnsdYR2;s%RP zD^$AN7;!hStFD0U58z1GL^HyK=HD`4 zH@)mXs({c_N=DD7Fx;1rKB)cUAdx{fuE{^EPG)7NGCL?UUOby3Zl539o84|*?(Mc6 zkxD#Iw=4Y7w4;L!IDSKA zYN@u)41?w+03P3BA;XXAC&&VDQuBT@6;i!}V;iZaz^LoFuJrKlKn~TkS>LdbGI6(0 zA76Q8tMLxJG2}UxxPoe)*xiH$4mh)^93+p!CGy5LT}V<3WV6^C6B51703aSS@C)kU zxZLYwcDXAV+XrGNe7Dd1j;*HR(IZNaz3FuLP2R8bx131nSSgcRS zA!~Q1ZSLcaB;kV}*>Lu6#Ob1OMQ(Y@{6`8Vz}d;6FyB&~pqz=ae_2NHmh@BE_o5po60*`Kc|s3>H6TZ|z*WK|X;`lCxl`TVhX~b|KW_^rhkp0M zTkbyH2;byi5qjRgq((M6d3H~iOP=noCMWXN*QQ)7TZG1K^$zsM_N~gF#!IQ-b)@|T z-j4k!2N=8Vz7v&H1E-e4eaVPNltM4v&Pw{LR1;OL_TOmNI^l<(dU!fdT?^D`mdZfK zeMN&+=@yd{s)*tx@<0S^Zlwakze}V>tWa8L?wwUt@NL5|1zSja!T`b4q))ZiYtPZ2 zHY{2P>c8WLTc3(QxBw6qt~&yxuh#qMm|*D$(VVS>PMzXxP-B-_1U@AhWrHHzq1|CnsCuu2$+`CWXbzl&Vwysi8BZUPvGSkplr04 zTFJK72o5LR-(EL2G}Y77f?mmW<66Nk$J`+`1yWOaRs~n{eMsR=t1qWoMTvFqxM5g+ zX^)|hRuhtPP53Qka2x(q^)+-Xe}WClaj1OVLuS&-Ts+Bo~N&1jOjRBnIXP>&pX@K>uf}R_e;9@{cZd@ zm#j%cO-(K5i|b5@KK5i6VL`-1TgUUt;?T@u2W15;*dJe38DA~iMq%P_yU=Vol zn9SvT%D}p?5O0||7b%^{=AP!J_0J*Nyp~R;o)-um z?#I#-lv=_q3k#rFC%;8Oyx+bpuE)AZ-*gsd)59)HWrs{XxqM)V$ 
zfmm1HfB9(DQx>^$bO8`txG?F!+y8}?hVL$AyN)C_&xH_9rbQEXTeZ8*s*=Zicdp{; z>pKc7%u8nw19n}Se!oSt-}t-xcodf?$RCg^?0h=E@2)KQ53jRuNW=jf+#F3D1B4I; z)nT5(V-ilQovKY;iqg!Yos;O=KE6<@lkwRl*Gb9?z%bs5e#NHbPP4a?aorwD=P7Aw zafQKz?QX1A+NJG_brdmr2Kui(uef$~{7&+=B8v<*7BEOaJHjaz;Dj;+Yw5kb&8!M; z3!@#^6@azkt16)2eTewDwwi{x-PKJ1Jqsl6(qIE*-UynyiBk@OMUzvXrsXO zn=iIDEY}3pXMRbMoV>!;PW3y8@jt~4A^r+!vkNA$QT=7sr?sFJk|OZhV<7{ScTLEB zo9i~93j-^BI~aaJZ13-DbRafEE0gd4@6b~Jr=K%K_^;!X(%pBTaySycx4As^ujBOp zt*Ok|vW{Qp*w=Y@e3%8)w~d;W_si`hma7K@p|JfszhMeiSk)OND$_ag1DDLFEMek6+UPca6i0gXmrU45rDI& zf3q7?hr@37b}Q#BhB;gJ+b*xe%Vhi9;H;l3EzH1gv$jIWdfq%Px&@o2KW>}Kc@j)39R{m;uEH&wL{KV(CTHL zRuyzq>>3)I3)=v!Pm$)rCrHb_ym5=9@9V{89$Y;JS)8|Gzvf|ADJ3$efxhHT?#PDE zXPBIBv7n|3$c}NVNdI@2sj;UiKt2^9O}$td8tbFx;P4pBGr$#*h#+yi+&RR83@IeD ziqpjK@`mOM2naD-%}qn}eR!S3Vr6pRhr3+Ba*PozlhYNMN`{9oohK~x;B#HZ-KXJ% zfZ%+DUsHhB<@t@{%FuX&rJ(P1{$14#yE);yptrwz_bOhe0tKf{n5J&c@fEq;+Hy%* zVxraw_w4AtDR9`BE!Do&V4{^v5_msra}%vIK1cjiMST~>k9;vYK&$miOVxSBm^_A^;d;$23RK*K*^t&r}Odf0tbJ~?($>c$z7zD z_#j=PKQ9ukvcudUtl!MwwVa1dV+;t6@NIGqr{`2ApdrTHr#gCIkz){-h(ZWx3jqy& zY}xCQtY(!r_X7ORz53!&))Q6}SU?yPHrus~`)oSRgfQDZtkn4%~x|hCgL;3jU!F0-?X= z^j}%wzNd*x`OdGaqNb*z=DPHu2uNS@k4bpmIE}`F8Vd=!A|Bi018Rc6IU}>GDz{(@ z>~6-QqWZH#YyU!QhS`U}GYVXhID40M06gic;GNCt#neXb_5V|# zw8S9+V#mvD8d`2bfs8veZu|6P4W=5y0sH^ilMbJu`}3avt*yQF7Y1;NL$eJb3WUMT zM5wJUwbvJi=+&GG#f5lyBImhg9YBqFbPw&F##kbt!`^*RQj*4Sbaj=lj}s$j=w`+Z z-LCQky2nzFuw`IWQW6Qhtn98`|7TJA#=4CxRJx{)4r*(H-D{Tn>$q&*n7o{PCunZ) zTjD`1UtC;_OB`k`aU(TP!8xwkKTnd#%9&HRu(-*yv^;}*W!`11hv|FT-qM#2`un5c zvWF0BYinPhM*qke**tDtF)d|XI8PDqj2CuTPq1<%(UOYsyYM)yMLslRaF;=n`fX&Oz3@U$l>7w&$_SNl2m_0)7=u)m!R&hZc%#}Z_7xM z3cAsN-Tvh9lMz9ECHQw<|3C>2c*8J8faP;krx~^JUMurjx2bD5`*1!)f|C*$$Lk{o zcdnrmR$B(m5AUuw_bO&U>nF9Xeb4xKRb!(_ve&DUg+Lm_hJdSX4Ay0dy|E-7z4Od>Kwz-I#k z^*jpK$l83?J$vl^R{W=Z;nVr_e6RD`&_rCH_LMJC9 zC&5-f-9p5`9o!tbSUIIzASLqPsN8P%&*UE(cZG!uJjctM^gLq_e;G|w8BVH^8s)&r z@+PArm`NFWasp)s{Y#aEaL$DOd0i7x4tEJMCLOvstIEg3eA5RBUt^1*P^y8*2iy>= zZWw5Zvjw&(D~5!nu?9tJYN@SuzzqCS6b4R;h%`2snyEOVUWpWN(l$@lK~}iZF_qG? z+JV)TFRUr==jAncd2n6R7jQ;X3VHl8VJo(=A)yV5Fjyex*xHD253R80CY&1_QDQ1( z2Az(I0I6n#ZuAc#Q)3$dM8FmCLpBIA*t~0xYQ5*&`aQ1C<=BEF37;P6==5x68eZgO z-_aHvM5~xGIpE;nZ}Y(+BOpuErhluk z;B~Td+ggA2HQb}|z-V~kv-M>2`Ya-Nz^tL(HI$5{uA-$AxrHR&#-X2Qf#&j3`A!uD~15$Cw;(Ce@xH8|T^6q#qO+$0+n-+I=Vj*V>Hxt$x zEq5iX$-b}l-k^v=lxa)&NzS?=8~kxG`hUdOF?+Zw1+1oM;FZM1#hII$;S1Rq<$CtF ziZ&yBx%^>0zkh#`$g)IsLQ;MGC#whv8MO{(Zv}xvMJsV8*a#)YFk@M07k zWvz>-SJO~4;PH3d)|h=9HuqA{yz1hZ5ugoz|LImEVnnT<^S~BtHnWB8z=%Zov^C%w zXI`hq;c(lYNe47s{1!piP*aB`{L`&bXZzolm>IS|fr-frwZUt*)(@PuNd@tw>i@Of z0@hXwumzPVy^O4}{|xz4UNg`^16l*d;^=|ZB^3HP@_X}*(OA@oF=3Ghq=y5vKGY{V zycT@janSv&oQLx*w8sBecptVQR0e0)kDB2mO*LdlB-AD@Dc$qN?u=G{vU9y5BSQd?l=&{! 
z5zilW%*sL{Nd%9Acq@f2Z0-lQ^$a!(dv~UIY59hlBK2TCYdf1WS~K>HTDj&uv}ia;DLd6Ax`l{&h#Ibh3)*c`ScrKU^=@p z@R=DH1D`bT$DYWzy`s@nMVUJI%?Gud5_@i28tixR!(eit@+&0u(6#2Z|G7{&XiKjc z`tZ3(y`$2E4`q3Y;1-F_SNu3^;7?{Nd7uABcFEq+Obf6+wIutVKGw@yC+WCR^L(us;@@8%5B2o|Gn-%Mz^bvimmJ_nzJ13>)lxonNQ?AP*q;ofIO7b7qN{r zxIEA6U#f1h?QUyPthi*=ilkJr^;%H0E!uUI)jmG={aj6vqC`)wD=J5Xt$i8?7o`-) z_c%B!MAy5%pw{5QXmG}7bM3rHT26jcK=~FstSG&SS!3Z>@AuND7~wu&F(T}}reEBL z44rKv*1MaLNemP$FBn)D`g=R8t8-~qJ5iNgx2kj$-2DT6-WIB{sl)GoMw!nw{n5|# zY#FKAf5R_}VcRNpa|Z_~#z?yMmdFyoghFcfH;Sv57iIHQd6qs{60 z3eq+juE@6-`(d!UTl_xptW)h-$f(yynvKB=_8MpX=e~~pzvXEN+yvPBMoR5d&SswS zEmM-Ke-7W+rL?a*ePS=VJMef=yP-n3ahuyaIQd;STRB%!9Rh_26(sKO(&H=nbg`({ zlJBaZ`&W#4&q?w}-W~a$pQCj+Rvur4^WIF;gbmkmB~zSZ1r|Ld2HJ0pp?6hIxcqVj zHI1C^@oGF!0LYkz`sZGH41WZT8g$YdD^-5mrz0x4P8U8pKM%ssrWiEKe~{$B%Bttf z1o>>H9h~V4T@7sY4)*9kTkKl8Kg$g1Bu8WZ+!C)B(O z6cgH257*rLV32pYJCIT!;%N(gX{MG2EsjS1ElW2sAKb=eI2ZO7qWX5bH?GF0PtjLx zT1o8+`|0!fxNa-88Hg3=YZ4`ixCS-aTm0`wKYl#ydO$#ML;W>H*zX!}Wzg)B`ihr} zb2FgPN7V}OS3qmsS@|;uPEna zi95>A_|(B>*T&oi4&txM@t#y+QrgnYomn{IcaaG~b$zeZ)4k*kzLQmrK3xgJV5Q?J zjbfr?f@NnAiD^?3JZ_?(cdw7k%K?h0nDdHLX1^4a+{a+A#>~3Ey{g3`?(3oT=N38= zN8!prL%8n)1pWYNvC&!^Zbe04c-%kT?UWA0{Z}W1T^OaelFW_=RL4C2R1HiDZ)SDoQR zWfxhsxRsc^kiiT`IpV)mitpuWZ&EN1c?+zU)=M>S#}!e@1~T8IqyLP))^TOsU&Wud z%@m!~o_}h_!PnD#bpFyruV9x~=41O$`s}S1_7U*uB$m4Ieamg*9=!t@7r?IyemhYWCM+a5&QlVy!n`!q4GAF<5pLgz7mOatjSm0NE zhmqLQCwi74=|46;rUut@lz$8vqUF#Iu!~;+Bl}{W77WGZX=)?9<`rn8v{u|L8E&ev+X@&je3|~KlRPy-nOTXnJlTU&Pu(Y6 zCm-|Y)65#}Ix*jM&78QE#jnY)u->ecgq3pn_kPi|S5rw01KXL+blxBQWBqP{fFslz@9^GizB zH8!7&WIYK3%qSXpNzD57Yl^FxL0B-djJT7mS+a#{{(Y8<3Uim34&0rTYu%UKT9Vp0 zLa$~|biHwXQ@AS2idiN`+uhh5i|r@-?T8Zpq4>{me(%w|K>~$N1}44nWbQ+i^|a_U z)q?#Gt?h~__lN%%apwUPRkQ7T6f+2-f|3;^=bVEwfD$EV5F{fxXB9~zAd+*E97b{m zCBrb}JPZgps7sayQT^TChYFrd-dw||E<-9dwVPv7eJ;2 z^;;N&VLZgQuC}#S75PF_L|`bitYVTv35N1Nh{yU5wmhH08n9>k7Z%(7i&^;>67O3q z>@gofs*|=P!Icg@H3c{e9IFH?R}c{$W*2E*RfZG5nq$q9=7!}O(2OOb+)HW0&!0KQ_zrX?z> zW#c=~&FW$@&fTG>hvEhasZLAFY`A%>mC|9(P``t>XEll_u^nMO%`R!o?oEMGZ91>O5ZQo(p=^AXuOi3Jn^py)v&<$DNljdvw%)$Qn zofgvgP|I@gQp)VzejU^0(8D@GOj1N}z^=&5eM^H!@PZdG6k`8#ad4TQgrL1BmVElip3T21K@vcrf&u_d+V zKmPZhKP4MRh|-Q54m*_b>pxS^`Pa`{YC|&v@;G*17YgamrV*yqIc;^@DnU2VD$Mc4 z+Q;y?$OjAHTjP_Df?d~zEmhSoysz1PQ;rK~;pny9{4&kB{eDLX>Stv0R`+yB?@ZZv zd-jpU?JpZ|4g4(Zpi9c2=+AV5reBY<{GvncTWaskg>8{VO}{s#{8etkgE%w=*g6AAkLxkPKSH+%S-p%Y* z#+X`bZLj(+jgZ$ctt?mMQgKx2n!2yNcTwB@@j0+#rZlkgx%oI-X4337|(th z*Vx#Yk^gB+W6HZO6)jcOLiMnViqo)2{_BB3n22z?EH&B4bH6tdM2>YchPXHODFwSH z=345fW758PISdeXT)#g4QA#G9ir;NUAl*mQde~Iie!KPbRLsafHagLq}=M^E#X_NGfZtLu)i^HK3 zZgzulB(;wZR09c;njw^9@ogw`qjU7mBl|ewrOB*uY=uN)jXLoVkNFOS=JUnD`iO+> z)2tlo^rgLPPGVU(iQ?YW4>N7asiIz`0X^9Px`SMUc zrzADDXf_eCo?6%B^zEXMTo{ec+wR+}ItUDf?6-N#mw4CDqRGX~*2L8E(nOv9l*C_D zpqkuSdSL;=fx1$K85u%hB+I6z#d^DYafh=9T1CsO+NKv~6^$ty;r7i$_a2T~{7$8A ztfrR`aPM9jVwW-C0zD6@T!mL9&+4-{IWO*dKpv8j=BVD>Ja@v{z|xSln7ize*zRvR z<6nv6Vd0eg@36Au7Y$BA@-R9zmC zeW&HO4opHoi(JA;+!uyk4A+PqNkg>Hk5d_KOE`)X`PNxu;)~4jI=I&}CnTmXKxchZ zI{%g)WqRi53vQv@!_$=Ni86C5knCQ`Buc~F=y$g0e!gC!jtQmoSniGjPNae;wLkxs zM`tWcNvh%c&dz?L^HGSD_hE{N>$i|Xm)N|B^AQ^g!FRPvMaUZG;0}`^t17JfW=e&F zqot|(H0s$pfmU?1eAs8XurQvHQuS3AA<|W@ChX*Ak%;;iMmbp5fyi+buEa()`*(VJ z@X(IuqoeC0pz_#HTs|FJ+t?@Hjxc|0eo< z@orHeKBGi01gsW9)AwV|ATcg`kFcOq-IWYXhu1Owjg8Z$&$6V_TTYI=gR|at(YJ4w z&A~H@LJ+&nuE&$^+k9FLGX0*Oe^TOYi;POsQq!YWDw`tssyL-o5gvT!W0SKMGTe_+}>U&K$O2OW1qZ>qVZ!PvxdjcorQV(Rqu~=NNI}m)+HR#{kDAEZR`C(?(ne*U= zL7=_EC~i@6b!yS);ZrecQH1}(X^(OVgGs5;8MbEA{-LR%8=09PMW*(+>(2Mm0EQ`Q zCAHVs4@$OH#;}?_>IlTTc10!d5vF7Oh5g0l%%E4w#I{$q>7$;dNnC*HO{U*-t4+*H 
zKun5`ra-^6V*St5O3}XEH-8z$Ygezt!^X5Ukk$Kr=FZ4)Bo-s)e=<6^Y$tlYQo3Vl zS=F*JvpW$gLFJ37TO}Mo9(CG)>uVL|2IAY+KPNv)VCd6@-WbF($5gr1dd*Lc;dLFo zsQH@kL2BFI?zGia&nV$HZ?7(E5c6gStw@A_%dF#760~iq_txdWYih#58O+@Y>7E$y z-`kj4Q_2LY6+;Atxcl07r6O0|a$I?3MKhgh`N*{Eo}RAR6WlzlG}(d_5w1_z1VvLq zbba#`p4`*jrqz=^T$vgqbBWar>j24M10>D+!?mP`TjzUtdD6OP)%J@!fmzmV5~&9v zvu$fgxM{xJ*}4E>&Pjhk*kgjcacz&Lq2l(DwH3d1&hRuod+rsFk3nKLe#fwEj%md` zh?0E9wYGj75Ug7a&e#F#7}DK_C~7R;sJKhrc-UK53o0POy!{~Gnb^fW|LlBK$@cCd zaCspAhlp=YEJbGbN90y*4w*jy_aKjf;;?jZ2x)m$je4A0KW1M0K|H zx?qe(pypg^>+{-plcOvLJL_bNjn?+$2LtcwfJiakE&Uv$Q(9)XVta?m*R1P8CgzsV zOw7<=ntONxx}5u262m>uLSgBSB@jAsGCL10%BO5@Ol$!+P;>Js?wmLeX?$)wW+K-@8 zGjqGSO25O+H_NtXQF}+1^T*<@ zvuZ!(Wce_L{v}OsqsxoJ9n#~$I~*E$g}vqbcwLKbb`D&+Dc%P~TFdw;;(93DQ# zLsgZZ7A101C6>JG;v>#7@I$FucGv_G8QM7Edub9~n^nUPO{q^^G&a67+)y-X{xOjX8KqcP_Alzm%ZhfEDx%m}D6E%c zX{lP1`OSg18R}1Q@eN|3j4|eK-$GWr)MLIH>(YZuhqH3aqu<@CsGXw!s`9{XZk<}m zpHJnuAM?k=Y_N0GY;qc9dO$L7EurP%OMsE=wDdM=g(=MeS_1{Nt>fZ16w1zXlbNXRqv(yKNX#+MtJ#O9Gss+DtS5V^`cv% zmoD$_oXcJsG_G9ipL416sf&!Xt*S{z#JzuypUq_mWysRm+FCz7Eq=-s%rk-%`#M0K zo@j+!;H`g^Q?J6&Hp4{Hb>lj`yI0u9+268If2*y*mtoKmI9YKR5a5u!>7X`;5_dnu z&LC}z4Aj(|i;C>MW(@4q+3H)}Chp3WK+Uli#O(e}55weB8g%1N71ZM?jdbfKnv9QqFw47ti znwmXc51o-*XwJCx%}^PFl+Idp@=Q@WRFm#7Y`Fb{AZzGFCP$YZjx`=h!SLb^Nf1FCN4)$q6PYZdXXj;sIq}Nk-=Ko{1$H~C8gwpyGUHwqS zurFgg-<6X?R&iYM^Jl0Vl)2b^YT^!_5eF+i(Jh&m&!Eh))CKDPs|Twx*^BugfpC0~ zfo-!8852^kee>z~^B-KmBkOG~Z|D3dd{+IJXNKc~vw0xq-FCPb=P`H;i1#p_V18r1bZ0k;xLBItc8Q5-(c$ni;K-iAp2&@9j| zQopYfI>lO87w`%~rN31%J3HX*ji_GH!KlJ#ZrxOQ04rr;^q82tuq)WNYakfM1SueE z^CyDwuAv`(_5|e&yNV1!qt+bEP;FFHJTpj(+J*;1K?@7S*RPN60nY*CIc7D@V))88 zic?Z1Vtf7U?1aP2)Ly+xkle{YKH>vIki9}Xw-*I^IMgf}Kgd;Dy1EAzk)3YEdu)!T zQ68Y8A&O^2noet}l+}H2KiJnKe%LLTatREsBw=rKMXp#nX6Fpsk6I(vaD{{^T9T8xfVtHk|uc9*>*2+(!v75=QXgIylH5Ij8u`oGm!;i#I`x)7A9GTM*Msz{adE5~=*Vg|ljEx$yG$rzxi%Cu4{f+eD3N zJmap|@N7EY%*x7+jjg5u7G3t?=QrXlqwgX{R>z5nb4bJ4$2WfV3=T48Iw%?&bEZoG zkG?`1{L5EYR}P}~XRqYS6`8nj1;w!hIAs5QaM#WAbKNjHZoaW(n5{b-4GmXt#m^Dv zj|wF0_Z&jL;RoHNqicY{Y&+bqT@U~Il0QD?VImy;@QwskWJocRBhM_kb+^vCQpU(A zwPSccTOi3uGNYjtZM|m(Vl3Rbh|DJMjqK-{#_q}qD~}#)+j8g7(@{+~u4Fhy zmfC9|p@^vmEZOBDWm3Vr9*t{jYh%i6PlIcGwLs}?2s%#OK;ZkGJ4qFmyNQck)%Mej z(Xee_)7g~kVdA=nxtlsN?D6Cx?J#rg%L zI5=w^v{|gtJ;Gk<+s^MqgLF36>J6(Hw53^Nz=Y8@t-5-R~ccVbzKVYVvC!pIU`HVlXV&Dn{ zIeCFAKr}JIl>KW57O~CIkzS$A3IdHFLQ)ulxS5$YEg6(;A?V zFu!|z{aVP(410=@T29fkEIh!GK60U;1c?67o5{LOv~_f72*w9}fQCXhXQ4)?1!|av z)gqUct>dA3pWneqMf-jby>!*xY)%znB?$u|f)<!Z+I7|Kx?sZ31lo$T7XT)MZ12mWq5VrjUyyGrv=x5ob8 zx-&$3qYXt6FLxb3`W@%#YAf!!ZMC#TVc}L(PqTWFZ>_!^oYtjKIKT&3|)|1ndZ74?y>7I(uR{Jb;8wb|7Up#fEL>`9u#-lXNC?CjQT2aRqN}8;X z>z>nld)*0-^_HQvKYqUBk4)pH+ZxO)F0!i1O%{v@l9gsK6tv$wCW-=Pxb}%r4ng7A zv3#RLW~Pt`$vuj1sZn<1B=AP3z>v|WPpj=nVC4Wh8b^ID=cf7AZHloNUj0PaB{V_q zniR{zhl?ZGl;TbxYGaq_Wbc<{q+U_4k-a;=yX4`~xVbC~0e(X7$B&fo{Yp|I)c6;B zok#DE^@rCvRu04K+;Z34z0GvYkv0{8P> zuTS#v{AnPivGxSZF24wI3pajDFGhDP183?n@Y?@_ca1(fDx{Y$2`X_uH|X6dy3>A@ zrnjei61yqjDKs^nN(mt)ko~hX?k)Ll`$`!mn1zWoD_J9k553_8!UVl`_a*=W12>OK zPPPr3dRN`>w*Nj+@I3Fzy$I)0fi+$hE$bUN@4b+t$3=%ipz;K*EFJW6|A2qnBfxif zxRfm1)jj`Td0lQI0w@E^>UkD_VC6q=Qhhjik-a*C{)jV54j#dWI;qbmoojzY zIuahT#H#0BX~<|}>mAof>k0(c_Vw#j(ay88IYUF>aWETX*L&cq@)B|lRz6W) zPNp>z1VaVM%66p-SDEa-SoepDHjSEc>v(^{e35wzG>$xfiM7%l)3uanuWS$=l%f?@ zdk~}^egjwer@du?O(N;qSEZCB-j8i&)A6oGX4{bz{Qq`;-Ga@C|jy+9QSY6%%Cx#>EUuk%%Ys!k-vk?e99f$nHr zzs^mN*P_k3=&;d6Ek0to2Vas-OA~|8u&dPfRDwcs&hSOG?MIK|by(Fg9f~GQeSoW)suwzEkk{SGdIWG+o}EH^%adiN&0D82EN)wC3@AARo&g? 
z6;4iuTFltGx>eRVMLIfTz5rvTI&C-C=O4)1@Z(>uH8XWSAKk43#=R@a$=9B4Yr-fv zf|LgYKFT1fs_HJ5+i4Gi{cS8wT&01b(ZTehU>I_>2ls{DSJ$ZX`(ZswiV87x-U>;s zEs^O)v8CL{!*tjh9N`EO{Q<-&29!8Z)ZRJ86Qyc-!m&M55Z%M)9&O;b)gFrDI@srl zrc)(t89&DRUv)p%%HWhGwwT zur(4nZ`8ALsdUX8HYvcaJ+Rz%@CE#c?u$0Vslgj&i3-k8gNBHS6ZhA(m#>ExP0fyv z->S|`t#1ZX7OTLa8~q5YuwmFgE?a<_3AZ>05zDB`{#3C#{kZ&@cS_ukn>jcx8Y>@C zC*Zz!%R>%euD_39=Ay~UOdqJb%f|!R(%-@s#2z=4laT{lv^2_NJNf90x@+|D< zPjNdtnxH5LLHExcA^{(RS_YF~9Z? z@rT*ZpX0i(5yEjKMJ`JTRx+gV(c0=S<<7?Eo4fwAxI!;1S?qy5dwhsopT`s)P9@I@ z=mzqR@^{Gaq@;e$^vcR2nYT#FLLE$1YwH9ap47FEME84&``@#fzfl;*TDvs?ah%^;A)!B~mODq}_M!K&n{6>}zvG%}d zvw-hE;N^`7-e*MsV>q*BoyR9)?mHHC7AHnbOo;l!oAC;ae;hmPRY~%y!VJmI`Ta*> zW)u-;a|Qom0Z-=JQd`?+lE@q{d+Qo~oFeyQu*#b{Jf#@jxD%J(YEt)((??Ge<7s6z z0;=}@w_}hKpD0#Fc8>YfS7!>}-5o+C{cy+9=DN5Zyd&$dFN#e98g^|!#%KgDJDBTLTc(^RjnPPg?DEynw2?x;J8 zj*dLJ*KmFd8`=P}g$*`>Se|Ey*BA6QM|2XRsHiHa{TF8I(ABmxgVUZ>4>Mox1AWz* zn|qw?BC-``)8E?R^Wn{Q%^a`R83lpGiInreInm*Iq(lPT{E>Vp-M6w43m^fOHx|D; znskK6;^RotI)HEnj-`$}0rju)4Gt0a$NML(>l6Nn27xU3_@BSOj`tVk?2`(>3ebnw-C?v)h-RpzZ`pGWNYxbZi37YJG1n+`a__uxm4IBkQjIY`G$H zjw4+AV#DO+Le!X^A1sdv6Gc1i|5yhj0dJkfJ*^f@t8B@E@2k(vd2C z{ePp5-xan4lXlmfi1P%-_ECSDW0ESwCH`Y$BXGRet--WR5d46I)N_7AM@#o12`Qh` z0z^XNeUGWatCy2CHWp^mfo#hS3`*tf1AaaqTz?%PfJwK#cG&>#pf(i`Pa8S;cY4gd zFrbFTL!q{K|EL!02U$&y{FK@zm4bk^YXeZ7X#@nQKx{KJN`={VaEqg+rFvs3+-C0g zUTq|+j`dx>m0EXvQf|A>CXpK7B{>klZ1joE>5#4-11WgxGFrWbA3rX!>ZU|TmDzQ` z^k%&$j7rif-bAq#flyJgcH<6mj3smhKm;Y)jSd|Pfh)&XBpbliSCX>y5AO(3MD)Bl zvUmVX`XUpRJX!t0IN_|p&FXVl6$-@=Np-Nd>!`&@=JL=tc56aRBF=^vMF#&s?WSj; z1q9uVVkRabx1IMhQu9zvftA{`Z8BlE=`Y(72(R4%2?K>rMOJA)f|#t zUBWY+AG8?;uD)&{k&NT7HlkAbj;7G}-S4O4hGIl-(9xyZ26ivhdzuuH-zw_l_0mYUs3LjNWy<6YX76}{|k6vf)<#iR9 zbsb1}nk}l*a5+Gw&_ zN#ucB%VGvKGtZY2b`%siH^wI*WT2V2NqGfl`Z}1og=-%K6Y@})gEVMxx%CPtxQrKK zzqca>GhZnxtPE`v#Oks+Ij>9nDb2S-l4~hD8vi;sXaF+(#&ktI3pQQEQ&rDU+^?ivKxL9vg>y&PGqRY`f}eg4IZeYJqA_y~SE3Kye<4MBw0TUZ?lLadGGf zyq<1<^k)B00Y~$0F0c!t{(eBCtC_FGx^FKyc@nZ2wl3~+mww+mH%Z<^dffd-q3(tE zNnzoDxVFUktPaf3yUmg_MZ)=HFpb(k^k4B`=jb;k(X8GFu64n|uE$ejHX$pKa|FmV z8{lXDpN|^iMW1*CV`bM@pqI~`v_cf0SFh)^dflZI^$iVHh0pI{e0KU`on>{|+eoU` zUMQD+wBV|!S9M+z+T2ctTBMAPH!R28qcY9)E#l6r)2`|G5-h zHkBJLT5kroQDP@;R{gttciMltFJY`-4{SFdq{f}<4+y+UE*;`Cn$+7%-*Ki^Q0S8% z7TdKoh4!Sm9;y_K0VUr5XgS^5c0xllP0-0>qmmSVr21qZeDt7OCAL3(RITSxXa&cG z*(C$U4sSCtmv{bh^CH-ke1-JwFenK^|6Ow(;|M;NuG+eQ zT#hn!Wy6y+*IyloW~k9>+k=_MRNgE_P*-C!1s{#NsruqC4&-PPNz39dFtcGL&C?x! z9Xg#0pDgk3(EYS!Ty=vE`X)Nm;og}&D#k#dPP<~gf17F5zePEoh-&#TW|IGN&M>$s z4*jr-P0}E;(#N88GAqP*JUn+->{8p548vO+zDQJF^o~qodkCXB`J!EZJj#>a>3vjK zq7rDdlNH|8#o;5rw94RkNOsshEjv2{o1YyuKG@NaKwJ4(Mm`M}JSnm3Kl$|8@YTw} z&slF796Pus;#c{X(^Jt4-gO67_NV_NqQ}9BbN5*T#=!242;`_%%s)Xq)lVxnFV=HD? zg5=Nw$~iVp;lzQt1{`L+v8Ujaid8Nw1){!q+i8_4sTt_#Q?@i5O&mJu(S#S?;UNko z7I1aL{rIs^F?ZqPN4?nkv}oFUw|q}eb!q6p$yjdd)}fclA121T4OJ(3qdxlb)~>(r zlV>PWlvyv#l+#N}UnQp)8`hD=kISQAf%0n?7oFU39@6lY{4!au8)M9=Rk3JD@P%xP z{hOm6``vNX0hs~?!<-QzMh%@ySY)QjjuA{=U~WyG)kP@WzSLc@BS{0NT==rj4mi~{ zC#xqpcqk$Oiy-WwI`J!dL!NJijWhh`Jhz>bU5j@U7P)TQ(jBTSxWHT$80N?{tiI4t=Sm>pP+uq#ft_`g2)xbJx5$Xy6WXEGbnYu1Z}a z<_;Yf1m5UW}>v*FvC!) 
zE^(wXnH-Sp|1r@1Q%H!@1N${iYZ$CUm5Hmbq~6^m^iv3!r{HOX;G0}v0UJ5&-Ht<(1u~YVqaj_kXyN&i@^KBfQVAF|*hAp%0*&vfnGI+E%wWR_peux*o z>U!u4P%cP1k;+-uq^$DB^a^Tjbt@Ri9@q8)?fg>fBYaj+{l2pN6GP~4g+9We!UyqV zYxR^KA`=4oyqn=~2A*U}_Ng3xRq&;)MQ8`|O`Co18l4EhZ3sQKqnLA0fxb#B=z_7C zkH;n%{Br1~mE>F_9iyyh^w&-^8=T#bxKtU;D})4DPc=KFTHB!XdD^T8qMRX9m#g`~ z-tA+UlCbj|SmBii)Ra!!BY{&)7Jjqb4!tk*Dg_VMw4nW@f@Z%qVvTl=TC;4dZg=@B zi1-)RMfhQoTYuUu+!P3uYdpe74P1g<#=fOz9lz*5h$)n4VUZL5UKS^J z-lVYAm0r&eA!Q%7DvTzG-}GNk3gZ(Qd;8Ji&@rukc&*Q4^I}WB&Z{tMoB%jez}W!R zTh3e;BI`Sl#Gf+;e{cx+kN$T4YsRpR{x@{k%V>!k?4Hwcp%VeNF#zchC6(BLhBG{r zxkb8!vll20SydfI_iZw>&2M%$b#e74Xz^di4)i@XpeAxtt;MxtoBiC&>SR;llKB5` z$26rVdLqc<=BEMA1FSCj$U@}#(fNoHY!`W)gfbBZYE8JUg^EENF#pcYql|s8T-%!(kb&nTB-m@xn&fv$g zeJZ0Zl*DntL1c5q++PUjBU@4N4d4XccNa*c5q5jG@x9B|j8c}^?^hT<6*T~D67$mA zkv20AY^Xh*IfAhR?q5p|Ew=T_Y{7|h2dd`2Ca61PLSz=0*~7aq(koT*Fm;4s@?pkV}EPMc2?S-kjG) zDz4~xxL@p`V!bK4pfrGf+U*BsI9D7yU-HdLZW%i6Un5tp>0M$sc5Gt;kVs(ck1SmB z7$q)#5;piF(Qg5lG+Tzd%1w%>VIBADAD~=(YB*663S9TLAQfeuI53uXXd{LvFYS34#@FG^n&5#Bw?9y-k>pyb5Qq`_Z{!7 z$8QMzg`Zq9e>j6z{x{Iy*e+o8q+)IPvJQWEx$}?~s$QJHr3#vz*Jy&;3iz^g?El?1 zb17smiznj?r%6uMme(xEc^|~0AkXf-`f}2%_iUWz-#CqrQV^uPYLLLj;3t(5LdBc24#X^;7Dt!`0JN0!|rQ zlIQ9t^?9DXOk%@AC5>3K)khIsn=`*n{xI`Gb9Dfa|t_oOJ*&cjINl9l(I?;U&K7HkD(kAx78x9b`BxMWxy9;BW1PMDeDIXmmI5dS!7DtB_y)C~AcF=$Qv>J`LAj9IRmlPwzXtT-;$ zM3);XHB1OBKIAQ5OJvo5^7|egDL%QJoEyl%cT4zZL0c|iC3^2^SW(eSQ}Zdb*BL@u zT9$>ycIbbMuO-Wj395g4kWJ}+4g^}5ug|-pCge=@wQHu2!*_u|3wLriT3)ue z8!D8NxV81c=lrNoVVxvzNRfFg+>3NcsZ5Zc(Nacpd_EZ~b;%XXZi;;P8>x#U{66`F z{jjcKZh6a2Z!;+=NzcF+q2{EK)3N90xEqN=G09}dv>hvzY|-CW{0b{c&HP}(MMODj zO$~#ii0(cN3x4&ix||^EUyAls!czaKsOJmG>(ECZU~EhvpClqDYOl03Jo`4hVu@J$ z)Ym7N+HdurP~BjJ?0LVs29h?~w?Gr?P&bow6RFN8-LA{`{FOCx>mSJz3E1@3^@2PlQx$8?TE*@B*u<(KcSvAjnNk zWgijTh4Ax(#Mp3`urf}npg*Mq{UvKt?IcXI@bi zByMG;wV4YU?GV0IM({Bl7$r@TIJzLB(bby2Ncg|lxb5S`v3>hqRuWnm(A;G~Q4JAR z%)U=g7lRyQA75gGx~4T7340sl)-|Q<&>K>rQwk89*j~;LlHHs{SAQ)6&TUGu+ClE$ zq)#VD)6&(=DrR1QxP4Jy>#SStt#E?U`5q5JG zIHdy3-fyS$1B~A+0#PkJP#$l+BqJD-r0u5o{8z*Ixx0LhJHquxwSehbr*xCJ{*#2f z*L`Y?`9g*V3!qWKEG*QR9Uyp#w;@wQ_#ZxkGBRew*$ZNtGT%frxfkpGffJ4v#}_vYv5&85k0pVNte zoBpSnbp*)&JED!q1H zMsomc{i{nkaLhi~Px*Jobd+k{mEw}nd#R;Ucf45>av7VfxmZbUa04h);WDK=q|?Pl zBvm4>y4b&_N)33Sh!3)m=RW6wI}V6kfc37QorQM$Y1{l$?d)@(79*7k2j@Ud3+9g?)af0+#+q60nvdOSRXQmfMrsG{Ua?E890Q z4~BXVXP0;FP7`a)+ho>1p8d?TeLLKN=W34)l9mZi5gbw`e#1?wcK<$Rb6Q8o+1Ve& zGTS;MuuIXAAfa?)b<-dry=8eF>cq{XFRDf0#XZ#^ylcg$?#h?j?Pt2g~B?9!B!)n0D_l?*ZB_Wks>rP z1hA?ol<9g=;c#?xNs&babwv;u^bk9{NBt=U!y5O80xK*w@q{BHQaFb!IX7l7o>e^% zhxvx=gba-lMAimSKl`CNJH_?J5Oge9DYp%)}h-yH-v5Jo_ z7U9IN@Sk8`SMvq(N$PR8sC(cGHC25e zbR6oZ0$#wzk;@8OdQ?|rSCGCGV9@yp=^~B$t;Zx5cDBmvURWhA; z9ArGK_RW4Y<=YInXMx! 
zOf}tX$57p~@vr^XN>i^Rt;3T}^B4FQ@;?CIjwb30&|2Hk!bwTNpHIH$81Rv;Z@q?> zLy1Y64YeQB-23wNrJ)A1glfV0@cPK6n_VxW|6QBbxhNG8F+%LmexS@q+kky_ux#aP zOOq*kSg&=gtt{JT;QTe!{1@jhre}==>iy;sOjkSZv5Z5QpB@;Bm#h<|P49s%3H*q@ zxjVrQ!fP>af(E_$Gsu5fL|mh&5im4#(&`y}1b8_M(EkJ`=9;4Tl1k81*%iK=wGc|L`pfEZ*h*JLEUu=^Za43my~RGw zH9DUlkRPnxY%E2g2f(x6Sy?$!%hO~vv^?96&5KV`ptp$}OdGNU%Fp@!wk()G6;gRL z?f0#noGc+0)~)Z0hTinCY{ki4;Nvw=5xdf}zAI5(HA08Yfc%PUmMO(X-kXubG@6x^MwRO^%?Ky(9=&=Cn)6ZkH!GKrMO51S$`=x zGBbv7+yfCYS71fij=A-mb0}o}u~yK?^ttakhb)av*RMX81UoY69J+3f!4CFImkrSv zji67#6PtFz4tpy^W$MD_(=KJ|fUMt26b2iwih#}A56E<1Ao}j$WHi;TY8@4Ryh<88 zV#FHfD2SoO|EJK&(v^b4urL^FT!EA4C&@P^Q_lj7K@Qgzd>{uDY|whg&b0dM9_S8^ z(o(;M+M`cO7Su=`Lvcm+fud6J;<0fyM1-!X>6DI;5R@hMY-YxV1eW{l+mEDQoO$}J z4fPp6!;JyW7D%Ldk$;9TXf7X@%F-5>)%cmXpPxp3 z?;rX2jk&8Ug1Qc3_GpBC_+tU%UucdUk5hmzIOi$RPs}K~b<{mzxH|frXBi&<(hR?C zauzM%iNW`Luu4M$}PgK3nzKN)dw(a&Nv4LaG*8ha& zDi_;%X@cmJOe+3ew(tnQVc5E+NVibR04^>7;*6S)*-evCmC_f?l-nmDR#EA@d4#qD zwCH%CTv*Olk#a^;+W|=avAliPn@Ng1#*7=@~H6 zWoI95tfqb&W?^F5YuL6Hi*PH#@YAv6S#X1=pm_!H95to7Hg42hA43{*6;_jDiFaD@ z#QIa$y>>FNKa7#eX&a81V(^<%Hl&>>>@3}<9LCYI54efc6pAKysL82^R_Vv(Pq%$5 z%E46Qh(wlndkv&a39uAtnb63xx=m{zXFMM-T=QMp?!?92Zr0mYR?0hAzifagmX#>9 zwe>{5Nq4;(x|q&_V_D$=`NQSqa@0jV5w7D>@2sK{Li5>DUUuCM+F>(YnZYL@{*C2c zSW~VNalU%b;jB$dv3o*l+KY=#c^j?`j+yi-=RZ)u{9E_xEP7cLy)M_HSW#+P)VT_l7;%T!(rS%MnuK zk6U_BQGMom`?>izD5UtTVz|khO~C(F!_Z7H0m{=;B{iL*OaHX4u*mjYB7`yPB8}1b z4`GJE*Mn_kh*`@S3H1Z#W{ZJJ&;v$wsy5USFJm}O+EG0qiGr3y-V5`L#^mt<>|$=# zw@ccQ*9Yff({O$zHCtXHBA1cIjo0mZS?6PUudK+>PRK}KQRlNH(Iu|9d@kiQhO+kd zXC|<>yh-_GSF>)ZLar;n*uHvGYfgfsKbz8fMuiuBw$ruM-@nnfMTyT-!L$7#tFS+< ziHa(X^if`M=~6@e9V3rSX_tubaMQ8EnDet}QL#k!7dnPi6>Kc5*S=!M_4@2q&(A2$ zwcYhUw>kQ6i{Ttjl*p5WmIemJ`CW)vPP?uS6ToaXBxja#@O!}lG{i7~DOJwBDH;*3RzJF`g zQ#l)Zx`UgHuaz#5{%19DV;j0i(cAugiXqS5Xvw30IOkPb+I78}tNBfC;XSJJn9cW8 z=`0_g-1_)LxNUXH0;JY9hlVQQktCd8dEjhh3d^ZW7ma=bc956PFe7=m_dL9L&U}+v zR?=)HKu+d@R50&QB7y82rO0$c{gBZSCmw;h6Wa_H|y|Jq!;KY89L5G)Ob_WPw;e*V52(Xa06K6%4G8B^nouc%WJBsW%jh1bqTZb*so`n8QI+8wk_ z)Z>oDD#76;wS0MHsJuK6-tDqv(JAZpTQ~|A&@$1}Kiw$HgV)pT=g!jiARLg+)2P?? 
z|5s~Y85LC+wR?sXkPrb;(x79I?na~pq=%Lc>1Kuyl?DarM!LI8Qb}>>Mq=o$A@A|K z_s3m#efQpPt$WuxKW5F3nfJVB$Mfv7-~G%SXO%|RXKJ(F%`_aAkBIXep43D|oq63M z_El1B%MI-sJXJkqEsy{^xF=u_azJP?TfhWn^;i>UIa|f|lKKZaqr5rn?%g4u=jQmI zRkAqdyNpZqvyua01K1svHRef-#v)~{#b<;R%z5nPK(!gmvTwBdUOkAJx5>xW@6Twb-S&Ski5gV7M z_E~GqW1ZxrUt^@AlO+$le36Wd7+~MORffA^TdGi?inmAt+dRv_KtG@>`^u%Hgc>!3 ztgqM|T~$-p%8eN)m5(xSikW5$kJjPiO$`173cbg*-{di~@n|my_c8wuqn+uAX#R7% znUOZ+>1-;kUve-iAS5LH%sG)Se7ZJ8xK?hi;v?4Xo{(itQeQIPuHn4ZEdTC?4`H<( zv}|Rrk;8@j=~pkrB}>ZDlNfL{cBrDo)9v>WX#L6$s13@eU$JOK@6oSDOFVl)I@2xd z$=tK@DP5u*G3v*{@_;mSUb zlzL~tJ3m&jIHFu~Mb`e%`Lm<*qO|&VG*{aFn=WNB(Apvit&l;;0)B z@sLHr1uLMWqT+H#&&*^3| zRCiFpn!5r<0`W~mGye#SQ|V5#?%m5I0!Rq$u_!R{-W&RfSJaIEc;5lG+am+`GBH>K zHBE?}(zLQd`WBCsjL(nGt?Cwyt8~lNWZp8KoDRn5ATWCXzss|fZ{6|jIP0xdx@4rC z_WsrG18F77mb2>2)S|X-Cf11^gC#{Rq8#WgG&29m);8aR{1IakaptgT@q@*?93z#j zqnHy1$kV6C#(D<&8pppx91HbT;FPrFe24`M;PmaLJR9w;QGI1K)mb-XdnP@bT}oJ0 zRW|_&oZMEdEq09!#m7rK?|)|Ldupeo$x43jgVzaP%9e%Lf$LR15w0zuPmk4*<$et~ z0l*i48hc_hBUG2ZJuNe~gSF#v2zDEmHfH-)?9t$-5kdSkPk58mo${$}znJ@cI;%Pg z-s=4@0PKlWO?|1c2;TbMkT(3HGA?o`YyZr!)q0hoPB@J_xa;ub)T|C67g);X9}(p1 zY1|CgyZSkoVB}1KpY$F#i_7Ql3#k8HxqyFJ(g=DfeIECG$M0-BSNz#mzfUi9!KeTh zdnc4~*C{CWjEv4*U1mg4-^m@vlyYa}^Thc;0Vi(1{e=%#r2D|=4l3Q%#Y6n)#%~A@ z6!+bS>WNyLpM2A+7WoS186PIGJfFw-)R;wqriKY=bp-)%6DB80FDkC>h2n=;qc-W{ zZKAZ`i9aB~sJ7;B=LeaqGY2oq$-DwbI6d=FDb%-8i^+up5gukLVrY-u{)0m*r;RgN@QhmvwOg^W%4(fd=0 zx>lZ^pA6E|zWQx#eBJ6-jAz;AVr2CA9r88bUx*(*s0#|XTnP!u`%M|cG_e9kQ03QN zU2(2f{vt{XT3i)V+y(eFY5C&AQ%f`X8zU8x2JWcc30r8}|mL~-Pg?6L_A z4HH_LZTE+1(#6GZaEWo8(1S_UOnw#*CrO(2welYy!9|987j7eKiK*sR*2QTA&TW7h zLifKFZO?n7A2l@h+Q{B75t;ZtwIzUP!tleg*-J~eZSC~(BuTozhrC~Nq|C&KfmoT4(PK6F=ZzWw z0U?HKj=NQpzx@QlT4RynjxF9IJZ9VS+usz|avt+!=pAjvyVEhpDFS(x>&K5c+C0{d zJdJm=ZTeNgVYKEObaz*uRQhSSAKdGFj*DpZj+N%j5O=)2mb;s}{_!MEksI-CSSg0k48|EN!TR12?nODk5g_n)c%Yg>T@GmEQ{*KeIWO+8}zb3U>8c4jLCs zpB|kXZ=mXZuj*zWKUUuOHm%An1#eSm|0%^^=b^k%13>Z1Z;J~DUmX6}wFhN?A&FCr1AnA84WYK^2MWn(k&~w}o6t%4f09;L*PaM% zz>*S`PIkX)!+c?LgJpwFh|3-Lguc*bG9-$(-J#v%O)&(RqFf0xd-u+QkWga(wyr2U zJFn^T=lK;!>HAI`B6I1q`8lk-JQh5>KTR_;$;sckWry~yyVS|tua{KU7#POm;`;nV zO;AfDbI?>g#U`&Gpe=~d2TT*vkU(miT20?Z%Z64V9cj^i{_1{hZwpJ3Zt=1??KL=|J%jnh>Q)ay)n^)L?LNI?RCo2vH-$?&xWC zzBVv5q6Kt_qbmY2T^%j*^K){H1%~xc!H~$Sl0ZSOux-mA1LI30 zv~_27CngrLUEdWP5mQ`-)L4|_At3OCLh~>|naj+)CKwrM%mGRW@i3|JAnE82>8>dr zXmn+|?pH*Z8+rT^Kxl`xBrvueW)6=B0u+OgS=KNU+ZhR4r@Mk5GkpU}PvlXWI@)`8 zwmK&tLkFpG!EKQDzi6Z1Z~LKx12ih<&9^d_7MtDj^Ta?6;m;V?H;At|N=oNjTi)Ec zM5rXU*2#j>XVfnp;MeK%(~md*ZD@1f-}*&UR=OEl&Dujvz20%1;PZY-!$UZs=?t$| zMqUcvv8%VJ<@4nc*Y)`L2QtCEgXx%=pxk+PmW)6QDPcP=&(eq#^Vt1&Jm9=_4_7Pn zkMGt#6V)T=FpOgt*KseL_OKJ*;}>fYv!b*qs(sJ3 zwL!NO6*uOLtyB)|S{_6kGPH&MDjTV@9?6#>>0SxbT8rLXo&R%f@*;DwJs#&62bkj) zq^VWZg2pi#3Hl$Y!F^HYxw)3PxkqStduFBrc#7G38TJ{IfuHgZ1*W!LXrm*n5cX2Ia81JOtUaRR5t2- z-Uvetz^FKEPgM+aQzptdp)59t4~n_Mh@Fj1!IAl?~b)& z7+rfx+}a8q5cXl1mN`i|-tVTVHE8AG%Stj8yAi~UQNO*HmoI9ejj&_kmx75ctl+f1 zqO6YpIRk?zcYp7KIQl4eg2p^rJM;qnaQ9H=Z`*Dp${sVNVUV8dVPNJ^zm#v5{T@gx z-Fa5gPox=RF`eAc8_Kk<*!xGyi%Y>l`x&CH6L)@>g+*1Jy~D$*)aI5Bw^z_U;6CDA zM5v_GG0a6z;n!}&eLX}gm@k5=)tZkcE&H32^kKq{4ZsC__sKd#bY-Q6d<510bA0>5 z&o(KHLy6O%=}Q?c2+Y<|BEEeH^cna3(zSdM+B3;%EZ*QL;QFy|rS*98mHKPuf&Rsf zUmDW5vO9NM180`FFYf(4L>`Lz-#}cKVkW-gEZGVSpUGnmcMM2UA1EL@pR$v$->ogZ zI5|50o_y||ChjpdI_~97CC=y1m8$V7a|{K^MgZ_6O+i*l)7NwVpOUm`*Jj<(oKeA^ zZx5qIR`GVz``JgOaQ*V;AHQ$l?m2U&);|z<0Y4u+pRiwOa+p1X$b807V~rbRE(=)t zVo^LV<}mD+!CGyOtH$jH+1bflgm_5QJo}r_SRu?| zMTi5*V;{%8U3&m=4F_FiuYn?7JLtPS#L3yy{~Q^)UH9I~uiF$M(&8Ynp|$t}tl=_Z zNO@eU3_WNxNRLIZ(F=Z5IRO#Y2Hrzr|N3YI&r>Oq=)frb+e8H;Sj5M6;66lFQu}yq 
zJ`Y0!V$HGNYyA>xBE41)L8HZmk91&E>!w)%u#cBw?$v@72{cW&CU$rKc@= z;KRxKe0lk;`{Sc{4go%a_6%1~aBd?<^4q<+{-VM%PS&60_I910ZB9j*HI=Wk91!l!b6aXWY`C|tLReP>d6EW#I+U2y7HUQ%4R$)$PT`YzT5wTB~f ze$3bXtLtCNRHZY17%1uMWd7&VG2i&OjJ(A1-(@MJ8+iWHaw}5F>efVhB;&HQt&G!6 z@?(%%fhKD>!O@+dp8?NZD(F}yjNXl&sHpXjRSrK)7#$b|!->ODUie zodG^d_t`oEWfhS}h~Gaqz|Zy5_;3CJ&fOOyvgb?3=#`Z`EiKKQd`hb(!fYKdrK%uN z9EswNKLqsat;K^|`{CHa!Az1o!l!B=w@aVv2w`vz?6NpdQ|Kug7JUHhimwv(8t(**5n=(zx~+k zDG!9yAQ8AVxW#rO$N5=FDCm=^d+a!*GfmfU6Px0o*#JiX$cNZTVrya)cF+d2JXPRI zH@<92XqTv-_mOPc`$vPPd{MFf$w?{2<6EHBfiqR96hcEE4Hw^)IW}>5%H+OPu(Jap z12k1UR}Al&6p5^-?OENv&l2iPm4;{xsFS?sB(r@1SpwUFAc94F5J0ai+B-f(&Oe;r$D@pxdOa%5T0~O=4Qv!)TvlMgSa@oGxgV*Ov7;XD+4m z;=l|HNHUAN%|3_H`wB_f%z=eAbjFL~pcLS<(eu08{swP5Rp@nIj#2C7O8s4JT`dg> z-$i3?`CZG^EV%YpBedWmgHIA`T$!Ej7wq+Znx|x?r<=l~##z7_Y&ze(v@oC1 zc0QcZo`wZNT7zZxZFKGq;7dc^4obKaeBRz97D^Mqon68Huvg9PVtgik*=Ce>^ z12frCs{S-nG&L(5=Y%ZC={UVT#~0E>L=o^yV%qylM@c+Zs@MIDWl!A}i{F4L&?7mr z^@hrhj=!OA2qEmShXiVJX1yAbi2jDUg%q z5BeQ7PR2XHs$p4b*7+o2rhn)4^+d#{0TZVaW>An^jaj#M7i%5_muY@&_w)EuphDReCCYk-Yuch8LcDg3XOW z&5cxIYZN}a0&-Nx(7^C!k-}@RAbs=yoxC~v|5wSIFhk{#js_B9-& zpL#}qd#iYCRQivIqbXR%l(zjuTpF6Hs+y{fy0g6UeEPT@m^RvUaJI5h#WEIp(^pCq z?d|3-k&^Bk&4@uxq@XXi;Cvd(cBNsCzeK3(Aqtq-PIO@k_#C8UN7}ISR#$9@9S$gXSVqy%r?#b*c9v}96Ed*uK9YL-e zr4!*ET&J+HWov4KZDKH2bBN5t%w)z72=_H3_)o~#escX*p;2YJ+K8V9Z{mb8=pnQ} zie^12L0NP(zOw@oZVL|WrYwI&3F~DuWS`}`C4R_d9{7!;uEmq zw{Ftokx1w><0ripwQ{g|{L;;Oa7PFLm+^*Lc#PaJD_LI`FhP&p2mV*4Bzm$VAeYiH zMkZ;f5;>{?rnL={;(j{hujX9zkJn47qt)MI1%RdaADabQMLwYRdnYIxfMCHbY@FaeZu&5;_s2~hOLpDFgcN6J2MewyWsTaKUfaWsqU1B5T3FMLG29rmaQGn+qa$1zh4qX zD7V22-4}m<*Lo(tC_C{xQ%Gg5zV$Hmy~#rzm`?E7)rVOE3)gbD{F0QXBaVM)%qw^~cVF4R!D=lIa`D%kIR{*!+8j21UiJj#lPBx3+DP zJ)9^!61jaTw6$0AdcjZyd2LrTG_0fB~5}8?T;%pp49c~~v5zboaH}>zKZpd-Z zfi%oS#z{PJ!ZE`68=#s@L1bFUbqB>|%1X#E4hI#gqiNjTJkmjoTN;F!G2*$6Wbzo+ z_^}*`d5kF|V}=r4+uHoKa!iw!)`dgV5*s)D&f>$_+r2Na(sV&YUfTEXV3``M3xfi> z4o1y+h|clwM{wC^`o34I0|_vB{~E<_X@1rF9j|qWi31BaGao!ZOExU97h{$zcJD66$2Nx4UsOK?O4e zShG*zG_k^QvOQ4y?~ zKE1h`9G_Si`Gww}7;2oJ?&aYAMcp1~@}I{HpcnWA8NJ-yGLxyEv$6xXQ37$LBCy^gNZU^OpjTR)YUbzs zfA(T4yEsWr^OLT@Qwn(jj*C~|LaxRBy^zjMxj06L@9Z$bU^7mF=W9nYI#N<@1|#Z&T}N=AD?f_cXRBu)t;erX?Uwl2Q%# zns|!nr*0oP0L55=h|tqq)S!=2nq=@kY)IuC$&<{RX9NdJWQ^Ef9i!Q(sXr$tsWf^< z-_YIZvK3?V_v)=QBy5XBo4r#oywg@^Q5Ol>2zk^TDSpdR7kNEgpeM&v!3p6=0wT^^!&zl{E_C~XmKYf2W9n3)q@ zKx)K}@rJG{zvsBJ2bTq(Ul7-&S1THy+};+5kGIWEL7u#_LQyIWu;#jGIlsK(6>&qE zSMwd_kY_UAvuD=@_TQqzAc!^<&V@a!n<|no|hC`K-2bU8r^ZOYUKv439 zQ>xrtrR1~U3mTI&_?gfHVtT+D;DpS&rz?|ubpA>X+AA(rQX=rBczlzP@N+&t5^Qz- zZe|?Vk(#Gs)6`zLIqBfAd|AU%XBAa@YUFti$;-nCnuOK$Jwt81K-;^SEZdl?lX2@CQy)YVx^Pu*csXnS@+RwJMILA+lho63_JIg6Y z6brT~dz{G#4fQOo88mISo1_cjwDuSGYh?K7ob}-z#kgr@EED@HY!TIxyzN?8FC@D# zi@Y9dc99dqX&=rNjLeF9*R&|TH!q>dwD8S>PUlkfuO!_AN!hFL6xYD~01L_c^w{F& zHz}>eU&?E*9)SvVyRQ29z)`?j$9{+R5djWaH;|A<*4A%fQN$TcsQY=$417NhH zx2`j%!--T;T7kJ)?QGD-CrIs{iH@9H?%U~QG+@*};?E-CANPHQiik+z?dH^+!R~KZ zoUM}H^zzo0K6etf3f;lMJ@R|+N=nMcCou&z^zBl_TQlp%PFc8||NUvqma@Ha2m`VGa^lU#y z(TD)>)=91%*(4&2SYf6b`*o*YunIVtmA{*N)pkXHc`HW_v>&=zlwIzzFa&(CD{3dz zOqYo~YZoAdFK9+y(drW43&fy!z!3=RpqRonz?gv8c7)B#y$|@+L2g1}f>8pg!$2l4 zJQ}6hBGv`&FcT+i_z5E;HjL2oOj$2ZM)4W$Wj7zPur460rP;-2@`;T{vV4oFX+W=E zY-xc}pD!J@xwsA-oI?rkn|~(N4(SmwG^`yN6&qJdF}sz<#$Z&mF)>NfHCXuOlKvXY zZl-r(7EPw*5r6Uj!OD)sn4dxGmrBS>_ zo}E_2^$;(j?ZL!$!w#!bB5~xH;lMrIm;ibYyjutyn1V0`i~|8LmQ*I3q1NMMm;orvdl^97{eleC%jIQr z@3WQ)1g*K-J}mBicA@$gybSKjvjq(q%7bc|_*}-j^chOknD*4Ll-FfBm)}atMP2rK zhOve=b*Y8njNDI`u^GvN@1~AXXaLOkZ~{f`{5S@&SB4(S5bGyjl3J?Z_lW`5OlnGE zfXhz!=PN?B*)ccVC8A7>mZ?t^m}~*4KotNx2SOFV?lH-5uhS`KHsNE8+R8_d5Dfn2 
zhHeDLXut_-4ExVXl#zVX{5t$%KWj%@ZVN#EZs8nac?43B@Ul5i3r;R@@V z)|H_(-vE!R3MxRdlhc9LWOgev*^oIvB2WUNVZwEQbAZ=u>Ob0g)T}D4u<`w0iaFPV zl7VK4YH58htgm2OJ*$Hhx~fV`6H<{dHB~41m&^Qj*Ygm`dk%|a67BevUJ1p;g%h)V zqG3%BNz|TFfHC1 zHE{uRCDFmy_pTR~_UTL5e2-_iy|UE~j?qC$@Ok^9w8IW(n}*Y@&LP!EQdN*@ZR{tC zEKK9&D6fQIj~V^%@h-vb#|NJ%*PD8`&RPpr$s{pPax*w0NHeCDjV)*-lLbevWUs_w)e0Fx?mE zO9~tu#*0e_5`4o31Athw2vbW7Sf8F+#ldRX-q4+x7+|xcpaARq#)r#&KIHvo_c)RV z6{f#4w~Q8^sq=lXave%z4kRS!4%X1g8*ZOTeB+!vBNhYZ5iiRFS~-z(yEBpBLJ>s1 z$#%%#zXzt9(v8H~N<5m$adK|KmbBWi1~mlAd5D@h3eUc!pu*c=5)!$&D4tbwng@U7 z>Xqk&m(LeBn(mIpjIE8K9dJ+g9yJz>edI5}3y3eQClYrn@Eq{%c&tmbikUz4DGFGC zbO!(jIE(;ZhgnSkfz*kI`5>K?=#07TX=+PL^rz0iygYZ-@u(vgV{!ksbb`PV4o?2u zg3s(?k@JlkDv*INXEy~X2p z1YgHH69%Ui8}E9kZXXA(5yS|W2m-_eGQgC&Hi=rX1Tpq879`z~23#O41kpMX977g- z%5;weN@vR`Df^Ndk`@oW|3OBb*6i&-`j9>?I%DHc($0tHNU^jBU{hK4=pE*GtCKzH TLH^#qhIR$HH?n2YCLjJAGpE^1 literal 45134 zcmce;1yq#n+BQ6jptLlSf)dgoA+0iWOSg1)w?%h1NH<8AfP!>4LrZrJ9m9Y5Jp0{i zzwf*E_pSA>fBlNE*4)C}bIo;~=W!lEkb;~zCK?GE1Oma7lz_g2K#-^*kb60g?t@3( zYM`uvFDM4m;!w!lpTA$5^JBmxsCE(>jt~es-k*Q>Ajzr3;K7GZlCq)?Hy+?&vLJ2S z;_pKsPa%@fH_C1^yYsH(^5&1N$Q738%fwVFGq?kqypnS?xcrz;7H-2_6d?%hDdYAiE^Ju6Q< zB5FtnuXWz*xgMCT2-sM)qnJ=fZC-{HG+$#2J;IE!;`K^wAEx>o8WzgI!O>1BbY-~s z5t~xjfk~tGSSmhWl_0OS%BIP>IKimziOv3K2d<)8${-6~DrsU8D*Vg&zVP8f?K5`v zAX&{S577f%;me2Ga?T-gEd+F)ADCV0ciY?m?-PqY} zu_y(7ZaZar;ulf``KxUYJdd3`Jg(RResWn(KBwLZ=hnWGaFM?M+BxqBn~a48Lj;x9 zY|AL>+sxrS@q_(&EABEaB|g%3QNhalg$^y!*R!xP^;+f1=|+{lc;@7$2G;t!rF$t? zZ~<3tNm0>`GCe(!cFnRe(;i>vNS#W7n&|w&W}oc_lT$Poh^~f4q3wY#a|#c*DFZPv zf1?yRa{kTj?Z;7#jaR*E((P?+FRLn71$1FoXIp{;&Xq2A)*{wAV)vsFD@_FGbjie+GrX` zm4oVPc@L%w=I7*adQLiBe?+2{B^yHFEY+%gnlLX9X)u<4TBTe4cIO zZlR*T{dt%TtdNwHlS`lPm9>@9Z!~@l4ZDXEv#tjd@~0k9@ukDkvxj zd?5yHXlT&!Jfq6OX%868;NKX_m-oabL-W1Z+?#bZs7RFVA&6G2Os!ddXDXRU<2^Q< zBbQc{AC5P25fqal==%l=-xz$t?qvowTKyuaRCtl6|6bwnlc;pyX6FQiS$05A&HaIP zBlXac>U4AsCT6~T%4n@KqmGV_P{y76inN-K&f3ow6I+sndUwl_(X~0x48u~$*O_<2 zqa((5Vo)uJvIyVIO!U1RT*XJ%%O^0$#>Pewlv&nP;e^2G=;-FUy?U?HSg$a14X%&mymC9LVlrnGwu!n0fAJB zBqH{h!1Epifvm4OL@{qiM^GqHise|l=o+?^nrgk1M`=SEN=~7PIp0MGS2vX(+`Or|~GX>gqHmT4=uM{3#t>z|cm@ zWU-q5Ki|3y4)4$~<5uNOKA5Pu_-t=ckWOn0^yDd%HsY+@LHd|LNKc}wvho-mjj!5v zDpx7LeDwOb8f>74GDwcn)!0}^2lYuwv`fPu#Vq0zNw5O*h zyHi#|e(39Rg9Z^2OR09Ve_+{zi>yZLDkO>r)1~mj;3%Po1AQM zx$TTLW}N2QohwAgVnUPCHm|IVMNsE>2=4Qu z@I|&bpGG3j`q3q&u+I(SMfgFFlGXO_>@M)4uTJ)jHOp<@=<7Q<^)Glol=ebNP^+}6 zlm_Kgds{o5ajI-%Z9|q=xJ)1ErM@mWve@72_KXafcvTu`(9zK5h?r7&OjVT_8NtPh z6}s&St_sRmOT{PP9Yux+lZjRR=@2cb{R4lxhRWhfXc+ z5#alt_D6R(UonAtqpS73IFg}Ng)yFT=C149io@D^?{eRURToMw=(F5v4+T$h$h^dZ z$;kZdS-X2QBTkruLo6M)s5H2v8%jS7tj~J^d>Dmlbv9DH`NdPVyD-nE!onG{7N+wq zSK}`*NjV>{qaOIrozGNqF*Dy5kAL;?9!na_t2LjGF8}(N0%5n3H&9VwuWjK>x9J}p zjoTEq8g4WsbX!^ZKwGMoa@aQu{t5}{R;@hyv+d%g4Kquj#jsx82CFtxn)2E+jzUAP zv)F^09UibP@7DKHq!Y>;Wt;|w`mWu3SQQn^?c$NZjXu7c<4hH_^uONKpp#4AwiW*6 z+^8ibWf8J23wth&4lmVB$B7z3!Emul{i7KtEzm!gY=n2uUYrD zN$Ly-XXNwn;q-KU{l$fNLEVN&yCrg2z ziHT`^d_2-|(Mw2ANvYlEL)tBbf9sUe(6C{=sFw!rCO}TUCsX(g*T`4}?o;%w^f>@h z*Pp1^BO*4kz)Ie1vW0~4T5N6^8yUqeNIZRPW@#=-sG_WlkB|S{(Z8sqpa49WD!8%H zY%QKd+Fhm+CruaxH8+2v`A$41vruE;y0EPlZ1WG>>u@vii*|A9r~>ixGGiP3%U#hb z6ICyv3>~Y9N@N_cVs{+Rd#S0pxN02k=T$0Ee(|fb9!u6wY4y_L)t7fni&ftI>ea(egwYZpAI$=aujTGjskE7@?&E|5D8dheD4$#e&o5BE&3$yxuTef$b^RwB^w}XGUY=wZudoB9^O+ z)+JV0`K~5TR94n<8#3Ig(P?yIM4wpf>V;CJbS>rk4G#V9Er^W`lLk2#d*c0*--b*H zs(<4w*5>t1`^5pRI+a9wShU|6(!*GF_rpU)P}J9Wsm~fT+S59}d!O$QBZS}X&o5S3 z4$;$z=i{{R1z>b(V1Z)xx&1FZei3{q%L!gBU z`@(-K(Er5v;79$p3;(fl|LcYS_UwOOfHctGglt=kHH^^?onp1833-Kz4&5^D&g{=O zsz{&JfWjk{+pBaOVGy0Lamk(ctCtOPAgt5bjspU*8y9?LsQH!*XYJ4pk0eQ&j;nWI 
zFFk32_ob4`^JL>mHc3TAzKK`DzHbfQ8tRutcA-y>9+#Rc{9q%o|ezn%# zL?&e0sL@{2Nz45tX(bxBAqPuBwopTzt*_*_zW3(_drB2()~H{ArCBIj-JI!rby)Tf zSrM?#7etQ_FQs>qbf-oJe#hZ7pL9!1yF#%dm~jxFguPxy<{W_LUR>?%kc?$oFTWfA^f-ovTw>Q@NCxJZI^aPjB5W z+QU+~yE;21rOZdZUxFG-%43n3kU{BifH+I@jE;%n1vuvR0|GCt>k08@s|Pu`*R@>G zxXYwL=o>4!Gm-sQ|%W22+FJa)uZ!sGf9&5h1V?}T)keGG>dkS@uRZIgG5 zy7aM{&UVRpOk(5X&&C1+ok@Ls&k=$+g#`uLN5a0xCrQ@)F4DD&s|_u+`lV5Zsm0dP zy|1XL-zh8a+M4nkMK&jC+G(i`lXUu=$g=IxOKJIQ{UzNK1p4l2!IKse?_VOiV zp(Q8&x&uI-6ar4ay4hDK1s;)G3BRsHg3nQ3ThMB#a4 zv!XgW^JN*OJ0}_<{`>?%!L?R1-5c;A33`0pCZBA*#g=IDtxYpmtHr^CPrlAJl262; z&`IlA^~99&WA)i8*Vc*^9ee@;*9$Afn`3_-oCXDDLMq0YnX>BIG z`MOmqCXAGS8wMxp@{%_(Mzujf(P{C};d5Sn)O8Yy0gfFgwj=E9VD@BU?k}CK)>pD5 zB?)(Mm}|O=T7j!WuU!ad#HJaf= zWI4fMHk(J#G@Y-;TA@y6cWD9E2qx~tv%1iTnGT)-85#Kq74;z&1v}et9UBj;;@kMA zmb2kH0MdgGE$><)e0;m1KWnr-TjMWq+m{@hXh*%>u&RfY`8Yus^1)WNR3f%-#hid( zFVKgJi%Pe9B`An9m`<}tg86-7ntx3iY0^;X;%klPy0-}HfRd8E8q4c(ZiXd+37s;w z;(wrxJ??*^josyd2ugHxbkm{ag$AXE06~;b8~p=)Xxv61$)KHAQ~$#|XWsy(>c;a+HH==NN6Wkg`t3NJ4f zzl+k)xPYOHB8x1Qi1`uj*hl7GDTt@!>N< zcpxz@9*)TK_4wY$HF!qu-+gB}w7%um9?7-O1icf{J1)2&#K;($8Xx~g`QB9chn&Z4 z*YuFQiiDID!hPgj0_qBnwe}F3%xabC4mW~=;8bb>64^C2GBViTA6tTBWGrd>H8V4c zDvkjk4Xg$im#{dUrluw|HaS_frWqZ6JUx;;gv6_n;R`PDB$>jDSIHaHX^4MBVWK@foa0gM0R~hi6A_G$I!{tMa)aYl(H03|7Uq zQzU;Kf{TxpmjWtL^dfJ5*1UN}ZP3b=s3U*`6)W<2^E0)6I-#hjm2^Tfz-fEfXP>OjnSw&ZKO(XU zP+auP%%8*Jg}PLd^~g$JKOK&jQc+Y?R8WZQS20+rBJuEuBMG?3=QK73=(LBfA}=rg zWYyzNTFpep@wU*^)$m#NuU|P2Gc&b&Zxop|e&d|(jqMoxShJk6tqeiALNSh4P`~lU zG$(_r5+>AMiIpm@A&>z$rPq~|Y+cH*ZJ1!7f134W`Er-z-53Er_jqBL*7cGJ0so#Hvp=<5hj zTRFM7bX#srm0vu2_H48KJ-?O~|7?p7(XD81dWGV==DRbT!lD8+zlP|6M%Q9!wtY`Z zQj+M}q9hY*oU~RC%jN7TD+Hp<6EVRhbA0R+BK{zG-6$k5y{J$vQS%3zRj4Qw^Gm}y z7v~s0zHV_ri;8NLufraZ{$%k~`OYMl`9zUuP{i4PMw;9cdD0uA!b5e{Y-)-*d< z8Mal_-Cm`T5K$g@F2}+zD$bCuEG^AJi^X;%wwkZy>e316FmSEYpmh#CJv}ArPa!9w z^j%T?dV;cX!vYviLnGzuth?M%Sh1FLuTWOg`9ADy&rp9sT}jFCf-S&?JFS7JkLDnz z^^S#sfn=SnzMn54fuX6iK6dU$KNIKk`u_ZeURWyxqA>r!Vkd`@UPeM{n4W=S01rz_ zvY{+HMfc{`308>9&cLwuK!eZE57V#V6#m6lC3dZ3f{6dsf$K=yoY%ZV2S2i?oX@qkYoqln$RduBxhH)oW<<42X!2pBxM9hV7$E&2gY(qRUIlhJ}X* zg+#EXH8^_@WZq|A8=BH^NEe%a>KmI!pq)vQo4^R9JhPr>NGl7Uk0n(Roo)ZHE{t1Ei(U$t2w0X z6amu3|AD9ZzoGqqoi+a74k178@Q^tH)|N$j`EoJ)p>zqx)y~A-^udNdCh6+hhM>pE#c-4%YnEsO-Ekn{E$?XmZ-W5ZMNthbMKBO5caRPLIaM7lr1dmU3xAAwCcOjFt*} zPFr&g1qGWUD|Gt5u}QhCy4fc$8$&IoXtOT9JYXSh4fKFDC*H{K5SNM7SPoD=U(U<`(T>gfj`7~Z#0vwj2AXg? 
z)ar)AWx6m#!_DZ3cB6KgN40Yir1S^(5fWC0?DAmtrQDZ?3Q&|AhpdzHS6A7Lb@mqZ za0-_hK$TZ6gozjfM`}Goi8Uq!OC<))kI7}xR9;ILtt#IubZAq6C!H-KyXw( z1l&PER*8VcY9OGo7g`+a`t3?(fZ7F!G;B&i+N_Vad+~RbuMW$XQtIxHWP%^ehA!gu zXVs+hd)@8zTQ%z^`jk9!Yn*4nYOFHxva)9A6ZcSGYbGY>9n&8lpOL>B&vP!OK_|kX zdh~%{hj^&Ys4Z_ez0&`4KH0pA%(=VwA#Z7aV~G&8ThWk zu5X51=~8$sYhBM>Bx#HnImv`wPS_V?qCdz;E5WY_?#RIyDC|Z&j|oxvU`ulU$JA7# z3c9oNX{6_QuK(BLHcFxG(Je-S$={z^eY5>B#Uh+WN5@N)y1KzU6gxidc}^MIaI`3& zA>?y>eqnEjkI#_)On)qQjIyb=&T2$PQnFBYzOh)92@lOrt!(~v!2Q<~?`;SnUfyZ^ zF8D!EYzi-SLT8>;-TZL!{sB@^(dKAG1v${Mb{}FrEu@^)x1QamNfYE^E-gcMk#aBOdkbq3qgYPGUgTvi`i1P;%^b(I zhLN#*Z7uuNU|Qzq=Zizd zOWC7|A5u6q1S8&sdR=F$BMSZ&xG(UMaAgh5&_zaXzrTfAi=Mm=?L;bYImCB^eD#1r zPvW~Jp?-1wDe~j0$_~bMN4-q~@7`YbZiFRb;oZ{Y31mrPgc%kPh=c`YJyyH-=cQ6} zP#?#Tya2C3kC>Von>~#Y@ojf7@}=T4e<*Y)=q$LEc$9c>n9V;lh~pQ&+(|&SLU-d+az-Xe0Fg9vsRMfmFeSV>nrs9UZl%W+~%edqY_nnexjp z8Xj{QNk}+Fm=!1DhM?=UD^U91bi_)n{!l;}A&k24AHN9SjgK8j5hoAw>WCHNOwCM& zDwA>a+JvE5(&)=nM~HI;mIZAQu@k5$=OVw06K2S>$H0mEZu{sDq4ib!_KD_;)1bNJ z0vZt;gGRN~#&54w{HXY;Mf~#uTii>no+PuVFf#g2KJEzKg7GZa+WJr8FA^!yZcC3x zo8c?^k8`IHkJ3<6J7>iS&?-Zd$TXv~g`ghWn&?hoSN!D#kWtah{;nyxiF97#pZ@I1Ivf$&G`Jre_n7q&fkU&d9TJy7T>72>?9Mof@;GTk zcoAl}aQTwg(7KLs5t(M_NmbxNAa9&@rm=wqW0to8bync=_lLpo_28|@#LkdkI-*95 zSqTOPfgXf<-1{MgrGb3jdu9<-$peo?@ZXMWLGhrmGg3A%0cURI3uX`J)4-2BDzqp1>G=- zi{3z?HqX-s42iDb%j!TX zYANa&hP=MTdQ~rKTYniCa$&IdPO-o0QOK9&AlV}W>h%m&1JvMWiT7)34dj@6M)Hel z#gb~GW_h=h!Rkr(-yxvBfFT#Mcq*?C-acF;#rh1J8S)#L0~+`3EqffR+2&Q&!$I9i%5=D zkd|U?|G@6$K4E)ZBKiIFAq5?38fqnKI)CjD!_Vx{GK?e~nxL(gM2El&48z_`Blp9Q z^K{Wz`SFjLEOD&Q;{pR4aSQDRk3S0e77oYhI|U|l7$T@`* z3$-)-wr@rZ8h58>CKx1BQQup5``4Kf*sTipXLpj{HqY{d&O)Kkv?qLh*-Zj`1KF1t zPw+QJ4a_)1=-iwV#7*6EL}I(-ri%&ylYpaCqTcV%^i$y2MB-u%g~$!RGrOrnEy}tB zo1G_;1jC?paVjEn7SzcRvb<%g7wjXd>^O>a{$=n@h=?XQ8RER5CMwg>i)CE(voG?j zvS^|0y!}MQZ#bi=s4e=3Wd+nJ|A4mcCFVSS1Kij~X6JabvP25pwa2JIx_EndW3|h# zw<8m41Gjh;SrlnUp0N+$DUK%awDmGJ^r8i>KX#0uPuawas!P1js8<~kc>h2VE z;Cjf`6Gb8L4Jz!i_=>b^25F_2CR#C(`f$3_SYeEAy5ZJym%7XI@nAc|@61L;d{ zs7j@meSHov3ME8~LdT6Kj^)C3UC$$m;1NYCr&eLx?lw#FVaho^aYM-9?1As|!b?o~ zUsCM<_e}r)9s4f{=N86Ifc8C)_1$N0)Gs&0XL5MBV4Z-wRE-sks-_Y9Dnbi1A%qWf zTgia@tK4*^R<2uDrXxo#jEn2V2>y#F4=Ia=1t|$jBF{#D&&tn_Q}Sf|tE;aRZ2Z!xe&%r^ZO${rrqVF;_?T z>hS865wSDnG0w#vWmGcmLyj`L*hD@(R6BHT#F6RrBqe z0^c$qvg}CM=e!r%-e9#m`tcU4A$nI-_IsB&^aXIjoL!#m>}C}f0wqA=KXBFhF$^_} z`XT{Li5rL=sR*Eg`|PY&{#*83>0L2WK)AYVHBi=|P{$3{>V32J+dO^LQmvZ)p`nPd zkoFg7e~=WSR9Qvk(yK)wkx}CrVTj+`%)E8sswH=U1wz{lAQ#7uD=)^F%4`!)aj+GP z>sCI{n046&^O>BK)WUXET62oae`!HNc8rtx%HZym>}mFe7J4RASpG4v4gpCUXs zISJiNBrY!%iYHv3w4U*rZw8~;cj*neja!!W_4Q?Z|{TZ25H^z@w*VhRN2qd$};sK%g`e`H+At9mEee?=-8K{8nS~^!` z>`mcGZ_wZh@B(OTRRB}hLk*4lj&Lz?tlgC(;{9uD*!PZsA7Age)cU6LQ(o>~y4F9n zFf#+#ZX@zVDUT^#Zn zYAQz|FCmfon>#?KBs1R?c6T#4@4Gwi8yN_mJ873A)3Gi?NSvEFc@Ng$-Ih~`6BVV6 zW(!C+&y%ky*Ee9skba(aEQpIZJOX_GZkBAqU(e9pX@9`>5f|DAwljMW4ai0ssF)+?}r8r$elg*+xC)Vb@~@Cys^wQfng2|GhREl zMMVWtL#R6orIyCh64FD!Wu`foYjC+%Z37#YvD zZpW7Od_+T$|Ju&a!pVhyZooGhhE&zm%GK)jftq(P-+D*B|8u_Cb*R>s5*^-|oJ`Ul z!0S}CrCsA7mF#G%G=1s&zo6*p*6Ixfn zEJ`cuq+4Ypa=6F;itZl5_dS}x58pW;O_qWdiU;~2iSa45j9|;i!~KZ5dw?3jvF)~L;_)GHn5zni7#z`b52PjHip*M))rAwk7jn6lU~ze1G)Q&zecUo5%8hVF!p_ip+W9w z{_bUmvP*eXptt?sHEsn0U@Y=1)6>#r$QTm#F!ME&usgTmUTmZVWV^)AbptJmZy1caMGI>bxeEHFGKI zN*9H&!&6Yx!ByHO83jev^+ebAzD>BcLrG)#@e3tdQ!KzhhKkyvlJ#bF3mNiMApKo!Q4i(e7_zw#%}QD^Bw5QALzz0 zG{{I34oppqz}2hHE<+L~tu@P!)HPatZh5yK1J+5!i=~hA^J?`?v?DeqV z<@nm1@jq&C&A5fV0{wC@jc)<7L2cdSMPVVS01uEdv7CuN$=yt$VxZOjX)AS+ce#K! 
z7#xiaVv=Ui+#iEPT|_=YdLc9vV?(mcbm-F9)HI<^*8O1E-cU>wDwh7r zS?uZKFT!3PRlmO2(K{f!3Iq$6mTSISAr2r zva$n%werHVLM%#oYU;)2WijLdyt^}1u5*csnwm56&!{J@FK*|r&!b0yM;u1<#&@3O z<>-}r%XJND9iZ#t)mgKR2ngaB5gYR9LRHpXHa%x40#j4mC0q`nFB1&EyEArMuR9IN zYVlQ9$;rrgoFha4quDoCSx`XF%{}=o{&$^?%Z9xn*yrliMzH=TN?~E4i_M;`CbuX( z%{r%^gKO5jg+_nq-KJR_0j|J^5?tZpg(Zd!8UAmuozO1@( zcX$7dO=&&bvU3}NJ#pj3V%v)Aa(gA`nwABcEof9%VJTUL2G1oQBBqUvC%$+OV97|C zmgp@ytydRXYu2qY(timMo~t)^w6Y>gm@N24WE44%ejGbj_}RHXg(u_PJ0VkS%nkS@ zAa%7H9m4d9513m4=S(VWkLLNPRI_G!tIB!8*76pK#Zlm8pJxCttsZm^XhnAb{p0ZR zW|CQJ9s^?)pB!sbc*H&R<3~jC)Ci4F(_uHe-d0CW&U0XG)~hlR6GQ2uDA8n9zQ9TY zuoOO#caa}S$fSu&jL+0W#OJ#GUij->lJHP~zaNm5ykhSw3nX)SPGX$Hs^n4yCKi1H zcbw{CW5)>xQYs|@F41Zhct)LpNxZwnXB`h5xLVxq$8LA4VgKIPN2b)u1zL+V(XH3z zas$lf<>Ghuz6#at?QNG^E1moy5{|h-3FT_zF|n;6{7G_a;G_`nI;rm$Ji971-BD$B z-JR1hv2dqB_iF=s<;l*h+UPgXfyig;;7vnDM=Su{@O#xt$deWNZ#ep0tEj3{a2fnM(Qrh;cI@rL z^RPXO6`}^`8N3FtxbGTQ8)<2;T@L8!yD8Z{=H5T-+TA6~7|=ySL(|s$Y{(ib>B8Er z?H1U#7{`;Y$iB>`BadFMX(#RRSR2dL7?t@kXCzs&%^Hk znVN6UQ{efrbceoM{@E*f#E?% zGuwvd`Zn`|ZdxHbLRO`x6yT*^dDS1oZt#HUD1}ewK?LEYL zRc|}=8_AQD%VM*pqpSArDF}MJnhe$lNO*x#sFRkC&h})^%F1fIN}Pw~dSZAuabw>F zh=DyBA85MiFXdZ9YAl{qTaN|n?2Z*Ds54>Jd&OCr26udX^Aq%F*gzh<^vBoZ+R?Fz zTX)195X|%MZs4A&`u`TqE2CzI`f_sN>P(ZPqXtS!@08R$Zq{G* zXBq~ag%I|$GB9b@ecbs%1v?maJy_tfFr(G5GBAGwB_Y|?mZIQs8L!%44Zlq9@5PEj!{ny#SVe8Fg9Hf>14Ubo?O{3?*&!WXihh4wj)KuA&x^RjOD^uZBu%%*X z`B*hx=^=F8uFvM5Pd#ZD3&px(blXi-1W>C?x8b`=G2w*_GCl9)55!chtR+PR5k8sr zx|phJynKe3lusT4jLFdPYV(-u%*%B zW0`K#5&0lnlmtFG-2E{-%^Dug*7o)0Na`?jzPg2JDG25l9qoL_3?^`lJyth^0Bpi0ofvFJA^D6ow-Cd3uv*icKzo$K)G9C3lKgdU%jA+oku@dXkFZ{@&+y?ZXG@t z_GW9lXWz~gyJ0FFE+qC*)wHm}JEgJS>uc6$FO@3k|6!lul4_+AzEhMlpvX`w9JkH< zR#5Qu>+9n})dkUwNC> zor|ZIliw;WDjkPfq{tmE+PmKB@0b3lJRpuwABGEhWPVc^MYghpyWL2|{hz1A#BDDj zwfaS1JqH?@5&KK&ulZU>8eJQ3E-tR!htn|ZZ!x-e?A-Cvmfa$ttUi;D%q=TRPmVX4 z^^JUP*4^71y|4}p5sGr*z!je^?EAA~8f`I?2MKA)_Qw09I|{yD?|nnu1g5*>MOFJt zrGTy#;FB|zIxS!7o2mStqi47vz4s}o2t$@3=eEVF)ix8+X>|&Px{ol?N!h&${6q~| z^a&GmBU2lJQcNB7D5DT!9H6l~k?EhVn?!{F@%2|70d09GFe!s(K(?W-tN~^V(y7uH z&t!1WtUZ+N=+?}kU-mxy^u9i!;4>P&sL0L`gI?%h)Vti$o(sFH#KwVS4$VC-CKePViJjBQTMtIa zZDal=VVCef2TK=LCtn_NFM(zSc!Bih0!#e=YDOpojmJ!|RZ zKLNQUfJhDXrnotk6;*vsublm8LZDgI)fmi$h>78Gre;_rO-Qo3rc4U2xJR&upP+gh z*rQP8#cB#t6O|$&1}>fEHeI$-HQU#BKDT>THJn_w$hTaMXLH9UCHHSy12h0q3xZ~W zL|*52cO{SPZd2A7jk@3P%iV9#;&|pjdg|j#p3)=%9!!|o?)ODMxDOHV3IRx_N2(|TrgB%2PZJoO8zq_ncu7u+g!cGH2r#aCxOZ?REZ8eP6;_k#DI z1|-Am8DK!VqeSra4-Bj!N<>jr2vjoJ)EMXwuGR{4YxpyG_v?VM$z@VGjZch^k57A$ zkJ{rg;Xjw&q&6YSl zyrx*veA0z^Zho({W(eJC9w?wT`FMjSK`G?XmOH4(nw?DOOAs5F%wwr(@cAF}KggMZ zjiX%jP}*#IIP^~*tl_*(IAE?QSDncRAs{uVTL8c+aFP!sbKf4apG_a(5#1n-j=g*Y zk-GG%dd4EOaQ^GX#*yLP!;`7WQ4Y3Y%5}EsMr%v%iSu8QzB7)%rq$eAi210{9 z2nZ<<<*(X+A68(}<{LSiTh%%Dmb1x=dZ{0Dj@nPz$}2R%Jd< zq?c90kxL@icNc>L7kg*d+owB?&a#>{7C&z@r=d>g3xcyP-c0?ZmKGKml|SXfwm-hC+NL~-6UMF_*geh!kLCKJC#!4U56FE#|C zo!2MPa^%yCHlxrDt{v`Jw_&6Nin;AI*iL>r3d$AWdeit^5?@+&0+*S{zVOm0`p#s@ z{=$HmyqcPpmgi>Vn|6Ob5YMPcuRs>$8P3 zDsGpYu|q%fbsu2U+VtJ>I?8^KF6hf{y3huuv74a`K+3k_;V##}QdV;Fle-58g}m2? 
z#!L~bEw^ioYOV)|Bcr1rx57K?{c&}2qLK!E?;*m`Fx>m;@XvN}@7jads?{N6f4}^k zJTdVuuR3?-@OFQ|st0&m3)?|Foj~+2$_sOwbj>*Jyva=8re(iwqLGgequtJ{Y!t+2 zLAevs;(qbsg`n4oTW*(TkCJcE-lg_$AHTQ1FGGl2trv?*OUa*H&JB3`1a*nVZ~S}P z*j9w&lhPwu&&*yL^ar1|s^*CL9kJ^VP37eFbNdp!gEQz%-+yhj+A?SjxfgA!;a;NS zdei+gSDKJT`v$#<_Qyd<3kX=Cr3y+y|BF$&?dCc3M#|R>x5{bRXcehSHt)8Vl_WE>RIneyQO^_=6NAEzhh=p4pi5`G&FVX~Sj9_S6UC-}-yZS30THr=K^$h8+t9<{-Mg>-8zF6Xbk5Gs{93ahF%wt;fjl;Ov(}%sLg=&- zoRqZnhY)Gn?`f;!?y?p$lQ^uACQ??3ODNY4L%9dJQ2RS(@WJ+e_Wz2A{Ow8j|35pj zn)`wF!uN32wy&gw2{!flVMLK~kpRsd{Xe)_yPis5g}Gz)uC{D)1(PFe3XEb5aSZZ3 zqw&l-C3!`a7YTYamgChH+`Geitv=`~vdyU{Idu`X-`(A0nQ$e!0hc1=yZ-COPr9v* zaybkIIGEa)xVR=V+fhJeY4K0Dk+RTKSlS!Q9|DFSpSZU9dDHPiLr&X;(Q@5ATeai5 zZ%$hl)RmOewpo4Cy?MP#9CjOg-h_X0gH9QO#g&D}u}u2<3-!LOEppji(&bhoL!ufg z*Y-ckPQKJ#IhW30rRQo?4aW3&(vTV%8Ii#Sd0huf*jIF*?|uUl-!L{X1(lZ8=(k%S zdEW+nA^oQpz}b=~?8!^_75~!7$x@xekC=z^VU+w=0E*i#Hpl2D-Oy5BYzD_-D~ck! zqmE!N3N@&di(-qaU~X9>1bj0;CTVPhlMe&y3C+#YNXe6zdEb4=o8949F6^}HVH=$w zi*@Jo{%+*^&CN(Q`St86uggtrd*nxwWD)?VT=wTENlC?_y}i8;v8=Ek)q7kVqDq=e z`zF}}Yv%hT96DM2r&FF~{84k`1dis-HSdHsJy7SIN4^V9a z-CQn)gX-(uB+o#zsI}3#O9{yLGhWH?aDJ8*Ld23G)vfs%n+~9b+P+m5sVg2 zW{Ze{S)K$IJ?Es&)Qp64c(fj1q5kBu41D=#sXp%x^lxvht@#OiRa;+|ol^=)NSXbm z6)eqNqNniKnh1Xr#<@&?RdcuWd-NI*YIpFP21db2?MAPfwqRMuwB1+z`cvu6E1jXE zqoY0~I2AVS2LO$mDy;HR5!z z&<-8XmF{Zxy&&VOF4k++y}Z1v3B!O{D^DnUo@_QcZEY8ht%IarUjp;eksCBNA;Ig#_#P-QI;wA;3VtDd%*O^7FHJ#ohC$$SB3V(;ZbpLj3xM zTLI@@FgnfwooF3?nGrHNtbW$_iN|)Ksc|q#(DQdpm;Cj~K2>4uZZxCv0uGKMoKV%o z)qc6kKtHi+d0WCh;g?FG}xgdqR7r%DV>UpufR$PIn z&BV-v0>dU_cd&BmPeEM~iZIr!vo-|HrPt6J(4L*)u8PXabHA%{PyR<-C4S-k^e`3r zdc$4g=x6EBj(2KmbvvdzeBP6hzvV=;vY1o2rNY(HHSIyDYD$y#(8M}a}9vfY zO>yRxKnj0hScBui^*JSoLBvE0r}6HoW#^?QC7B1|R#sJEQ=Z9Q#wH)XI9>W_2!@k6 zxa!B<+UT)=n}Yo!3#Qk9&w3=#Q%OhGSa+4>-Mf`K#7E;)h}6D)TSGXz6kdtpe^vGiWj|NI0ys^Ynsl1ZnFn52?_W`5ndFx z?^KwhZV|WNIF^lV{;_}mn2Q}8{WA0(!N-*+o{O)(>i-F9I ze*72}wzk&Zh)=!*Ew;q}EsWC%IQQ5pRS?X0!*98x^~QJYkI4V8%!3`%8+kPzMnjvrR-@T*1Cf|mr?*yMOg8dO|Fb_PdG&Iu&6_v4 z2?fQLIr(>Rp7YWlOBE}re9ji8Ko}d#kEN4OcbF-ch>b~eyzC7V2?z%Y4lqU!e+vL@ z5V#5Kk~q&ckR9{nQs64#nw>b#do!2orSDxp-o#?OK+yzMR1`T^&7ogwOK5pmbx~qb zs2O;(O}ZijyI(JU?H@1_xNf)Xn{@OcI^SQY@K`I)?&L%FdmG-Qs;H`}tQ-r~OitDW z%FOXpu;D^4I2yQ*9snuH-F8-S?d`jFN;Ne#edx8CFxe4y&L+njo;MaSy&|Am(`Jp2 zb85f42Wh!@dHQglU-19o?Ja<^eAn*Lhwe}bB}Jq`P`Z_Fq)WORr5mIaq`N`7yHgSA z?(XiE{@?s|oW1wi-#2s4%sDd*Iu7qEJlwIawbr#RULWF@e-6p2o7I{IQhBS;C1$tH zEat=9bqy3Xz^{BS(NVq){qyxjo6G6ubd$ygA^T?u{%TQb99kNh==rnAoJV~LYOkdb z;81{GLUus{|3`a_J@?1;CzIU9h&t+nFW{R!Ileh<+CKUr}xVZoB8o3vKWHs zQM+!52R=IP_a8G!*t~k0jrtn z!F+!{enEHjKx&6%)^IpEUvWkMnQ-()G%baOn!100-`%I(91;7`TO=$IbW$T@^NfrP zUgu#UEXNv0*TgJigI&x?cB|b@G9Sb|Rl0<16}RTm%E+(j=}Y;Hv4)1m3of<}Dhg&< z-PNyUQ$V+c_>+za=P@n!Awf}(a;Aeut3KK%ks(BwIR#B~)2s#l=p>mgUBHH+S5_;E zl|#_eSMNGmEByuY>d0O%Ga~zJcL|5&5ly(*;lp#qaqsl;zu4zJJWiM{6TB|mP4a3A z!0){9-K~mS$h~IVQ*yDg(%RFU zZG2{7XjtZQ_6B62kO}cMD7m<-Zz~bz3?xLa)2o+Z(BU<5j!r6_F|QZy~tu0JZCW-}}tOz&S<2`4vT zbF}tZ&P^-H>g$8XCV~$wjg2?cXDy*bvpEF?*QaW`AQo#C2_{jVPN`aYpmVly%-VZk zb^wp@r7t=$XLfDegboV@#n?>8groPs*Hq8Z!9g(@nL4{miOo6J{MZj4dK;b)z0sTO z@WyAPu`=A^iohPWt+XB(YCiVUHNN>2ncA63KuE~R>7ZTb!r+nm!uLJKukP_Xxc$44 zI%dl~3pT4isfT^S@_`1C&i!$mvf?Ov)6a-F%Z&j-xj5jpxST7J2pSBaNKCOi91Op& z@}HTSCgOK7m@eeZ%PS$MS^~8%4R_ON7u zbViBA1Wb5=2*y))A(373sA(tDOM{H8kw)zbu0VK zqo8(vi0IuMKK}P_jx~tJ@g9E~Sn%X`cqD*K=4*=g@EP0eF=_(@{uDqS zp?~;HCRH&n?pmO=(#$m_GEn!0f~ukonf^oRq-ivF>&o!kHgD@q4nhBRqsii z9qBsSe5Xe23XGYZ(^gBkb+}1ug3Gwbp@*ZB_<-{4QG_VJO3cOxZA_uifN9}8pHhZ2 zIh#3T0=1r5BjjDYgx9NZ2)awXQQQ`Jy*5(uon%nMQU6_kxnw}Qh zP9eq!z4Sd5jVKiq@Hm>`;vO(sn0&d(oLV&FjgvEUzx!yt*-3+LjW;i;nUnQWw9i}{ 
z?OVg^T@faj#Vu#KQPS^-t{FenX|}l%ICb)VC-43A)v$h|!OOe(^0A zs#Mb8RBvJ0^ZSFu8V??q#^XLqbeD#Ddac^Dpy=qg)U~ANgPyW)!MOv)IlSTYJRc2C ztN}4?kr4@x?e?rwWt-^yeva)lhlI+$Cy72_XJlmLTJQ2bN=d)iUx~#BtHq7fiKS<- zQ6p3iErR^Mx!O@zZ&G-tc_lx1XVcNRfU-iAG=z#S~6^SM#QBSdvOp1NZ%TxK`Ts8$|%l`ZCgC#K`8wb97 ztdhWPRF`k08+}&Z8G<5rvgG_<@7jVq9bov$<@NX@vh=#Tg_9o>lc?WyB%wmJ=T&d3 zi|9LGp-8Q7fE4e74gL6;w0YHs0SB;_&Wp|l6$xMvAnh+eiqmm7E|td(C2yD-lE zBh;08%6QbQBw4Mj3u5$Pyn-s2BfGu@1{ODUm1jvR(`&d~^-c|?JaNPK)O!DZb9GW0 zue_!L<8a}Kp3SQF&)4RlnCU6igQ+UPBZuiD#wU&ISCcbS!6!uA_Vp_ApSowKv}i>P zzw4*e)^kiaUvD@z2(t?yr+Zo^jAb26e&1cqJbWIW_F`u(AjvuVXM;xJi9}B-001skyi3`?I_9^pv0n#2fPKPe)~Rc+@#v(w1bt+&J4& zFup?%b)pZFN#)9l>o{4csHu&MjY`@jEghKeA0L}iYrf1l?Ck9=EZp4%=SRfwuyNT( zBcp6uk%GH=F|kz}4!rgCKWl#20t^dCd?vZZqnsplG;J~+SY>y2B`W4{QO z=e*DrR+Gc5Y2xI;FdbKqKh?{eeIiM_#m;}i!sugVEZu&O0k!r-NKgzq!qBbLuku7H z$BZ20lprI_@x)h?tJM=DOIFb8>zJ}$dtmJ4&T`P)u6t>mE1*D6rxc!r7Wns>(BNb% z`q819LW35P61H22uJXb(7cUP62_?*wqQ!!B?YG;VK1wG&Ru{dzvYd&Di5b<}U!D#6 zo*One$W^etsB?d6S|}v1q@!qu4%zLDpXoY{@v;NwICOaVIUfl&J!XAIf-bcC(1{0P z8k~@olE{Aqtiuj_8zI_ef893_M1~`&Ao`0jmhsUE9gc@64W4LE?@(Lskl@FHKjpwoEf=1p?BAW?@Ho#A1!?JdOMG{#SioI^Dm>8Af2&LRGReFMJ2>h*2s5c7ky5@W#}XEG;dyx@K0To@0M$*8{kV zprA{V@~r}TzMPx@1ev5A^?)mysZZWNJ1ZJDi@vXhne#R7okLERe0b>#)@|hvholD; z%JL|hYBC*~c>Fhc^Mc+bBobP@tIV7_+dp?(zVKSWi8=@qH#7llx^LpMvJB1KIPL7~B#wE&{m#&h#K#_V!Bu4$ z@aj0{ijpmGs8MBL+TJ1e>bmLK7x>mfbAr5HYVi&dbQ6;rJ($c{+zu@qQ^F^K?-Xh3 zXjBP==$)=3t@+Z1j15sFOd+u1iuq5LvqUs$F5k)p?4v`u2l-8oJ6%}Agl;6#l1tU8 zrNMk3HNMM{6I|RNB=)f}7de|LuC7q$DRMatwsQ4+3w9l9YVf(aIZfeBf)>?wZCTl7 zf7b`SeJ&<0-OkA9i~W!XXD8$P=Y{>9aI!>cHNHjZ!lYNzIQdf`v#(kT|1 zVy1Ehg9l!H;iw<<3`Y!*JnhT#Xx_aMGF7gWFpW7*xhZXui{3PrXv0L?6JcVQD+t~N z%5h-(G6`>zSz&5)vitU_+5x9lioS@gy?yXEbSi10gKc?M=Igu>A*?W0k6#{@>B~QF%y$3vT3GLTPi)P>helR=NnWA99 z?#)v5WMo~&leJh>5^{94xib57z3S=_^L(5X`82DzsRQXNdQqM(Aq&R`fzFw!qChQi zMta;@mkP@L4W_9~CZ((6dg?uIp*y&RY~tFIK@)Qk6v<7M(3hbe#foLC*-kd9La%ju z*)ZEb?@|qO&(MilcyJ@G9Qparb=H(*TVNJ#j=X4xbD&VOJCsT9u~4=qWh1lEllSwQ zI4oi!!l~-(QaP`t!Wplt#ThoEIOx`9&4$_-S0Qq zXA2$to}BFLD!c9;Ge1fM+f^Mw&(J)y$?LtPLp57RC`mkiWL+VYs zrJ1)tdiX*BiqpB8zIZxQY+n7m`|!#Ug#2*d!ZCG#%Wxc4wBi9R`z9bfX;^Fu zU$8B$_SIOP@%VYKSfp-b{T%EyGv5rg2Z$}u2RqRB7uf#E0=^4uf!woJ+lu#Xm6et2 z*S*AVCfnXFJSS&f0j>QTuWqt_Z>1$HA9^=|ol7^t(xWI(I9@U~yjpmBMcD55;T8Mg z>{hh-IEbiIHm2IC-WhhRO!te-4)GBFXk~U<6xG3Q(wK|=a5f3Bo)R<&b)@Nc9-ZBP zt}dP06}*OvllrZ)!$wI#bz!r8*Eg}FP1*P@#5rFNWx>5+sIg*dx9&t;rETw@z?z^3 zw1;#l7&ogn+l>>Evi27(vJ8s~MiRQ8*?g4APmlNM`IE)9$JPFUJ1~{)$TmdJFIe3;{veVO~d zO!G;HtH+y<0>;c0pFiKUSkGS_Z60-Z=M%1)k z;M3i19wECmNT;EqV5Qic28mEEDyV)16Yj*MscAgZI_|$io}F>fs>kSejmF{5{rq`T zA&i#ndZxn^A08fBb1$K!Xxbx~~ zcXufg78cRfYeq&{l4$&=#{+W&NEi*A97NY!?}LYbOD9D{C(r358=jul`tTrLSuQ9! 
zN<_ut6Q3CTbdqJwR{I)SOwETIg@Y|%YF5+!uuj!K77x1=Sefj%vQ~d-{`4pRzA-Fy zLJr;Nersoq1~V~Z&heObey1`KDRzOgo|1npuWm%=vbsBC`igkAiA|j1`;4f{0#B_J z{pv+(eaw4SYZ;3bHWEl6qzH?%I;Gu|eC8}`&laHa%#L}wesPr3M6&A~myjxptq5=m zrtsN1RBpg8w5NiWF)mJrNWa}Z3%DCco}getW{>X&-?JRp#$6ru(sFA`bZ&a!OX~Q} z9UNA8I%Jafqob1EI9A674qa7R7EF{02KfH^b*0S2%zAs-`7S*7({}F^2$H2?ESapC zN$o0>SeWDC`?R+~Vt4!d1plb`cGmD-vA3+YzP`cH$~8WW<1P{$&VA0O)NYL%Pup<~ zvUo!P+TN>}RK484!h8z6R0@EDpret?>VNa9zh8=yIluVl-w`zP({DyDt~JaRkrF~{ z?aqHF0375C7m;dn|Gi2!_HZRKC*_F-Z*#COs!0EQzgznbjl6H zv8^pGrM}W?Mui$9A^?t!5(VjX^1UmU2UB$6Vu3-FBt*X}Pj9D^JxdW;;lm`iP@Ncl6V&A*j8b;;g6?tTn#WmX9NNQ(-d~{JXUdmnGqC z0U$)B>TA9H>wUeL-krhPFgH5hnXj_FLijiwR22{8L?M%_oAwODL`}r;Qg>Kc+%QZQ{ll>{iHbYK4 z=!-9|p)lv{`TBd_zHX0a>GODX92eiIXYdw9q(L!|E>?EY19-Svjh!n zz(J{uxh~JK%}mX@*i}d@kr}S7&tmB)WfPF0*b+n#Ka2~3+IAl0Qq)7W0$_h7_4=YB z>5-8_klR+K+Gw%0tnJDF>||9JKH+OnQ3KzrPPk8K~6_xPhe<{ivG2e|iUY__!mUBdi`f!q8FX%P+M5a;7%;fZ3D+qUo3Uc6t4jk*ewkbvpGw;B|(&k#dqk z2JFqS-dVMniV8l2iWao}&CJ}q2g|ifi!&A^#i_FY_N5r%#8>jui_-?D!vae?- zXRP=lJSsXm`F>{^bnzwX9{|cBV7_5s$>!=}@1*^B;I`T#p2;~s|G-S} zrg?YK&{SoVePncL;9`D?t~NYa5%2`!338vGy>;6<9^=$v#=yY9Ejf;K#d)-~JS%xQ ziIqBF-3^ZoBsB*cu^bu7Ah(U4#Dz3N$=J!MDUa&ipwB;lI_BEWp5R-s&Ge3*Dygd{ z(@m;6K0<~EZ9m5p()=bS>^c!&-R}c7ZVfl^UjQC#PyHy`za%n4nh4P*m_e0@0c}Hf zuP7Neujji%TnttdKpSsI$^cayD%orJR zH?HY(T?w5SC5m%jj5p zm>M7(YHny+w|p|Nyz=D53-YnCROOhMCEfHEqU*fh$J!p;m$VF)9o0o_)ms?3!!p7Ds4`GWjJX6BI;-edxM&6I1XrbM}< zvZi9q#3U}!k%Z4;Gvg2$*S2uJ_|-_Z6u9ZjxTwrjviko?>|Z$yRRkzF31t%G{AAzR z*Ii*Fa=^p2+L5*qG7|0~${Zig_v(~O9={n*h2MB7lBZQu8K={9j#s=A zTgGaSLY6;}SmkD@Xt-zt`A{!HtyF7% z+wRU>5*yf>B>y?Pa7V&~@iL`)0;$&O%>P8N&PjXoHcFF z%*OWV({A&pDz1Y={r^JUFJ9mP{!bq8^(3w^>C=Go4I$-yV>zBgFjCh2m?P91&Zz%R zG4O&&2$@Wg|382`YmDu`k?LWeI1<1Gj7&C4e>gt0*%r1CURs^D!_MY$P5i)%r;lU) z6WFID{`HlAXUgC299eU)o4592ym;5%X3M?vt-8j+;V^DqWFhYTH_;8pyjzcfgGdm? z=9XF~i0Jxs6S$p>l3HwzX8l^`+eX#;el^-|ROEPQR9Lm`asTL^m@V}vilH>#n^>TJ zwfd>fdqbw=Lq%~vr&yu@0PPp)^H<3hzoKux89S~B^ovovxGB#~--z=wnnN6aR_|$M zuC>j)3%2G}Y$~iQ^4c2*aRC2o^>eZwe=M*!)F>_e*v+SQ=$y@3XVTwakp|YgiJd#`=0ABh z(6%ZIo@Un#Atu56t-Gzepc)vjg3d<8xF>8rPzLFM8*yB-P`Oy`{-!+$h|8i&;gT^$ z*vP*3R~MT^CGm1Yuk%W@#xOOJ8x0Iix5i6K2po;qIaD)(=SNyXRZzQ~OB5cHXuWYH z_-WeK&WHwfT=3mUlhTC8tO}JO)~ClW%nj6m{bY|fyNjg9`sYZlkN>9g9A%9^2Atn4s;A#r}BY;0`Dg7&M!wBzQ-8G(UBF*#rr5$8^-1E~d}Lt6p_MAOS_MO% zz^MdA#QdZCg zJDmLKtV(qqJOk~vzZ%YMV{a8+cSM}Tb0a~Gjqc#X&dyGi9uETSIZz2bYSymF1Vvr? 
zj&#dZkuLVa{qi3>Q`%0CcF{F|V76K|Xin2R$+pg|x60w&ibN@B{&3Wn%@7EnYb^&i zb390f-@G!zB|iBz)?O#ao~10)V2Q-XD0^4g$bkh-SMI$hKPWt5o9TX*zzy!@?LpQ=V5Y&Vg;~CKt(#ffrnR>p#{ULyEg(oz3=HRGW*AbE`d&9809-ziyrn zjj?0BdV+R&n^R1o*vD7)kq`?a>lhCp(im8-4BxvNKIa6#hhhB#+>OjmK1%TS(FlE- z1#7sH&t?qZFeU0Ua|h!Z9td=w0MB|rphJg%`%S)7AW7!4mv|kz(PCU8^n&Kq`X$mX z+Tp^|_U;W6Por5c{eLSvZiN*|ibB1cVIUm**wLDw%YG-9Zg;<>H4Fr;7Vb+|rq^0T zFi9P@x|QthB~8_~?tB6aL*dSe!+3$rwCFJjH?;y>d@EEQ+%BtL$whl3#!AL-6>zGI z5BR2d6J?V*FK%O`LF>pACubmqZ2W0gpkZp!LT%o8-Qiz?fW+mYD)FV=|5~Czt;;4s zbe9$kb3bfr)+zbFqH+n0ut-)&k8mH_%lGbNKAl<#jJWZ=#-D3yUL#1vH>7~Pio>5t z3ew%GnPjD*LuTkHG@;X2NiXtnc$(ebzQBWd+k2ML#?P&@&GdZF^vkl@3*F0;Fop7t zGDn%_8mv+7U6ppczaVl^Xpk*q;CBImos{g^A3yH4MpQ4)+`*9v4HjvrI41zF5RBIV zz9KI+aurL}Y9d`In{Ui*%~cO>B$YJ0SC5Jho8>$z)TF6-;Cn;PU#O<6d``~1RB7qbcc$#gp5^b*ferFeNsRI zC+7G#vtB8uQtI2_d>J~%?)0q30c6aNNwr?}I?+5mq7M1|H}-y-e@uzL_%Hf?Xc`q24f5(K;(Bx) zkbDQqn--^~!d)7lAA>_f=S0liw!PHtMQ`3Pk4-YvIbT-cdu;R0xSkhEYgF?&{mHGY z>>VG!s#)PzzzHvTju*5>lUF#VcqYitOO$Md6!7gC>+(-+@4pm<&g=Y?S>YIuVKX8Z zT~iYjI;V+?SFjMJ#uG^B^Ko{p#fWE;{lZ5%6|9ZyUti#HL6z#$SQEjn+0Mw|X4k*) z+tMvnoDN3pqk~@WLWdJ)9)<{ta2bcXQ3wh67<*H(oJ4@T7>nl_hNY-~QQ!j3Oi(9} zgE>_~Y;Ru>(&VKV7Je%zp!4irvWb&2(jTxjD+Ffxq=}LSRkhJ)(PA$_TFSTCIA|3*)KT=yuxqqc5>Jerb&X8##X?77&pt z;^kQdJw@S9*pJe5)pTPhC*C(V2B(lgQTT@JO>oln^;x`VeQqSmXZ=HLxn%+}!z)Rd z9q2LsWqt}Zp-mq;7+Rd2oPFffX>n2L<1&`lV-650?YAg`Qn=hj zLVGmJEb>xFCc1||wQr=hcTfp|d~WlDGtKt%9&0M?tgOAs>apL`f$e&n9Cl+v9TO;_Pz-k_M z|DFAN7BA{V(K$HEHJQkAnaRuC;>WUy8V8oS^zm!fc zmH$yXojF%TO;%h$kzKPj#`uYS9z?iJrac!8)z?iLavQJUF>P!$Y8=K!0IT>#^W*CS z8Jo^8x}->2jf$>M^!3T9I`-*QN|f~ZgM4u>WG(mj?Dk73t|?x;yQ&zh4P!Q=;rzc< zo4S|Zte~*i#~FDU8!UP$GTf089#cD+xwT;);dd`N zs9sB))n~QC?^M;;5Vw3Q%9BKvn`l&%P{e5oxx6->xqCwIg2B)$_NB!!eZxeXwHhcK z>ac&)Rn*^bYb36ShDs%}=KQuRgi39G@cg|z3ud2r2flI?0gK>!%)anV58b;7{Qe72 zaP+JJdEGh36Ixe&cq>Yxzey!#curYMp2;_O0aXZ^EoDbD2c-|^Te_pq|0t^bVNXhA z)mcXjPg`E9OSTu8(ctEGlCsXQmZJWhanisyI%hj3ASr3yJTOb7=z^=&utDTcGZnW(0vd-TtI$p3rRm z%uk~VtUWB9H_x<-X5ylfX4~?v*|keN;}73(t>*<&xLxdtob(%2Z1kIJV$@VrJ>&iK z2ShBH(Qh0%QOPD>>Rk@2*}9E#UzSv}S_!{!ABWz`7 zL3bu@M^quhiq)Z@p~V^zqMHbx<2(MzsQ19QF~sg(*US93`iw803y{KS5Q(4YE46BY zy$fm!@Abb+H)r_{n*xgzfcKldI>Tsk)Yfcxa@2EoX_!F|*UE?t3NmTwirK4+Ga}}z zS-;l9FJkpdbGb?mKE$~@9De_>N*!w?uxzI-{WlL$dAU5^_TF~6)6(Zx%F3|m0NDwG z%LsJiLORo3t2a{$>{j#JGdZ#nW)g!{E$)10#W<)cCD&YheD9nqe_VYdX5Rj&SV`Xz zk@uRe-p}vSI0%pHV*7{A8**||>xC+A<_fblSF)PP-F!m09%GyLw?^E6Qp)k*KWgu0 z4GEb8_y^HTA!mSu`-l!IRV^IK5RrPi0cI|e;h>U}ith2-a4_&{R;OR=s-3v6dsJDq z8B5|N*s(aD(OT|Qlj^OcOiU~lsCbl__Kc^Qd-mW1VHe36{dA_epkEpcS^^Fmsiy~d zzgY?{-=k-nUU(-b`)?9BM0}w;8Cgz2N_|Dw8&i_D&{bW~fuotx|G12ronqa&^VvJo z^gy{}2TD!eY#=L8p^MxV)Kam_fE^kmYAAX^k}@|`9Vu;0g$0RWww%AA$A*3vbG>`5 z%F6fNtZquiXHTh^W)IloVvhT;pm?<@(64z7cXQ9<{%QQ1sf)|(xcv3omS+IuqYm%< zneuq%@Osetcb^~~w74`yT+XUAPzGAh%Jrs(M~wSJ&XX1!{+xXNEI`#^RUX!qZ0U6$ z0$ecqzhzG~D@Xf(y9^W)NBzWbT+ya<>N_i|sUY0oblA2l$y|-9t7uaEHlAat(G)_= z_3|a=4a{*MyQ&h5=}cTz@UCRvFcqClAODPC~8dof>?+OVsw zZ*8*4?1%|DIbVa1&CShHReH$A{$q7T)vZ;}g`j`PX~IgIUKxnzCr4AFIF=umcF}`_r>yoPfTeeQsym1N-SdpPmCE&B)Z_(OJ_{ zH;pwpKyvPDC1ySz+RG@gCiy^#cYH0YDVdw#uJ^x5Vd3slpy9^J5U6bP zm_55QK=cvK`q9nBEqBVr5CbtuIKwb-Th!w8TY%l7)F+b z%Q?t|=yfSjx`X9pyu)sl@BYe7#g#j~RGyDVS(Gn1taHf*1lBx>j4+Tybgs3&m{E6? z_I|s_=ahSFQqf$ZMb+LYlYb?p6tzk<+p@WT#xOD2NKFR=*#-Bi5~yzt2+$!ILSZBt zd({iw8Ru|0VzdaMjHF>-gduF(+s(rP%s4J|F;{}`wFzRXcRZQfzqdiPm0zGK)8?M! 
z-_nFD-+5FLWE(+fQS!;mlo9ZdUsxp90bdS&R*+5QKsvZ!+q>!GdI84%3>@E`@=wyd z>TjYBL`5lbb+JQ2y3Uc+Uz@lP7Vd!F)>vt-nc}4mAVGZE@6X;o9~~ZzurB7A4$Bv; zFKGGj8aUFSS~pgck@X^IaAxdGrj!4;EdfCElHpb5^Y)Eb!XgdB2CB^-NCLFLwoVW|9nWTDggf+S3Sdi;B`p zA*gmXf2Hn!m7Vf_TXpsIlr$YG98Aqu7+X%4pMhR(V3??`9&aJOkN^I~4WUexURK<& zC_A5Yc57+Vj+?`vH}?_#9crQbG#;7I<>hDJ&fx)N8<}+J`;X)99m1NtNr{iiA1m6- zBw`)UOU&Ehwp1Ptae5Ng9Jx~#!9Q=G`uf_FbY(4*?I{IL`?5(qr%N2Ci(-#rpsksr zm$v%sr|ExeRqDPs2eZz-o8f@T8yp*|dvZKr+|%sz;U4Any%k8{DTjGHB_{rmRSCq7 zO9jk;HiI{Wt|2AiGM9~6^|3~?XL8wlmJRn z<9xJpFt@Dz>Hy`>&hlD@D5ll?X&*YHwfEciye;&AP6ooNrlwVJQf)g43(MAa%sD;F ziL>e$x9CjTZ&Xh4P}>L+W>tdUcrtT*=bwmcZq}F9FhorA6kisK1=r)^-vk_^4$2Q* z-w?4*YON*AYk|V#k!cLLM*Z#0>m7GuQo@~&Lq}qZKQO#nzm4UlIQ|%=NK4FPqVow1 zbTF})4Vo|RM@JN76C3%bUR70ZcexG7U4MUnXJ`A~m`myx!|ipk$7Q5^sU_zq&sjJV zU}3b$+Mi8sqL;@}g^RJdoy>JWr^nPrlHew-~_yW2jeB)feEVrRQJ1fDuwL(t83ExBO)}Y zyu1P^QaijIJ+{3jz+@L5zLfcOMN`wI>pLKDhgLqR&}-!9bX1jBemJ@`J~~1HYSH$z ztGi0s?eR<(C@8tBEvw3^!OF+P)U>Z$Aj#vgTctuf=?CcuZ(eVK)2@Dcrbx5GtaI^Q zAO*M+6XN5~cegTs?;P**J2C0~0S#@0z>U}yN$FDC(4@h+_3T*#+&P#e=F*vFW*_q9 z8%|I=uM5XmjqU)6%+jOq183SB$A;lU`%jf$+o0kygrE$=C7!~jstUc)+MtRR8&aSu zp;Kx!Xo^M_Th-AFta8R2Zw7hxZxh(<-8en3N&%1GqevQcI373STq$?f-XV-weKfhoyKIf|Ic;Hu6pMODA*V?YGg7;%*6(a=s7 znl5;6deQ#hh*wvI|DRM~)+$)q<)IlFjIKO(LnM%xi{0AP-a?|P#cp!s1MNDkj_4Dg zdbfNO-FEzJ4tmZ=A)+8UVET&(V7BO+@(FP{pzJGsi&;1JzbPNGT6~Fh(1%Xh@RE`I zR@_ELFKXLfG6-ed+mz~9+vm#F=aXo;6crU!oF1p0m1?$TQXQxe8h-D|Qx2W=UD^8{6${`=Gb^Lsg&qC* ze;M2UQy+WZ^H0;-=dYTFq*N{Kx|Z)ItSxTNxgW$Pfz7!^lfODg-d6w<+k=w!;4}lD zvCVk%`}?NFTQkO+e)kWTVr-~-gpOm!eb`Y#PG9Qr*FtcFJpFu~DKLKg`3b&UfRrG_ zh&AXr>I>?pvZYiHwcUNOnF@gx;vYQY=U0OyL#4Ih`f};eD|yK4+9FWG=pSAHoYFF+ z2dNG(&Zz1Yk=HYLf9y^4w@`%1AB;tRfzNN^TVKLbLOcX7U_8FRJ6dn+&hSBYf>V7G z2FruQA<-u27jYp!h_#4=Aq+*LRDx;3_%wF#wYf}ue^|XsP@q;kg1hgDO-}jPP+d z{{sxPKU@mq^-hq{i*Jni>(zI3uK^UuiyvddLu~e&Va5_B27+~r zK@+DFzL57)1E$^?C3gCIC2bm^{6Vd?(P4043m`YSCXChdz}QfgMut_h^gUNn^BGF) zU8InPBXfZFAHf!1#Kfq9|d4GGFE*C5XxUZ3zs30i7ltWMu#fubyl&0`G42M`78G#h?DV*v% zohO3>UIv30LXz_a_)jv!FuNmUDD-*FWaS7fO1{yH8;z+K|Lwu%%Bk-(s(oYUd`t6| z;;l9;Tz&}?T5GHq#oJdHSf;s<4z^yg0LvpUBnYBIiGwMH>4nLDbZ99x{sa8~4%?FY z0N6=^_*``O#oSrKzYrd1yi#yLb(2g%;PdmBN|Q!b6W5`Dffiv*s0-617ZqSna6ciW z@8VEA@6OQkx|LTI^(KU2_rrxH9Kk(ev7==A+WL}JdUH_6)sO^|f`2Nm^Aco_1ZO*r z9Ax~pp3I*2nF$L)J>G68gT!7~he#Mk&xJ6O|8P@f;i&_NJ5XU2vjBI^tjA|95R2&ZYtMLTW?{zbO5hT-JqKkOX zVIQ_+*z^)F#=~#!A6xYA{?mW7di|pZEcQ+VMH1zYk?d}_g#olRr@sOIq$&P^f~*@DtJ@pLoA-)}JbJwxd5Y8O^C$sWyI>f# zMx9YnJ!%rG{xoQjF{7XLJUl$o_GlX*7RX)8R(pr@6xJeriP8SvgjKtX;mP@`=g6U4 zN}w$MW&i!CtHY!Z(l)K9SE2^cd4WiXsoB5>M)UYpUNu;=CpfYt@;c7D&L7l%RRH+l zNHYN2j1_d(nc<)=zCeI#Wnmh^hq9w)1+WD~LZ0HCtcr_>#MF1&?JQgzkC~H;qT7-V zwyb5UVH|5V;_zdFW6&yL?9t-)pJZ_*C~neFK2|*Xi$x3_>9?qdM2plIShykzluzP> zTH(D!1z)zxvSh;Ks+K=afV3Yy6-bLhysuY&D%OgWh6V%E3g-n8Xwl&mz%ZqLPG_+c z6T`#sgd@sHm-mz56!fNO^&pUfM6jt5{(SYfWr14X!!A6*hCz=N__`}=21|&n2%$%Q z_K@5Yr(l4EFk8`K1(EF#!JrnF8$=ty^vNciQ#cM`!AGqIuzS#=^!Jg)YU%XGlKCm_ zmgdjDciZb!27nonl>4rrp05D-a`u9QN&Q5h5MC(m+iTc87n~Dvjjknj;@V_ zhsS2Kqp^_(Hv>RQQWCD8%J1Ln~B4CFBLkD4cz4n%aZ&n@w;fE;SDAQ1Mgmjo;^yL}2qFQBOG2vOD zTBr@i197qW>l+A``Vf(c%ojsPMhC{5FL>%&5q1T|2117!f4nIUK>{X=Fr^S;OJ1)i zIE&efhOG|+wjZ9o*CTkmC#7(M@QV-!_M}9bc~M^g482RFepg=Y9{2lsmA+9z+UDke z8luF-FEd0i@loll=bJUT;Y@61xG-#NBgI|P{8t%2jHFG>`N1gk6KMcfZO$t7UiHY%o5FjfO(zRJED?-}i-soV(On@gHZM9DuYA0!-W0|8 z7trF!AsU8l=IL9SZ(%we|!HY76sIPOX17S}}R& z8=Go@(Kd$fpr<2CPoNhVn&O6e%8xVqU55w;Ds?~cxroSyX$@($@v4U3hulg+$hl)! 
[GIT binary patch data (base85-encoded image contents) omitted]

diff --git a/doc/images/test_skip.png b/doc/images/test_skip.png
index 5856f08701ac0a35c42acfcf208596da3a7dc364..ce563d2e01f257d43bd64d2d8c24ffd3e195a902 100644
GIT binary patch
literal 24419

[base85-encoded binary image data for doc/images/test_skip.png omitted]
diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py
index 8b34025..60fbcde 100644
--- a/pytest_parallel/plugin.py
+++ b/pytest_parallel/plugin.py
@@ -26,7 +26,7 @@ def pytest_addoption(parser):
     parser.addoption('--timeout', dest='timeout', type=int, default=7200, help='Timeout')
 
-    parser.addoption('--slurm-options', dest='slurm_options', type=str, help='list of SLURM options e.g. "--time=00:30:00 --qos=my_queue --n_tasks=4"')
+    parser.addoption('--slurm-options', dest='slurm_options', type=str, help='list of SLURM options e.g. "--time=00:30:00 --qos=my_queue"')
     parser.addoption('--slurm-srun-options', dest='slurm_srun_options', type=str, help='list of SLURM srun options e.g. "--mem-per-cpu=4GB"')
     parser.addoption('--slurm-additional-cmds', dest='slurm_additional_cmds', type=str, help='list of commands to pass to SLURM job e.g. "source my_env.sh"')
     parser.addoption('--slurm-file', dest='slurm_file', type=str, help='Path to file containing header of SLURM job') # TODO DEL
@@ -59,7 +59,8 @@
             f' with value "{os.getenv("I_MPI_MPIRUN")}"' \
             ' while pytest was invoked with "--scheduler=slurm".\n' \
             ' This indicates that pytest was run through MPI, and SLURM generally does not like that.\n' \
-            ' With "--scheduler=slurm", just run pytest directly, not through `mpirun/mpiexec/srun`, and let pytest launch MPI itself.'
+            ' With "--scheduler=slurm", just run `pytest` directly, not through `mpirun/mpiexec/srun`,\n' \
+            ' because it will launch MPI itself (you may want to use --n-workers=).'
 
         r = subprocess.run(['env','--null'], stdout=subprocess.PIPE) # `--null`: end each output line with NUL, required by `sbatch --export-file`
 
@@ -108,7 +109,7 @@ def pytest_configure(config):
         from mpi4py import MPI
         assert MPI.COMM_WORLD.size == 1, 'Do not launch `pytest_parallel` on more that one process\n' \
                                          'when `--scheduler=shell` or `--scheduler=slurm`.\n' \
-                                         '`pytest_parallel` spawn mpi processes itself.\n' \
+                                         '`pytest_parallel` spawns MPI processes itself.\n' \
                                          f'You may want to use --n-workers={MPI.COMM_WORLD.size}.'
 
 
@@ -121,6 +122,8 @@ def pytest_configure(config):
         assert not slurm_options, 'You need to specify either `--slurm-options` or `--slurm-file`, but not both'
         assert not slurm_additional_cmds, 'You cannot specify `--slurm-additional-cmds` together with `--slurm-file`'
 
+        assert '-n=' not in slurm_options and '--ntasks=' not in slurm_options, 'Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).'
+ from .process_scheduler import ProcessScheduler enable_terminal_reporter = True diff --git a/pytest_parallel/process_worker.py b/pytest_parallel/process_worker.py index 49d3849..2d342d4 100644 --- a/pytest_parallel/process_worker.py +++ b/pytest_parallel/process_worker.py @@ -29,12 +29,11 @@ def pytest_runtestloop(self, session) -> bool: item.test_info = {'test_idx': self.test_idx, 'fatal_error': None} - # remove previous file if they existed + # check there is no file from a previous run if comm.rank == 0: for when in {'fatal_error', 'setup', 'call', 'teardown'}: path = self._file_path(when) - if path.exists(): - path.unlink() + assert not path.exists(), f'INTERNAL FATAL ERROR in pytest_parallel: file "{path}" should not exist at this point' if comm.size != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly if comm.rank == 0: From f4dd15ca3b34d07bd9bed06cdd51098e40592d7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Wed, 8 Jan 2025 00:00:55 +0100 Subject: [PATCH 06/14] process-isolate schedulers: use a unique folder + refac --- README.md | 26 +++--- pytest_parallel/mpi_reporter.py | 4 +- pytest_parallel/plugin.py | 11 +-- pytest_parallel/process_worker.py | 8 +- pytest_parallel/send_report.py | 7 +- pytest_parallel/shell_static_scheduler.py | 84 +++-------------- ...rocess_scheduler.py => slurm_scheduler.py} | 89 ++++--------------- pytest_parallel/socket_utils.py | 29 ------ pytest_parallel/utils/__init__.py | 0 pytest_parallel/utils/file.py | 18 ++++ pytest_parallel/{utils.py => utils/items.py} | 7 ++ .../{utils_mpi.py => utils/mpi.py} | 0 pytest_parallel/utils/socket.py | 70 +++++++++++++++ 13 files changed, 156 insertions(+), 197 deletions(-) rename pytest_parallel/{process_scheduler.py => slurm_scheduler.py} (73%) delete mode 100644 pytest_parallel/socket_utils.py create mode 100644 pytest_parallel/utils/__init__.py create mode 100644 pytest_parallel/utils/file.py rename pytest_parallel/{utils.py => utils/items.py} (74%) rename pytest_parallel/{utils_mpi.py => utils/mpi.py} (100%) create mode 100644 pytest_parallel/utils/socket.py diff --git a/README.md b/README.md index 0dcffe7..afd2a82 100644 --- a/README.md +++ b/README.md @@ -219,7 +219,16 @@ SLURM takes care of the scheduling. This scheduler as specific options: ## FAQ ## -1. **pytest_parallel** gives me a new communicator for each test, but my code only uses `MPI.COMM_WORLD`, how can I use **pytest_parallel**? +### Which MPI implementation is supported? + +**pytest_parallel** has currently be tested only with OpenMPI and Intel MPI. Other MPI implementation are also supposed to work. An exception is the `shell` scheduler that use implementation-specific environment variables to pin the processes to cores. Feel free to give use feedback/patches. + +### Which job scheduler is available? + +Currently SLURM is the only job scheduler available. Other job schedulers (PBS, LFS...) are not supported currently. If you don't use SLURM, the `shell` scheduler may be enought for your tests as long as you dont want to use more than one compute node. + + +### **pytest_parallel** gives me a new communicator for each test, but my code only uses `MPI.COMM_WORLD`, how can I use **pytest_parallel**? The [process-isolate schedulers](#process-isolate-schedulers) can be used with tests using different sizes of `MPI.COMM_WORLD`. 
The `comm` fixture can then be discarded: @@ -246,28 +255,24 @@ For unit tests, process-isolate schedulers are very slow, and **[process-reuse s It would be possible to develop hybrid process-reuse schedulers where processes are re-used, but only among tests of the same communicator size (and repeat the operation for as many communicator sizes there are on the test suite). If you feel the need, write a feature request and maybe we will implement it. -2. Can I write an MPI test with no fixed number of processes and let **pytest_parallel** use `MPI.COMM_WORLD`? +### Can I write an MPI test with no fixed number of processes and let **pytest_parallel** use `MPI.COMM_WORLD`? Not currently. **pytest_parallel** is designed to dissociate the parallelism specified for each test and the resources given to execute them. If the need arizes, we could however: - implement a mode that would use the number of processes given by the command line instead of the one specified with each test - add a `@pytest_parallel.mark.parallel_from_context` decorator that would mark the test to be run with the maximum parallelism specified (that is, the number of processes given by the command line) -3. My test suite deadlocks. How do I pinpoint the test at fault? +### My test suite deadlocks. How do I pinpoint the test at fault? There is no magic technique. Try to narrow it down by using the [sequential scheduler](#sequential-scheduler). A solution that we need to implement is to handle timeouts for the [process-isolate schedulers](#process-isolate-schedulers). Feel free to submit a feature request. -4. Why is the [shell scheduler](#shell-scheduler) using a static scheduling strategy? +### Why is the [shell scheduler](#shell-scheduler) using a static scheduling strategy? The [shell scheduler](#shell-scheduler) uses the same scheduling algorithm as the [static scheduler](#static-scheduler) because it is easier to implement. We hope to also implement a dynamic scheduling strategy if we feel the need for it. -5. Is SLURM the only job scheduler available? - -Currently yes. - -6. I want to use the static shell scheduler, but I have the error `MPI_INIT failed` +### I want to use the static shell scheduler, but I have the error `MPI_INIT failed` On some systems, using `mpi4py` without `mpirun` does not work. For example, using: ```Bash @@ -292,11 +297,10 @@ In this case, try: mpirun -np 1 pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py ``` -7. Can I use **pytest_parallel** with MPI and OpenMP/pthreads/TBB? +### Can I use **pytest_parallel** with MPI and OpenMP/pthreads/TBB? We do not use **pytest_parallel** with multi-treading, any feedback is welcomed! Regarding the `shell` scheduler, we explicitly pin one MPI process per core, with is probably wrong with multiple threads by MPI process. - ## Plugin compatibility ## **pytest_parallel** is known to work with the **pytest-html** plugin. It is also compatible with the xml built-in plugin. 
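To make the FAQ entry on `MPI.COMM_WORLD` concrete, here is a minimal sketch of such a test: it requests two processes through the `@pytest_parallel.mark.parallel` decorator, ignores the `comm` fixture, and works on `MPI.COMM_WORLD` directly. The test body itself is only illustrative; the point is that, with a process-isolate scheduler, each test runs in its own MPI execution, so `MPI.COMM_WORLD` has exactly the size requested by the mark.

```python
import pytest_parallel
from mpi4py import MPI

@pytest_parallel.mark.parallel(2)
def test_using_comm_world(comm):  # the `comm` fixture is provided but deliberately ignored here
    world = MPI.COMM_WORLD
    # With --scheduler=shell or --scheduler=slurm, this test gets its own MPI run,
    # so the size of MPI.COMM_WORLD matches the size requested by the mark.
    assert world.size == 2
    value = world.bcast(world.rank, root=0)  # simple collective on the world communicator
    assert value == 0
```

Such a test would be run, for example, with `pytest --scheduler=shell --n-workers=2`.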
diff --git a/pytest_parallel/mpi_reporter.py b/pytest_parallel/mpi_reporter.py index 0ead5f4..2ac125b 100644 --- a/pytest_parallel/mpi_reporter.py +++ b/pytest_parallel/mpi_reporter.py @@ -3,8 +3,8 @@ from mpi4py import MPI from .algo import partition, lower_bound -from .utils import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index -from .utils_mpi import number_of_working_processes, is_dyn_master_process +from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index +from .utils.mpi import number_of_working_processes, is_dyn_master_process from .gather_report import gather_report_on_local_rank_0 from .static_scheduler_utils import group_items_by_parallel_steps diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py index 60fbcde..5c515c4 100644 --- a/pytest_parallel/plugin.py +++ b/pytest_parallel/plugin.py @@ -10,7 +10,6 @@ import resource import pytest from _pytest.terminal import TerminalReporter -#import signal # -------------------------------------------------------------------------- def pytest_addoption(parser): @@ -44,6 +43,7 @@ def pytest_addoption(parser): parser.addoption('--_worker', dest='_worker', action='store_true', help='Internal pytest_parallel option') parser.addoption('--_scheduler_ip_address', dest='_scheduler_ip_address', type=str, help='Internal pytest_parallel option') parser.addoption('--_scheduler_port', dest='_scheduler_port', type=int, help='Internal pytest_parallel option') + parser.addoption('--_session_folder', dest='_session_folder', type=str, help='Internal pytest_parallel option') parser.addoption('--_test_idx' , dest='_test_idx' , type=int, help='Internal pytest_parallel option') # Note: @@ -124,7 +124,7 @@ def pytest_configure(config): assert '-n=' not in slurm_options and '--ntasks=' not in slurm_options, 'Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).' 
- from .process_scheduler import ProcessScheduler + from .slurm_scheduler import SlurmScheduler enable_terminal_reporter = True @@ -144,7 +144,7 @@ def pytest_configure(config): 'export_env' : slurm_export_env, 'sub_command' : slurm_sub_command, } - plugin = ProcessScheduler(main_invoke_params, n_workers, slurm_conf, detach) + plugin = SlurmScheduler(main_invoke_params, n_workers, slurm_conf, detach) elif scheduler == 'shell' and not is_worker: from .shell_static_scheduler import ShellStaticScheduler @@ -159,7 +159,7 @@ def pytest_configure(config): from mpi4py import MPI from .mpi_reporter import SequentialScheduler, StaticScheduler, DynamicScheduler from .process_worker import ProcessWorker - from .utils_mpi import spawn_master_process, should_enable_terminal_reporter + from .utils.mpi import spawn_master_process, should_enable_terminal_reporter global_comm = MPI.COMM_WORLD enable_terminal_reporter = should_enable_terminal_reporter(global_comm, scheduler) @@ -174,8 +174,9 @@ def pytest_configure(config): elif (scheduler == 'slurm' or scheduler == 'shell') and is_worker: scheduler_ip_address = config.getoption('_scheduler_ip_address') scheduler_port = config.getoption('_scheduler_port') + session_folder = config.getoption('_session_folder') test_idx = config.getoption('_test_idx') - plugin = ProcessWorker(scheduler_ip_address, scheduler_port, test_idx, detach) + plugin = ProcessWorker(scheduler_ip_address, scheduler_port, session_folder, test_idx, detach) else: assert 0 diff --git a/pytest_parallel/process_worker.py b/pytest_parallel/process_worker.py index 2d342d4..dc73f75 100644 --- a/pytest_parallel/process_worker.py +++ b/pytest_parallel/process_worker.py @@ -4,19 +4,20 @@ from pathlib import Path import pickle -from .utils import get_n_proc_for_test, run_item_test +from .utils.items import get_n_proc_for_test, run_item_test from .gather_report import gather_report_on_local_rank_0 class ProcessWorker: - def __init__(self, scheduler_ip_address, scheduler_port, test_idx, detach): + def __init__(self, scheduler_ip_address, scheduler_port, session_folder, test_idx, detach): self.scheduler_ip_address = scheduler_ip_address self.scheduler_port = scheduler_port + self.session_folder = session_folder self.test_idx = test_idx self.detach = detach def _file_path(self, when): - return Path(f'.pytest_parallel/tmp/{self.test_idx}_{when}') + return Path(f'.pytest_parallel/{self.session_folder}/_partial/{self.test_idx}_{when}') @pytest.hookimpl(tryfirst=True) def pytest_runtestloop(self, session) -> bool: @@ -35,6 +36,7 @@ def pytest_runtestloop(self, session) -> bool: path = self._file_path(when) assert not path.exists(), f'INTERNAL FATAL ERROR in pytest_parallel: file "{path}" should not exist at this point' + # check the number of procs matches the one specified by the test if comm.size != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly if comm.rank == 0: error_info = f'FATAL ERROR in pytest_parallel with slurm scheduling: test `{item.nodeid}`' \ diff --git a/pytest_parallel/send_report.py b/pytest_parallel/send_report.py index 412f2c0..85ea3db 100644 --- a/pytest_parallel/send_report.py +++ b/pytest_parallel/send_report.py @@ -2,7 +2,7 @@ import socket import pickle from pathlib import Path -from . 
import socket_utils +from .utils.socket import send as socket_send from _pytest._code.code import ( ExceptionChainRepr, ReprTraceback, @@ -14,13 +14,14 @@ parser.add_argument('--_scheduler_ip_address', dest='_scheduler_ip_address', type=str) parser.add_argument('--_scheduler_port', dest='_scheduler_port', type=int) +parser.add_argument('--_session_folder', dest='_session_folder', type=str) parser.add_argument('--_test_idx', dest='_test_idx', type=int) parser.add_argument('--_test_name', dest='_test_name', type=str) args = parser.parse_args() def _file_path(when): - return Path(f'.pytest_parallel/tmp/{args._test_idx}_{when}') + return Path(f'.pytest_parallel/{args._session_folder}/_partial/{args._test_idx}_{when}') test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absense means no error) @@ -68,5 +69,5 @@ def _file_path(when): with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.connect((args._scheduler_ip_address, args._scheduler_port)) - socket_utils.send(s, pickle.dumps(test_info)) + socket_send(s, pickle.dumps(test_info)) diff --git a/pytest_parallel/shell_static_scheduler.py b/pytest_parallel/shell_static_scheduler.py index 9dc62eb..1adf117 100644 --- a/pytest_parallel/shell_static_scheduler.py +++ b/pytest_parallel/shell_static_scheduler.py @@ -1,78 +1,18 @@ import pytest import os -import shutil import stat import subprocess import socket import pickle from pathlib import Path -from . import socket_utils -from .utils import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index +from .utils.socket import recv as socket_recv, setup_socket +from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index, mark_skip +from .utils.file import remove_exotic_chars, create_folders from .algo import partition from .static_scheduler_utils import group_items_by_parallel_steps from mpi4py import MPI import numpy as np -def mark_skip(item, ntasks): - n_proc_test = get_n_proc_for_test(item) - skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {ntasks} available" - item.add_marker(pytest.mark.skip(reason=skip_msg), append=False) - item.marker_mpi_skip = True - -def replace_sub_strings(s, subs, replacement): - res = s - for sub in subs: - res = res.replace(sub,replacement) - return res - -def remove_exotic_chars(s): - return replace_sub_strings(str(s), ['[',']','/', ':'], '_') - -def parse_job_id_from_submission_output(s): - # At this point, we are trying to guess -_- - # Here we supposed that the command for submitting the job - # returned string with only one number, - # and that this number is the job id - import re - return int(re.search(r'\d+', str(s)).group()) - - -# https://stackoverflow.com/a/34177358 -def command_exists(cmd_name): - """Check whether `name` is on PATH and marked as executable.""" - return shutil.which(cmd_name) is not None - -def _get_my_ip_address(): - hostname = socket.gethostname() - - assert command_exists('tracepath'), 'pytest_parallel SLURM scheduler: command `tracepath` is not available' - cmd = ['tracepath','-4','-n',hostname] - r = subprocess.run(cmd, stdout=subprocess.PIPE) - assert r.returncode==0, f'pytest_parallel SLURM scheduler: error running command `{" ".join(cmd)}`' - ips = r.stdout.decode("utf-8") - - try: - my_ip = ips.split('\n')[0].split(':')[1].split()[0] - except: - assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' - import ipaddress - try: - ipaddress.ip_address(my_ip) - except 
ValueError: - assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' - - return my_ip - -def setup_socket(socket): - # Find IP our address - SCHEDULER_IP_ADDRESS = _get_my_ip_address() - - # setup master's socket - socket.bind((SCHEDULER_IP_ADDRESS, 0)) # 0: let the OS choose an available port - socket.listen() - port = socket.getsockname()[1] - return SCHEDULER_IP_ADDRESS, port - def mpi_command(current_proc, n_proc): mpi_vendor = MPI.get_vendor()[0] if mpi_vendor == 'Intel MPI': @@ -85,7 +25,7 @@ def mpi_command(current_proc, n_proc): else: assert 0, f'Unknown MPI implementation "{mpi_vendor}"' -def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, main_invoke_params, ntasks, i_step, n_step): +def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_invoke_params, ntasks, i_step, n_step): # sort item by comm size to launch bigger first (Note: in case SLURM prioritize first-received items) items = sorted(items_to_run, key=lambda item: item.n_proc, reverse=True) @@ -93,12 +33,12 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, main_invoke_params, n script_prolog = '' script_prolog += '#!/bin/bash\n\n' - socket_flags=f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" + socket_flags=f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port} --_session_folder={session_folder}" cmds = [] current_proc = 0 for i,item in enumerate(items): test_idx = item.original_index - test_out_file = f'.pytest_parallel/{remove_exotic_chars(item.nodeid)}' + test_out_file = f'.pytest_parallel/{session_folder}/{remove_exotic_chars(item.nodeid)}' cmd = '(' cmd += mpi_command(current_proc, item.n_proc) cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' @@ -120,10 +60,7 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, main_invoke_params, n ## 3. 
wait everyone script += '\nwait\n' - Path('.pytest_parallel').mkdir(exist_ok=True) - shutil.rmtree('.pytest_parallel/tmp', ignore_errors=True) - Path('.pytest_parallel/tmp').mkdir() - script_path = f'.pytest_parallel/pytest_static_sched_{i_step+1}.sh' + script_path = f'.pytest_parallel/{session_folder}/pytest_static_sched_{i_step+1}.sh' with open(script_path,'w') as f: f.write(script) @@ -143,7 +80,7 @@ def receive_items(items, session, socket, n_item_to_recv): while n_item_to_recv>0: conn, addr = socket.accept() with conn: - msg = socket_utils.recv(conn) + msg = socket_recv(conn) test_info = pickle.loads(msg) # the worker is supposed to have send a dict with the correct structured information #print(f"{test_info=}") if 'signal_info' in test_info: @@ -208,11 +145,12 @@ def pytest_runtestloop(self, session) -> bool: run_item_test(item, nextitem, session) # schedule tests to run - SCHEDULER_IP_ADDRESS,port = setup_socket(self.socket) + SCHEDULER_IP_ADDRESS, port = setup_socket(self.socket) + session_folder = create_folders() n_step = len(items_by_steps) for i_step,items in enumerate(items_by_steps): n_item_to_receive = len(items) - sub_process = submit_items(items, SCHEDULER_IP_ADDRESS, port, self.main_invoke_params, self.ntasks, i_step, n_step) + sub_process = submit_items(items, SCHEDULER_IP_ADDRESS, port, session_folder, self.main_invoke_params, self.ntasks, i_step, n_step) if not self.detach: # The job steps are supposed to send their reports receive_items(session.items, session, self.socket, n_item_to_receive) returncode = sub_process.wait() # at this point, the sub-process should be done since items have been received diff --git a/pytest_parallel/process_scheduler.py b/pytest_parallel/slurm_scheduler.py similarity index 73% rename from pytest_parallel/process_scheduler.py rename to pytest_parallel/slurm_scheduler.py index 9dfdb61..d55f2d0 100644 --- a/pytest_parallel/process_scheduler.py +++ b/pytest_parallel/slurm_scheduler.py @@ -1,29 +1,13 @@ import pytest -import shutil import subprocess import socket import pickle from pathlib import Path -from . 
import socket_utils -from .utils import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index +from .utils.socket import recv as socket_recv, setup_socket +from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index, mark_skip +from .utils.file import remove_exotic_chars, create_folders from .algo import partition - -def mark_skip(item, ntasks): - n_proc_test = get_n_proc_for_test(item) - skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {ntasks} available" - item.add_marker(pytest.mark.skip(reason=skip_msg), append=False) - item.marker_mpi_skip = True - -def replace_sub_strings(s, subs, replacement): - res = s - for sub in subs: - res = res.replace(sub,replacement) - return res - -def remove_exotic_chars(s): - return replace_sub_strings(str(s), ['[',']','/', ':'], '_') - def parse_job_id_from_submission_output(s): # At this point, we are trying to guess -_- # Here we supposed that the command for submitting the job @@ -32,42 +16,8 @@ def parse_job_id_from_submission_output(s): import re return int(re.search(r'\d+', str(s)).group()) - -# https://stackoverflow.com/a/34177358 -def command_exists(cmd_name): - """Check whether `name` is on PATH and marked as executable.""" - return shutil.which(cmd_name) is not None - -def _get_my_ip_address(): - hostname = socket.gethostname() - - assert command_exists('tracepath'), 'pytest_parallel SLURM scheduler: command `tracepath` is not available' - cmd = ['tracepath','-4','-n',hostname] - r = subprocess.run(cmd, stdout=subprocess.PIPE) - assert r.returncode==0, f'pytest_parallel SLURM scheduler: error running command `{" ".join(cmd)}`' - ips = r.stdout.decode("utf-8") - - try: - my_ip = ips.split('\n')[0].split(':')[1].split()[0] - except: - assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' - import ipaddress - try: - ipaddress.ip_address(my_ip) - except ValueError: - assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' - - return my_ip - - -def submit_items(items_to_run, socket, main_invoke_params, ntasks, slurm_conf): - # Find IP our address - SCHEDULER_IP_ADDRESS = _get_my_ip_address() - - # setup master's socket - socket.bind((SCHEDULER_IP_ADDRESS, 0)) # 0: let the OS choose an available port - socket.listen() - port = socket.getsockname()[1] +def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntasks, slurm_conf): + SCHEDULER_IP_ADDRESS, port = setup_socket(socket) # generate SLURM header options if slurm_conf['file'] is not None: @@ -80,8 +30,8 @@ def submit_items(items_to_run, socket, main_invoke_params, ntasks, slurm_conf): slurm_header = '#!/bin/bash\n' slurm_header += '\n' slurm_header += '#SBATCH --job-name=pytest_parallel\n' - slurm_header += '#SBATCH --output=.pytest_parallel/slurm.out\n' - slurm_header += '#SBATCH --error=.pytest_parallel/slurm.err\n' + slurm_header += f'#SBATCH --output=.pytest_parallel/{session_folder}/slurm.out\n' + slurm_header += f'#SBATCH --error=.pytest_parallel/{session_folder}/slurm.err\n' for opt in slurm_conf['options']: slurm_header += f'#SBATCH {opt}\n' slurm_header += f'#SBATCH --ntasks={ntasks}' @@ -93,13 +43,13 @@ def submit_items(items_to_run, socket, main_invoke_params, ntasks, slurm_conf): srun_options = slurm_conf['srun_options'] if srun_options is None: srun_options = '' - socket_flags = f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port}" + socket_flags = 
f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port} --_session_folder={session_folder}" cmds = '' if slurm_conf['additional_cmds'] is not None: cmds += slurm_conf['additional_cmds'] + '\n' for item in items: test_idx = item.original_index - test_out_file = f'.pytest_parallel/{remove_exotic_chars(item.nodeid)}' + test_out_file = f'.pytest_parallel/{session_folder}/{remove_exotic_chars(item.nodeid)}' cmd = '(' cmd += f'srun {srun_options}' cmd += ' --exclusive' @@ -116,24 +66,20 @@ def submit_items(items_to_run, socket, main_invoke_params, ntasks, slurm_conf): job_cmds = f'{slurm_header}\n\n{cmds}' - Path('.pytest_parallel').mkdir(exist_ok=True) - shutil.rmtree('.pytest_parallel/tmp', ignore_errors=True) - Path('.pytest_parallel/tmp').mkdir() - - with open('.pytest_parallel/job.sh','w') as f: + with open(f'.pytest_parallel/{session_folder}/job.sh','w') as f: f.write(job_cmds) # submit SLURM job - with open('.pytest_parallel/env_vars.sh','wb') as f: + with open(f'.pytest_parallel/{session_folder}/env_vars.sh','wb') as f: f.write(pytest._pytest_parallel_env_vars) if slurm_conf['sub_command'] is None: if slurm_conf['export_env']: - sbatch_cmd = 'sbatch --parsable --export-file=.pytest_parallel/env_vars.sh .pytest_parallel/job.sh' + sbatch_cmd = f'sbatch --parsable --export-file=.pytest_parallel/{session_folder}/env_vars.sh .pytest_parallel/{session_folder}/job.sh' else: - sbatch_cmd = 'sbatch --parsable .pytest_parallel/job.sh' + sbatch_cmd = f'sbatch --parsable .pytest_parallel/{session_folder}/job.sh' else: - sbatch_cmd = slurm_conf['sub_command'] + ' .pytest_parallel/job.sh' + sbatch_cmd = slurm_conf['sub_command'] + ' .pytest_parallel/{session_folder}/job.sh' p = subprocess.Popen([sbatch_cmd], shell=True, stdout=subprocess.PIPE) print('\nSubmitting tests to SLURM...') @@ -152,7 +98,7 @@ def receive_items(items, session, socket, n_item_to_recv): while n_item_to_recv>0: conn, addr = socket.accept() with conn: - msg = socket_utils.recv(conn) + msg = socket_recv(conn) test_info = pickle.loads(msg) # the worker is supposed to have send a dict with the correct structured information test_idx = test_info['test_idx'] if test_info['fatal_error'] is not None: @@ -166,7 +112,7 @@ def receive_items(items, session, socket, n_item_to_recv): run_item_test(items[test_idx], nextitem, session) n_item_to_recv -= 1 -class ProcessScheduler: +class SlurmScheduler: def __init__(self, main_invoke_params, ntasks, slurm_conf, detach): self.main_invoke_params = main_invoke_params self.ntasks = ntasks @@ -218,7 +164,8 @@ def pytest_runtestloop(self, session) -> bool: # schedule tests to run n_item_to_receive = len(items_to_run) if n_item_to_receive > 0: - self.slurm_job_id = submit_items(items_to_run, self.socket, self.main_invoke_params, self.ntasks, self.slurm_conf) + session_folder = create_folders() + self.slurm_job_id = submit_items(items_to_run, self.socket, session_folder, self.main_invoke_params, self.ntasks, self.slurm_conf) if not self.detach: # The job steps are supposed to send their reports receive_items(session.items, session, self.socket, n_item_to_receive) diff --git a/pytest_parallel/socket_utils.py b/pytest_parallel/socket_utils.py deleted file mode 100644 index f434270..0000000 --- a/pytest_parallel/socket_utils.py +++ /dev/null @@ -1,29 +0,0 @@ -def send(sock, msg_bytes): - msg_len = len(msg_bytes) - sent = sock.send(msg_len.to_bytes(8,'big')) # send int64 big endian - if sent == 0: - raise RuntimeError('Socket send broken: could not send message size') - - totalsent = 0 - 
while totalsent < msg_len: - sent = sock.send(msg_bytes[totalsent:]) - if sent == 0: - raise RuntimeError('Socket send broken: could not send message') - totalsent = totalsent + sent - -def recv(sock): - msg_len_bytes = sock.recv(8) - if msg_len_bytes == b'': - raise RuntimeError('Socket recv broken: message has no size') - msg_len = int.from_bytes(msg_len_bytes, 'big') - - chunks = [] - bytes_recv = 0 - while bytes_recv < msg_len: - chunk = sock.recv(min(msg_len-bytes_recv, 4096)) - if chunk == b'': - raise RuntimeError('Socket recv broken: could not receive message') - chunks.append(chunk) - bytes_recv += len(chunk) - msg_bytes = b''.join(chunks) - return msg_bytes diff --git a/pytest_parallel/utils/__init__.py b/pytest_parallel/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pytest_parallel/utils/file.py b/pytest_parallel/utils/file.py new file mode 100644 index 0000000..188b931 --- /dev/null +++ b/pytest_parallel/utils/file.py @@ -0,0 +1,18 @@ +from pathlib import Path +import tempfile + +def replace_sub_strings(s, subs, replacement): + res = s + for sub in subs: + res = res.replace(sub,replacement) + return res + +def remove_exotic_chars(s): + return replace_sub_strings(str(s), ['[',']','/', ':'], '_') + + +def create_folders(): + Path('.pytest_parallel').mkdir(exist_ok=True) + session_folder_abs = Path(tempfile.mkdtemp(dir='.pytest_parallel')) + Path(session_folder_abs/'_partial').mkdir() + return session_folder_abs.name diff --git a/pytest_parallel/utils.py b/pytest_parallel/utils/items.py similarity index 74% rename from pytest_parallel/utils.py rename to pytest_parallel/utils/items.py index 0869e8d..25545c5 100644 --- a/pytest_parallel/utils.py +++ b/pytest_parallel/utils/items.py @@ -26,3 +26,10 @@ def run_item_test(item, nextitem, session): def mark_original_index(items): for i, item in enumerate(items): item.original_index = i + + +def mark_skip(item, ntasks): + n_proc_test = get_n_proc_for_test(item) + skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {ntasks} available" + item.add_marker(pytest.mark.skip(reason=skip_msg), append=False) + item.marker_mpi_skip = True diff --git a/pytest_parallel/utils_mpi.py b/pytest_parallel/utils/mpi.py similarity index 100% rename from pytest_parallel/utils_mpi.py rename to pytest_parallel/utils/mpi.py diff --git a/pytest_parallel/utils/socket.py b/pytest_parallel/utils/socket.py new file mode 100644 index 0000000..67adf68 --- /dev/null +++ b/pytest_parallel/utils/socket.py @@ -0,0 +1,70 @@ +import shutil +import socket +import subprocess + +def send(sock, msg_bytes): + msg_len = len(msg_bytes) + sent = sock.send(msg_len.to_bytes(8,'big')) # send int64 big endian + if sent == 0: + raise RuntimeError('Socket send broken: could not send message size') + + totalsent = 0 + while totalsent < msg_len: + sent = sock.send(msg_bytes[totalsent:]) + if sent == 0: + raise RuntimeError('Socket send broken: could not send message') + totalsent = totalsent + sent + +def recv(sock): + msg_len_bytes = sock.recv(8) + if msg_len_bytes == b'': + raise RuntimeError('Socket recv broken: message has no size') + msg_len = int.from_bytes(msg_len_bytes, 'big') + + chunks = [] + bytes_recv = 0 + while bytes_recv < msg_len: + chunk = sock.recv(min(msg_len-bytes_recv, 4096)) + if chunk == b'': + raise RuntimeError('Socket recv broken: could not receive message') + chunks.append(chunk) + bytes_recv += len(chunk) + msg_bytes = b''.join(chunks) + return msg_bytes + + +# https://stackoverflow.com/a/34177358 +def 
command_exists(cmd_name): + """Check whether `name` is on PATH and marked as executable.""" + return shutil.which(cmd_name) is not None + +def _get_my_ip_address(): + hostname = socket.gethostname() + + assert command_exists('tracepath'), 'pytest_parallel SLURM scheduler: command `tracepath` is not available' + cmd = ['tracepath','-4','-n',hostname] + r = subprocess.run(cmd, stdout=subprocess.PIPE) + assert r.returncode==0, f'pytest_parallel SLURM scheduler: error running command `{" ".join(cmd)}`' + ips = r.stdout.decode("utf-8") + + try: + my_ip = ips.split('\n')[0].split(':')[1].split()[0] + except: + assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' + import ipaddress + try: + ipaddress.ip_address(my_ip) + except ValueError: + assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' + + return my_ip + +def setup_socket(socket): + # Find our IP address + SCHEDULER_IP_ADDRESS = _get_my_ip_address() + + # setup master's socket + socket.bind((SCHEDULER_IP_ADDRESS, 0)) # 0: let the OS choose an available port + socket.listen() + port = socket.getsockname()[1] + return SCHEDULER_IP_ADDRESS, port From cc1ef6ce56ad89fc9f962af9f48776b93acadd55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Wed, 8 Jan 2025 10:24:19 +0100 Subject: [PATCH 07/14] Update readme + do not use 'resource' package nor 'MPI_Comm_create' on Windows --- README.md | 7 ++++++- pytest_parallel/mpi_reporter.py | 27 +++++++++++++-------------- pytest_parallel/plugin.py | 10 ++++++++-- 3 files changed, 27 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index afd2a82..9e25cd4 100644 --- a/README.md +++ b/README.md @@ -221,7 +221,11 @@ SLURM takes care of the scheduling. This scheduler as specific options: ### Which MPI implementation is supported? -**pytest_parallel** has currently be tested only with OpenMPI and Intel MPI. Other MPI implementation are also supposed to work. An exception is the `shell` scheduler that use implementation-specific environment variables to pin the processes to cores. Feel free to give use feedback/patches. +**pytest_parallel** has currently been tested and is used daily with OpenMPI and Intel MPI. MPICH is also regularly tested though the GitHub CI with the process-reuse schedulers. Other MPI implementations are also supposed to work. An exception is the `shell` scheduler that use implementation-specific environment variables to pin the processes to cores. Feel free to give use feedback/patches. + +### Which OS is supported? + +**pytest_parallel** is daily tested and used on Linux machines. However, on the GitHub CI, the `sequential`, `static` and `dynamic` schedulers work with Mac OS, and the `sequential` and `static` schedulers work with Windows. ### Which job scheduler is available? 
@@ -383,3 +387,4 @@ Any contributions are welcome: bug report, feature requests, general feedback, p @pytest_parallel.mark.parallel(4, exclusive_numa_domain='cpu') ``` * Dynamic scheduler: more asynchrony (send -> isend) +* Add the process-isolate scheduler to the CI \ No newline at end of file diff --git a/pytest_parallel/mpi_reporter.py b/pytest_parallel/mpi_reporter.py index 2ac125b..8075a1f 100644 --- a/pytest_parallel/mpi_reporter.py +++ b/pytest_parallel/mpi_reporter.py @@ -1,5 +1,6 @@ import numpy as np import pytest +import sys from mpi4py import MPI from .algo import partition, lower_bound @@ -28,7 +29,8 @@ def create_sub_comm_of_size(global_comm, n_proc, mpi_comm_creation_function): if mpi_comm_creation_function == 'MPI_Comm_create': return sub_comm_from_ranks(global_comm, range(0,n_proc)) elif mpi_comm_creation_function == 'MPI_Comm_split': - if i_rank < n_proc_test: + i_rank = global_comm.rank + if i_rank < n_proc: color = 1 else: color = MPI.UNDEFINED @@ -71,12 +73,17 @@ def add_sub_comm(items, global_comm, test_comm_creation, mpi_comm_creation_funct assert 0, 'Unknown test MPI communicator creation strategy. Available: `by_rank`, `by_test`' class SequentialScheduler: - def __init__(self, global_comm, test_comm_creation='by_rank', mpi_comm_creation_function='MPI_Comm_create', barrier_at_test_start=True, barrier_at_test_end=True): + def __init__(self, global_comm): self.global_comm = global_comm.Dup() # ensure that all communications within the framework are private to the framework - self.test_comm_creation = test_comm_creation - self.mpi_comm_creation_function = mpi_comm_creation_function - self.barrier_at_test_start = barrier_at_test_start - self.barrier_at_test_end = barrier_at_test_end + + # These parameters are not accessible through the API, but are left here for tweaking and experimenting + self.test_comm_creation = 'by_rank' # possible values : 'by_rank' | 'by_test' + self.mpi_comm_creation_function = 'MPI_Comm_create' # possible values : 'MPI_Comm_create' | 'MPI_Comm_split' + self.barrier_at_test_start = True + self.barrier_at_test_end = True + if sys.platform == "win32": + self.mpi_comm_creation_function = 'MPI_Comm_split' # because 'MPI_Comm_create' uses `Create_group`, + # that is not implemented in mpi4py for Windows @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(self, config, items): @@ -222,14 +229,6 @@ def pytest_runtestloop(self, session) -> bool: ) for i, item in enumerate(items): - # nextitem = items[i + 1] if i + 1 < len(items) else None - # For optimization purposes, it would be nice to have the previous commented line - # (`nextitem` is only used internally by PyTest in _setupstate.teardown_exact) - # Here, it does not work: - # it seems that things are messed up on rank 0 - # because the nextitem might not be run (see pytest_runtest_setup/call/teardown hooks just above) - # In practice though, it seems that it is not the main thing that slows things down... 
- nextitem = None run_item_test(item, nextitem, session) diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py index 5c515c4..b8215bd 100644 --- a/pytest_parallel/plugin.py +++ b/pytest_parallel/plugin.py @@ -7,7 +7,6 @@ import tempfile from pathlib import Path import argparse -import resource import pytest from _pytest.terminal import TerminalReporter @@ -77,12 +76,19 @@ def _invoke_params(args): quoted_invoke_params.append(arg) return ' '.join(quoted_invoke_params) +# -------------------------------------------------------------------------- +def _set_timeout(timeout): + if sys.platform != "win32": + import resource + resource.setrlimit(resource.RLIMIT_CPU, (timeout, timeout)) + # if windows, we don't know how to do that + # -------------------------------------------------------------------------- @pytest.hookimpl(trylast=True) def pytest_configure(config): # Set timeout timeout = config.getoption('timeout') - resource.setrlimit(resource.RLIMIT_CPU, (timeout, timeout)) + _set_timeout(timeout) # Get options and check dependent/incompatible options scheduler = config.getoption('scheduler') From be408896e298ebb52d3a8dd55242538de4f85571 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Mon, 13 Jan 2025 17:21:04 +0100 Subject: [PATCH 08/14] Address review comments --- README.md | 110 ++++++++++++---------- pytest_parallel/mpi_reporter.py | 11 +-- pytest_parallel/plugin.py | 79 +++++++++------- pytest_parallel/send_report.py | 25 +++-- pytest_parallel/shell_static_scheduler.py | 13 ++- pytest_parallel/slurm_scheduler.py | 31 ++---- pytest_parallel/utils/file.py | 2 +- 7 files changed, 140 insertions(+), 131 deletions(-) diff --git a/README.md b/README.md index 9e25cd4..468ff2d 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ pytest_parallel ## Quick start ## -**pytest_parallel** automates the execution of MPI parallel tests. Let's say you want to precisely test several algorithms, each with a specific number of processes: +**pytest_parallel** automates the execution of MPI parallel tests. Let's say you want to test several algorithms, each with a specific number of processes: ```Python import pytest_parallel @@ -32,12 +32,12 @@ def test_D(): assert False ``` -Tests decorated with `@pytest_parallel.mark.parallel(N)` are specified to run in parallel on `N` processes. The `comm` fixture is a communicator private to the test, with `comm.size == N`. +Tests decorated with `@pytest_parallel.mark.parallel(N)` are specified to run in parallel on `N` processes. The `comm` fixture is a communicator created specifically for the test, satisfying `comm.size == N`. Sequential tests do not need a communicator and will be run as usual on one process. You can run the tests with MPI, e.g. with: ```Bash -mpirun -np 4 pytest --color=yes -vv test_pytest_parallel.py +mpirun -np 4 pytest --color=yes -vv path/to/tests ``` And the following output will be produced: @@ -48,7 +48,7 @@ If there is not enough MPI processes to run some tests, they will be skipped. For instance, the following command: ```Bash -mpirun -np 1 pytest --color=yes test_pytest_parallel.py +mpirun -np 1 pytest --color=yes path/to/tests ``` would lead to: @@ -97,6 +97,10 @@ export PYTEST_PLUGINS=pytest_parallel.plugin ## Schedulers ## +The job of **pytest_parallel** is to run `Nt` tests by using `Np` processes that the user asked for. For that, the multiple tests that need to be run have to be scheduled. **pytest_parallel** has 5 different schedulers that the user can select from. 
Schedulers are divided in two families:
+- [process-reuse schedulers](#process-reuse-schedulers), efficient for many tests that are very fast (typically unit tests)
+- [process-isolate schedulers](#process-isolate-schedulers), more robust and able to report test crashes, more scalable (able to run on multiple compute nodes), but also more heavy-weight
+
 ### Process-reuse schedulers ###

 Process-reuse schedulers are mostly useful when you have numerous tests that are very fast, typically unit tests.
@@ -104,19 +108,19 @@ Process-reuse schedulers are mostly useful when you have numerous tests that are
 For these schedulers, **pytest_parallel** is always launched through MPI, e.g.:

 ```Bash
-mpirun -np 4 pytest test_pytest_parallel.py
+mpirun -np 4 pytest path/to/tests
 ```

-Here, 4 MPI processes have been spawn by `mpirun`, and **pytest_parallel** will use these 4 processes to run all the tests. This means that one process (let's say the one on rank 2 of `MPI.COMM_WORLD`) will likely be used by several tests.
+Here, 4 MPI processes have been spawned by `mpirun`, and **pytest_parallel** will use them to run all the tests. Since `test_A` uses 2 processes, `test_B` and `test_D` use 1 process each (they are sequential) and `test_C` uses 3 processes, we need `2+1+3+1 == 7` processes. This means some processes will be used by multiple tests.

 The main advantage is that the Python environment is loaded once and for all, hence if you have 1000 tests that take 1 millisecond each, and the loading of all your Python modules by the interpreter takes 1 second, then running PyTest will take approximately 2 seconds.

-However, the tests are not completely isolated, so if one test crash (e.g. due to a segmentation fault), the segfault message may not point you directly to the faulty test. Deadlocks will also be difficult to pinpoint.
+However, the tests are not completely isolated, so if one test crashes (e.g. due to an unrecoverable error), the error message may not point you directly to the faulty test. Deadlocks will also be difficult to pinpoint.

 There are 3 kinds of process-reuse schedulers:
 - the [sequential scheduler](#sequential-scheduler)
 - the [static scheduler](#static-scheduler)
-- the [dynamic scheduler](#dynamic-scheduler)
+- the [dynamic scheduler](#dynamic-scheduler) (this is the default one)

 To understand how they work, let's again our previous example:

@@ -142,10 +146,9 @@ and run it on 4 processes.

 #### Sequential scheduler ####

-The **sequential** scheduler is the default one. To enable it explicitly, you can pass the `--scheduler=sequential` option to PyTest.
-
+The **sequential** scheduler can be selected with:
 ```Bash
-mpirun -np 4 pytest --scheduler=sequential test_pytest_parallel.py
+mpirun -np 4 pytest --scheduler=sequential path/to/tests
 ```

 This scheduler just takes each test in order, one by one, and executes on as many processes it needs. The other processes are sleeping. On our example, this would result in the following sequence diagram:
@@ -157,9 +160,9 @@ While it is not optimized for performance, the sequential scheduler is very usef

 #### Static scheduler ####

-The static scheduler can be selected with:
+The **static** scheduler can be selected with:
 ```Bash
-mpirun -np 4 pytest --scheduler=static test_pytest_parallel.py
+mpirun -np 4 pytest --scheduler=static path/to/tests
 ```

 The **static** scheduler tries to distribute tests to minimize the number of idle processes.
The process is static, that is, after test collection, it determines which process will execute which test, and in which order. On our example, it will result in the following scheduling:
@@ -167,24 +170,27 @@ The **static** scheduler tries to distribute tests to minimize the number of idl

 The scheduler works by steps. Each step has `n_worker` slots (`n_worker` being the number of processes that PyTest was launched with). Each test will try to find a step with enough slots and will consume `n_proc` slots on the step. If no step is found, a new one is created, until each test has a step.

-While this scheduler is more optimized, it gives an *a priori* scheduling, hence it is not optimal depending on the duration of the tests. Let's look again at our example, but let's say `test_B` takes much longer than the others. We will then have the following sequence:
+While this scheduler is more optimized, it gives an *a priori* scheduling, hence it is not optimal depending on the duration of the tests. Let's look again at our example, but let's say `test_B` and `test_D` take longer than the others. We will then have the following sequence:

 ![static scheduler sequence diagram - bad case](doc/images/static_bad.png)

-We see that processes 0,1 and 2 wait for process 3 to finish the first step, whereas they could do meaningful work.
+We see that processes 0,1 and 2 wait for process 3 to finish the first step, whereas they could do meaningful work in the meantime.

 ### Dynamic scheduler ###

-The **dynamic** scheduler can be selected with:
+The **dynamic** scheduler is the default one. To enable it explicitly, you can pass the `--scheduler=dynamic` option to PyTest:
+
 ```Bash
-mpirun -np 4 pytest --scheduler=dynamic test_pytest_parallel.py
+mpirun -np 4 pytest --scheduler=dynamic path/to/tests
 ```

-The scheduler spawns a new MPI process which acts as the master scheduler and sends work to the original processes. The scheduler tries to schedule tests requiring the most processes first. It sends work to idle processes until all the processes are busy executing a test, or when not enough processes are ready to accept a test. It then waits for a signal that workers have finished their test to schedule further work.
+The scheduler spawns a new MPI process which acts as the master scheduler and sends work to the original processes. The scheduler tries to schedule tests requiring the most processes first. It sends work to idle processes until all the processes are busy executing a test, or until not enough processes are ready to accept a test. It then waits for a signal that workers have finished their test to schedule further work.

 Example:

 ![dynamic scheduler sequence diagram](doc/images/dyn_anim.png)

+When tests have significantly different durations, the dynamic scheduler is more efficient than the static scheduler. It has, however, a slightly longer startup (because it needs to spawn the master process).
+
 ### Process-isolate schedulers ###

 Process-isolate schedulers spawn a new process for each new test. Or more exactly, for a test that is specified to use a communicator of size `N`, **pytest_parallel** will launch `N` MPI processes just for this test, and it will do so for each test of the test suite.
@@ -193,46 +199,44 @@ Or course, these schedulers are more robust: even if a test crashes with an irre

 If you use a process-isolate scheduler, contrary to process-reuse schedulers, you don't launch PyTest through `mpirun -np N`. Rather, you launch PyTest directly and specify the `--n-workers` parameter.
-There are 2 kinds of process-isolate schedulers:
-- the shell scheduler
-- the SLURM scheduler
+There are 2 kinds of process-isolate schedulers: the **shell** scheduler and the **SLURM** scheduler.

 #### Shell scheduler ###

 The **shell** scheduler can be selected with:
 ```Bash
-pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py
+pytest --n-workers=4 --scheduler=shell path/to/tests
 ```

 The scheduling algorithm is the same as the [static scheduler](#static-scheduler).

 #### SLURM scheduler ###

 The **SLURM** scheduler can be selected with:
 ```Bash
-pytest --n-workers=4 --scheduler=slurm test_pytest_parallel.py
+pytest --n-workers=4 --scheduler=slurm path/to/tests
 ```

-SLURM takes care of the scheduling. This scheduler as specific options:
-- `--slurm-options`: a list options for `sbatch`. For example: `--slurm-options="--time=00:30:00 --qos=my_queue"`. Do **not** specify `--ntasks` here, **pytest_parallel** will use the value given by `--n-workers`.
-- `--slurm-srun-options`: a list options for `srun`. For example: `--slurm-srun-options="--mem-per-cpu=4GBb"`
-- `--slurm-export-env`: should the SLURM job use the same environment as the terminal that spawned it? Enabled by default.
-- `--slurm-additional-cmds`: commands to pass to the SLURM job that should be executed before the tests. Example: `--slurm-additional-cmds="source my_env.sh"`
+SLURM takes care of the scheduling. This scheduler has specific options:
+- `--slurm-options`: a list of arguments passed to [sbatch](https://slurm.schedmd.com/sbatch.html), for example `--slurm-options="--time=00:30:00 --qos=my_queue"`. Do **not** specify `--ntasks` here, since **pytest_parallel** will use the value given by `--n-workers`.
+- `--slurm-srun-options`: a list of options for [`srun`](https://slurm.schedmd.com/srun.html). For example: `--slurm-srun-options="--mem-per-cpu=4GB"`
+- `--slurm-export-env`: should the SLURM job use the same environment as the terminal that spawned it? Enabled by default. Use `--no-slurm-export-env` to disable.
+- `--slurm-init-cmds`: commands to pass to the SLURM job that should be executed before the tests. Example: `--slurm-init-cmds="source my_env.sh"`

 ## FAQ ##

 ### Which MPI implementation is supported?

-**pytest_parallel** has currently been tested and is used daily with OpenMPI and Intel MPI. MPICH is also regularly tested though the GitHub CI with the process-reuse schedulers. Other MPI implementations are also supposed to work. An exception is the `shell` scheduler that use implementation-specific environment variables to pin the processes to cores. Feel free to give use feedback/patches.
+**pytest_parallel** has currently been tested and is used daily with OpenMPI and Intel MPI. MPICH is also regularly tested through the GitHub CI with the process-reuse schedulers. Other MPI implementations are supposed to work but have not been tested. An exception is the `shell` process-isolate scheduler that uses implementation-specific environment variables to pin the processes to cores. Feel free to give us feedback/patches.

 ### Which OS is supported?

-**pytest_parallel** is daily tested and used on Linux machines. However, on the GitHub CI, the `sequential`, `static` and `dynamic` schedulers work with Mac OS, and the `sequential` and `static` schedulers work with Windows.
+**pytest_parallel** is daily tested and used on Linux machines. However, on the GitHub CI, the `sequential`, `static` and `dynamic` schedulers work with macOS, and the `sequential` and `static` schedulers work with Windows.
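
The Windows restrictions mostly come from POSIX-only pieces of the plugin (for instance the `resource` module used for the CPU-time limit, and `Create_group`, which mpi4py does not implement on Windows, see the `mpi_reporter.py` diff above). These are guarded behind platform checks; the following sketch simply restates the `_set_timeout` guard added to `plugin.py` above, for illustration:

```Python
import sys

def _set_timeout(timeout):
    # 'resource' is POSIX-only: enforce the CPU-time limit on Linux/macOS,
    # and simply skip it on Windows (same approach as in plugin.py above).
    if sys.platform != "win32":
        import resource
        resource.setrlimit(resource.RLIMIT_CPU, (timeout, timeout))
```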
### Which job scheduler is available? Currently SLURM is the only job scheduler available. Other job schedulers (PBS, LFS...) are not supported currently. If you don't use SLURM, the `shell` scheduler may be enought for your tests as long as you dont want to use more than one compute node. -### **pytest_parallel** gives me a new communicator for each test, but my code only uses `MPI.COMM_WORLD`, how can I use **pytest_parallel**? +### **pytest_parallel** gives me a new communicator for each test, but my project only uses `MPI.COMM_WORLD`, how can I use **pytest_parallel**? The [process-isolate schedulers](#process-isolate-schedulers) can be used with tests using different sizes of `MPI.COMM_WORLD`. The `comm` fixture can then be discarded: @@ -240,22 +244,30 @@ The [process-isolate schedulers](#process-isolate-schedulers) can be used with t import pytest_parallel from mpi4py import MPI -@pytest_parallel.mark.parallel(2) -def test_fail_one_rank(comm): - assert comm.size == MPI.COMM_WORLD.size +@pytest_parallel.mark.parallel(3) +def test_using_comm_world(comm): + # `comm` is unused but you currently need to write it down anyway + # because it is a fixture that comes with `@pytest_parallel.mark.parallel` my_algo_implicitly_using_MPI_COMM_WORLD() +``` + +If you select only one test (e.g. because you are debugging this one), then you can also use a process-reuse scheduler. + +It may be good practice to assert the `comm` fixture has the correct size: +```Python @pytest_parallel.mark.parallel(3) -def test_fail_one_rank(comm): - # `comm` is unused but you currently need to write it down anyway - my_other_algo_implicitly_using_MPI_COMM_WORLD() +def test_using_comm_world(comm): + assert comm.size == MPI.COMM_WORLD.size # good practice, fails with a process-reuse scheduler if different communicator sizes + my_algo_implicitly_using_MPI_COMM_WORLD() ``` +This way, you will get a meaningful error message if you accidentally run multiple incompatible tests with a process-reuse scheduler. ```Bash -mpirun -np 4 pytest --scheduler=shell test_pytest_parallel.py +mpirun -np 4 pytest --scheduler=shell path/to/tests ``` -For unit tests, process-isolate schedulers are very slow, and **[process-reuse schedulers](#process-reuse-schedulers) will not work**. We really encourage you to generalize your function with an additional `comm` argument that is used for communication, rather than forcing your users to `MPI.COMM_WORLD`. +For unit tests, process-isolate schedulers are very slow, and **[process-reuse schedulers](#process-reuse-schedulers) will not work**. We really encourage you to generalize your function with an additional `comm` argument that is used for communication, rather than forcing your users to use `MPI.COMM_WORLD`. It would be possible to develop hybrid process-reuse schedulers where processes are re-used, but only among tests of the same communicator size (and repeat the operation for as many communicator sizes there are on the test suite). If you feel the need, write a feature request and maybe we will implement it. @@ -280,7 +292,7 @@ The [shell scheduler](#shell-scheduler) uses the same scheduling algorithm as th On some systems, using `mpi4py` without `mpirun` does not work. 
For example, using:

 ```Bash
-pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py
+pytest --n-workers=4 --scheduler=shell path/to/tests
 ```

 produces the following error:
@@ -298,7 +310,7 @@ developer):
 In this case, try:

 ```Bash
-mpirun -np 1 pytest --n-workers=4 --scheduler=shell test_pytest_parallel.py
+mpirun -np 1 pytest --n-workers=4 --scheduler=shell path/to/tests
 ```

 ### Can I use **pytest_parallel** with MPI and OpenMP/pthreads/TBB?
@@ -318,11 +330,11 @@ No other plugin has been tested, feedback is welcomed.
 We use PyTest hooks to schedule tests and gather report information from remote processes. That is, mainly:
   * either `pytest_collection_modifyitems` or `pytest_runtestloop` to schedule tests
   * `pytest_runtest_logreport` to gather test reports
-  * `pytest_pyfunc_call` to prevent the actual test code to be executed when it is actually executed on the other process
+  * `pytest_pyfunc_call` to prevent the actual test code from being executed when it is actually executed on the other process.

 PyTest expects its complete "pipeline" to be executed for all messages to be reported correctly, so we have to trick it:
-  * make it think that every test was run on the master rank.
-  * de-activate logging on other ranks
+  * make it think that every test was run on the master rank
+  * de-activate logging on other ranks.

 ### Implementation of process-isolate schedulers ###

@@ -332,9 +344,9 @@ In both cases, we have a master process (the one launched by the user) that will

 The master and worker processes exchange information through sockets. Master creates a socket, then spawns workers by giving them the information of the socket (its ip and port), so that they can connect to it to send their report.

-Actually, in order to correctly report crashes, the report of each test is created in two steps:
-- First, `pytest --_worker` is launched. It runs the test `t` and writes a report at each of its stages `s` (`setup`/`call`/`teardown`) in the file `.pytest_parallel/tmp/{t}_{s}`.
-- Then, when `pytest --_worker` is done for the test (either because it finished or because it crashed), the `pytest_parallel.send_report` module is run. It looks for the files that `pytest --_worker` has supposedly written. If the files are there, they are sent to master through the socket. If one of the file is missing, it means that the process crashed. In this case, a crash report is created and is sent to master through the socket.
+Actually, in order to correctly report crashes, each test `t` using `Nt` processes is run and reported in two steps:
+- First, `pytest --_worker` is launched `Nt` times by MPI. They run the test `t`, and for each of the test stages `s` (`setup`/`call`/`teardown`), rank 0 writes a report in the file called `.pytest_parallel/tmp/{t}_{s}`.
+- Then, when the `pytest --_worker` processes are done for the test (either because they finished or because they crashed), the `pytest_parallel.send_report` module is run. It looks for the files that rank 0 of `pytest --_worker` has supposedly written. If the files are there, they are sent to master through the socket. If one of the files is missing, it means that the worker processes have crashed. In this case, a crash report is created and is sent to master through the socket.
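
To make these two steps concrete, here is a condensed sketch of what the second step (the `pytest_parallel.send_report` module) boils down to. This is an illustration only, not the exact code added by this series: the helper name is hypothetical, error handling is simplified, and the length-prefixed framing mirrors `utils/socket.py`:

```Python
import pickle
import socket
from pathlib import Path

def collect_and_send_report(tmp_dir, test_idx, scheduler_ip, scheduler_port):
    test_info = {'test_idx': test_idx, 'fatal_error': None}
    for when in ('setup', 'call', 'teardown'):
        stage_file = Path(tmp_dir) / f'{test_idx}_{when}'
        if stage_file.exists():
            # step 1 wrote this file: rank 0 of the worker pickled the stage report
            test_info[when] = pickle.loads(stage_file.read_bytes())
        else:
            # missing file: the worker processes crashed before finishing this stage
            test_info[when] = {'outcome': 'failed',
                               'longrepr': f'Error: the test crashed during {when}.',
                               'duration': 0}
    msg = pickle.dumps(test_info)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((scheduler_ip, scheduler_port))
        # 8-byte big-endian length prefix, then the payload, as in utils/socket.py
        s.sendall(len(msg).to_bytes(8, 'big') + msg)
```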
Note that with MPI, errors are fatal, so if any process fails, then all the processes fail, and since rank 0 needs to wait for all processes to contribute their part of the report, if one fails during step `s`, rank 0 will fail **before** writing anything to the `.pytest_parallel/tmp/{t}_{s}` file.

 #### Fake-running a test on master ####
@@ -357,9 +369,9 @@ The master process gather tests in "steps" according to the [static scheduling](

 File: `process_scheduler.py`

-The master process writes a SLURM job `.pytest_parallel/job.sh` that is submitted through `sbatch`. In the job, to each test corresponds a "job step" launched with `srun --exclusive [...] &`. The `--exclusive` and `&` enables SLURM to schedule the job steps as best as it can. The job then waits for all the tests to finish (this is the `wait` command at the end of the script). (Note: contrary to `sbatch`, the `--exclusive` option of `srun` does not mean that we want the ressource to be exclusive to the job)
+The master process writes a SLURM job `.pytest_parallel/job.sh` that is submitted through `sbatch`. In the job, to each test corresponds a "job step" launched with `srun --exclusive [...] &`. The `--exclusive` and `&` enable SLURM to schedule the job steps as best as it can. The job then waits for all the tests to finish (this is the `wait` command at the end of the script). Note: contrary to `sbatch`, the `--exclusive` option of `srun` enables job **steps** to be run in parallel. It does not mean that we want the resource to be exclusive to the job (see [here](https://stackoverflow.com/a/45886983/1583122) and [here](https://stackoverflow.com/a/73970255/1583122)).

-Once submitted, the master process wait to receive test reports. Each time it receives a report, it treats it immediately ([by fake-running the test](fake-running-a-test-on-master)).
+Once submitted, the master process waits to receive test reports. Each time it receives a report, it treats it immediately ([by fake-running the test](fake-running-a-test-on-master)).

 ### Performance detail ###

diff --git a/pytest_parallel/mpi_reporter.py b/pytest_parallel/mpi_reporter.py
index 8075a1f..51a65d5 100644
--- a/pytest_parallel/mpi_reporter.py
+++ b/pytest_parallel/mpi_reporter.py
@@ -1,4 +1,3 @@
-import numpy as np
 import pytest
 import sys
 from mpi4py import MPI
@@ -321,7 +320,7 @@ def schedule_test(item, available_procs, inter_comm):

     # mark the procs as busy
     for sub_rank in sub_ranks:
-        available_procs[sub_rank] = False
+        available_procs[sub_rank] = 0

     # TODO isend would be slightly better (less waiting)
     for sub_rank in sub_ranks:
@@ -357,7 +356,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm):

     # the procs are now available
     for sub_rank in sub_ranks:
-        available_procs[sub_rank] = True
+        available_procs[sub_rank] = 1

     # "run" the test (i.e.
trigger PyTest pipeline but do not really run the code) nextitem = None # not known at this point @@ -365,7 +364,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm): def wait_last_tests_to_complete(items_to_run, session, available_procs, inter_comm): - while np.sum(available_procs) < len(available_procs): + while sum(available_procs) < len(available_procs): wait_test_to_complete(items_to_run, session, available_procs, inter_comm) @@ -450,10 +449,10 @@ def pytest_runtestloop(self, session) -> bool: # schedule tests to run items_left_to_run = sorted(items_to_run, key=lambda item: item.n_proc) - available_procs = np.ones(n_workers, dtype=np.int8) + available_procs = [1] * n_workers while len(items_left_to_run) > 0: - n_av_procs = np.sum(available_procs) + n_av_procs = sum(available_procs) item_idx = item_with_biggest_admissible_n_proc(items_left_to_run, n_av_procs) diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py index b8215bd..2a8d9ef 100644 --- a/pytest_parallel/plugin.py +++ b/pytest_parallel/plugin.py @@ -10,6 +10,9 @@ import pytest from _pytest.terminal import TerminalReporter +class PytestParallelError(ValueError): + pass + # -------------------------------------------------------------------------- def pytest_addoption(parser): parser.addoption( @@ -26,9 +29,8 @@ def pytest_addoption(parser): parser.addoption('--slurm-options', dest='slurm_options', type=str, help='list of SLURM options e.g. "--time=00:30:00 --qos=my_queue"') parser.addoption('--slurm-srun-options', dest='slurm_srun_options', type=str, help='list of SLURM srun options e.g. "--mem-per-cpu=4GB"') - parser.addoption('--slurm-additional-cmds', dest='slurm_additional_cmds', type=str, help='list of commands to pass to SLURM job e.g. "source my_env.sh"') + parser.addoption('--slurm-init-cmds', dest='slurm_init_cmds', type=str, help='list of commands to pass to SLURM job e.g. "source my_env.sh"') parser.addoption('--slurm-file', dest='slurm_file', type=str, help='Path to file containing header of SLURM job') # TODO DEL - parser.addoption('--slurm-sub-command', dest='slurm_sub_command', type=str, help='SLURM submission command (defaults to `sbatch`)') # TODO DEL if sys.version_info >= (3,9): parser.addoption('--slurm-export-env', dest='slurm_export_env', action=argparse.BooleanOptionalAction, default=True) @@ -51,19 +53,21 @@ def pytest_addoption(parser): # because it can mess SLURM `srun` if "--scheduler=slurm" in sys.argv: assert 'mpi4py.MPI' not in sys.modules, 'Internal pytest_parallel error: mpi4py.MPI should not be imported' \ - ' when we are about to register and environment for SLURM' \ + ' when we are about to register an environment for SLURM' \ ' (because importing mpi4py.MPI makes the current process look like and MPI process,' \ ' and SLURM does not like that)' - assert os.getenv('I_MPI_MPIRUN') is None, 'Internal pytest_parallel error: the environment variable I_MPI_MPIRUN is set' \ - f' with value "{os.getenv("I_MPI_MPIRUN")}"' \ - ' while pytest was invoked with "--scheduler=slurm".\n' \ - ' This indicates that pytest was run through MPI, and SLURM generally does not like that.\n' \ - ' With "--scheduler=slurm", just run `pytest` directly, not through `mpirun/mpiexec/srun`,\n' \ - ' because it will launch MPI itself (you may want to use --n-workers=).' 
+ if os.getenv('I_MPI_MPIRUN') is not None: + err_msg = 'Internal pytest_parallel error: the environment variable I_MPI_MPIRUN is set' \ + f' (it has value "{os.getenv("I_MPI_MPIRUN")}"),\n' \ + ' while pytest was invoked with "--scheduler=slurm".\n' \ + ' This indicates that pytest was run through MPI, and SLURM generally does not like that.\n' \ + ' With "--scheduler=slurm", just run `pytest` directly, not through `mpirun/mpiexec/srun`,\n' \ + ' because it will launch MPI itself (you may want to use --n-workers=).' + raise PytestParallelError(err_msg) r = subprocess.run(['env','--null'], stdout=subprocess.PIPE) # `--null`: end each output line with NUL, required by `sbatch --export-file` - assert r.returncode==0, 'SLURM scheduler: error when writing `env` to `pytest_slurm/env_vars.sh`' + assert r.returncode==0, 'Internal pytest_parallel SLURM schedule error: error when writing `env` to `pytest_slurm/env_vars.sh`' pytest._pytest_parallel_env_vars = r.stdout # -------------------------------------------------------------------------- @@ -95,40 +99,50 @@ def pytest_configure(config): n_workers = config.getoption('n_workers') slurm_options = config.getoption('slurm_options') slurm_srun_options = config.getoption('slurm_srun_options') - slurm_additional_cmds = config.getoption('slurm_additional_cmds') + slurm_init_cmds = config.getoption('slurm_init_cmds') is_worker = config.getoption('_worker') slurm_file = config.getoption('slurm_file') slurm_export_env = config.getoption('slurm_export_env') - slurm_sub_command = config.getoption('slurm_sub_command') detach = config.getoption('detach') if scheduler != 'slurm' and scheduler != 'shell': - assert not is_worker, 'Option `--slurm-worker` only available when `--scheduler=slurm` or `--scheduler=shell`' + assert not is_worker, f'Internal pytest_parallel error `--_worker` not available with`--scheduler={scheduler}`' if (scheduler == 'slurm' or scheduler == 'shell') and not is_worker: - assert n_workers, f'You need to specify `--n-workers` when `--scheduler={scheduler}`' + if n_workers is None: + raise PytestParallelError(f'You need to specify `--n-workers` when `--scheduler={scheduler}`') if scheduler != 'slurm': - assert not slurm_options, 'Option `--slurm-options` only available when `--scheduler=slurm`' - assert not slurm_srun_options, 'Option `--slurms-run-options` only available when `--scheduler=slurm`' - assert not slurm_additional_cmds, 'Option `--slurm-additional-cmds` only available when `--scheduler=slurm`' - assert not slurm_file, 'Option `--slurm-file` only available when `--scheduler=slurm`' + if slurm_options is not None: + raise PytestParallelError('Option `--slurm-options` only available when `--scheduler=slurm`') + if slurm_srun_options is not None: + raise PytestParallelError('Option `--slurms-run-options` only available when `--scheduler=slurm`') + if slurm_init_cmds is not None: + raise PytestParallelError('Option `--slurm-init-cmds` only available when `--scheduler=slurm`') + if slurm_file is not None: + raise PytestParallelError('Option `--slurm-file` only available when `--scheduler=slurm`') if (scheduler == 'shell' or scheduler == 'slurm') and not is_worker: from mpi4py import MPI - assert MPI.COMM_WORLD.size == 1, 'Do not launch `pytest_parallel` on more that one process\n' \ - 'when `--scheduler=shell` or `--scheduler=slurm`.\n' \ - '`pytest_parallel` spawns MPI processes itself.\n' \ - f'You may want to use --n-workers={MPI.COMM_WORLD.size}.' 
+ if MPI.COMM_WORLD.size != 1: + err_msg = 'Do not launch `pytest_parallel` on more that one process when `--scheduler=shell` or `--scheduler=slurm`.\n' \ + '`pytest_parallel` will spawn MPI processes itself.\n' \ + f'You may want to use --n-workers={MPI.COMM_WORLD.size}.' + raise PytestParallelError(err_msg) if scheduler == 'slurm' and not is_worker: - assert slurm_options or slurm_file, 'You need to specify either `--slurm-options` or `--slurm-file` when `--scheduler=slurm`' + if slurm_options is None and slurm_file is None: + raise PytestParallelError('You need to specify either `--slurm-options` or `--slurm-file` when `--scheduler=slurm`') if slurm_options: - assert not slurm_file, 'You need to specify either `--slurm-options` or `--slurm-file`, but not both' + if slurm_file: + raise PytestParallelError('You need to specify either `--slurm-options` or `--slurm-file`, but not both') if slurm_file: - assert not slurm_options, 'You need to specify either `--slurm-options` or `--slurm-file`, but not both' - assert not slurm_additional_cmds, 'You cannot specify `--slurm-additional-cmds` together with `--slurm-file`' + if slurm_options: + raise PytestParallelError('You need to specify either `--slurm-options` or `--slurm-file`, but not both') + if slurm_init_cmds: + raise PytestParallelError('You cannot specify `--slurm-init-cmds` together with `--slurm-file`') - assert '-n=' not in slurm_options and '--ntasks=' not in slurm_options, 'Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).' + if '-n=' in slurm_options or '--ntasks=' in slurm_options: + raise PytestParallelError('Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).') from .slurm_scheduler import SlurmScheduler @@ -143,12 +157,11 @@ def pytest_configure(config): main_invoke_params = main_invoke_params.replace(file_or_dir, '') slurm_option_list = slurm_options.split() if slurm_options is not None else [] slurm_conf = { - 'options' : slurm_option_list, - 'srun_options' : slurm_srun_options, - 'additional_cmds': slurm_additional_cmds, - 'file' : slurm_file, - 'export_env' : slurm_export_env, - 'sub_command' : slurm_sub_command, + 'options' : slurm_option_list, + 'srun_options': slurm_srun_options, + 'init_cmds' : slurm_init_cmds, + 'file' : slurm_file, + 'export_env' : slurm_export_env, } plugin = SlurmScheduler(main_invoke_params, n_workers, slurm_conf, detach) diff --git a/pytest_parallel/send_report.py b/pytest_parallel/send_report.py index 85ea3db..facb1e2 100644 --- a/pytest_parallel/send_report.py +++ b/pytest_parallel/send_report.py @@ -26,25 +26,26 @@ def _file_path(when): test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absense means no error) # 'fatal_error' file -try: - file_path = _file_path('fatal_error') +file_path = _file_path('fatal_error') +if file_path.exists(): with open(file_path, 'r') as file: fatal_error = file.read() test_info['fatal_error'] = fatal_error -except FileNotFoundError: # There was no fatal error - pass # 'setup/call/teardown' files already_failed = False for when in ('setup', 'call', 'teardown'): - try: - file_path = _file_path(when) - with open(file_path, 'rb') as file: - report_info = file.read() - report_info = pickle.loads(report_info) - test_info[when] = report_info - except FileNotFoundError: # Supposedly not found because the test crashed before writing the file + file_path = _file_path(when) + if file_path.exists(): + try: + with open(file_path, 'rb') as file: + 
report_info = file.read() + report_info = pickle.loads(report_info) + test_info[when] = report_info + except pickle.PickleError: + test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}' + else: # Supposedly not found because the test crashed before writing the file collect_longrepr = [] msg = f'Error: the test crashed. ' red = 31 @@ -63,8 +64,6 @@ def _file_path(when): 'duration': 0, } # unable to report accurately already_failed = True - except pickle.PickleError: - test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}' with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: diff --git a/pytest_parallel/shell_static_scheduler.py b/pytest_parallel/shell_static_scheduler.py index 1adf117..c118c5e 100644 --- a/pytest_parallel/shell_static_scheduler.py +++ b/pytest_parallel/shell_static_scheduler.py @@ -5,13 +5,13 @@ import socket import pickle from pathlib import Path -from .utils.socket import recv as socket_recv, setup_socket +from .utils.socket import recv as socket_recv +from .utils.socket import setup_socket from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index, mark_skip from .utils.file import remove_exotic_chars, create_folders from .algo import partition from .static_scheduler_utils import group_items_by_parallel_steps from mpi4py import MPI -import numpy as np def mpi_command(current_proc, n_proc): mpi_vendor = MPI.get_vendor()[0] @@ -43,7 +43,7 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_ cmd += mpi_command(current_proc, item.n_proc) cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' cmd += f' > {test_out_file} 2>&1' - cmd += f' ; python -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' + cmd += f' ; python3 -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' cmd += ')' cmds.append(cmd) current_proc += item.n_proc @@ -74,18 +74,17 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_ def receive_items(items, session, socket, n_item_to_recv): # > Precondition: Items must keep their original order to pick up the right item at the reception - original_indices = np.array([item.original_index for item in items]) - assert (original_indices==np.arange(len(items))).all() + original_indices = [item.original_index for item in items] + assert original_indices==list(range(len(items))) while n_item_to_recv>0: conn, addr = socket.accept() with conn: msg = socket_recv(conn) test_info = pickle.loads(msg) # the worker is supposed to have send a dict with the correct structured information - #print(f"{test_info=}") if 'signal_info' in test_info: print('signal_info= ',test_info['signal_info']) - break; + break else: test_idx = test_info['test_idx'] if test_info['fatal_error'] is not None: diff --git a/pytest_parallel/slurm_scheduler.py b/pytest_parallel/slurm_scheduler.py index d55f2d0..3c582be 100644 --- a/pytest_parallel/slurm_scheduler.py +++ b/pytest_parallel/slurm_scheduler.py @@ -3,19 +3,12 @@ import socket import pickle from pathlib import Path -from .utils.socket import recv as socket_recv, setup_socket +from .utils.socket import recv as socket_recv +from .utils.socket import setup_socket from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index, mark_skip from .utils.file import 
remove_exotic_chars, create_folders from .algo import partition -def parse_job_id_from_submission_output(s): - # At this point, we are trying to guess -_- - # Here we supposed that the command for submitting the job - # returned string with only one number, - # and that this number is the job id - import re - return int(re.search(r'\d+', str(s)).group()) - def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntasks, slurm_conf): SCHEDULER_IP_ADDRESS, port = setup_socket(socket) @@ -45,8 +38,8 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask srun_options = '' socket_flags = f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port} --_session_folder={session_folder}" cmds = '' - if slurm_conf['additional_cmds'] is not None: - cmds += slurm_conf['additional_cmds'] + '\n' + if slurm_conf['init_cmds'] is not None: + cmds += slurm_conf['init_cmds'] + '\n' for item in items: test_idx = item.original_index test_out_file = f'.pytest_parallel/{session_folder}/{remove_exotic_chars(item.nodeid)}' @@ -58,7 +51,7 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask cmd += ' -l' # cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' cmd += f' > {test_out_file} 2>&1' - cmd += f' ; python -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' + cmd += f' ; python3 -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' cmd += ')' cmd += ' &\n' # launch everything in parallel cmds += cmd @@ -73,23 +66,17 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask with open(f'.pytest_parallel/{session_folder}/env_vars.sh','wb') as f: f.write(pytest._pytest_parallel_env_vars) - if slurm_conf['sub_command'] is None: - if slurm_conf['export_env']: - sbatch_cmd = f'sbatch --parsable --export-file=.pytest_parallel/{session_folder}/env_vars.sh .pytest_parallel/{session_folder}/job.sh' - else: - sbatch_cmd = f'sbatch --parsable .pytest_parallel/{session_folder}/job.sh' + if slurm_conf['export_env']: + sbatch_cmd = f'sbatch --parsable --export-file=.pytest_parallel/{session_folder}/env_vars.sh .pytest_parallel/{session_folder}/job.sh' else: - sbatch_cmd = slurm_conf['sub_command'] + ' .pytest_parallel/{session_folder}/job.sh' + sbatch_cmd = f'sbatch --parsable .pytest_parallel/{session_folder}/job.sh' p = subprocess.Popen([sbatch_cmd], shell=True, stdout=subprocess.PIPE) print('\nSubmitting tests to SLURM...') returncode = p.wait() assert returncode==0, f'Error when submitting to SLURM with `{sbatch_cmd}`' - if slurm_conf['sub_command'] is None: - slurm_job_id = int(p.stdout.read()) - else: - slurm_job_id = parse_job_id_from_submission_output(p.stdout.read()) + slurm_job_id = int(p.stdout.read()) print(f'SLURM job {slurm_job_id} has been submitted') return slurm_job_id diff --git a/pytest_parallel/utils/file.py b/pytest_parallel/utils/file.py index 188b931..68ef516 100644 --- a/pytest_parallel/utils/file.py +++ b/pytest_parallel/utils/file.py @@ -13,6 +13,6 @@ def remove_exotic_chars(s): def create_folders(): Path('.pytest_parallel').mkdir(exist_ok=True) - session_folder_abs = Path(tempfile.mkdtemp(dir='.pytest_parallel')) + session_folder_abs = Path(tempfile.mkdtemp(dir='.pytest_parallel')) # create a folder that did not already exist Path(session_folder_abs/'_partial').mkdir() return session_folder_abs.name From 
6302bee78f6e3b7bafb975ac509afc2e7142e78b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Mon, 13 Jan 2025 17:21:42 +0100 Subject: [PATCH 09/14] Version 1.3 --- CMakeLists.txt | 2 +- pyproject.toml | 4 ++-- pytest_parallel/__init__.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9321db8..662d11c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,7 +9,7 @@ cmake_policy(SET CMP0074 NEW) # force find_package to take _ROOT va # Project # ---------------------------------------------------------------------- project( - pytest_parallel VERSION 1.2.0 + pytest_parallel VERSION 1.3.0 DESCRIPTION "pytest_parallel extends PyTest to support parallel testing using mpi4py" ) diff --git a/pyproject.toml b/pyproject.toml index bc0954c..d727668 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ authors = [ {name = "Berenger Berthoul", email = "berenger.berthoul@onera.fr"}, ] maintainers = [ - {name = "Bruno Maugars", email = "bruno.maugars@onera.fr"}, + {name = "Berenger Berthoul", email = "bruno.maugars@onera.fr"}, ] license = {text = "Mozilla Public License 2.0"} keywords = [ @@ -52,7 +52,7 @@ dependencies = [ "mpi4py", "numpy", ] -version = "1.2.0" +version = "1.3.0" [project.urls] Homepage = "https://github.com/onera/pytest_parallel" diff --git a/pytest_parallel/__init__.py b/pytest_parallel/__init__.py index 69936b8..9eeaa6e 100644 --- a/pytest_parallel/__init__.py +++ b/pytest_parallel/__init__.py @@ -1,3 +1,3 @@ -__version__ = "1.2" +__version__ = "1.3" from . import mark From a848d701e825594667024260473434491a1eee24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Mon, 13 Jan 2025 17:37:28 +0100 Subject: [PATCH 10/14] minor, tracepath portability tracepath utility does not have a '-4' option on older versions, so remove it since it is the default anyway --- pytest_parallel/utils/socket.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest_parallel/utils/socket.py b/pytest_parallel/utils/socket.py index 67adf68..2ca0dce 100644 --- a/pytest_parallel/utils/socket.py +++ b/pytest_parallel/utils/socket.py @@ -42,7 +42,7 @@ def _get_my_ip_address(): hostname = socket.gethostname() assert command_exists('tracepath'), 'pytest_parallel SLURM scheduler: command `tracepath` is not available' - cmd = ['tracepath','-4','-n',hostname] + cmd = ['tracepath','-n',hostname] r = subprocess.run(cmd, stdout=subprocess.PIPE) assert r.returncode==0, f'pytest_parallel SLURM scheduler: error running command `{" ".join(cmd)}`' ips = r.stdout.decode("utf-8") From af20e5f9e57b56ec2f6b6551441d1498005658a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Wed, 15 Jan 2025 10:38:09 +0100 Subject: [PATCH 11/14] minor cleaning --- pytest_parallel/mpi_reporter.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/pytest_parallel/mpi_reporter.py b/pytest_parallel/mpi_reporter.py index 51a65d5..2018137 100644 --- a/pytest_parallel/mpi_reporter.py +++ b/pytest_parallel/mpi_reporter.py @@ -92,20 +92,10 @@ def pytest_collection_modifyitems(self, config, items): def pytest_runtest_protocol(self, item, nextitem): if self.barrier_at_test_start: self.global_comm.barrier() - #print(f'pytest_runtest_protocol beg {MPI.COMM_WORLD.rank=}') _ = yield - #print(f'pytest_runtest_protocol end {MPI.COMM_WORLD.rank=}') if self.barrier_at_test_end: self.global_comm.barrier() - #@pytest.hookimpl(tryfirst=True) - #def pytest_runtest_protocol(self, item, nextitem): - # if 
self.barrier_at_test_start: - # self.global_comm.barrier() - # print(f'pytest_runtest_protocol beg {MPI.COMM_WORLD.rank=}') - # if item.sub_comm == MPI.COMM_NULL: - # return True # for this hook, `firstresult=True` so returning a non-None will stop other hooks to run - @pytest.hookimpl(tryfirst=True) def pytest_pyfunc_call(self, pyfuncitem): #print(f'pytest_pyfunc_call {MPI.COMM_WORLD.rank=}') From b850d0d0654abdf37ec6500bf8ddc174341d9074 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Fri, 17 Jan 2025 16:01:20 +0100 Subject: [PATCH 12/14] minor (typo) --- test/test_pytest_parallel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_pytest_parallel.py b/test/test_pytest_parallel.py index fd5d2e8..15b1afb 100644 --- a/test/test_pytest_parallel.py +++ b/test/test_pytest_parallel.py @@ -52,7 +52,7 @@ def run_pytest_parallel_test(test_name, n_workers, scheduler, capfd, suffix=""): stderr_file_path.unlink(missing_ok=True) test_env = os.environ.copy() - # To test pytest_parallel, we can need to launch pytest with it + # To test pytest_parallel, we need to launch pytest with pytest_parallel as a plugin: if "PYTEST_DISABLE_PLUGIN_AUTOLOAD" not in test_env: test_env["PYTEST_DISABLE_PLUGIN_AUTOLOAD"] = "1" cmd = f"mpiexec -n {n_workers} pytest -p pytest_parallel.plugin -s -ra -vv --color=no --scheduler={scheduler} {test_file_path}" From 8570a580b623c70ce5595ce84d157ca1e33790c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Fri, 17 Jan 2025 16:03:14 +0100 Subject: [PATCH 13/14] [ci] Deactivate ubuntu+mpich: MPICH seems broken --- .github/workflows/test.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0632fbe..4fdb17a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,10 +16,10 @@ jobs: pylint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: mpi4py/setup-mpi@v1 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install dependencies @@ -67,12 +67,19 @@ jobs: mpi: intelmpi - os: ubuntu-latest mpi: msmpi + # mpich seems broken on Ubuntu - os: ubuntu-latest py-version: 3.8 mpi: mpich - os: ubuntu-latest py-version: 3.9 mpi: mpich + - os: ubuntu-latest + py-version: 3.10 + mpi: mpich + - os: ubuntu-latest + py-version: 3.11 + mpi: mpich name: ${{ matrix.mpi }} - ${{matrix.py-version}} - ${{matrix.os}} steps: - name: Checkout From f47726dd78217aa9dfa28abe0f6c0f397598fafb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9renger=20Berthoul?= Date: Fri, 17 Jan 2025 18:04:44 +0100 Subject: [PATCH 14/14] cleaning with pylint --- .github/workflows/test.yml | 2 +- .slurm_draft/worker.py | 6 +- pytest_parallel/gather_report.py | 4 +- pytest_parallel/mpi_reporter.py | 37 ++++----- pytest_parallel/plugin.py | 25 +++--- pytest_parallel/process_worker.py | 16 ++-- pytest_parallel/send_report.py | 69 ++++++++-------- pytest_parallel/shell_static_scheduler.py | 29 ++++--- pytest_parallel/slurm_scheduler.py | 42 +++++----- pytest_parallel/static_scheduler_utils.py | 2 - pytest_parallel/utils/file.py | 19 ++--- pytest_parallel/utils/items.py | 12 +-- pytest_parallel/utils/mpi.py | 4 +- pytest_parallel/utils/socket.py | 81 ++++++++++--------- .../terminal_fail_complex_assert_two_procs | 6 +- .../terminal_success_0_fail_1 | 2 +- .../test_crash_reporting.py | 38 ++++----- 
.../pytest_parallel_tests/test_doc_example.py | 6 +- .../test_fail_complex_assert_two_procs.py | 6 +- .../pytest_parallel_tests/test_parametrize.py | 1 - test/pytest_parallel_tests/test_scheduling.py | 2 +- .../test_success_0_fail_1.py | 4 +- test/test_pytest_parallel.py | 36 ++++----- 23 files changed, 221 insertions(+), 228 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4fdb17a..f37d281 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,7 +30,7 @@ jobs: pip install -r *.egg-info/requires.txt - name: Analysing the code with pylint run: | - pylint --unsafe-load-any-extension=y --disable=fixme $(git ls-files '*.py') || true + pylint --unsafe-load-any-extension=y --disable=fixme $(git ls-files "pytest_parallel/*.py" "test/*.py") || true build: needs: [pylint] diff --git a/.slurm_draft/worker.py b/.slurm_draft/worker.py index 3084a00..b69b272 100644 --- a/.slurm_draft/worker.py +++ b/.slurm_draft/worker.py @@ -11,13 +11,13 @@ test_idx = int(sys.argv[3]) comm = MPI.COMM_WORLD -print(f'start at {scheduler_ip}@{server_port} test {test_idx} at rank {comm.Get_rank()}/{comm.Get_size()} exec on {socket.gethostname()} - ',datetime.datetime.now()) +print(f'start at {scheduler_ip}@{server_port} test {test_idx} at rank {comm.rank}/{comm.size} exec on {socket.gethostname()} - ',datetime.datetime.now()) -if comm.Get_rank() == 0: +if comm.rank == 0: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.connect((scheduler_ip, server_port)) #time.sleep(10+5*test_idx) - #msg = f'Hello from test {test_idx} at rank {comm.Get_rank()}/{comm.Get_size()} exec on {socket.gethostname()}' + #msg = f'Hello from test {test_idx} at rank {comm.rank}/{comm.size} exec on {socket.gethostname()}' #socket_utils.send(s, msg) info = { 'test_idx': test_idx, diff --git a/pytest_parallel/gather_report.py b/pytest_parallel/gather_report.py index ad7a4e3..3a7e1dc 100644 --- a/pytest_parallel/gather_report.py +++ b/pytest_parallel/gather_report.py @@ -45,8 +45,8 @@ def gather_report_on_local_rank_0(report): del report.sub_comm # No need to keep it in the report # Furthermore we need to serialize the report # and mpi4py does not know how to serialize report.sub_comm - i_sub_rank = sub_comm.Get_rank() - n_sub_rank = sub_comm.Get_size() + i_sub_rank = sub_comm.rank + n_sub_rank = sub_comm.size if ( report.outcome != "skipped" diff --git a/pytest_parallel/mpi_reporter.py b/pytest_parallel/mpi_reporter.py index 2018137..a3062d9 100644 --- a/pytest_parallel/mpi_reporter.py +++ b/pytest_parallel/mpi_reporter.py @@ -1,5 +1,6 @@ -import pytest import sys + +import pytest from mpi4py import MPI from .algo import partition, lower_bound @@ -11,7 +12,7 @@ def mark_skip(item): comm = MPI.COMM_WORLD - n_rank = comm.Get_size() + n_rank = comm.size n_proc_test = get_n_proc_for_test(item) skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {n_rank} available" item.add_marker(pytest.mark.skip(reason=skip_msg), append=False) @@ -38,8 +39,7 @@ def create_sub_comm_of_size(global_comm, n_proc, mpi_comm_creation_function): assert 0, 'Unknown MPI communicator creation function. 
Available: `MPI_Comm_create`, `MPI_Comm_split`' def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function): - i_rank = global_comm.Get_rank() - n_rank = global_comm.Get_size() + n_rank = global_comm.size sub_comms = [None] * n_rank for i in range(0,n_rank): n_proc = i+1 @@ -48,8 +48,7 @@ def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function): def add_sub_comm(items, global_comm, test_comm_creation, mpi_comm_creation_function): - i_rank = global_comm.Get_rank() - n_rank = global_comm.Get_size() + n_rank = global_comm.size # Strategy 'by_rank': create one sub-communicator by size, from sequential (size=1) to n_rank if test_comm_creation == 'by_rank': @@ -109,7 +108,7 @@ def pytest_runtestloop(self, session) -> bool: _ = yield # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED) # when no test run on non-master - if self.global_comm.Get_rank() != 0 and session.testscollected == 0: + if self.global_comm.rank != 0 and session.testscollected == 0: session.testscollected = 1 return True @@ -132,7 +131,7 @@ def pytest_runtest_logreport(self, report): def prepare_items_to_run(items, comm): - i_rank = comm.Get_rank() + i_rank = comm.rank items_to_run = [] @@ -164,7 +163,7 @@ def prepare_items_to_run(items, comm): def items_to_run_on_this_proc(items_by_steps, items_to_skip, comm): - i_rank = comm.Get_rank() + i_rank = comm.rank items = [] @@ -200,14 +199,13 @@ def pytest_runtestloop(self, session) -> bool: and not session.config.option.continue_on_collection_errors ): raise session.Interrupted( - "%d error%s during collection" - % (session.testsfailed, "s" if session.testsfailed != 1 else "") + f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection" ) if session.config.option.collectonly: return True - n_workers = self.global_comm.Get_size() + n_workers = self.global_comm.size add_n_procs(session.items) @@ -217,12 +215,12 @@ def pytest_runtestloop(self, session) -> bool: items_by_steps, items_to_skip, self.global_comm ) - for i, item in enumerate(items): + for item in items: nextitem = None run_item_test(item, nextitem, session) # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED) when no test run on non-master - if self.global_comm.Get_rank() != 0 and session.testscollected == 0: + if self.global_comm.rank != 0 and session.testscollected == 0: session.testscollected = 1 return True @@ -244,8 +242,8 @@ def pytest_runtest_logreport(self, report): gather_report_on_local_rank_0(report) # master ranks of each sub_comm must send their report to rank 0 - if sub_comm.Get_rank() == 0: # only master are concerned - if self.global_comm.Get_rank() != 0: # if master is not global master, send + if sub_comm.rank == 0: # only master are concerned + if self.global_comm.rank != 0: # if master is not global master, send self.global_comm.send(report, dest=0) elif report.master_running_proc != 0: # else, recv if test run remotely # In the line below, MPI.ANY_TAG will NOT clash with communications outside the framework because self.global_comm is private @@ -342,7 +340,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm): for sub_rank in sub_ranks: if sub_rank != first_rank_done: rank_original_idx = inter_comm.recv(source=sub_rank, tag=WORK_DONE_TAG) - assert (rank_original_idx == original_idx) # sub_rank is supposed to have worked on the same test + assert rank_original_idx == original_idx # sub_rank is supposed to have worked on the same test # the procs are now available for sub_rank in 
sub_ranks: @@ -406,8 +404,7 @@ def pytest_runtestloop(self, session) -> bool: and not session.config.option.continue_on_collection_errors ): raise session.Interrupted( - "%d error%s during collection" - % (session.testsfailed, "s" if session.testsfailed != 1 else "") + f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection" ) if session.config.option.collectonly: @@ -499,7 +496,7 @@ def pytest_runtest_logreport(self, report): sub_comm = report.sub_comm gather_report_on_local_rank_0(report) - if sub_comm.Get_rank() == 0: # if local master proc, send + if sub_comm.rank == 0: # if local master proc, send # The idea of the scheduler is the following: # The server schedules test over clients # A client executes the test then report to the server it is done diff --git a/pytest_parallel/plugin.py b/pytest_parallel/plugin.py index 2a8d9ef..2dca271 100644 --- a/pytest_parallel/plugin.py +++ b/pytest_parallel/plugin.py @@ -7,11 +7,12 @@ import tempfile from pathlib import Path import argparse + import pytest from _pytest.terminal import TerminalReporter class PytestParallelError(ValueError): - pass + pass # -------------------------------------------------------------------------- def pytest_addoption(parser): @@ -104,9 +105,9 @@ def pytest_configure(config): slurm_file = config.getoption('slurm_file') slurm_export_env = config.getoption('slurm_export_env') detach = config.getoption('detach') - if scheduler != 'slurm' and scheduler != 'shell': + if not scheduler in ['slurm', 'shell']: assert not is_worker, f'Internal pytest_parallel error `--_worker` not available with`--scheduler={scheduler}`' - if (scheduler == 'slurm' or scheduler == 'shell') and not is_worker: + if scheduler in ['slurm', 'shell'] and not is_worker: if n_workers is None: raise PytestParallelError(f'You need to specify `--n-workers` when `--scheduler={scheduler}`') if scheduler != 'slurm': @@ -119,7 +120,7 @@ def pytest_configure(config): if slurm_file is not None: raise PytestParallelError('Option `--slurm-file` only available when `--scheduler=slurm`') - if (scheduler == 'shell' or scheduler == 'slurm') and not is_worker: + if scheduler in ['shell', 'slurm'] and not is_worker: from mpi4py import MPI if MPI.COMM_WORLD.size != 1: err_msg = 'Do not launch `pytest_parallel` on more that one process when `--scheduler=shell` or `--scheduler=slurm`.\n' \ @@ -142,7 +143,7 @@ def pytest_configure(config): raise PytestParallelError('You cannot specify `--slurm-init-cmds` together with `--slurm-file`') if '-n=' in slurm_options or '--ntasks=' in slurm_options: - raise PytestParallelError('Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).') + raise PytestParallelError('Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).') from .slurm_scheduler import SlurmScheduler @@ -154,7 +155,7 @@ def pytest_configure(config): ## pull apart `--slurm-options` for special treatement main_invoke_params = main_invoke_params.replace(f'--slurm-options={slurm_options}', '') for file_or_dir in config.option.file_or_dir: - main_invoke_params = main_invoke_params.replace(file_or_dir, '') + main_invoke_params = main_invoke_params.replace(file_or_dir, '') slurm_option_list = slurm_options.split() if slurm_options is not None else [] slurm_conf = { 'options' : slurm_option_list, @@ -172,7 +173,7 @@ def pytest_configure(config): # reconstruct complete invoke string main_invoke_params = _invoke_params(config.invocation_params.args) for file_or_dir 
in config.option.file_or_dir: - main_invoke_params = main_invoke_params.replace(file_or_dir, '') + main_invoke_params = main_invoke_params.replace(file_or_dir, '') plugin = ShellStaticScheduler(main_invoke_params, n_workers, detach) else: from mpi4py import MPI @@ -190,7 +191,7 @@ def pytest_configure(config): elif scheduler == 'dynamic': inter_comm = spawn_master_process(global_comm) plugin = DynamicScheduler(global_comm, inter_comm) - elif (scheduler == 'slurm' or scheduler == 'shell') and is_worker: + elif scheduler in ['shell', 'slurm'] and is_worker: scheduler_ip_address = config.getoption('_scheduler_ip_address') scheduler_port = config.getoption('_scheduler_port') session_folder = config.getoption('_session_folder') @@ -209,7 +210,7 @@ def pytest_configure(config): # Pytest relies on having a terminal reporter to decide on how to create error messages, see #12 # Hence, register a terminal reporter that outputs to /dev/null - null_file = open(os.devnull,'w') + null_file = open(os.devnull,'w', encoding='utf-8') terminal_reporter = TerminalReporter(config, null_file) config.pluginmanager.register(terminal_reporter, "terminalreporter") @@ -238,16 +239,16 @@ def __init__(self, comm): def __enter__(self): from mpi4py import MPI if self.comm != MPI.COMM_NULL: # TODO DEL once non-participating rank do not participate in fixtures either - rank = self.comm.Get_rank() + rank = self.comm.rank self.tmp_dir = tempfile.TemporaryDirectory() if rank == 0 else None self.tmp_path = Path(self.tmp_dir.name) if rank == 0 else None return self.comm.bcast(self.tmp_path, root=0) - def __exit__(self, type, value, traceback): + def __exit__(self, ex_type, ex_value, traceback): from mpi4py import MPI if self.comm != MPI.COMM_NULL: # TODO DEL once non-participating rank do not participate in fixtures either self.comm.barrier() - if self.comm.Get_rank() == 0: + if self.comm.rank == 0: self.tmp_dir.cleanup() diff --git a/pytest_parallel/process_worker.py b/pytest_parallel/process_worker.py index dc73f75..3c76c21 100644 --- a/pytest_parallel/process_worker.py +++ b/pytest_parallel/process_worker.py @@ -1,9 +1,9 @@ -import pytest +from pathlib import Path +import pickle +import pytest from mpi4py import MPI -from pathlib import Path -import pickle from .utils.items import get_n_proc_for_test, run_item_test from .gather_report import gather_report_on_local_rank_0 @@ -32,7 +32,7 @@ def pytest_runtestloop(self, session) -> bool: # check there is no file from a previous run if comm.rank == 0: - for when in {'fatal_error', 'setup', 'call', 'teardown'}: + for when in ['fatal_error', 'setup', 'call', 'teardown']: path = self._file_path(when) assert not path.exists(), f'INTERNAL FATAL ERROR in pytest_parallel: file "{path}" should not exist at this point' @@ -40,10 +40,10 @@ def pytest_runtestloop(self, session) -> bool: if comm.size != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly if comm.rank == 0: error_info = f'FATAL ERROR in pytest_parallel with slurm scheduling: test `{item.nodeid}`' \ - f' uses a `comm` of size {test_comm_size} but was launched with size {comm.Get_size()}.\n' \ + f' uses a `comm` of size {test_comm_size} but was launched with size {comm.size}.\n' \ f' This generally indicates that `srun` does not interoperate correctly with MPI.' 
file_path = self._file_path('fatal_error') - with open(file_path, "w") as f: + with open(file_path, 'w', encoding='utf-8') as f: f.write(error_info) return True @@ -55,7 +55,7 @@ def pytest_runtestloop(self, session) -> bool: assert 0, f'{item.test_info["fatal_error"]}' return True - + @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(self, item): """ @@ -77,5 +77,5 @@ def pytest_runtest_logreport(self, report): 'longrepr': report.longrepr, 'duration': report.duration, } if sub_comm.rank == 0: - with open(self._file_path(report.when), "wb") as f: + with open(self._file_path(report.when), 'wb') as f: f.write(pickle.dumps(report_info)) diff --git a/pytest_parallel/send_report.py b/pytest_parallel/send_report.py index facb1e2..2a92b1a 100644 --- a/pytest_parallel/send_report.py +++ b/pytest_parallel/send_report.py @@ -2,12 +2,12 @@ import socket import pickle from pathlib import Path -from .utils.socket import send as socket_send from _pytest._code.code import ( ExceptionChainRepr, ReprTraceback, ReprEntryNative, ) +from .utils.socket import send as socket_send parser = argparse.ArgumentParser(description='Send return the codes of the tests to the master pytest_parallel process') @@ -21,52 +21,51 @@ args = parser.parse_args() def _file_path(when): - return Path(f'.pytest_parallel/{args._session_folder}/_partial/{args._test_idx}_{when}') + return Path(f'.pytest_parallel/{args._session_folder}/_partial/{args._test_idx}_{when}') test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absense means no error) # 'fatal_error' file file_path = _file_path('fatal_error') if file_path.exists(): - with open(file_path, 'r') as file: - fatal_error = file.read() - test_info['fatal_error'] = fatal_error + with open(file_path, 'r', encoding='utf-8') as file: + fatal_error = file.read() + test_info['fatal_error'] = fatal_error # 'setup/call/teardown' files already_failed = False for when in ('setup', 'call', 'teardown'): - file_path = _file_path(when) - if file_path.exists(): - try: - with open(file_path, 'rb') as file: - report_info = file.read() - report_info = pickle.loads(report_info) - test_info[when] = report_info - except pickle.PickleError: - test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}' - else: # Supposedly not found because the test crashed before writing the file - collect_longrepr = [] - msg = f'Error: the test crashed. ' - red = 31 - bold = 1 - msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m' - msg += f'Log file: {args._test_name}\n' - trace_back = ReprTraceback([ReprEntryNative(msg)], None, None) - collect_longrepr.append( - (trace_back, None, None) - ) - longrepr = ExceptionChainRepr(collect_longrepr) - - outcome = 'passed' if already_failed else 'failed' # No need to report the error twice - test_info[when] = {'outcome' : outcome, - 'longrepr': longrepr, - 'duration': 0, } # unable to report accurately + file_path = _file_path(when) + if file_path.exists(): + try: + with open(file_path, 'rb') as file: + report_info = file.read() + report_info = pickle.loads(report_info) + test_info[when] = report_info + except pickle.PickleError: + test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}' + else: # Supposedly not found because the test crashed before writing the file + collect_longrepr = [] + msg = 'Error: the test crashed. 
' + red = 31 + bold = 1 + msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m' + msg += f'Log file: {args._test_name}\n' + trace_back = ReprTraceback([ReprEntryNative(msg)], None, None) + collect_longrepr.append( + (trace_back, None, None) + ) + longrepr = ExceptionChainRepr(collect_longrepr) - already_failed = True + outcome = 'passed' if already_failed else 'failed' # No need to report the error twice + test_info[when] = {'outcome' : outcome, + 'longrepr': longrepr, + 'duration': 0, } # unable to report accurately + already_failed = True -with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s.connect((args._scheduler_ip_address, args._scheduler_port)) - socket_send(s, pickle.dumps(test_info)) +with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.connect((args._scheduler_ip_address, args._scheduler_port)) + socket_send(s, pickle.dumps(test_info)) diff --git a/pytest_parallel/shell_static_scheduler.py b/pytest_parallel/shell_static_scheduler.py index c118c5e..bc8b4a3 100644 --- a/pytest_parallel/shell_static_scheduler.py +++ b/pytest_parallel/shell_static_scheduler.py @@ -1,17 +1,17 @@ -import pytest import os import stat import subprocess import socket import pickle -from pathlib import Path + +import pytest +from mpi4py import MPI + from .utils.socket import recv as socket_recv from .utils.socket import setup_socket -from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index, mark_skip +from .utils.items import add_n_procs, run_item_test, mark_original_index, mark_skip from .utils.file import remove_exotic_chars, create_folders -from .algo import partition from .static_scheduler_utils import group_items_by_parallel_steps -from mpi4py import MPI def mpi_command(current_proc, n_proc): mpi_vendor = MPI.get_vendor()[0] @@ -25,7 +25,7 @@ def mpi_command(current_proc, n_proc): else: assert 0, f'Unknown MPI implementation "{mpi_vendor}"' -def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_invoke_params, ntasks, i_step, n_step): +def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_invoke_params, i_step, n_step): # sort item by comm size to launch bigger first (Note: in case SLURM prioritize first-received items) items = sorted(items_to_run, key=lambda item: item.n_proc, reverse=True) @@ -36,7 +36,7 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_ socket_flags=f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port} --_session_folder={session_folder}" cmds = [] current_proc = 0 - for i,item in enumerate(items): + for item in items: test_idx = item.original_index test_out_file = f'.pytest_parallel/{session_folder}/{remove_exotic_chars(item.nodeid)}' cmd = '(' @@ -59,10 +59,10 @@ def submit_items(items_to_run, SCHEDULER_IP_ADDRESS, port, session_folder, main_ ## 3. 
wait everyone script += '\nwait\n' - + script_path = f'.pytest_parallel/{session_folder}/pytest_static_sched_{i_step+1}.sh' - with open(script_path,'w') as f: - f.write(script) + with open(script_path,'w', encoding='utf-8') as f: + f.write(script) current_permissions = stat.S_IMODE(os.lstat(script_path).st_mode) os.chmod(script_path, current_permissions | stat.S_IXUSR) @@ -78,7 +78,7 @@ def receive_items(items, session, socket, n_item_to_recv): assert original_indices==list(range(len(items))) while n_item_to_recv>0: - conn, addr = socket.accept() + conn, _ = socket.accept() with conn: msg = socket_recv(conn) test_info = pickle.loads(msg) # the worker is supposed to have send a dict with the correct structured information @@ -122,8 +122,7 @@ def pytest_runtestloop(self, session) -> bool: and not session.config.option.continue_on_collection_errors ): raise session.Interrupted( - "%d error%s during collection" - % (session.testsfailed, "s" if session.testsfailed != 1 else "") + f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection" ) if session.config.option.collectonly: @@ -149,13 +148,13 @@ def pytest_runtestloop(self, session) -> bool: n_step = len(items_by_steps) for i_step,items in enumerate(items_by_steps): n_item_to_receive = len(items) - sub_process = submit_items(items, SCHEDULER_IP_ADDRESS, port, session_folder, self.main_invoke_params, self.ntasks, i_step, n_step) + sub_process = submit_items(items, SCHEDULER_IP_ADDRESS, port, session_folder, self.main_invoke_params, i_step, n_step) if not self.detach: # The job steps are supposed to send their reports receive_items(session.items, session, self.socket, n_item_to_receive) returncode = sub_process.wait() # at this point, the sub-process should be done since items have been received # https://docs.pytest.org/en/stable/reference/exit-codes.html - # 0 means all passed, 1 means all executed, but some failed + # 0 means all passed, 1 means all executed, but some failed assert returncode==0 or returncode==1 , f'Pytest internal error during step {i_step} of shell scheduler (error code {returncode})' return True diff --git a/pytest_parallel/slurm_scheduler.py b/pytest_parallel/slurm_scheduler.py index 3c582be..73964e5 100644 --- a/pytest_parallel/slurm_scheduler.py +++ b/pytest_parallel/slurm_scheduler.py @@ -1,11 +1,12 @@ -import pytest import subprocess import socket import pickle -from pathlib import Path + +import pytest + from .utils.socket import recv as socket_recv from .utils.socket import setup_socket -from .utils.items import get_n_proc_for_test, add_n_procs, run_item_test, mark_original_index, mark_skip +from .utils.items import add_n_procs, run_item_test, mark_original_index, mark_skip from .utils.file import remove_exotic_chars, create_folders from .algo import partition @@ -14,9 +15,9 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask # generate SLURM header options if slurm_conf['file'] is not None: - with open(slurm_conf['file']) as f: + with open(slurm_conf['file'], encoding='utf-8') as f: slurm_header = f.read() - # Note: + # Note: # ntasks is supposed to be <= to the number of the ntasks submitted to slurm # but since the header file can be arbitrary, we have no way to check at this point else: @@ -35,7 +36,7 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask # launch srun for each item srun_options = slurm_conf['srun_options'] if srun_options is None: - srun_options = '' + srun_options = '' socket_flags = 
f"--_scheduler_ip_address={SCHEDULER_IP_ADDRESS} --_scheduler_port={port} --_session_folder={session_folder}" cmds = '' if slurm_conf['init_cmds'] is not None: @@ -48,7 +49,7 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask cmd += ' --exclusive' cmd += ' --kill-on-bad-exit=1' # make fatal errors (e.g. segfault) kill the whole srun step. Else, deadlock (at least with Intel MPI) cmd += f' --ntasks={item.n_proc}' - cmd += ' -l' # + cmd += ' --label' # Prepend task number to lines of stdout/err cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params} --_test_idx={test_idx} {item.config.rootpath}/{item.nodeid}' cmd += f' > {test_out_file} 2>&1' cmd += f' ; python3 -m pytest_parallel.send_report {socket_flags} --_test_idx={test_idx} --_test_name={test_out_file}' @@ -59,12 +60,12 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask job_cmds = f'{slurm_header}\n\n{cmds}' - with open(f'.pytest_parallel/{session_folder}/job.sh','w') as f: - f.write(job_cmds) + with open(f'.pytest_parallel/{session_folder}/job.sh','w', encoding='utf-8') as f: + f.write(job_cmds) # submit SLURM job with open(f'.pytest_parallel/{session_folder}/env_vars.sh','wb') as f: - f.write(pytest._pytest_parallel_env_vars) + f.write(pytest._pytest_parallel_env_vars) if slurm_conf['export_env']: sbatch_cmd = f'sbatch --parsable --export-file=.pytest_parallel/{session_folder}/env_vars.sh .pytest_parallel/{session_folder}/job.sh' @@ -83,7 +84,7 @@ def submit_items(items_to_run, socket, session_folder, main_invoke_params, ntask def receive_items(items, session, socket, n_item_to_recv): while n_item_to_recv>0: - conn, addr = socket.accept() + conn, _ = socket.accept() with conn: msg = socket_recv(conn) test_info = pickle.loads(msg) # the worker is supposed to have send a dict with the correct structured information @@ -125,8 +126,7 @@ def pytest_runtestloop(self, session) -> bool: and not session.config.option.continue_on_collection_errors ): raise session.Interrupted( - "%d error%s during collection" - % (session.testsfailed, "s" if session.testsfailed != 1 else "") + f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection" ) if session.config.option.collectonly: @@ -151,18 +151,18 @@ def pytest_runtestloop(self, session) -> bool: # schedule tests to run n_item_to_receive = len(items_to_run) if n_item_to_receive > 0: - session_folder = create_folders() - self.slurm_job_id = submit_items(items_to_run, self.socket, session_folder, self.main_invoke_params, self.ntasks, self.slurm_conf) - if not self.detach: # The job steps are supposed to send their reports - receive_items(session.items, session, self.socket, n_item_to_receive) + session_folder = create_folders() + self.slurm_job_id = submit_items(items_to_run, self.socket, session_folder, self.main_invoke_params, self.ntasks, self.slurm_conf) + if not self.detach: # The job steps are supposed to send their reports + receive_items(session.items, session, self.socket, n_item_to_receive) return True @pytest.hookimpl() - def pytest_keyboard_interrupt(excinfo): - if excinfo.slurm_job_id is not None: - print(f'Calling `scancel {excinfo.slurm_job_id}`') - subprocess.run(['scancel',str(excinfo.slurm_job_id)]) + def pytest_keyboard_interrupt(self, excinfo): + if self.slurm_job_id is not None: + print(f'Calling `scancel {self.slurm_job_id}`') + subprocess.run(['scancel',str(self.slurm_job_id)]) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(self, item): diff 
--git a/pytest_parallel/static_scheduler_utils.py b/pytest_parallel/static_scheduler_utils.py index 9e33da9..6034d79 100644 --- a/pytest_parallel/static_scheduler_utils.py +++ b/pytest_parallel/static_scheduler_utils.py @@ -20,5 +20,3 @@ def group_items_by_parallel_steps(items, n_workers): remaining_n_procs_by_step += [n_workers - item.n_proc] return items_by_step, items_to_skip - - diff --git a/pytest_parallel/utils/file.py b/pytest_parallel/utils/file.py index 68ef516..cc87074 100644 --- a/pytest_parallel/utils/file.py +++ b/pytest_parallel/utils/file.py @@ -1,18 +1,19 @@ from pathlib import Path import tempfile + def replace_sub_strings(s, subs, replacement): - res = s - for sub in subs: - res = res.replace(sub,replacement) - return res + res = s + for sub in subs: + res = res.replace(sub,replacement) + return res def remove_exotic_chars(s): - return replace_sub_strings(str(s), ['[',']','/', ':'], '_') + return replace_sub_strings(str(s), ['[',']','/', ':'], '_') def create_folders(): - Path('.pytest_parallel').mkdir(exist_ok=True) - session_folder_abs = Path(tempfile.mkdtemp(dir='.pytest_parallel')) # create a folder that did not already exist - Path(session_folder_abs/'_partial').mkdir() - return session_folder_abs.name + Path('.pytest_parallel').mkdir(exist_ok=True) + session_folder_abs = Path(tempfile.mkdtemp(dir='.pytest_parallel')) # create a folder that did not already exist + Path(session_folder_abs/'_partial').mkdir() + return session_folder_abs.name diff --git a/pytest_parallel/utils/items.py b/pytest_parallel/utils/items.py index 25545c5..c403bd2 100644 --- a/pytest_parallel/utils/items.py +++ b/pytest_parallel/utils/items.py @@ -1,13 +1,13 @@ -import sys import pytest from _pytest.nodes import Item + def get_n_proc_for_test(item: Item) -> int : - if not hasattr(item, 'callspec'): return 1 # no callspec, so no `comm` => sequential test case - try: - return item.callspec.getparam('comm') - except ValueError: # no `comm` => sequential test case - return 1 + if not hasattr(item, 'callspec'): return 1 # no callspec, so no `comm` => sequential test case + try: + return item.callspec.getparam('comm') + except ValueError: # no `comm` => sequential test case + return 1 def add_n_procs(items): diff --git a/pytest_parallel/utils/mpi.py b/pytest_parallel/utils/mpi.py index 3466235..4aac2bc 100644 --- a/pytest_parallel/utils/mpi.py +++ b/pytest_parallel/utils/mpi.py @@ -13,7 +13,7 @@ def should_enable_terminal_reporter(comm, scheduler): if scheduler == "dynamic": return is_dyn_master_process(comm) else: - return comm.Get_rank() == 0 + return comm.rank == 0 def spawn_master_process(global_comm): @@ -37,4 +37,4 @@ def spawn_master_process(global_comm): def number_of_working_processes(comm): if is_dyn_master_process(comm): return comm.Get_remote_size() - return comm.Get_size() + return comm.size diff --git a/pytest_parallel/utils/socket.py b/pytest_parallel/utils/socket.py index 2ca0dce..0adda64 100644 --- a/pytest_parallel/utils/socket.py +++ b/pytest_parallel/utils/socket.py @@ -2,35 +2,36 @@ import socket import subprocess -def send(sock, msg_bytes): - msg_len = len(msg_bytes) - sent = sock.send(msg_len.to_bytes(8,'big')) # send int64 big endian - if sent == 0: - raise RuntimeError('Socket send broken: could not send message size') - totalsent = 0 - while totalsent < msg_len: - sent = sock.send(msg_bytes[totalsent:]) +def send(sock, msg_bytes): + msg_len = len(msg_bytes) + sent = sock.send(msg_len.to_bytes(8,'big')) # send int64 big endian if sent == 0: - raise RuntimeError('Socket send 
broken: could not send message') - totalsent = totalsent + sent + raise RuntimeError('Socket send broken: could not send message size') + + totalsent = 0 + while totalsent < msg_len: + sent = sock.send(msg_bytes[totalsent:]) + if sent == 0: + raise RuntimeError('Socket send broken: could not send message') + totalsent = totalsent + sent def recv(sock): - msg_len_bytes = sock.recv(8) - if msg_len_bytes == b'': - raise RuntimeError('Socket recv broken: message has no size') - msg_len = int.from_bytes(msg_len_bytes, 'big') + msg_len_bytes = sock.recv(8) + if msg_len_bytes == b'': + raise RuntimeError('Socket recv broken: message has no size') + msg_len = int.from_bytes(msg_len_bytes, 'big') - chunks = [] - bytes_recv = 0 - while bytes_recv < msg_len: - chunk = sock.recv(min(msg_len-bytes_recv, 4096)) - if chunk == b'': - raise RuntimeError('Socket recv broken: could not receive message') - chunks.append(chunk) - bytes_recv += len(chunk) - msg_bytes = b''.join(chunks) - return msg_bytes + chunks = [] + bytes_recv = 0 + while bytes_recv < msg_len: + chunk = sock.recv(min(msg_len-bytes_recv, 4096)) + if chunk == b'': + raise RuntimeError('Socket recv broken: could not receive message') + chunks.append(chunk) + bytes_recv += len(chunk) + msg_bytes = b''.join(chunks) + return msg_bytes # https://stackoverflow.com/a/34177358 @@ -39,25 +40,25 @@ def command_exists(cmd_name): return shutil.which(cmd_name) is not None def _get_my_ip_address(): - hostname = socket.gethostname() + hostname = socket.gethostname() - assert command_exists('tracepath'), 'pytest_parallel SLURM scheduler: command `tracepath` is not available' - cmd = ['tracepath','-n',hostname] - r = subprocess.run(cmd, stdout=subprocess.PIPE) - assert r.returncode==0, f'pytest_parallel SLURM scheduler: error running command `{" ".join(cmd)}`' - ips = r.stdout.decode("utf-8") + assert command_exists('tracepath'), 'pytest_parallel SLURM scheduler: command `tracepath` is not available' + cmd = ['tracepath','-n',hostname] + r = subprocess.run(cmd, stdout=subprocess.PIPE) + assert r.returncode==0, f'pytest_parallel SLURM scheduler: error running command `{" ".join(cmd)}`' + ips = r.stdout.decode("utf-8") - try: - my_ip = ips.split('\n')[0].split(':')[1].split()[0] - except: - assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' - import ipaddress - try: - ipaddress.ip_address(my_ip) - except ValueError: - assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' + try: + my_ip = ips.split('\n')[0].split(':')[1].split()[0] + except IndexError: + assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' + import ipaddress + try: + ipaddress.ip_address(my_ip) + except ValueError: + assert 0, f'pytest_parallel SLURM scheduler: error parsing result `{ips}` of command `{" ".join(cmd)}`' - return my_ip + return my_ip def setup_socket(socket): # Find our IP address diff --git a/test/pytest_parallel_refs/terminal_fail_complex_assert_two_procs b/test/pytest_parallel_refs/terminal_fail_complex_assert_two_procs index ed73216..1e36693 100644 --- a/test/pytest_parallel_refs/terminal_fail_complex_assert_two_procs +++ b/test/pytest_parallel_refs/terminal_fail_complex_assert_two_procs @@ -19,7 +19,7 @@ comm = @pytest_parallel.mark.parallel\(2\) def test_fail_with_complex_assert_reporting\(comm\): - if comm.Get_rank\(\) == 0: + if comm.rank == 0: > assert 1 == 0 E assert 1 == 0 @@ -30,9 +30,9 @@ comm = 
@pytest_parallel.mark.parallel\(2\) def test_fail_with_complex_assert_reporting\(comm\): - if comm.Get_rank\(\) == 0: + if comm.rank == 0: assert 1 == 0 - if comm.Get_rank\(\) == 1: + if comm.rank == 1: > assert \(np.array\(\[0,1,2\]\) == np.array\(\[0,1,3\]\)\).all\(\) E assert (?:np.)?False_? E \+ where (?:np.)?False_? = \(\) diff --git a/test/pytest_parallel_refs/terminal_success_0_fail_1 b/test/pytest_parallel_refs/terminal_success_0_fail_1 index eceba75..a4dbe61 100644 --- a/test/pytest_parallel_refs/terminal_success_0_fail_1 +++ b/test/pytest_parallel_refs/terminal_success_0_fail_1 @@ -19,7 +19,7 @@ comm = @pytest_parallel.mark.parallel\(2\) def test_fail_one_rank\(comm\): - if comm.Get_rank\(\) == 0: + if comm.rank == 0: > assert 0 E assert 0 diff --git a/test/pytest_parallel_tests/test_crash_reporting.py b/test/pytest_parallel_tests/test_crash_reporting.py index a45a561..d0be3c1 100644 --- a/test/pytest_parallel_tests/test_crash_reporting.py +++ b/test/pytest_parallel_tests/test_crash_reporting.py @@ -1,49 +1,49 @@ # TODO These test file was used to develop crash reporting when scheduler=shell or scheduler=slurm, # but it is not currently integrated to the pytest_parallel test suite -import pytest_parallel import signal +import pytest_parallel def test_seq_pass(): - assert 1 + assert 1 def test_seq_fail(): - assert 0 + assert 0 def test_seq_crash(): - signal.raise_signal(11) # SIGSEGV + signal.raise_signal(11) # SIGSEGV @pytest_parallel.mark.parallel(2) def test_par_pass(comm): - assert 1 + assert 1 @pytest_parallel.mark.parallel(2) def test_par_fail(comm): - assert 0 + assert 0 @pytest_parallel.mark.parallel(2) def test_par_pass_fail(comm): - if comm.rank==0: - assert 1 - if comm.rank==1: - assert 0 + if comm.rank==0: + assert 1 + if comm.rank==1: + assert 0 @pytest_parallel.mark.parallel(2) def test_par_crash(comm): - signal.raise_signal(11) # SIGSEGV + signal.raise_signal(11) # SIGSEGV @pytest_parallel.mark.parallel(2) def test_par_pass_crash(comm): - if comm.rank==0: - assert 1 - if comm.rank==1: - signal.raise_signal(11) # SIGSEGV + if comm.rank==0: + assert 1 + if comm.rank==1: + signal.raise_signal(11) # SIGSEGV @pytest_parallel.mark.parallel(2) def test_par_crash_fail(comm): - if comm.rank==1: - signal.raise_signal(11) # SIGSEGV - if comm.rank==1: - assert 0 + if comm.rank==1: + signal.raise_signal(11) # SIGSEGV + if comm.rank==1: + assert 0 diff --git a/test/pytest_parallel_tests/test_doc_example.py b/test/pytest_parallel_tests/test_doc_example.py index a1955b8..c895027 100644 --- a/test/pytest_parallel_tests/test_doc_example.py +++ b/test/pytest_parallel_tests/test_doc_example.py @@ -1,11 +1,11 @@ -import pytest_parallel import time +import pytest_parallel @pytest_parallel.mark.parallel(2) def test_A(comm): time.sleep(0.1) - if comm.Get_rank() == 1: + if comm.rank == 1: assert False @@ -17,7 +17,7 @@ def test_B(): @pytest_parallel.mark.parallel(3) def test_C(comm): time.sleep(0.2) - assert comm.Get_size() == 3 + assert comm.size == 3 def test_D(): diff --git a/test/pytest_parallel_tests/test_fail_complex_assert_two_procs.py b/test/pytest_parallel_tests/test_fail_complex_assert_two_procs.py index b2bfb22..be7003b 100644 --- a/test/pytest_parallel_tests/test_fail_complex_assert_two_procs.py +++ b/test/pytest_parallel_tests/test_fail_complex_assert_two_procs.py @@ -1,9 +1,9 @@ -import pytest_parallel import numpy as np +import pytest_parallel @pytest_parallel.mark.parallel(2) def test_fail_with_complex_assert_reporting(comm): - if comm.Get_rank() == 0: + if comm.rank == 0: 
assert 1 == 0 - if comm.Get_rank() == 1: + if comm.rank == 1: assert (np.array([0,1,2]) == np.array([0,1,3])).all() diff --git a/test/pytest_parallel_tests/test_parametrize.py b/test/pytest_parallel_tests/test_parametrize.py index 0355b24..a8487d0 100644 --- a/test/pytest_parallel_tests/test_parametrize.py +++ b/test/pytest_parallel_tests/test_parametrize.py @@ -1,6 +1,5 @@ import pytest import pytest_parallel -from mpi4py import MPI @pytest_parallel.mark.parallel([1, 2]) diff --git a/test/pytest_parallel_tests/test_scheduling.py b/test/pytest_parallel_tests/test_scheduling.py index ab8bdb3..1c08c1b 100644 --- a/test/pytest_parallel_tests/test_scheduling.py +++ b/test/pytest_parallel_tests/test_scheduling.py @@ -1,5 +1,5 @@ -import pytest_parallel import time +import pytest_parallel # time_base = 1.0 time_base = 0.01 diff --git a/test/pytest_parallel_tests/test_success_0_fail_1.py b/test/pytest_parallel_tests/test_success_0_fail_1.py index 5d6b81a..5e3ac15 100644 --- a/test/pytest_parallel_tests/test_success_0_fail_1.py +++ b/test/pytest_parallel_tests/test_success_0_fail_1.py @@ -3,7 +3,7 @@ @pytest_parallel.mark.parallel(2) def test_fail_one_rank(comm): - if comm.Get_rank() == 0: + if comm.rank == 0: assert 0 - if comm.Get_rank() == 1: + if comm.rank == 1: assert 1 diff --git a/test/test_pytest_parallel.py b/test/test_pytest_parallel.py index 15b1afb..24960b8 100644 --- a/test/test_pytest_parallel.py +++ b/test/test_pytest_parallel.py @@ -3,21 +3,19 @@ by running it on a set of examples, then comparing it to template references """ - - -# pytest_parallel MUST NOT be plugged in its testing framework environement -# it will be plugged by the framework when needed (see `run_pytest_parallel_test`) -# (else we would use pytest_parallel to test pytest_parallel, which is logically wrong) import os -pytest_plugins = os.getenv('PYTEST_PLUGINS') -assert pytest_plugins is None or 'pytest_parallel.plugin' not in pytest_plugins - import sys import re import subprocess from pathlib import Path import pytest +# pytest_parallel MUST NOT be plugged in its testing framework environement +# it will be plugged by the framework when needed (see `run_pytest_parallel_test`) +# (else we would use pytest_parallel to test pytest_parallel, which is logically wrong) +pytest_plugins = os.getenv('PYTEST_PLUGINS') +assert pytest_plugins is None or 'pytest_parallel.plugin' not in pytest_plugins + root_dir = Path(__file__).parent tests_dir = root_dir / "pytest_parallel_tests" @@ -68,10 +66,10 @@ def run_pytest_parallel_test(test_name, n_workers, scheduler, capfd, suffix=""): param_scheduler = ["sequential", "static", "dynamic"] -# TODO "slurm" scheduler +# TODO "slurm", "shell" scheduler #param_scheduler = ["slurm"] if sys.platform == "win32": - param_scheduler = ["sequential", "static"] + param_scheduler = ["sequential", "static"] # fmt: off @pytest.mark.parametrize("scheduler", param_scheduler) @@ -81,28 +79,28 @@ def test_00(self, scheduler, capfd): run_pytest_parallel_test('seq' def test_01(self, scheduler, capfd): run_pytest_parallel_test('two_success_tests_one_proc' , 1, scheduler, capfd) # need at least 1 proc def test_02(self, scheduler, capfd): run_pytest_parallel_test('two_success_tests_one_proc' , 2, scheduler, capfd) # 2 tests executing concurrently def test_04(self, scheduler, capfd): run_pytest_parallel_test('two_success_tests_one_proc' , 4, scheduler, capfd) # 2 tests executing concurrently, 2 procs do nothing - + def test_05(self, scheduler, capfd): run_pytest_parallel_test('two_fail_tests_one_proc' , 1, 
scheduler, capfd) # same but failing def test_06(self, scheduler, capfd): run_pytest_parallel_test('two_fail_tests_one_proc' , 2, scheduler, capfd) def test_07(self, scheduler, capfd): run_pytest_parallel_test('two_fail_tests_one_proc' , 4, scheduler, capfd) - + def test_08(self, scheduler, capfd): run_pytest_parallel_test('two_success_tests_two_procs' , 2, scheduler, capfd) # need at least 2 procs def test_09(self, scheduler, capfd): run_pytest_parallel_test('two_success_tests_two_procs' , 4, scheduler, capfd) # 4 tests (needing 2 procs each) executing concurrently def test_10(self, scheduler, capfd): run_pytest_parallel_test('two_success_tests_two_procs' , 1, scheduler, capfd, suffix='_skip') # the two test will be skipped (not enough procs) - + def test_11(self, scheduler, capfd): run_pytest_parallel_test('two_fail_tests_two_procs' , 2, scheduler, capfd) # same but failing def test_12(self, scheduler, capfd): run_pytest_parallel_test('two_fail_tests_two_procs' , 4, scheduler, capfd) def test_13(self, scheduler, capfd): run_pytest_parallel_test('two_fail_tests_two_procs' , 1, scheduler, capfd, suffix='_skip') - + def test_14(self, scheduler, capfd): run_pytest_parallel_test('success_0_fail_1' , 2, scheduler, capfd) # one test failing (succeed one rank 0, fail on rank 1) - + def test_15(self, scheduler, capfd): run_pytest_parallel_test('two_success_fail_tests_two_procs', 2, scheduler, capfd) # one test succeeds, one test fails def test_16(self, scheduler, capfd): run_pytest_parallel_test('two_success_fail_tests_two_procs', 4, scheduler, capfd) # same, more procs - + def test_17(self, scheduler, capfd): run_pytest_parallel_test('fixture_error' , 1, scheduler, capfd) # check that fixture errors are correctly reported - - def test_18(self, scheduler, capfd): run_pytest_parallel_test('parametrize' , 2, scheduler, capfd) # check the parametrize API - + + def test_18(self, scheduler, capfd): run_pytest_parallel_test('parametrize' , 2, scheduler, capfd) # check the parametrize API + def test_19(self, scheduler, capfd): run_pytest_parallel_test('scheduling' , 4, scheduler, capfd) # check 'real' case def test_20(self, scheduler, capfd): run_pytest_parallel_test('fail_complex_assert_two_procs' , 2, scheduler, capfd) # check 'complex' error message # fmt: on
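
Note, for readers tracing how a crashed test still gets reported with --scheduler=shell: the key point of the series is the shape of the launch script written by shell_static_scheduler.submit_items. Each test runs in its own subshell, pytest's output is redirected to a per-test log file, and pytest_parallel.send_report runs after the pytest command whether it exited normally or was killed by a signal; a final `wait` closes the step. The sketch below reproduces that shape under stated assumptions: MPI_CMD, TestItem and safe_name are illustrative stand-ins (the real code uses mpi_command(), pytest items and remove_exotic_chars), and the `&` backgrounding is inferred from the `wait` in the patch rather than quoted verbatim.

# Simplified sketch of the per-step launch script built by shell_static_scheduler.submit_items.
# Every test runs in a backgrounded subshell: pytest writes to its log file, and
# pytest_parallel.send_report runs afterwards even if the pytest worker crashed (e.g. SIGSEGV),
# so the scheduler always receives a report for the test.
from dataclasses import dataclass

MPI_CMD = 'mpiexec -np'   # placeholder; the real prefix depends on MPI.get_vendor()

@dataclass
class TestItem:           # stand-in for a pytest item after add_n_procs/mark_original_index
    nodeid: str
    n_proc: int
    original_index: int

def safe_name(nodeid):
    # same idea as utils.file.remove_exotic_chars: make the nodeid usable as a file name
    for c in '[]/:':
        nodeid = nodeid.replace(c, '_')
    return nodeid

def build_step_script(items, session_folder, main_invoke_params, socket_flags):
    lines = []
    # bigger tests are launched first, as in the patch
    for item in sorted(items, key=lambda it: it.n_proc, reverse=True):
        log = f'.pytest_parallel/{session_folder}/{safe_name(item.nodeid)}'
        cmd  = '('
        cmd += f' {MPI_CMD} {item.n_proc}'
        cmd += f' python3 -u -m pytest -s --_worker {socket_flags} {main_invoke_params}'
        cmd += f' --_test_idx={item.original_index} {item.nodeid}'
        cmd += f' > {log} 2>&1'
        cmd += f' ; python3 -m pytest_parallel.send_report {socket_flags}'
        cmd += f' --_test_idx={item.original_index} --_test_name={log}'
        cmd += ' ) &'
        lines.append(cmd)
    lines += ['', 'wait', '']   # wait for every backgrounded test of this step
    return '\n'.join(lines)

if __name__ == '__main__':
    items = [TestItem('test_a.py::test_par[2]', 2, 0), TestItem('test_a.py::test_seq', 1, 1)]
    print(build_step_script(items, 'tmpXYZ', '-ra -vv', '--_scheduler_port=1234'))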
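
On the receiving side, pytest_parallel/send_report.py rebuilds a test report from the per-phase files that process_worker.py writes under .pytest_parallel/<session>/_partial/<test_idx>_<when>. A missing phase file is interpreted as a crash that happened before the worker could report, and a synthetic failed report pointing at the log file is emitted instead, only once, to avoid reporting the same crash three times. Below is a minimal sketch of that reconstruction; collect_test_info is an illustrative name (not a function from the patch) and a plain string stands in for pytest's ExceptionChainRepr so the sketch stays dependency-free.

# Reconstruction of a test report from the per-phase pickle files, in the spirit of
# pytest_parallel/send_report.py. A missing phase file means the worker crashed before
# it could write the report, so a synthetic failure is emitted for the first missing phase.
import pickle
from pathlib import Path

def collect_test_info(partial_dir: Path, test_idx: int, test_log: str) -> dict:
    test_info = {'test_idx': test_idx, 'fatal_error': None}

    fatal = partial_dir / f'{test_idx}_fatal_error'
    if fatal.exists():
        test_info['fatal_error'] = fatal.read_text(encoding='utf-8')

    already_failed = False
    for when in ('setup', 'call', 'teardown'):
        phase_file = partial_dir / f'{test_idx}_{when}'
        if phase_file.exists():
            try:
                test_info[when] = pickle.loads(phase_file.read_bytes())
            except pickle.PickleError:
                test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {phase_file}'
        else:
            # The phase never reported: the process most likely crashed (e.g. SIGSEGV).
            test_info[when] = {'outcome' : 'passed' if already_failed else 'failed',
                               'longrepr': f'Error: the test crashed. Log file: {test_log}\n',
                               'duration': 0, }  # duration unknown after a crash
            already_failed = True
    return test_info

In the patch itself the synthesized message is additionally colored with ANSI escapes and wrapped in a ReprTraceback/ExceptionChainRepr, so the scheduling process can render it like any other pytest failure.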
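
Finally, the reconstructed test_info dict is pickled and shipped back to the scheduling process over TCP using the small length-prefixed protocol of pytest_parallel/utils/socket.py: an 8-byte big-endian size header followed by the payload. The sketch below mirrors that framing and adds a loopback round-trip as a usage example; it uses sendall as a shorthand for the manual send loop in the patch, and the server/worker wiring is purely illustrative.

# Length-prefixed framing in the spirit of pytest_parallel/utils/socket.py:
# an 8-byte big-endian size header, then the (pickled) payload.
import pickle
import socket
import threading

def send(sock, msg_bytes):
    sock.sendall(len(msg_bytes).to_bytes(8, 'big'))  # size header first
    sock.sendall(msg_bytes)                          # then the payload

def recv(sock):
    header = sock.recv(8)
    if header == b'':
        raise RuntimeError('Socket recv broken: message has no size')
    msg_len = int.from_bytes(header, 'big')
    chunks, received = [], 0
    while received < msg_len:
        chunk = sock.recv(min(msg_len - received, 4096))
        if chunk == b'':
            raise RuntimeError('Socket recv broken: could not receive message')
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)

if __name__ == '__main__':
    # Loopback demo: the "scheduler" listens, a "worker" connects and sends its report.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(('127.0.0.1', 0))
    server.listen()
    port = server.getsockname()[1]

    def worker():
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect(('127.0.0.1', port))
            send(s, pickle.dumps({'test_idx': 0, 'fatal_error': None}))

    t = threading.Thread(target=worker)
    t.start()
    conn, _ = server.accept()
    with conn:
        print(pickle.loads(recv(conn)))  # -> {'test_idx': 0, 'fatal_error': None}
    t.join()
    server.close()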