diff --git a/recOrder/cli/apply_inverse_transfer_function.py b/recOrder/cli/apply_inverse_transfer_function.py
index afb31e01..c12159bd 100644
--- a/recOrder/cli/apply_inverse_transfer_function.py
+++ b/recOrder/cli/apply_inverse_transfer_function.py
@@ -344,8 +344,8 @@ def apply_inverse_transfer_function_cli(
         f"{gb_ram_request} GB of memory per CPU."
     )
 
-    name_without_ext = os.path.splitext(Path(output_dirpath).name)[0]
-    executor_folder = os.path.join(Path(output_dirpath).parent.absolute(), name_without_ext + "_logs")
+    # name_without_ext = os.path.splitext(Path(output_dirpath).name)[0]
+    executor_folder = output_dirpath.parent / "slurm_output"
     executor = submitit.AutoExecutor(folder=Path(executor_folder))
 
     executor.update_parameters(
@@ -375,21 +375,28 @@ def apply_inverse_transfer_function_cli(
         f"{num_jobs} job{'s' if num_jobs > 1 else ''} submitted {'locally' if executor.cluster == 'local' else 'via ' + executor.cluster}."
     )
 
-    doPrint = True # CLI prints Job status when used as cmd line
-    if unique_id != "": # no unique_id means no job submission info being listened to
-        JM.start_client()
-        i=0
-        for j in jobs:
-            job : submitit.Job = j
-            job_idx : str = job.job_id
-            position = input_position_dirpaths[i]
-            JM.put_Job_in_list(job, unique_id, str(job_idx), position, str(executor.folder.absolute()))
-            i += 1
-        JM.send_data_thread()
-        JM.set_shorter_timeout()
-        doPrint = False # CLI printing disabled when using GUI
-
-    monitor_jobs(jobs, input_position_dirpaths, doPrint)
+    # doPrint = True # CLI prints Job status when used as cmd line
+    # if unique_id != "" and unique_id !="-1": # no unique_id means no job submission info being listened to
+    #     JM.start_client()
+    #     i=0
+    #     for j in jobs:
+    #         job : submitit.Job = j
+    #         job_idx : str = job.job_id
+    #         position = input_position_dirpaths[i]
+    #         JM.put_Job_in_list(job, unique_id, str(job_idx), position, str(executor.folder.absolute()))
+    #         i += 1
+    #     JM.send_data_thread()
+    #     JM.set_shorter_timeout()
+    #     doPrint = False # CLI printing disabled when using GUI
+    # elif unique_id==-1: # CLI used to run automatic pipeline
+    #     doPrint = False
+
+    job_ids = [job.job_id for job in jobs] # Access job IDs after batch submission
+    log_path = Path(executor_folder/"submitit_jobs_ids.log")
+    with log_path.open("w") as log_file:
+        log_file.write("\n".join(job_ids))
+
+    # monitor_jobs(jobs, input_position_dirpaths, doPrint)
 
 
 @click.command()
diff --git a/recOrder/cli/utils.py b/recOrder/cli/utils.py
index 681aabbb..95307d69 100644
--- a/recOrder/cli/utils.py
+++ b/recOrder/cli/utils.py
@@ -90,6 +90,11 @@ def apply_inverse_to_zyx_and_save(
     # Load data
     czyx_uint16_numpy = position.data.oindex[t_idx, input_channel_indices]
 
+    # Check if all values in czyx_uint16_numpy are not zeros or Nan
+    if _check_nan_n_zeros(czyx_uint16_numpy):
+        click.echo(f"All values at t={t_idx} are zero or Nan, skipping reconstruction.")
+        return
+
     # convert to np.int32 (torch doesn't accept np.uint16), then convert to tensor float32
     czyx_data = torch.tensor(np.int32(czyx_uint16_numpy), dtype=torch.float32)
 
@@ -104,3 +109,9 @@
     ] = reconstruction_czyx
 
     click.echo(f"Finished Writing.. t={t_idx}")
+
+def _check_nan_n_zeros(input_array):
+    """
+    Checks if data are all zeros or nan
+    """
+    return np.all(np.isnan(input_array)) or np.all(input_array == 0)