", '
')
+
+
def renderer_to_arr(ren, size):
"""
Convert DataFrame to HTML table.
@@ -120,21 +143,26 @@ def renderer_to_arr(ren, size):
def compute_labels_map(lut_fname, unique_vals, compute_lut):
labels = {}
if compute_lut:
- labels[0] = np.array((0,0,0),dtype=np.int8)
- vtkcolors = fury.colormap.distinguishable_colormap(nb_colors=len(unique_vals))
+ labels[0] = np.array((0, 0, 0), dtype=np.int8)
+ vtkcolors = fury.colormap.distinguishable_colormap(
+ nb_colors=len(unique_vals)
+ )
for index, curr_label in enumerate(unique_vals[1:]):
- labels[curr_label] = np.array((vtkcolors[index][0]*255,
- vtkcolors[index][1]*255,
- vtkcolors[index][2]*255),
- dtype=np.int8)
+ labels[curr_label] = np.array(
+ (
+ vtkcolors[index][0] * 255,
+ vtkcolors[index][1] * 255,
+ vtkcolors[index][2] * 255,
+ ),
+ dtype=np.int8,
+ )
else:
with open(lut_fname) as f:
for line in f:
- tokens = ' '.join(line.split()).split()
- if tokens and not tokens[0].startswith('#'):
- labels[np.int(tokens[0])] = np.array((tokens[2],
- tokens[3],
- tokens[4]),
- dtype=np.int8)
+ tokens = " ".join(line.split()).split()
+ if tokens and not tokens[0].startswith("#"):
+            labels[int(tokens[0])] = np.array(
+ (tokens[2], tokens[3], tokens[4]), dtype=np.int8
+ )
return labels
diff --git a/scripts/dmriqc_brain_extraction.py b/scripts/dmriqc_brain_extraction.py
index f250425..3022406 100755
--- a/scripts/dmriqc_brain_extraction.py
+++ b/scripts/dmriqc_brain_extraction.py
@@ -2,21 +2,29 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
+from functools import partial
-import itertools
-from multiprocessing import Pool
import numpy as np
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.analysis.stats import stats_mean_median
-from dmriqcpy.viz.graph import graph_mean_median
-from dmriqcpy.viz.screenshot import screenshot_mosaic_blend
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+ get_generic_qa_stats_and_graph,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -25,125 +33,82 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
- p.add_argument('image_type',
- help='Type of image (e.g. B0).')
-
- p.add_argument('output_report',
- help='Filename of QC report (in html format).')
-
- p.add_argument('--no_bet', nargs='+', required=True,
- help='A folder or a list of images with the skull in'
- ' Nifti format.')
- p.add_argument('--bet_mask', nargs='+', required=True,
- help='Folder or a list of images of brain extraction masks'
- ' in Nifti format.')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("image_type", help="Type of image (e.g. B0).")
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+ p.add_argument(
+ "--no_bet",
+ nargs="+",
+ required=True,
+ help="A folder or a list of images with the skull in Nifti format.",
+ )
+ p.add_argument(
+ "--bet_mask",
+ nargs="+",
+ required=True,
+ help="A folder or a list of images of brain extraction masks in Nifti format.",
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(images_no_bet, images_bet_mask, name, skip,
- summary, nb_columns):
- subjects_dict = {}
- for subj_metric, mask in zip(images_no_bet, images_bet_mask):
- curr_key = os.path.basename(subj_metric).split('.')[0]
- screenshot_path = screenshot_mosaic_blend(subj_metric, mask,
- output_prefix=name,
- directory="data",
- blend_val=0.3,
- skip=skip,
- nb_columns=nb_columns,
- is_mask=True)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
images_no_bet = list_files_from_paths(args.no_bet)
images_bet_mask = list_files_from_paths(args.bet_mask)
-
- if not len(images_no_bet) == len(images_bet_mask):
- parser.error("Not the same number of images in input.")
+ assert_list_arguments_equal_size(parser, images_no_bet, images_bet_mask)
all_images = np.concatenate([images_no_bet, images_bet_mask])
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
-
- metrics = images_no_bet
- name = args.image_type
- curr_metrics = ['Mean {}'.format(name),
- 'Median {}'.format(name)]
-
- summary, stats = stats_mean_median(curr_metrics, metrics)
-
- warning_dict = {}
- warning_dict[name] = analyse_qa(summary, stats, curr_metrics)
- warning_images = [filenames for filenames in warning_dict[name].values()]
- warning_list = np.concatenate([warning_images])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graphs = []
- graph = graph_mean_median('Mean {}'.format(name), curr_metrics, summary,
- args.online)
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict = {}
- summary_dict[name] = stats_html
-
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(np.array_split(np.array(images_no_bet), args.nb_threads),
- np.array_split(np.array(images_bet_mask), args.nb_threads),
- itertools.repeat(name), itertools.repeat(args.skip),
- itertools.repeat(summary), itertools.repeat(args.nb_columns)))
-
- pool.close()
- pool.join()
-
- metrics_dict = {}
- subjects_dict = {}
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- subjects_dict[key] = dict_sub[key]
- metrics_dict[name] = subjects_dict
-
+ metrics, name = images_no_bet, args.image_type
nb_subjects = len(images_no_bet)
- report = Report(args.output_report)
- report.generate(title="Quality Assurance BET " + args.image_type,
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
+ summary, stats, qa_report, qa_graphs = get_generic_qa_stats_and_graph(
+ metrics, name, args.online
+ )
+ warning_dict = {name: qa_report}
+ summary_dict = {name: dataframe_to_html(stats)}
+
+ metrics_dict = {
+ name: generate_metric_reports_parallel(
+ zip(images_no_bet, images_bet_mask),
+ args.nb_threads,
+ nb_subjects // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ blend_is_mask=True,
+ ),
+ )
+ }
-if __name__ == '__main__':
+ report = Report(args.output_report)
+ report.generate(
+ title="Quality Assurance BET " + args.image_type,
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=qa_graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_dti.py b/scripts/dmriqc_dti.py
index fadfa14..0815f4a 100755
--- a/scripts/dmriqc_dti.py
+++ b/scripts/dmriqc_dti.py
@@ -2,22 +2,32 @@
# -*- coding: utf-8 -*-
import argparse
-import itertools
-from multiprocessing import Pool
+from functools import partial
import os
-import shutil
import numpy as np
-from dmriqcpy.analysis.stats import stats_mean_in_tissues
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_mean_in_tissues
-from dmriqcpy.viz.screenshot import (screenshot_fa_peaks,
- screenshot_mosaic_wrapper)
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_report_package,
+ generate_metric_reports_parallel,
+ get_generic_qa_stats_and_graph,
+)
+from dmriqcpy.viz.screenshot import screenshot_fa_peaks
+from dmriqcpy.viz.utils import dataframe_to_html
+
DESCRIPTION = """
Compute the DTI report in HTML format.
@@ -25,167 +35,137 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='HTML report')
-
- p.add_argument('--fa', nargs='+', required=True,
- help='Folder or FA images in Nifti format.')
-
- p.add_argument('--md', nargs='+', required=True,
- help='Folder of MD images in Nifti format.')
-
- p.add_argument('--rd', nargs='+', required=True,
- help='Folder or RD images in Nifti format.')
-
- p.add_argument('--ad', nargs='+', required=True,
- help='Folder or AD images in Nifti format.')
-
- p.add_argument('--residual', nargs='+', required=True,
- help='Folder or residual images in Nifti format.')
-
- p.add_argument('--evecs_v1', nargs='+', required=True,
- help='Folder or evecs v1 images in Nifti format.')
-
- p.add_argument('--wm', nargs='+', required=True,
- help='Folder or WM mask in Nifti format.')
-
- p.add_argument('--gm', nargs='+', required=True,
- help='Folder or GM mask in Nifti format.')
-
- p.add_argument('--csf', nargs='+', required=True,
- help='Folder or CSF mask in Nifti format.')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+
+ p.add_argument(
+ "--fa", nargs="+", required=True, help="Folder or list of FA images in Nifti format."
+ )
+
+ p.add_argument(
+ "--md", nargs="+", required=True, help="Folder or list of MD images in Nifti format."
+ )
+
+ p.add_argument(
+ "--rd", nargs="+", required=True, help="Folder or list of RD images in Nifti format."
+ )
+
+ p.add_argument(
+ "--ad", nargs="+", required=True, help="Folder or list of AD images in Nifti format."
+ )
+
+ p.add_argument(
+ "--residual",
+ nargs="+",
+ required=True,
+ help="Folder or list of residual images in Nifti format.",
+ )
+ p.add_argument(
+ "--evecs_v1",
+ nargs="+",
+ required=True,
+ help="Folder or list of evecs v1 images in Nifti format.",
+ )
+
+ p.add_argument(
+ "--wm", nargs="+", required=True, help="Folder or list of WM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--gm", nargs="+", required=True, help="Folder or list of GM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--csf", nargs="+", required=True, help="Folder or list of CSF mask in Nifti format."
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(subj_metric, summary, name, skip, nb_columns):
- subjects_dict = {}
- curr_key = os.path.basename(subj_metric).split('.')[0]
- cmap = None
- if name == "Residual":
- cmap = "hot"
- screenshot_path = screenshot_mosaic_wrapper(subj_metric,
- output_prefix=name,
- directory="data", skip=skip,
- nb_columns=nb_columns,
- cmap=cmap)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
- fa = list_files_from_paths(args.fa)
- md = list_files_from_paths(args.md)
- rd = list_files_from_paths(args.rd)
- ad = list_files_from_paths(args.ad)
- residual = list_files_from_paths(args.residual)
- evecs_v1 = list_files_from_paths(args.evecs_v1)
- wm = list_files_from_paths(args.wm)
- gm = list_files_from_paths(args.gm)
- csf = list_files_from_paths(args.csf)
-
- if not len(fa) == len(md) == len(rd) == len(ad) == \
- len(residual) == len(evecs_v1) == len(wm) == len(gm) == len(csf):
- parser.error("Not the same number of images in input.")
-
- all_images = np.concatenate([fa, md, rd, ad, residual, evecs_v1, wm,
- gm, csf])
+ (
+ fa, md, rd, ad, residual, evecs_v1, wm, gm, csf
+ ) = images = [
+ list_files_from_paths(args.fa),
+ list_files_from_paths(args.md),
+ list_files_from_paths(args.rd),
+ list_files_from_paths(args.ad),
+ list_files_from_paths(args.residual),
+ list_files_from_paths(args.evecs_v1),
+ list_files_from_paths(args.wm),
+ list_files_from_paths(args.gm),
+ list_files_from_paths(args.csf),
+ ]
+
+ assert_list_arguments_equal_size(parser, *images)
+ all_images = np.concatenate(images)
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
-
- metrics_names = [[fa, 'FA'], [md, 'MD'], [rd, 'RD'],
- [ad, 'AD'], [residual, "Residual"]]
metrics_dict = {}
summary_dict = {}
graphs = []
warning_dict = {}
- for metrics, name in metrics_names:
- subjects_dict = {}
- curr_metrics = ['Mean {} in WM'.format(name),
- 'Mean {} in GM'.format(name),
- 'Mean {} in CSF'.format(name),
- 'Max {} in WM'.format(name)]
-
- summary, stats = stats_mean_in_tissues(curr_metrics, metrics, wm,
- gm, csf)
-
- warning_dict[name] = analyse_qa(summary, stats, curr_metrics[:3])
- warning_list = np.concatenate(
- [filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graph = graph_mean_in_tissues('Mean {}'.format(name), curr_metrics[:3],
- summary, args.online)
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict[name] = stats_html
-
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(metrics,
- itertools.repeat(summary),
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(
- args.nb_columns)))
-
- pool.close()
- pool.join()
-
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- subjects_dict[key] = dict_sub[key]
- metrics_dict[name] = subjects_dict
-
- subjects_dict = {}
- name = "Peaks"
- for curr_fa, curr_evecs in zip(fa, evecs_v1):
- evecs_filename = os.path.basename(curr_evecs).split('.')[0]
- screenshot_path = screenshot_fa_peaks(curr_fa, curr_evecs, "data")
-
- subjects_dict[evecs_filename] = {}
- subjects_dict[evecs_filename]['screenshot'] = screenshot_path
- metrics_dict[name] = subjects_dict
+ for metrics, name in [
+ [fa, "FA"],
+ [md, "MD"],
+ [rd, "RD"],
+ [ad, "AD"],
+ [residual, "Residual"],
+ ]:
+ summary, stats, qa_report, qa_graphs = get_generic_qa_stats_and_graph(
+ metrics, name, args.online
+ )
+ warning_dict[name] = qa_report
+ summary_dict[name] = dataframe_to_html(stats)
+ graphs.extend(qa_graphs)
+
+ cmap = "hot" if name == "Residual" else None
+ metrics_dict[name] = generate_metric_reports_parallel(
+ zip(metrics),
+ args.nb_threads,
+ len(metrics) // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ cmap=cmap,
+ ),
+ )
+
+    metrics_dict["Peaks"] = {
+        os.path.basename(curr_evecs).split(".")[0]: {
+            "screenshot": screenshot_fa_peaks(curr_fa, curr_evecs, "data")
+        }
+        for curr_fa, curr_evecs in zip(fa, evecs_v1)
+    }
nb_subjects = len(fa)
report = Report(args.output_report)
- report.generate(title="Quality Assurance DTI metrics",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance DTI metrics",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_dwi_protocol.py b/scripts/dmriqc_dwi_protocol.py
index e71855f..723ec3f 100755
--- a/scripts/dmriqc_dwi_protocol.py
+++ b/scripts/dmriqc_dwi_protocol.py
@@ -3,23 +3,35 @@
import argparse
import os
-import shutil
import numpy as np
import pandas as pd
-from dmriqcpy.analysis.utils import (dwi_protocol, read_protocol,
- identify_shells,
- build_ms_from_shell_idx)
+from dmriqcpy.analysis.utils import (
+ dwi_protocol,
+ get_bvecs_from_shells_idxs,
+ identify_shells,
+ read_protocol,
+)
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import (graph_directions_per_shells,
- graph_dwi_protocol,
- graph_subjects_per_shells)
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ assert_inputs_exist,
+ assert_outputs_exist,
+ assert_list_arguments_equal_size,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import get_qa_report
+from dmriqcpy.viz.graph import (
+ graph_directions_per_shells,
+ graph_dwi_protocol,
+ graph_subjects_per_shells,
+)
from dmriqcpy.viz.screenshot import plot_proj_shell
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.viz.utils import dataframe_to_html
+
DESCRIPTION = """
Compute DWI protocol report.
@@ -27,31 +39,45 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='Filename of QC report (in html format).')
-
- p.add_argument('--bval', nargs='+', required=True,
- help='Folder or list of bval files.')
-
- p.add_argument('--bvec', nargs='+', required=True,
- help='Folder or list of bvec files.')
-
- p.add_argument('--metadata', nargs='+',
- help='Folder or list of json files to get the metadata.')
-
- p.add_argument('--dicom_fields', nargs='+',
- default=["EchoTime", "RepetitionTime", "SliceThickness",
- "Manufacturer", "ManufacturersModelName"],
- help='DICOM fields used to compare information. '
- '%(default)s')
-
- p.add_argument('--tolerance', '-t',
- metavar='INT', type=int, default=20,
- help='The tolerated gap between the b-values to '
- 'extract\nand the actual b-values. [%(default)s]')
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+
+ p.add_argument(
+ "--bval", nargs="+", required=True, help="Folder or list of bval files."
+ )
+
+ p.add_argument(
+ "--bvec", nargs="+", required=True, help="Folder or list of bvec files."
+ )
+
+ p.add_argument(
+ "--metadata", nargs="+", help="Folder or list of json files to get the metadata."
+ )
+
+ p.add_argument(
+ "--dicom_fields",
+ nargs="+",
+ default=[
+ "EchoTime",
+ "RepetitionTime",
+ "SliceThickness",
+ "Manufacturer",
+ "ManufacturersModelName",
+ ],
+ help="DICOM fields used to compare information. %(default)s",
+ )
+ p.add_argument(
+ "--tolerance",
+ "-t",
+ metavar="INT",
+ type=int,
+ default=20,
+ help="The tolerated gap between the b-values to extract "
+ "and the actual b-values. [%(default)s]",
+ )
add_online_arg(p)
add_overwrite_arg(p)
@@ -63,85 +89,60 @@ def main():
parser = _build_arg_parser()
args = parser.parse_args()
- if args.metadata:
- metadata = list_files_from_paths(args.metadata)
-
bval = list_files_from_paths(args.bval)
bvec = list_files_from_paths(args.bvec)
- if not len(bval) == len(bvec):
- parser.error("Not the same number of images in input.")
+ files_to_validate = [bval, bvec]
- stats_tags = []
- stats_tags_for_graph = []
+ metadata = None
if args.metadata:
- if not len(metadata) == len(bval):
- parser.error('Number of metadata files: {}.\n'
- 'Number of bval files: {}.\n'
- 'Not the same number of images '
- 'in input'.format(len(metadata),
- len(bval)))
- else:
- stats_tags, stats_tags_for_graph,\
- stats_tags_for_graph_all = read_protocol(metadata,
- args.dicom_fields)
+ metadata = list_files_from_paths(args.metadata)
+ files_to_validate.append(metadata)
+ assert_list_arguments_equal_size(parser, *files_to_validate)
all_data = np.concatenate([bval, bvec])
assert_inputs_exist(parser, all_data)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
+ stats_tags = []
+ stats_tags_for_graph = []
+ stats_tags_for_graph_all = []
+ if args.metadata:
+ (
+ stats_tags,
+ stats_tags_for_graph,
+ stats_tags_for_graph_all,
+ ) = read_protocol(metadata, args.dicom_fields)
name = "DWI Protocol"
summary, stats_for_graph, stats_all, shells = dwi_protocol(bval)
if stats_tags:
- for curr_column in stats_tags:
- tag = curr_column[0]
- curr_df = curr_column[1]
- if 'complete_' in tag:
+ for tag, curr_df in stats_tags:
+ if "complete_" in tag:
metric = curr_df.columns[0]
for nSub in curr_df.index:
- currKey = [nKey for nKey in summary.keys() if nSub in nKey]
- summary[currKey[0]][metric] = curr_df[metric][nSub]
+ curr_key = [nKey for nKey in summary.keys() if nSub in nKey]
+ summary[curr_key[0]][metric] = curr_df[metric][nSub]
if not isinstance(stats_tags_for_graph, list):
- stats_for_graph = pd.concat([stats_for_graph, stats_tags_for_graph],
- axis=1, join="inner")
- stats_all = pd.concat([stats_all, stats_tags_for_graph_all],
- axis=1, join="inner")
-
- warning_dict = {}
- warning_dict[name] = analyse_qa(stats_for_graph, stats_all,
- stats_all.columns)
- warning_images = [filenames for filenames in warning_dict[name].values()]
- warning_list = np.concatenate(warning_images)
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- stats_html = dataframe_to_html(stats_all)
- summary_dict = {}
- summary_dict[name] = stats_html
+ stats_for_graph = pd.concat([stats_for_graph, stats_tags_for_graph], axis=1, join="inner")
+ stats_all = pd.concat([stats_all, stats_tags_for_graph_all], axis=1, join="inner")
+
+ warning_dict = {name: get_qa_report(stats_for_graph, stats_all, stats_all.columns)}
+ summary_dict = {name: dataframe_to_html(stats_all)}
if args.metadata:
for curr_tag in stats_tags:
- if 'complete_' not in curr_tag[0]:
+ if "complete_" not in curr_tag[0]:
summary_dict[curr_tag[0]] = dataframe_to_html(curr_tag[1])
- graphs = []
-
- graphs.append(
- graph_directions_per_shells("Nbr directions per shell",
- shells, args.online))
-
- graphs.append(graph_subjects_per_shells("Nbr subjects per shell",
- shells, args.online))
+ graphs = [
+ graph_directions_per_shells("Nbr directions per shell", shells, not args.online),
+ graph_subjects_per_shells("Nbr subjects per shell", shells, not args.online),
+ ]
for c in stats_for_graph.keys():
- graph = graph_dwi_protocol(c, c, stats_for_graph, args.online)
- graphs.append(graph)
+ graphs.append(graph_dwi_protocol(c, c, stats_for_graph, not args.online))
subjects_dict = {}
for curr_bval, curr_bvec in zip(bval, bvec):
@@ -150,35 +151,40 @@ def main():
points = np.genfromtxt(curr_bvec)
if points.shape[0] == 3:
points = points.T
- bvals = np.genfromtxt(curr_bval)
- centroids, shell_idx = identify_shells(bvals)
- ms = build_ms_from_shell_idx(points, shell_idx)
- plot_proj_shell(ms, centroids, use_sym=True, use_sphere=True,
- same_color=False, rad=0.025, opacity=0.2,
- ofile=os.path.join("data", name.replace(" ", "_") +
- "_" + curr_subj),
- ores=(800, 800))
- subjects_dict[curr_subj]['screenshot'] = os.path.join("data",
- name.replace(" ",
- "_") +
- "_" +
- curr_subj +
- '.png')
- metrics_dict = {}
+ centroids, shell_idx = identify_shells(np.genfromtxt(curr_bval))
+ plot_proj_shell(
+ get_bvecs_from_shells_idxs(points, shell_idx),
+ centroids,
+ opacity=0.2,
+ ofile=os.path.join(
+ "data",
+ name.replace(" ", "_") + "_" + curr_subj
+ ),
+ ores=(800, 800),
+ )
+ subjects_dict[curr_subj]["screenshot"] = os.path.join(
+ "data",
+ name.replace(" ", "_") + "_" + curr_subj + ".png"
+ )
+
for subj in bval:
curr_subj = os.path.basename(subj).split('.')[0]
summary_html = dataframe_to_html(summary[subj])
- subjects_dict[curr_subj]['stats'] = summary_html
- metrics_dict[name] = subjects_dict
+ subjects_dict[curr_subj]["stats"] = summary_html
+ metrics_dict = {name: subjects_dict}
nb_subjects = len(bval)
report = Report(args.output_report)
- report.generate(title="Quality Assurance DWI protocol",
- nb_subjects=nb_subjects, metrics_dict=metrics_dict,
- summary_dict=summary_dict, graph_array=graphs,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance DWI protocol",
+ nb_subjects=nb_subjects,
+ metrics_dict=metrics_dict,
+ summary_dict=summary_dict,
+ graph_array=graphs,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_fodf.py b/scripts/dmriqc_fodf.py
index 3ff3ce6..cf0eb69 100755
--- a/scripts/dmriqc_fodf.py
+++ b/scripts/dmriqc_fodf.py
@@ -2,21 +2,29 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
-import itertools
-from multiprocessing import Pool
+from functools import partial
import numpy as np
-from dmriqcpy.analysis.stats import stats_mean_in_tissues
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_mean_in_tissues
-from dmriqcpy.viz.screenshot import screenshot_mosaic_wrapper
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+ get_qa_stats_and_graph_in_tissues,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -25,142 +33,126 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='HTML report')
-
- p.add_argument('--afd_max', nargs='+', required=True,
- help='Folder or list of AFD max images in Nifti format.')
-
- p.add_argument('--afd_sum', nargs='+', required=True,
- help='Folder or list of AFD sum images in Nifti format.')
-
- p.add_argument('--afd_total', nargs='+', required=True,
- help='Folder or list of AFD total images in Nifti format.')
-
- p.add_argument('--nufo', nargs='+', required=True,
- help='Folder or list of NUFO max images in Nifti format.')
-
- p.add_argument('--wm', nargs='+', required=True,
- help='Folder or list of WM mask in Nifti format.')
-
- p.add_argument('--gm', nargs='+', required=True,
- help='Folder or list of GM mask in Nifti format.')
-
- p.add_argument('--csf', nargs='+', required=True,
- help='Folder or list of CSF mask in Nifti format.')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+ p.add_argument(
+ "--afd_max",
+ nargs="+",
+ required=True,
+ help="Folder or list of AFD max images in Nifti format.",
+ )
+ p.add_argument(
+ "--afd_sum",
+ nargs="+",
+ required=True,
+ help="Folder or list of AFD sum images in Nifti format.",
+ )
+ p.add_argument(
+ "--afd_total",
+ nargs="+",
+ required=True,
+ help="Folder or list of AFD total images in Nifti format.",
+ )
+ p.add_argument(
+ "--nufo",
+ nargs="+",
+ required=True,
+ help="Folder or list of NUFO max images in Nifti format.",
+ )
+
+ p.add_argument(
+ "--wm", nargs="+", required=True, help="Folder or list of WM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--gm", nargs="+", required=True, help="Folder or list of GM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--csf", nargs="+", required=True, help="Folder or list of CSF mask in Nifti format."
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(subj_metric, summary, name, skip, nb_columns):
- subjects_dict = {}
- curr_key = os.path.basename(subj_metric).split('.')[0]
- screenshot_path = screenshot_mosaic_wrapper(subj_metric,
- output_prefix=name,
- directory="data", skip=skip,
- nb_columns=nb_columns)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
- afd_max = list_files_from_paths(args.afd_max)
- afd_sum = list_files_from_paths(args.afd_sum)
- afd_total = list_files_from_paths(args.afd_total)
- nufo = list_files_from_paths(args.nufo)
- wm = list_files_from_paths(args.wm)
- gm = list_files_from_paths(args.gm)
- csf = list_files_from_paths(args.csf)
-
- if not len(afd_max) == len(afd_sum) == len(afd_total) ==\
- len(nufo) == len(wm) == len(gm) == len(csf):
- parser.error("Not the same number of images in input.")
-
- all_images = np.concatenate([afd_max, afd_sum, afd_total,
- nufo, wm, gm, csf])
+ (
+ afd_max,
+ afd_sum,
+ afd_total,
+ nufo,
+ wm,
+ gm,
+ csf
+ ) = images = [
+ list_files_from_paths(args.afd_max),
+ list_files_from_paths(args.afd_sum),
+ list_files_from_paths(args.afd_total),
+ list_files_from_paths(args.nufo),
+ list_files_from_paths(args.wm),
+ list_files_from_paths(args.gm),
+ list_files_from_paths(args.csf)
+ ]
+
+ assert_list_arguments_equal_size(parser, *images)
+ all_images = np.concatenate(images)
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
-
- metrics_names = [[afd_max, 'AFD_max'], [afd_sum, 'AFD_sum'],
- [afd_total, 'AFD_total'], [nufo, 'NUFO']]
metrics_dict = {}
summary_dict = {}
graphs = []
warning_dict = {}
- for metrics, name in metrics_names:
- subjects_dict = {}
- curr_metrics = ['Mean {} in WM'.format(name),
- 'Mean {} in GM'.format(name),
- 'Mean {} in CSF'.format(name),
- 'Max {} in WM'.format(name)]
-
- summary, stats = stats_mean_in_tissues(curr_metrics, metrics, wm,
- gm, csf)
- warning_dict[name] = analyse_qa(summary, stats, curr_metrics[:3])
- warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graph = graph_mean_in_tissues('Mean {}'.format(name), curr_metrics[:3],
- summary, args.online)
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict[name] = stats_html
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(metrics,
- itertools.repeat(summary),
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(args.nb_columns)))
-
- pool.close()
- pool.join()
-
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- curr_key = os.path.basename(key).split('.')[0]
- subjects_dict[curr_key] = dict_sub[curr_key]
- metrics_dict[name] = subjects_dict
+ for metrics, name in [
+ [afd_max, "AFD_max"],
+ [afd_sum, "AFD_sum"],
+ [afd_total, "AFD_total"],
+ [nufo, "NUFO"],
+ ]:
+ summary, stats, qa_report, qa_graphs = get_qa_stats_and_graph_in_tissues(
+ metrics, name, wm, gm, csf, args.online
+ )
+ warning_dict[name] = qa_report
+ summary_dict[name] = dataframe_to_html(stats)
+ graphs.extend(qa_graphs)
+
+ metrics_dict[name] = generate_metric_reports_parallel(
+ zip(metrics),
+ args.nb_threads,
+ len(metrics) // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ ),
+ )
nb_subjects = len(afd_max)
report = Report(args.output_report)
- report.generate(title="Quality Assurance FODF metrics",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance FODF metrics",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_frf.py b/scripts/dmriqc_frf.py
index 7101200..7bf13a4 100755
--- a/scripts/dmriqc_frf.py
+++ b/scripts/dmriqc_frf.py
@@ -5,15 +5,17 @@
import os
import shutil
-import numpy as np
-
-from dmriqcpy.analysis.stats import stats_frf
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_frf_eigen, graph_frf_b0
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ assert_inputs_exist,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import get_frf_qa_stats_and_graph
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -22,15 +24,16 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('frf', nargs='+',
- help='Folder or list of fiber response function (frf) '
- 'files (in txt format).')
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
- p.add_argument('output_report',
- help='Filename of QC report (in html format).')
+ p.add_argument(
+ "frf",
+ nargs="+",
+ help="Folder or list of fiber response function (frf) files (in txt format).",
+ )
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
add_online_arg(p)
add_overwrite_arg(p)
@@ -46,46 +49,33 @@ def main():
assert_inputs_exist(parser, frf)
assert_outputs_exist(parser, args, [args.output_report, "libs"])
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
+ clean_output_directories(False)
name = "FRF"
- metrics_names = ["Mean Eigen value 1", "Mean Eigen value 2", "Mean B0"]
-
- warning_dict = {}
- summary, stats = stats_frf(metrics_names, frf)
- warning_dict[name] = analyse_qa(summary, stats, metrics_names)
- warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(set(warning_list))
-
- graphs = []
- graphs.append(graph_frf_eigen("EigenValues", metrics_names, summary,
- args.online))
- graphs.append(graph_frf_b0("Mean B0", metrics_names, summary, args.online))
-
+ nb_subjects = len(frf)
- summary_dict = {}
- stats_html = dataframe_to_html(stats)
- summary_dict[name] = stats_html
+ summary, stats, qa_report, qa_graphs = get_frf_qa_stats_and_graph(frf, args.online)
+ warning_dict = {name: qa_report}
+ summary_dict = {name: dataframe_to_html(stats)}
- metrics_dict = {}
- subjects_dict = {}
- for subj_metric in frf:
- curr_subj = os.path.basename(subj_metric).split('.')[0]
- summary_html = dataframe_to_html(summary.loc[curr_subj].to_frame())
- subjects_dict[curr_subj] = {}
- subjects_dict[curr_subj]['stats'] = summary_html
- metrics_dict[name] = subjects_dict
+ metrics_dict = {
+ name: {
+ subj_metric: {"stats": dataframe_to_html(summary.loc[subj_metric])}
+ for subj_metric in frf
+ }
+ }
- nb_subjects = len(frf)
report = Report(args.output_report)
- report.generate(title="Quality Assurance FRF",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance FRF",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=qa_graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_from_screenshot.py b/scripts/dmriqc_from_screenshot.py
index 20b0874..baac1bb 100755
--- a/scripts/dmriqc_from_screenshot.py
+++ b/scripts/dmriqc_from_screenshot.py
@@ -8,8 +8,13 @@
import shutil
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist)
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ assert_inputs_exist,
+ assert_outputs_exist,
+ clean_output_directories,
+)
from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -18,20 +23,24 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
- p.add_argument('output_report',
- help='HTML report')
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
- p.add_argument('--data', nargs='+',
- help='Screenshot and stats (optional) folders.')
+ p.add_argument(
+ "--data",
+ nargs="+",
+ required=True,
+ help="Screenshot and stats (optional) folders."
+ )
- p.add_argument('--stats', action="store_true",
- help='Use included csv files.')
+ p.add_argument(
+ "--stats", action="store_true", help="Use included csv files."
+ )
- p.add_argument('--sym_link', action="store_true",
- help='Use symlink instead of copy')
+ p.add_argument("--sym_link", action="store_true", help="Use symlink instead of copy.")
add_online_arg(p)
add_overwrite_arg(p)
@@ -45,56 +54,60 @@ def main():
assert_inputs_exist(parser, args.data, are_directories=True)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
nb_subjects = len(os.listdir(args.data[0]))
for folder in args.data[1:]:
nb_subjects += len(os.listdir(folder))
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
-
metrics_dict = {}
types = ""
for folder in args.data:
screenshot_files = []
stats_files = []
- for ext in ["png","jpeg","jpg"]:
+ for ext in ["png", "jpeg", "jpg"]:
screenshot_files = screenshot_files + sorted(glob.glob(folder + '/*' + ext))
if args.stats:
stats_files = sorted(glob.glob(folder + '/*.csv'))
if len(screenshot_files) != len(stats_files):
parser.error("Not same number of stats and screenshots")
-
name = os.path.basename(os.path.normpath(folder))
subjects_dict = {}
for index, curr_screenshot in enumerate(screenshot_files):
screenshot_basename = os.path.basename(curr_screenshot)
if args.sym_link:
- os.symlink(os.path.abspath(folder) + "/" + screenshot_basename,
- "data/" + screenshot_basename)
+ os.symlink(
+ os.path.abspath(folder) + "/" + screenshot_basename,
+ "data/" + screenshot_basename,
+ )
else:
- shutil.copyfile(curr_screenshot,
- "data/" + screenshot_basename)
+ shutil.copyfile(
+ curr_screenshot, "data/" + screenshot_basename
+ )
subjects_dict[screenshot_basename] = {}
- subjects_dict[screenshot_basename]['screenshot'] =\
+ subjects_dict[screenshot_basename]['screenshot'] = (
"data/" + screenshot_basename
+ )
+
if args.stats:
- subjects_dict[screenshot_basename]['stats'] = dataframe_to_html(pd.read_csv(stats_files[index], index_col=False))
+ stats = dataframe_to_html(
+ pd.read_csv(stats_files[index], index_col=False)
+ )
+ subjects_dict[screenshot_basename]['stats'] = stats
metrics_dict[name] = subjects_dict
types += " {0}".format(name)
report = Report(args.output_report)
- report.generate(title="Quality Assurance" + types,
- nb_subjects=nb_subjects, metrics_dict=metrics_dict,
- online=args.online)
+ report.generate(
+ title="Quality Assurance" + types,
+ nb_subjects=nb_subjects,
+ metrics_dict=metrics_dict,
+ online=args.online,
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_generic.py b/scripts/dmriqc_generic.py
index c361c95..a75a733 100755
--- a/scripts/dmriqc_generic.py
+++ b/scripts/dmriqc_generic.py
@@ -2,21 +2,30 @@
# -*- coding: utf-8 -*-
import argparse
-import itertools
-from multiprocessing import Pool
-import os
-import shutil
+from functools import partial
import numpy as np
-from dmriqcpy.analysis.stats import stats_mean_in_tissues, stats_mean_median
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_mean_in_tissues, graph_mean_median
-from dmriqcpy.viz.screenshot import screenshot_mosaic_wrapper
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+ get_generic_qa_stats_and_graph,
+ get_qa_stats_and_graph_in_tissues,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
Compute report in HTML format from images.
@@ -24,63 +33,54 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('image_type',
- help='Type of image (e.g. B0 resample).')
-
- p.add_argument('output_report',
- help='HTML report.')
-
- p.add_argument('--images', nargs='+', required=True,
- help='Folder or list of images in Nifti format.')
-
- p.add_argument('--wm', nargs='+',
- help='Folder or list of WM mask in Nifti format.')
-
- p.add_argument('--gm', nargs='+',
- help='Folder or list of GM mask in Nifti format')
-
- p.add_argument('--csf', nargs='+',
- help='Folder or list of CSF mask in Nifti format.')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--duration', default=100, type=int,
- help='Duration of each image in GIF in milliseconds.'
- ' [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("image_type", help="Type of image (e.g. B0 resample).")
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+
+ p.add_argument(
+ "--images",
+ nargs="+",
+ required=True,
+ help="Folder or list of images in Nifti format."
+ )
+
+ p.add_argument(
+ "--wm",
+ nargs="+",
+ help="Folder or list of WM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--gm",
+ nargs="+",
+        help="Folder or list of GM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--csf",
+ nargs="+",
+ help="Folder or list of CSF mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--duration",
+ default=100,
+ type=int,
+ help="Duration of each image in GIF in milliseconds. [%(default)s]",
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(subj_metric, summary, name, skip, nb_columns, duration):
- subjects_dict = {}
- curr_key = os.path.basename(subj_metric).split('.')[0]
- screenshot_path = screenshot_mosaic_wrapper(subj_metric,
- output_prefix=name,
- directory="data", skip=skip,
- nb_columns=nb_columns,
- duration=duration)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
@@ -93,79 +93,56 @@ def main():
wm = list_files_from_paths(args.wm)
gm = list_files_from_paths(args.gm)
csf = list_files_from_paths(args.csf)
- if not len(images) == len(wm) == len(gm) == len(csf):
- parser.error("Not the same number of images in input.")
+ assert_list_arguments_equal_size(parser, images, wm, gm, csf)
with_tissues = True
all_images = np.concatenate([images, wm, gm, csf])
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
-
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
+ clean_output_directories()
name = args.image_type
+ nb_subjects = len(images)
if with_tissues:
- curr_metrics = ['Mean {} in WM'.format(name),
- 'Mean {} in GM'.format(name),
- 'Mean {} in CSF'.format(name),
- 'Max {} in WM'.format(name)]
- summary, stats = stats_mean_in_tissues(curr_metrics, images,
- wm, gm, csf)
- graph = graph_mean_in_tissues('Mean {}'.format(name), curr_metrics[:3],
- summary, args.online)
+ summary, stats, qa_report, qa_graphs = get_qa_stats_and_graph_in_tissues(
+ images, name, wm, gm, csf, args.online
+ )
else:
- curr_metrics = ['Mean {}'.format(name),
- 'Median {}'.format(name)]
- summary, stats = stats_mean_median(curr_metrics, images)
- graph = graph_mean_median('Mean {}'.format(name), curr_metrics,
- summary, args.online)
-
- warning_dict = {}
- warning_dict[name] = analyse_qa(summary, stats, curr_metrics[:3])
- warning_list = np.concatenate(
- [filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graphs = []
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict = {}
- summary_dict[name] = stats_html
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(images,
- itertools.repeat(summary),
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(args.nb_columns),
- itertools.repeat(args.duration)))
- pool.close()
- pool.join()
-
- metrics_dict = {}
- subjects_dict = {}
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- curr_key = os.path.basename(key).split('.')[0]
- subjects_dict[curr_key] = dict_sub[curr_key]
- metrics_dict[name] = subjects_dict
+ summary, stats, qa_report, qa_graphs = get_generic_qa_stats_and_graph(
+ images, name, args.online
+ )
+
+ warning_dict = {name: qa_report}
+ summary_dict = {name: dataframe_to_html(stats)}
+
+ metrics_dict = {
+ name: generate_metric_reports_parallel(
+ zip(images),
+ args.nb_threads,
+ nb_subjects // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ duration=args.duration,
+ ),
+ )
+ }
- nb_subjects = len(images)
report = Report(args.output_report)
- report.generate(title="Quality Assurance " + args.image_type,
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance " + args.image_type,
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=qa_graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_labels.py b/scripts/dmriqc_labels.py
index 8c9cc52..f1b4e00 100755
--- a/scripts/dmriqc_labels.py
+++ b/scripts/dmriqc_labels.py
@@ -2,18 +2,26 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
+from functools import partial
-import itertools
-from multiprocessing import Pool
import numpy as np
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.screenshot import screenshot_mosaic_blend
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+)
DESCRIPTION = """
@@ -25,60 +33,45 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='HTML report.')
-
- p.add_argument('--t1', nargs='+', required=True,
- help='Folder or list of T1 images in Nifti format.')
-
- p.add_argument('--label', nargs='+', required=True,
- help='Folder or list of label images in Nifti format.')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--lut', nargs=1, default="",
- help='Look Up Table for RGB.')
-
- p.add_argument('--compute_lut', action='store_true',
- help='Compute Look Up Table for RGB.')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+
+ p.add_argument(
+ "--t1",
+ nargs="+",
+ required=True,
+ help="Folder or list of T1 images in Nifti format."
+ )
+
+ p.add_argument(
+ "--label",
+ nargs="+",
+ required=True,
+ help="Folder or list of label images in Nifti format."
+ )
+
+ p.add_argument(
+ "--lut", nargs=1, default="", help="Look Up Table for RGB."
+ )
+
+ p.add_argument(
+ "--compute_lut",
+ action="store_true",
+ help="Compute Look Up Table for RGB."
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(t1, label, name, skip, nb_columns, lut, compute_lut):
- subjects_dict = {}
- if not lut:
- lut = None
-
- screenshot_path = screenshot_mosaic_blend(t1, label,
- output_prefix=name,
- directory="data",
- blend_val=0.4,
- skip=skip, nb_columns=nb_columns,
- lut=lut,
- compute_lut=compute_lut)
-
- key = os.path.basename(t1).split('.')[0]
-
- subjects_dict[key] = {}
- subjects_dict[key]['screenshot'] = screenshot_path
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
@@ -90,7 +83,7 @@ def main():
parser.error("Not the same number of images in input.")
if len(label) == 1:
- label = label * len(args.t1)
+ label = label * len(t1)
all_images = np.concatenate([t1, label])
if args.lut:
@@ -98,42 +91,34 @@ def main():
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
-
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
+ clean_output_directories()
name = "Labels"
+ nb_subjects = len(t1)
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(t1,
- label,
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(args.nb_columns),
- itertools.repeat(args.lut),
- itertools.repeat(args.compute_lut)))
- pool.close()
- pool.join()
-
- metrics_dict = {}
- subjects_dict = {}
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- curr_key = os.path.basename(key).split('.')[0]
- subjects_dict[curr_key] = dict_sub[curr_key]
- metrics_dict[name] = subjects_dict
+ metrics_dict = {
+ name: generate_metric_reports_parallel(
+ zip(t1, label),
+ args.nb_threads,
+ nb_subjects // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ lut=args.lut,
+ compute_lut=args.compute_lut
+ ),
+ )
+ }
- nb_subjects = len(t1)
report = Report(args.output_report)
- report.generate(title="Quality Assurance labels",
- nb_subjects=nb_subjects, metrics_dict=metrics_dict,
- online=args.online)
+ report.generate(
+ title="Quality Assurance labels",
+ nb_subjects=nb_subjects,
+ metrics_dict=metrics_dict,
+ online=args.online,
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_registration.py b/scripts/dmriqc_registration.py
index 0d704e8..1b2c2df 100755
--- a/scripts/dmriqc_registration.py
+++ b/scripts/dmriqc_registration.py
@@ -2,22 +2,29 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
+from functools import partial
-import itertools
-from multiprocessing import Pool
import numpy as np
-
-from dmriqcpy.analysis.stats import stats_mean_in_tissues
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_mean_in_tissues
-from dmriqcpy.viz.screenshot import screenshot_mosaic_blend
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+ get_qa_stats_and_graph_in_tissues,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -26,60 +33,43 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='HTML report')
-
- p.add_argument('--t1_warped', nargs='+', required=True,
- help='Folder or list of T1 registered images '
- 'in Nifti format.')
-
- p.add_argument('--rgb', nargs='+', required=True,
- help='Folder or list of RGB images in Nifti format.')
-
- p.add_argument('--wm', nargs='+', required=True,
- help='Folder or list of WM mask in Nifti format.')
-
- p.add_argument('--gm', nargs='+', required=True,
- help='Folder or list of GM mask in Nifti format.')
-
- p.add_argument('--csf', nargs='+', required=True,
- help='Folder or list of CSF mask in Nifti format.')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+ p.add_argument(
+ "--t1_warped",
+ nargs="+",
+ required=True,
+ help="Folder or list of T1 registered images in Nifti format.",
+ )
+
+ p.add_argument(
+ "--rgb", nargs="+", required=True, help="Folder or list of RGB images in Nifti format."
+ )
+
+ p.add_argument(
+ "--wm", nargs="+", required=True, help="Folder or list of WM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--gm", nargs="+", required=True, help="Folder or list of GM mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--csf", nargs="+", required=True, help="Folder or list of CSF mask in Nifti format."
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(t1_metric, rgb_metric, summary, name, skip, nb_columns):
- subjects_dict = {}
- curr_key = os.path.basename(t1_metric).split('.')[0]
- screenshot_path = screenshot_mosaic_blend(t1_metric, rgb_metric,
- output_prefix=name,
- directory="data",
- blend_val=0.5,
- skip=skip, nb_columns=nb_columns)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
@@ -90,70 +80,47 @@ def main():
gm = list_files_from_paths(args.gm)
csf = list_files_from_paths(args.csf)
- if not len(t1_warped) == len(rgb) == len(wm) ==\
- len(gm) == len(csf):
- parser.error("Not the same number of images in input.")
-
+ assert_list_arguments_equal_size(parser, t1_warped, rgb, wm, gm, csf)
all_images = np.concatenate([t1_warped, rgb, wm, gm, csf])
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
-
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
+ clean_output_directories()
name = "Register T1"
- curr_metrics = ['Mean {} in WM'.format(name),
- 'Mean {} in GM'.format(name),
- 'Mean {} in CSF'.format(name),
- 'Max {} in WM'.format(name)]
-
- warning_dict = {}
- summary, stats = stats_mean_in_tissues(curr_metrics, t1_warped,
- wm, gm, csf)
- warning_dict[name] = analyse_qa(summary, stats, curr_metrics[:3])
- warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graphs = []
- graph = graph_mean_in_tissues('Mean {}'.format(name), curr_metrics[:3],
- summary, args.online)
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict = {}
- summary_dict[name] = stats_html
-
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(t1_warped,
- rgb,
- itertools.repeat(summary),
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(args.nb_columns)))
- pool.close()
- pool.join()
-
- metrics_dict = {}
- subjects_dict = {}
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- curr_key = os.path.basename(key).split('.')[0]
- subjects_dict[curr_key] = dict_sub[curr_key]
- metrics_dict[name] = subjects_dict
-
nb_subjects = len(t1_warped)
- report = Report(args.output_report)
- report.generate(title="Quality Assurance registration",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
+ summary, stats, qa_report, qa_graphs = get_qa_stats_and_graph_in_tissues(
+ t1_warped, name, wm, gm, csf, args.online
+ )
+
+ warning_dict = {name: qa_report}
+ summary_dict = {name: dataframe_to_html(stats)}
+
+ metrics_dict = {
+ name: generate_metric_reports_parallel(
+ zip(t1_warped, rgb),
+ args.nb_threads,
+ nb_subjects // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ ),
+ )
+ }
-if __name__ == '__main__':
+ report = Report(args.output_report)
+ report.generate(
+ title="Quality Assurance registration",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=qa_graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_tissues.py b/scripts/dmriqc_tissues.py
index b47b093..b898426 100755
--- a/scripts/dmriqc_tissues.py
+++ b/scripts/dmriqc_tissues.py
@@ -2,22 +2,29 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
+from functools import partial
-import itertools
-from multiprocessing import Pool
import numpy as np
-
-from dmriqcpy.analysis.stats import stats_mask_volume
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_mask_volume
-from dmriqcpy.viz.screenshot import screenshot_mosaic_wrapper
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+ get_mask_qa_stats_and_graph,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -26,52 +33,24 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='HTML report')
-
- p.add_argument('--wm', nargs='+', required=True,
- help='WM mask in Nifti format')
-
- p.add_argument('--gm', nargs='+', required=True,
- help='GM mask in Nifti format')
-
- p.add_argument('--csf', nargs='+', required=True,
- help='CSF mask in Nifti format')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+ p.add_argument("--wm", nargs="+", required=True, help="Folder or list of WM mask in Nifti format")
+ p.add_argument("--gm", nargs="+", required=True, help="Folder or list of GM mask in Nifti format")
+ p.add_argument("--csf", nargs="+", required=True, help="Folder or list of CSF mask in Nifti format")
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(subj_metric, summary, name, skip, nb_columns):
- subjects_dict = {}
- curr_key = os.path.basename(subj_metric).split('.')[0]
- screenshot_path = screenshot_mosaic_wrapper(subj_metric,
- output_prefix=name,
- directory="data", skip=skip,
- nb_columns=nb_columns)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
@@ -80,67 +59,52 @@ def main():
gm = list_files_from_paths(args.gm)
csf = list_files_from_paths(args.csf)
- if not len(wm) == len(gm) == len(csf):
- parser.error("Not the same number of images in input.")
-
+ assert_list_arguments_equal_size(parser, wm, gm, csf)
all_images = np.concatenate([wm, gm, csf])
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
-
- metrics_names = [[wm, 'WM mask'],
- [gm, 'GM mask'],
- [csf, 'CSF mask']]
metrics_dict = {}
summary_dict = {}
graphs = []
warning_dict = {}
- for metrics, name in metrics_names:
- columns = ["{} volume".format(name)]
- summary, stats = stats_mask_volume(columns, metrics)
-
- warning_dict[name] = analyse_qa(summary, stats, columns)
- warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graph = graph_mask_volume('{} mean volume'.format(name),
- columns, summary, args.online)
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict[name] = stats_html
-
- subjects_dict = {}
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(metrics,
- itertools.repeat(summary),
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(args.nb_columns)))
- pool.close()
- pool.join()
-
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- curr_key = os.path.basename(key).split('.')[0]
- subjects_dict[curr_key] = dict_sub[curr_key]
- metrics_dict[name] = subjects_dict
+ for metrics, name in [
+ [wm, "WM mask"],
+ [gm, "GM mask"],
+ [csf, "CSF mask"],
+ ]:
+ summary, stats, qa_report, qa_graphs = get_mask_qa_stats_and_graph(
+ metrics, name, args.online
+ )
+
+ warning_dict[name] = qa_report
+ summary_dict[name] = dataframe_to_html(stats)
+ graphs.extend(qa_graphs)
+
+ metrics_dict[name] = generate_metric_reports_parallel(
+ zip(metrics),
+ args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ ),
+ )
nb_subjects = len(wm)
report = Report(args.output_report)
- report.generate(title="Quality Assurance tissue segmentation",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance tissue segmentation",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_tracking_maps.py b/scripts/dmriqc_tracking_maps.py
index c9c5eb9..b73c96e 100755
--- a/scripts/dmriqc_tracking_maps.py
+++ b/scripts/dmriqc_tracking_maps.py
@@ -2,22 +2,29 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
+from functools import partial
-import itertools
-from multiprocessing import Pool
import numpy as np
-
-from dmriqcpy.analysis.stats import stats_mask_volume
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.viz.graph import graph_mask_volume
-from dmriqcpy.viz.screenshot import screenshot_mosaic_wrapper
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_online_arg,
+ add_overwrite_arg,
+ add_nb_columns_arg,
+ add_nb_threads_arg,
+ add_skip_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_report_package,
+ generate_metric_reports_parallel,
+ get_mask_qa_stats_and_graph,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -26,58 +33,40 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('tracking_type', choices=["pft", "local"],
- help='Tracking type')
-
- p.add_argument('output_report',
- help='HTML report')
-
- p.add_argument('--seeding_mask', nargs='+', required=True,
- help='Folder or list of seeding mask in Nifti format')
-
- p.add_argument('--tracking_mask', nargs='+',
- help='Folder or list of tracking mask in Nifti format')
-
- p.add_argument('--map_include', nargs='+',
- help='Folder or list of map include in Nifti format')
-
- p.add_argument('--map_exclude', nargs='+',
- help='Folder or list of map exlude in Nifti format')
-
- p.add_argument('--skip', default=2, type=int,
- help='Number of images skipped to build the '
- 'mosaic. [%(default)s]')
-
- p.add_argument('--nb_columns', default=12, type=int,
- help='Number of columns for the mosaic. [%(default)s]')
-
- p.add_argument('--nb_threads', type=int, default=1,
- help='Number of threads. [%(default)s]')
-
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("tracking_type", choices=["pft", "local"], help="Tracking type.")
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+ p.add_argument(
+ "--seeding_mask",
+ nargs="+",
+ required=True,
+ help="Folder or list of seeding mask in Nifti format.",
+ )
+
+ p.add_argument(
+ "--tracking_mask", nargs="+", help="Folder or list of tracking mask in Nifti format."
+ )
+
+ p.add_argument(
+ "--map_include", nargs="+", help="Folder or list of map include in Nifti format."
+ )
+
+ p.add_argument(
+        "--map_exclude", nargs="+", help="Folder or list of map exclude in Nifti format."
+ )
+
+ add_skip_arg(p)
+ add_nb_columns_arg(p)
+ add_nb_threads_arg(p)
add_online_arg(p)
add_overwrite_arg(p)
return p
-def _subj_parralel(subj_metric, summary, name, skip, nb_columns):
- subjects_dict = {}
- curr_key = os.path.basename(subj_metric).split('.')[0]
- screenshot_path = screenshot_mosaic_wrapper(subj_metric,
- output_prefix=name,
- directory="data", skip=skip,
- nb_columns=nb_columns)
-
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- return subjects_dict
-
-
def main():
parser = _build_arg_parser()
args = parser.parse_args()
@@ -86,79 +75,68 @@ def main():
if args.tracking_type == "local":
tracking_mask = list_files_from_paths(args.tracking_mask)
- if not len(seeding_mask) == len(tracking_mask):
- parser.error("Not the same number of images in input.")
- all_images = np.concatenate([args.seeding_mask, args.tracking_mask])
+ assert_list_arguments_equal_size(parser, seeding_mask, tracking_mask)
+ all_images = np.concatenate([seeding_mask, tracking_mask])
+ metrics_names = [
+ [seeding_mask, "Seeding mask"],
+ [tracking_mask, "Tracking mask"],
+ ]
else:
map_include = list_files_from_paths(args.map_include)
map_exclude = list_files_from_paths(args.map_exclude)
- if not len(seeding_mask) == len(map_include) ==\
- len(map_exclude):
- parser.error("Not the same number of images in input.")
- all_images = np.concatenate([seeding_mask, map_include,
- map_exclude])
+ assert_list_arguments_equal_size(
+ parser, seeding_mask, map_include, map_exclude
+ )
+ all_images = np.concatenate(
+ [seeding_mask, map_include, map_exclude]
+ )
+ metrics_names = [
+ [seeding_mask, "Seeding mask"],
+ [map_include, "Map include"],
+ [map_exclude, "Maps exclude"],
+ ]
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
+ clean_output_directories()
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
-
- if args.tracking_type == "local":
- metrics_names = [[seeding_mask, 'Seeding mask'],
- [tracking_mask, 'Tracking mask']]
- else:
- metrics_names = [[seeding_mask, 'Seeding mask'],
- [map_include, 'Map include'],
- [map_exclude, 'Maps exclude']]
metrics_dict = {}
summary_dict = {}
graphs = []
warning_dict = {}
for metrics, name in metrics_names:
- columns = ["{} volume".format(name)]
- summary, stats = stats_mask_volume(columns, metrics)
-
- warning_dict[name] = analyse_qa(summary, stats, columns)
- warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graph = graph_mask_volume('{} mean volume'.format(name),
- columns, summary, args.online)
- graphs.append(graph)
-
- stats_html = dataframe_to_html(stats)
- summary_dict[name] = stats_html
-
- subjects_dict = {}
- pool = Pool(args.nb_threads)
- subjects_dict_pool = pool.starmap(_subj_parralel,
- zip(metrics,
- itertools.repeat(summary),
- itertools.repeat(name),
- itertools.repeat(args.skip),
- itertools.repeat(args.nb_columns)))
- pool.close()
- pool.join()
-
- for dict_sub in subjects_dict_pool:
- for key in dict_sub:
- curr_key = os.path.basename(key).split('.')[0]
- subjects_dict[curr_key] = dict_sub[curr_key]
- metrics_dict[name] = subjects_dict
+ summary, stats, qa_report, qa_graphs = get_mask_qa_stats_and_graph(
+ metrics, name, args.online
+ )
+
+ warning_dict[name] = qa_report
+ summary_dict[name] = dataframe_to_html(stats)
+ graphs.extend(qa_graphs)
+
+ metrics_dict[name] = generate_metric_reports_parallel(
+ zip(metrics),
+ args.nb_threads,
+ len(metrics) // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ skip=args.skip,
+ nb_columns=args.nb_columns,
+ ),
+ )
nb_subjects = len(seeding_mask)
report = Report(args.output_report)
- report.generate(title="Quality Assurance tracking maps",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
-
-
-if __name__ == '__main__':
+ report.generate(
+ title="Quality Assurance tracking maps",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/scripts/dmriqc_tractogram.py b/scripts/dmriqc_tractogram.py
index 8795d77..26eaebb 100755
--- a/scripts/dmriqc_tractogram.py
+++ b/scripts/dmriqc_tractogram.py
@@ -2,20 +2,27 @@
# -*- coding: utf-8 -*-
import argparse
-import os
-import shutil
+from functools import partial
import numpy as np
-
from dmriqcpy.io.report import Report
-from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
- assert_inputs_exist, assert_outputs_exist,
- list_files_from_paths)
-from dmriqcpy.analysis.stats import stats_tractogram
-from dmriqcpy.viz.graph import graph_tractogram
-from dmriqcpy.viz.screenshot import screenshot_tracking
-from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
+from dmriqcpy.io.utils import (
+ add_nb_threads_arg,
+ add_online_arg,
+ add_overwrite_arg,
+ assert_inputs_exist,
+ assert_list_arguments_equal_size,
+ assert_outputs_exist,
+ clean_output_directories,
+ list_files_from_paths,
+)
+from dmriqcpy.reporting.report import (
+ generate_metric_reports_parallel,
+ generate_report_package,
+ get_tractogram_qa_stats_and_graph,
+)
+from dmriqcpy.viz.utils import dataframe_to_html
DESCRIPTION = """
@@ -24,20 +31,21 @@
def _build_arg_parser():
- p = argparse.ArgumentParser(description=DESCRIPTION,
- formatter_class=argparse.RawTextHelpFormatter)
-
- p.add_argument('output_report',
- help='HTML report')
-
- p.add_argument('--tractograms', nargs='+',
- help='Folder or list of tractograms in format supported'
- ' by Nibabel.')
-
- p.add_argument('--t1', nargs='+',
- help='Folder or list of T1 images in Nifti format.')
+ p = argparse.ArgumentParser(
+ description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ p.add_argument("output_report", help="Filename of QC report (in html format).")
+ p.add_argument(
+ "--tractograms",
+ nargs="+",
+ required=True,
+ help="Tractograms in format supported by Nibabel.",
+ )
+ p.add_argument("--t1", nargs="+", required=True, help="Folder or list of T1 images in Nifti format.")
add_online_arg(p)
+ add_nb_threads_arg(p)
add_overwrite_arg(p)
return p
@@ -50,56 +58,46 @@ def main():
t1 = list_files_from_paths(args.t1)
tractograms = list_files_from_paths(args.tractograms)
- if not len(tractograms) == len(t1):
- parser.error("Not the same number of images in input.")
-
+ assert_list_arguments_equal_size(parser, t1, tractograms)
all_images = np.concatenate([tractograms, t1])
assert_inputs_exist(parser, all_images)
assert_outputs_exist(parser, args, [args.output_report, "data", "libs"])
-
- if os.path.exists("data"):
- shutil.rmtree("data")
- os.makedirs("data")
-
- if os.path.exists("libs"):
- shutil.rmtree("libs")
+ clean_output_directories()
name = "Tracking"
- columns = ["Nb streamlines"]
-
- warning_dict = {}
- summary, stats = stats_tractogram(columns, tractograms)
- warning_dict[name] = analyse_qa(summary, stats, ["Nb streamlines"])
- warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
- warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
-
- graphs = []
- graph = graph_tractogram("Tracking", columns, summary, args.online)
- graphs.append(graph)
-
- summary_dict = {}
- stats_html = dataframe_to_html(stats)
- summary_dict[name] = stats_html
-
- metrics_dict = {}
- subjects_dict = {}
- for subj_metric, curr_t1 in zip(tractograms, t1):
- curr_key = os.path.basename(subj_metric).split('.')[0]
- screenshot_path = screenshot_tracking(subj_metric, curr_t1, "data")
- summary_html = dataframe_to_html(summary.loc[curr_key].to_frame())
- subjects_dict[curr_key] = {}
- subjects_dict[curr_key]['screenshot'] = screenshot_path
- subjects_dict[curr_key]['stats'] = summary_html
- metrics_dict[name] = subjects_dict
-
nb_subjects = len(tractograms)
- report = Report(args.output_report)
- report.generate(title="Quality Assurance tractograms",
- nb_subjects=nb_subjects, summary_dict=summary_dict,
- graph_array=graphs, metrics_dict=metrics_dict,
- warning_dict=warning_dict,
- online=args.online)
+ summary, stats, qa_report, qa_graphs = get_tractogram_qa_stats_and_graph(
+ tractograms, args.online
+ )
+
+ warning_dict = {name: qa_report}
+ summary_dict = {name: dataframe_to_html(stats)}
+
+ metrics_dict = {
+ name: generate_metric_reports_parallel(
+ zip(tractograms, t1),
+ args.nb_threads,
+ nb_subjects // args.nb_threads,
+ report_package_generation_fn=partial(
+ generate_report_package,
+ stats_summary=summary,
+ metric_is_tracking=True
+ ),
+ )
+ }
-if __name__ == '__main__':
+ report = Report(args.output_report)
+ report.generate(
+ title="Quality Assurance tractograms",
+ nb_subjects=nb_subjects,
+ summary_dict=summary_dict,
+ graph_array=qa_graphs,
+ metrics_dict=metrics_dict,
+ warning_dict=warning_dict,
+ online=args.online,
+ )
+
+
+if __name__ == "__main__":
main()
diff --git a/setup.py b/setup.py
index e1a019c..955b348 100644
--- a/setup.py
+++ b/setup.py
@@ -2,32 +2,35 @@
import os
from setuptools import setup, find_packages
+
PACKAGES = find_packages()
# Get version and release info, which is all stored in dmriqc/version.py
-ver_file = os.path.join('dmriqcpy', 'version.py')
+ver_file = os.path.join("dmriqcpy", "version.py")
with open(ver_file) as f:
exec(f.read())
-opts = dict(name=NAME,
- maintainer=MAINTAINER,
- maintainer_email=MAINTAINER_EMAIL,
- description=DESCRIPTION,
- long_description=LONG_DESCRIPTION,
- url=URL,
- download_url=DOWNLOAD_URL,
- license=LICENSE,
- classifiers=CLASSIFIERS,
- author=AUTHOR,
- author_email=AUTHOR_EMAIL,
- platforms=PLATFORMS,
- version=VERSION,
- packages=PACKAGES,
- install_requires=REQUIRES,
- requires=REQUIRES,
- scripts=SCRIPTS,
- include_package_data=True)
+opts = dict(
+ name=NAME,
+ maintainer=MAINTAINER,
+ maintainer_email=MAINTAINER_EMAIL,
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ url=URL,
+ download_url=DOWNLOAD_URL,
+ license=LICENSE,
+ classifiers=CLASSIFIERS,
+ author=AUTHOR,
+ author_email=AUTHOR_EMAIL,
+ platforms=PLATFORMS,
+ version=VERSION,
+ packages=PACKAGES,
+ install_requires=REQUIRES,
+ requires=REQUIRES,
+ scripts=SCRIPTS,
+ include_package_data=True,
+)
-if __name__ == '__main__':
+if __name__ == "__main__":
setup(**opts)