From 5ffae4d6d6643c87817193011f03580f4a907f4b Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Wed, 24 Jun 2020 07:14:42 -0700 Subject: [PATCH 001/690] Beginning of toast-3 interfaces: * New Observation class as a data container * Detector and shared data classes * Intervals and views of data * Operators now configured using traitlets and have additional methods for specifying workflow inputs and outputs * Operator workflows can be specified in TOML / JSON config files. * New pipeline operator for nested workflows of other operators on subsets of detectors. * Improved distributed map / pixel value storage, with both Allreduce and Alltoallv communication patterns. * A couple of operators ported to new interface as a test. * Disable many unit tests for now, and leave old source files in tree to facilitate future rebasing prior to merge. --- .atom/config.cson | 1 - etc/map_comm/run_knl.slurm | 63 + etc/map_comm/toast_map_comm.py | 271 +++ etc/map_comm/toast_plot_map_comm.py | 203 ++ pipelines/CMakeLists.txt | 2 + pipelines/toast_future.py | 165 ++ pipelines/toast_future_demo.ipynb | 981 ++++++++++ pipelines/toast_pixel_comm.py | 266 +++ setup.py | 3 + src/libtoast/include/toast/map_pixels.hpp | 23 +- src/toast/CMakeLists.txt | 14 +- src/toast/__init__.py | 46 +- src/toast/_libtoast.hpp | 194 ++ src/toast/_libtoast_tod_pointing.cpp | 22 +- src/toast/config.py | 572 ++++++ src/toast/cuda.py | 110 ++ src/toast/data.py | 322 ++++ src/toast/dist.py | 413 +--- src/toast/future_ops/CMakeLists.txt | 13 + src/toast/future_ops/__init__.py | 15 + src/toast/future_ops/mapmaker.py | 1711 +++++++++++++++++ src/toast/future_ops/noise_model.py | 91 + src/toast/future_ops/pipeline.py | 144 ++ src/toast/future_ops/pointing_healpix.py | 367 ++++ src/toast/future_ops/sim_ground.py | 287 +++ src/toast/future_ops/sim_hwp.py | 98 + src/toast/future_ops/sim_satellite.py | 543 ++++++ src/toast/future_ops/sim_tod_noise.py | 222 +++ src/toast/instrument.py | 278 +++ src/toast/instrument_sim.py | 649 +++++++ src/toast/intervals.py | 426 ++++ src/toast/map/cov.py | 2 +- src/toast/map/pixels.py | 2 +- src/toast/mpi.py | 50 +- src/toast/observation.py | 1509 +++++++++++++++ src/toast/op.py | 29 - src/toast/operator.py | 160 ++ src/toast/pipeline_tools/CMakeLists.txt | 1 - src/toast/pipeline_tools/__init__.py | 1 - src/toast/pipeline_tools/todground.py | 4 +- src/toast/pixels.py | 780 ++++++++ src/toast/pixels_io.py | 282 +++ src/toast/pshmem/CMakeLists.txt | 11 - src/toast/pshmem/README.md | 8 - src/toast/pshmem/__init__.py | 20 - src/toast/pshmem/locking.py | 352 ---- src/toast/pshmem/shmem.py | 395 ---- src/toast/pshmem/test.py | 214 --- src/toast/pshmem/utils.py | 74 - src/toast/tests/CMakeLists.txt | 11 +- src/toast/tests/_helpers.py | 110 +- src/toast/tests/config.py | 140 ++ src/toast/tests/dist.py | 3 +- src/toast/tests/intervals.py | 171 +- src/toast/tests/observation.py | 132 ++ src/toast/tests/ops_sim_satellite.py | 43 + src/toast/tests/pixels.py | 165 ++ src/toast/tests/runner.py | 223 ++- src/toast/tod/applygain.py | 2 +- src/toast/tod/gainscrambler.py | 2 +- src/toast/tod/interval.py | 2 +- src/toast/tod/memorycounter.py | 2 +- src/toast/tod/polyfilter.py | 2 +- src/toast/tod/sim_det_noise.py | 2 +- src/toast/tod/spt3g.py | 2 +- src/toast/tod/tidas.py | 5 +- src/toast/tod/tod_math.py | 2 +- src/toast/todmap/conviqt.py | 2 +- src/toast/todmap/groundfilter.py | 2 +- src/toast/todmap/madam.py | 2 +- src/toast/todmap/mapmaker.py | 9 +- src/toast/todmap/pointing.py | 2 +- src/toast/todmap/sim_det_atm.py | 2 +- 
src/toast/todmap/sim_det_dipole.py | 2 +- src/toast/todmap/sim_det_map.py | 2 +- src/toast/todmap/sim_det_pysm.py | 8 +- src/toast/todmap/sss.py | 10 +- src/toast/todmap/todmap_math.py | 2 +- src/toast/traits.py | 544 ++++++ src/toast/utils.py | 56 +- tutorial/01_Introduction/intro.ipynb | 780 +++++--- tutorial/01_Introduction/intro_parallel.ipynb | 203 ++ 82 files changed, 13015 insertions(+), 2039 deletions(-) create mode 100644 etc/map_comm/run_knl.slurm create mode 100644 etc/map_comm/toast_map_comm.py create mode 100755 etc/map_comm/toast_plot_map_comm.py create mode 100644 pipelines/toast_future.py create mode 100644 pipelines/toast_future_demo.ipynb create mode 100644 pipelines/toast_pixel_comm.py create mode 100644 src/toast/config.py create mode 100644 src/toast/cuda.py create mode 100644 src/toast/data.py create mode 100644 src/toast/future_ops/CMakeLists.txt create mode 100644 src/toast/future_ops/__init__.py create mode 100644 src/toast/future_ops/mapmaker.py create mode 100644 src/toast/future_ops/noise_model.py create mode 100644 src/toast/future_ops/pipeline.py create mode 100644 src/toast/future_ops/pointing_healpix.py create mode 100644 src/toast/future_ops/sim_ground.py create mode 100644 src/toast/future_ops/sim_hwp.py create mode 100644 src/toast/future_ops/sim_satellite.py create mode 100644 src/toast/future_ops/sim_tod_noise.py create mode 100644 src/toast/instrument.py create mode 100644 src/toast/instrument_sim.py create mode 100644 src/toast/intervals.py create mode 100644 src/toast/observation.py delete mode 100644 src/toast/op.py create mode 100644 src/toast/operator.py create mode 100644 src/toast/pixels.py create mode 100644 src/toast/pixels_io.py delete mode 100644 src/toast/pshmem/CMakeLists.txt delete mode 100644 src/toast/pshmem/README.md delete mode 100644 src/toast/pshmem/__init__.py delete mode 100644 src/toast/pshmem/locking.py delete mode 100644 src/toast/pshmem/shmem.py delete mode 100644 src/toast/pshmem/test.py delete mode 100644 src/toast/pshmem/utils.py create mode 100644 src/toast/tests/config.py create mode 100644 src/toast/tests/observation.py create mode 100644 src/toast/tests/ops_sim_satellite.py create mode 100644 src/toast/tests/pixels.py create mode 100644 src/toast/traits.py create mode 100644 tutorial/01_Introduction/intro_parallel.ipynb diff --git a/.atom/config.cson b/.atom/config.cson index 385231685..951b781fb 100644 --- a/.atom/config.cson +++ b/.atom/config.cson @@ -9,7 +9,6 @@ python: {} editor: autoIndentOnPaste: false - fontSize: 18 preferredLineLength: 88 scrollPastEnd: true showInvisibles: true diff --git a/etc/map_comm/run_knl.slurm b/etc/map_comm/run_knl.slurm new file mode 100644 index 000000000..abad0c9ed --- /dev/null +++ b/etc/map_comm/run_knl.slurm @@ -0,0 +1,63 @@ +#!/bin/bash -l + +#SBATCH --partition=debug +#SBATCH --constraint=knl,quad,cache +#SBATCH --account=mp107 +#SBATCH --nodes=1 +#SBATCH --core-spec=4 +#SBATCH --time=00:30:00 +#SBATCH --job-name=mapcomm + +set -e + +echo "Starting batch script at $(date)" + +# Set TMPDIR to be on the ramdisk +export TMPDIR=/dev/shm + +# Numba threading may conflict with our own. Disable it. +export NUMBA_NUM_THREADS=1 + +# nodes used by this job +NODES=${SLURM_JOB_NUM_NODES} + +# set procs and threads +NODE_PROC=16 +PROC_THREADS=4 +PROC_DEPTH=$(( 256 / NODE_PROC )) + +# total number of processes on all nodes +NPROC=$(( NODES * NODE_PROC )) + +echo "Using ${NODES} node(s), which have 256 thread slots each." 
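+# Assuming Cori-style KNL nodes (68 cores, 4 hardware threads each),
+# --core-spec=4 reserves 4 cores for the OS, leaving 64 x 4 = 256 usable
+# thread slots. PROC_DEPTH above divides those slots among the processes
+# on a node and is passed to srun below as the -c (cpus-per-task) depth.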
+echo "Starting ${NODE_PROC} process(es) per node (${NPROC} total), each with ${PROC_THREADS} OpenMP threads." + +export OMP_NUM_THREADS=${PROC_THREADS} +export OMP_PROC_BIND=spread +export OMP_PLACES=threads + +# The launching command and options +launch_str="srun" +if [ "x-n" != "x" ]; then + launch_str="${launch_str} -n ${NPROC}" +fi +if [ "x-N" != "x" ]; then + launch_str="${launch_str} -N ${NODES}" +fi +if [ "x-c" != "x" ]; then + launch_str="${launch_str} -c ${PROC_DEPTH}" +fi +launch_str="${launch_str} --cpu_bind=cores" + +# Run the pipeline script + +export TOAST_FUNCTIME=1 + +for nside in 512 1024 2048 4096; do + com="${launch_str} python toast_map_comm.py --nside ${nside}" + echo ${com} + echo "Launching pipeline at $(date)" + eval ${com} > log 2>&1 +done + +echo "Ending batch script at $(date)" diff --git a/etc/map_comm/toast_map_comm.py b/etc/map_comm/toast_map_comm.py new file mode 100644 index 000000000..572f5141d --- /dev/null +++ b/etc/map_comm/toast_map_comm.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2020-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +""" +Distributed map communication tests. +""" + +import os +import sys + +import argparse + +import traceback + +import numpy as np + +import healpy as hp + +import toast + +from toast.mpi import get_world, Comm + +from toast.dist import Data + +from toast.utils import Logger, Environment + +from toast.timing import Timer, GlobalTimers, gather_timers + +from toast.timing import dump as dump_timing + +from toast import dump_config, parse_config, create + +from toast.pixels import PixelDistribution, PixelData + +from toast.pixels_io import write_healpix_fits + +from toast import future_ops as ops + +from toast.future_ops.sim_focalplane import fake_hexagon_focalplane + +from toast.instrument import Telescope + + +def main(): + env = Environment.get() + log = Logger.get() + + gt = GlobalTimers.get() + gt.start("toast_map_comm (total)") + + mpiworld, procs, rank = get_world() + + # Argument parsing + parser = argparse.ArgumentParser( + description="TOAST distributed map communication tests." + ) + + parser.add_argument( + "--nside", required=False, type=int, default=256, help="Map NSIDE" + ) + + parser.add_argument( + "--nside_submap", required=False, type=int, default=16, help="Submap NSIDE" + ) + + parser.add_argument( + "--comm_mb", + required=False, + type=int, + default=10, + help="Size in MB of allreduce buffer", + ) + + args = parser.parse_args() + + # Test different types of submap distribution. + + n_pix = 12 * args.nside ** 2 + n_pix_submap = 12 * args.nside_submap ** 2 + n_sub = n_pix // n_pix_submap + + # Tuples are: + # 1. Fraction of total submaps with full overlap + # 2. Fraction of total submaps held empty on all procs + # 3. 
Fraction of *remaining* submaps to randomly assign + + fractions = [ + #(0.00, 0.0, 0.25), + (0.00, 0.0, 0.50), + (0.00, 0.0, 0.75), + #(0.25, 0.0, 0.00), + #(0.25, 0.0, 0.25), + (0.25, 0.0, 0.50), + #(0.50, 0.0, 0.00), + (0.50, 0.0, 0.25), + #(0.50, 0.0, 0.50), + #(0.75, 0.0, 0.00), + (0.75, 0.0, 0.25), + #(0.75, 0.0, 0.50), + (1.00, 0.0, 0.00), + ] + + timing_file_root = "mapcomm_nproc-{:04d}_nside-{:04d}_nsub-{:03d}".format( + procs, args.nside, args.nside_submap + ) + timing_file = "{}.csv".format(timing_file_root) + + if os.path.isfile(timing_file): + if rank == 0: + print( + "Skipping completed job (n_proc = {}, nside = {}, nsub = {})".format( + procs, args.nside, args.nside_submap + ) + ) + return + + for full, empty, fill in fractions: + perc_full = int(100 * full) + perc_empty = int(100 * empty) + perc_fill = int(100 * fill) + + n_full = int(full * n_sub) + n_empty = int((n_sub - n_full) * empty) + n_fill = int((n_sub - n_full - n_empty) * fill) + fill_start = n_full + n_empty + local_submaps = [x for x in range(n_full)] + # print("loc = {}, fill_start = {}".format(local_submaps, fill_start)) + if n_fill > 0: + rem = n_sub - n_full - n_empty + flist = [x + fill_start for x in range(rem)] + n_remove = rem - n_fill + for nr in range(n_remove): + select = np.random.randint(0, high=len(flist), size=1, dtype=np.int32) + del flist[select[0]] + local_submaps.extend(flist) + dist = PixelDistribution( + n_pix=n_pix, n_submap=n_sub, local_submaps=local_submaps, comm=mpiworld + ) + # print("rank {} submaps: {}".format(rank, dist.local_submaps)) + + # Output file root + outroot = "mapcomm_nproc-{:04d}_nside-{:04d}_nsub-{:03d}_full-{:03d}_empty-{:03d}_fill-{:03d}".format( + procs, args.nside, args.nside_submap, perc_full, perc_empty, perc_fill + ) + + # Coverage map + cover = PixelData(dist, np.int32, n_value=1) + + # Set local submaps + cover.raw[:] = 1 + + # Write coverage info + if rank == 0: + fcover = cover.storage_class(dist.n_pix) + fview = fcover.array() + for lc, sm in enumerate(dist.local_submaps): + offset = sm * dist.n_pix_submap + loffset = lc * dist.n_pix_submap + fview[offset : offset + dist.n_pix_submap] = cover.raw[ + loffset : loffset + dist.n_pix_submap + ] + outfile = "{}_cover-root.fits".format(outroot) + if os.path.isfile(outfile): + os.remove(outfile) + hp.write_map(outfile, fview, dtype=np.int32, fits_IDL=False, nest=True) + del fview + fcover.clear() + del fcover + + cover.sync_allreduce() + + outfile = "{}_cover.fits".format(outroot) + write_healpix_fits(cover, outfile, nest=True) + + cover.clear() + del cover + + # Data map for communication + pix = PixelData(dist, np.float64, n_value=3) + + # Set local submaps + pix.raw[:] = 1.0 + + # Time the different sync techniques + niter = 5 + + allreduce_seconds = None + alltoallv_seconds = None + tm = Timer() + + gtname = "SYNC_ALLREDUCE_{}_{}_{}".format(perc_full, perc_empty, perc_fill) + + if mpiworld is not None: + mpiworld.barrier() + tm.clear() + tm.start() + gt.start(gtname) + + cbytes = args.comm_mb * 1000000 + for i in range(niter): + pix.sync_allreduce(comm_bytes=cbytes) + + if mpiworld is not None: + mpiworld.barrier() + tm.stop() + gt.stop(gtname) + + allreduce_seconds = tm.seconds() / niter + msg = "{} / {} / {}: Allreduce average time = {:0.2f} seconds".format( + perc_full, perc_empty, perc_fill, allreduce_seconds + ) + if rank == 0: + print(msg) + + gtname = "SYNC_ALLTOALLV_{}_{}_{}".format(perc_full, perc_empty, perc_fill) + + if mpiworld is not None: + mpiworld.barrier() + tm.clear() + tm.start() + 
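+        # tm gives the local per-case average printed below, while the named
+        # GlobalTimers entry is what gather_timers() collects at the end and
+        # what toast_plot_map_comm.py later parses out of the timing CSV.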
gt.start(gtname) + + for i in range(niter): + pix.sync_alltoallv() + + if mpiworld is not None: + mpiworld.barrier() + tm.stop() + gt.stop(gtname) + + alltoallv_seconds = tm.seconds() / niter + msg = "{} / {} / {}: Alltoallv average time = {:0.2f} seconds".format( + perc_full, perc_empty, perc_fill, alltoallv_seconds + ) + if rank == 0: + print(msg) + + pix.clear() + del pix + + gt.stop_all() + alltimers = gather_timers(comm=mpiworld) + if rank == 0: + dump_timing( + alltimers, + "mapcomm_nproc-{:04d}_nside-{:04d}_nsub-{:03d}".format( + procs, args.nside, args.nside_submap + ), + ) + + return + + +if __name__ == "__main__": + try: + main() + except Exception: + # We have an unhandled exception on at least one process. Print a stack + # trace for this process and then abort so that all processes terminate. + mpiworld, procs, rank = get_world() + if procs == 1: + raise + exc_type, exc_value, exc_traceback = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_traceback) + lines = ["Proc {}: {}".format(rank, x) for x in lines] + print("".join(lines), flush=True) + if mpiworld is not None: + mpiworld.Abort() diff --git a/etc/map_comm/toast_plot_map_comm.py b/etc/map_comm/toast_plot_map_comm.py new file mode 100755 index 000000000..8a31d2919 --- /dev/null +++ b/etc/map_comm/toast_plot_map_comm.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python3 + +import os +import sys +import re + +import csv + +import numpy as np + +import healpy as hp + +import matplotlib + +matplotlib.use("pdf") + +import matplotlib.pyplot as plt + + +# Load all the cases + +cases = dict() + +tmpat = re.compile(r"mapcomm_nproc-(.*)_nside-(.*)_nsub-(.*)\.csv") +mappat = re.compile( + r"mapcomm_nproc-(.*)_nside-(.*)_nsub-(.*)_full-(.*)_empty-(.*)_fill-(.*)_cover\.fits" +) +rootmappat = re.compile( + r"mapcomm_nproc-(.*)_nside-(.*)_nsub-(.*)_full-(.*)_empty-(.*)_fill-(.*)_cover-root\.fits" +) + +allreduce_pat = re.compile(r"SYNC_ALLREDUCE_(\d+)_(\d+)_(\d+)") + +alltoallv_pat = re.compile(r"SYNC_ALLTOALLV_(\d+)_(\d+)_(\d+)") + + +def case_init(tcases, nside, full, fill): + if full not in cases: + tcases[full] = dict() + if fill not in cases[full]: + tcases[full][fill] = dict() + if nside not in cases[full][fill]: + tcases[full][fill][nside] = dict() + tcases[full][fill][nside]["allreduce"] = dict() + tcases[full][fill][nside]["alltoallv"] = dict() + return + + +maxproc = 0 +maxproc_root = 0 + +for dirpath, dirnames, filenames in os.walk("."): + for file in filenames: + path = os.path.join(dirpath, file) + + mat = mappat.match(file) + if mat is not None: + print("found map file {}".format(path)) + nproc = int(mat.group(1)) + nside = int(mat.group(2)) + nsub = int(mat.group(3)) + full = int(mat.group(4)) + empty = int(mat.group(5)) + fill = int(mat.group(6)) + case_init(cases, nside, full, fill) + if nproc >= maxproc: + cases[full][fill][nside]["cover"] = path + maxproc = nproc + + mat = rootmappat.match(file) + if mat is not None: + print("found map root file {}".format(path)) + nproc = int(mat.group(1)) + nside = int(mat.group(2)) + nsub = int(mat.group(3)) + full = int(mat.group(4)) + empty = int(mat.group(5)) + fill = int(mat.group(6)) + case_init(cases, nside, full, fill) + if nproc >= maxproc_root: + cases[full][fill][nside]["rootcover"] = path + maxproc_root = nproc + + mat = tmpat.match(file) + if mat is not None: + print("found timing file {}".format(path)) + nproc = int(mat.group(1)) + nside = int(mat.group(2)) + nsub = int(mat.group(3)) + with open(path, "r") as tf: + reader = csv.reader(tf, 
delimiter=",") + for row in reader: + namemat = allreduce_pat.match(row[0]) + if namemat is not None: + print("Found allreduce timing {}".format(row[0])) + full = int(namemat.group(1)) + empty = int(namemat.group(2)) + fill = int(namemat.group(3)) + case_init(cases, nside, full, fill) + seconds = float(row[7]) / 5.0 + cases[full][fill][nside]["allreduce"][nproc] = seconds + namemat = alltoallv_pat.match(row[0]) + if namemat is not None: + print("Found alltoallv timing {}".format(row[0])) + full = int(namemat.group(1)) + empty = int(namemat.group(2)) + fill = int(namemat.group(3)) + case_init(cases, nside, full, fill) + seconds = float(row[7]) / 5.0 + cases[full][fill][nside]["alltoallv"][nproc] = seconds + +print(cases) + +n_case = 0 +fullvals = sorted(cases.keys()) +for full in fullvals: + fillvals = sorted(cases[full].keys()) + n_case += len(fillvals) + +fig = plt.figure(figsize=(12, 2.5 * n_case), dpi=100) + +plotrows = 0 +for x in cases.keys(): + for y in cases[x].keys(): + plotrows += 1 + +plotoff = 1 + +fullvals = sorted(cases.keys()) +for full in fullvals: + fillvals = sorted(cases[full].keys()) + for fill in fillvals: + # Find the lowest NSIDE to use for plotting, since these submap coverage plots + # will be identical for every NSIDE. + nsides = sorted(cases[full][fill].keys()) + pmax = 0 + for ns in nsides: + procs = sorted(cases[full][fill][ns]["allreduce"].keys()) + for p in procs: + if p > pmax: + pmax = p + + cover = hp.read_map(cases[full][fill][nsides[0]]["cover"]) + rootcover = hp.read_map(cases[full][fill][nsides[0]]["rootcover"]) + hp.mollview( + map=cover, + sub=(plotrows, 3, plotoff), + title="Total Submap Coverage {:d}% / {:d}%".format(full, fill), + xsize=1200, + cmap="rainbow", + min=0, + max=pmax, + margins=(0.0, 0.0, 0.0, 0.01), + ) + plotoff += 1 + hp.mollview( + map=rootcover, + sub=(plotrows, 3, plotoff), + title="Rank Zero Submaps {:d}% / {:d}%".format(full, fill), + xsize=1200, + cmap="rainbow", + min=0, + max=1, + margins=(0.0, 0.0, 0.0, 0.01), + ) + plotoff += 1 + ax = fig.add_subplot(plotrows, 3, plotoff) + for ns in nsides: + lw = 0.5 * (ns // 256) + procs = sorted(cases[full][fill][ns]["allreduce"].keys()) + xdata = np.array(procs, dtype=np.int32) + ydata = np.array([cases[full][fill][ns]["allreduce"][x] for x in procs]) + ax.plot( + xdata, + ydata, + label="allreduce N{}".format(ns), + color="r", + linewidth=lw, + marker="o", + markersize=(lw + 1), + ) + ydata = np.array([cases[full][fill][ns]["alltoallv"][x] for x in procs]) + ax.plot( + xdata, + ydata, + label="alltoallv N{}".format(ns), + color="g", + linewidth=lw, + marker="o", + markersize=(lw + 1), + ) + ax.legend(loc="upper left", fontsize=6) + + ax.set_ylabel("Seconds (Mean of {} calls)".format(5)) + ax.set_xlabel("MPI Ranks") + ax.set_ylim(0, 9.0) + plotoff += 1 + +plt.tight_layout() +# plt.subplots_adjust(top=0.9) +pfile = "mapcomm.pdf" +plt.savefig(pfile) +plt.close() diff --git a/pipelines/CMakeLists.txt b/pipelines/CMakeLists.txt index 5fb1fc7f4..2729d3c25 100644 --- a/pipelines/CMakeLists.txt +++ b/pipelines/CMakeLists.txt @@ -12,5 +12,7 @@ install(PROGRAMS toast_ground_sim.py toast_ground_sim_simple.py toast_benchmark.py + toast_future.py + toast_pixel_comm.py DESTINATION bin ) diff --git a/pipelines/toast_future.py b/pipelines/toast_future.py new file mode 100644 index 000000000..7906ada4f --- /dev/null +++ b/pipelines/toast_future.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. 
Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+"""
+Prototype of TOAST 3.0 interfaces
+"""
+
+import os
+import sys
+
+import argparse
+
+import traceback
+
+import numpy as np
+
+import toast
+
+from toast import qarray as qa
+
+from toast import Telescope
+
+from toast.mpi import get_world, Comm
+
+from toast.dist import Data
+
+from toast.utils import Logger, Environment
+
+from toast.timing import GlobalTimers, gather_timers
+
+from toast.timing import dump as dump_timing
+
+from toast import dump_config, parse_config, create
+
+from toast import future_ops as ops
+
+from toast.future_ops.sim_focalplane import fake_hexagon_focalplane
+
+
+def main():
+    env = Environment.get()
+    log = Logger.get()
+    gt = GlobalTimers.get()
+    gt.start("toast_future (total)")
+
+    mpiworld, procs, rank = get_world()
+
+    # The operators used in this script:
+    operators = {
+        "sim_satellite": ops.SimSatellite,
+        "noise_model": ops.DefaultNoiseModel,
+        "sim_noise": ops.SimNoise,
+    }
+
+    # Argument parsing
+    parser = argparse.ArgumentParser(description="Demo of TOAST future features.")
+
+    # Add some custom arguments specific to this script.
+
+    parser.add_argument(
+        "--focalplane_pixels",
+        required=False,
+        type=int,
+        default=1,
+        help="Number of focalplane pixels",
+    )
+
+    parser.add_argument(
+        "--group_size",
+        required=False,
+        type=int,
+        default=procs,
+        help="Size of a process group assigned to an observation",
+    )
+
+    # Build a config dictionary starting from the operator defaults, overriding with any
+    # config files specified with the '--config' commandline option, followed by any
+    # individually specified parameter overrides.
+    config, argvars = parse_config(parser, operators=operators)
+
+    # The satellite simulation operator requires a Telescope object.  Make a fake
+    # focalplane and telescope.
+    focalplane = fake_hexagon_focalplane(
+        argvars["focalplane_pixels"],
+        10.0,
+        samplerate=10.0,
+        epsilon=0.0,
+        net=1.0,
+        fmin=1.0e-5,
+        alpha=1.0,
+        fknee=0.05,
+    )
+    print(focalplane)
+
+    # Set the telescope option of the satellite simulation operator.  If we were using
+    # an experiment-specific operator, this would be done internally.
+
+    config["operators"]["sim_satellite"]["telescope"] = Telescope(
+        name="fake", focalplane=focalplane
+    )
+
+    # Log the config that was actually used at runtime.
+    out = "future_config_log.toml"
+    dump_config(out, config)
+
+    # Instantiate our operators
+    run = create(config)
+
+    # Put our operators into a pipeline in a specific order, running all detectors at
+    # once.
+    pipe_opts = ops.Pipeline.defaults()
+    pipe_opts["detector_sets"] = "ALL"
+    pipe_opts["operators"] = [
+        run["operators"][x] for x in ["sim_satellite", "noise_model", "sim_noise"]
+    ]
+
+    pipe = ops.Pipeline(pipe_opts)
+
+    # Set up the communicator
+    comm = Comm(world=mpiworld, groupsize=argvars["group_size"])
+
+    # Start with an empty data object (the first operator in our pipeline will create
+    # Observations in the data).
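+    # Each process group defined by the communicator above will hold a
+    # disjoint subset of the simulated Observations.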
+ data = Data(comm=comm) + + # Run the pipeline + pipe.exec(data) + pipe.finalize(data) + + # Print the resulting data + for ob in data.obs: + group_rank = 0 + if ob.mpicomm is not None: + group_rank = ob.mpicomm.rank + if group_rank == 0: + print(ob) + + # Cleanup + gt.stop_all() + + alltimers = gather_timers(comm=comm.comm_world) + if comm.world_rank == 0: + dump_timing(alltimers, "future_timing") + + return + + +if __name__ == "__main__": + try: + main() + except Exception: + # We have an unhandled exception on at least one process. Print a stack + # trace for this process and then abort so that all processes terminate. + mpiworld, procs, rank = get_world() + if procs == 1: + raise + exc_type, exc_value, exc_traceback = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_traceback) + lines = ["Proc {}: {}".format(rank, x) for x in lines] + print("".join(lines), flush=True) + if mpiworld is not None: + mpiworld.Abort() diff --git a/pipelines/toast_future_demo.ipynb b/pipelines/toast_future_demo.ipynb new file mode 100644 index 000000000..be61aa350 --- /dev/null +++ b/pipelines/toast_future_demo.ipynb @@ -0,0 +1,981 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "import tempfile\n", + "\n", + "import numpy as np\n", + "\n", + "import astropy.units as u\n", + "\n", + "import toast\n", + "\n", + "from toast import qarray as qa\n", + "\n", + "from toast import config as tc\n", + "\n", + "from toast import (\n", + " Telescope, \n", + " Focalplane, \n", + " Observation, \n", + ")\n", + "\n", + "import toast.future_ops as ops\n", + "\n", + "from toast.future_ops.sim_focalplane import fake_hexagon_focalplane\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Data Model\n", + "\n", + "The basic data model is a set of `Observation` instances, each of which is associated with a `Focalplane` on a `Telescope`. Note that a Focalplane instance is probably just a sub-set of detectors on the actual physical focalplane. These detectors must be co-sampled and likely have other things in common (for example, they are on the same wafer or are correlated in some other way). For this example, we will manually create these objects, but usually these will be loaded / created by some experiment-specific function.\n", + "\n", + "MPI is optional in TOAST, although it is required to achieve good parallel performance on traditional CPU systems. In this section we show how interactive use of TOAST can be done without any reference to MPI. In a later section we show how to make use of distributed data and operations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# help(Observation)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# help(Focalplane)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# help(Telescope)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Start by making a fake focalplane\n", + "\n", + "focalplane_pixels = 7 # (hexagonal)\n", + "field_of_view = 10.0 # degrees\n", + "sample_rate = 10.0 # Hz\n", + "\n", + "focalplane = fake_hexagon_focalplane(\n", + " focalplane_pixels,\n", + " field_of_view,\n", + " samplerate=10.0,\n", + " epsilon=0.0,\n", + " net=1.0,\n", + " fmin=1.0e-5,\n", + " alpha=1.0,\n", + " fknee=0.05,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Now make a fake telescope\n", + "\n", + "telescope = Telescope(name=\"fake\", focalplane=focalplane)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make an empty observation\n", + "\n", + "samples = 10\n", + "\n", + "obs = Observation(telescope, name=\"2020-07-31_A\", samples=samples)\n", + "\n", + "print(obs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Metadata\n", + "\n", + "By default, the observation is empty. You can add arbitrary metadata to the observation- it acts just like a dictionary." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hk = {\n", + " \"Temperature 1\": np.array([1.0, 2.0, 3.0]),\n", + " \"Other Sensor\": 1.2345\n", + "}\n", + "\n", + "obs[\"housekeeping\"] = hk\n", + "\n", + "print(obs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Time Ordered Data\n", + "\n", + "Now we can add some Time Ordered Data to this observation. There are basically two types of data: timestreams of information that all detectors have in common (telescope boresight, etc) and timestreams of detector data (signals and flags). Although an Observation acts like a dictionary that can hold arbitrary keys, there are some standard built-in names for TOD quantities that are used by the Operator classes. You can also create other custom types of data. To see the built-in names, you can do:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(obs.keynames)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "These underlying names can be overridden at construction time if you like." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Time Ordered Detector Data\n", + "\n", + "Detector data has some unique properties that we often want to leverage in our analyses. Each process has some detectors and some time slice of the observation. In the case of a single process like this example, all the data is local. Before using data we need to create it within the empty `Observation`. 
Here we create the default SIGNAL data:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "obs.create_signal()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(obs.signal)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This special `DetectorData` class is a table that can be indexed either by name or by index. You can set and get values as usual:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "obs.signal[\"D0A\"] = np.arange(samples, dtype=np.float64)\n",
+    "\n",
+    "obs.signal[1] = 10.0 * np.arange(samples, dtype=np.float64)\n",
+    "\n",
+    "print(obs.signal)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(obs.signal[:])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(obs.signal[\"D0A\", \"D0B\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(obs.signal[1][1:5])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This showed the creation of the default \"SIGNAL\" detector data, but you can create other types of data. For example, let's say you wanted to create some detector pointing matrix values consisting of a 64-bit integer pixel number and three 32-bit floats for the I/Q/U weights:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "obs.create_detector_data(\"pixels\", shape=(samples,), dtype=np.int64)\n",
+    "obs.create_detector_data(\"weights\", shape=(samples, 3), dtype=np.float32)\n",
+    "\n",
+    "print(obs[\"weights\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "weights = obs[\"weights\"]\n",
+    "\n",
+    "for d in obs.detectors:\n",
+    "    for s in range(samples):\n",
+    "        weights[d][s] = [1.0, 0.5, 0.5]\n",
+    "    \n",
+    "print(obs[\"weights\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Time Ordered Data Shared by all Detectors\n",
+    "\n",
+    "There are some types of timestreams which all detectors have in common within the observation. These include things like telescope pointing, timestamps, and other quantities. We want all processes to have access to these quantities. However, this type of data is usually stored once and then read many times. We use shared memory on the node to store this data to avoid duplicating it for every process. For this simple serial example, the details are not important. The main thing is to use a special method when creating these buffers in the observation. 
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obs.create_times()\n", + "obs.create_boresight_radec()\n", + "obs.create_common_flags()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# This accesses the timestamps regardless of the underlying\n", + "# dictionary key and checks that the underlying buffer has\n", + "# the right dimensions.\n", + "\n", + "print(obs.times)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(obs.times[0:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `create_*()` methods create these shared memory objects of the correct default dimensions and type. You can also create completely custom timestream data (see advanced topics below).\n", + "\n", + "After creating the shared buffer, we used the observation method `times()` to return the timestamps. There are similar methods for all the \"standard\" observation data products (boresight_radec(), signal(), etc). The benefit to using these methods instead of accessing the internal dictionary key directly is that there are checks on the shapes of the underlying objects to ensure consistency. Also, an operator does not have to know the name of the underlying dictionary key, which might be different between experiments.\n", + "\n", + "These shared data objects have a `set()` method used to write to them. This is more important when using MPI. In the serial case, you can just do:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obs.times.set(np.arange(samples, dtype=np.float64))\n", + "\n", + "print(obs.times[:])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Data\n", + "\n", + "The `Observation` instances discussed previously are usually stored as a list inside a top-level container class called `Data`. This class also stores the TOAST MPI communicator information. For this serial example you can just instantiate an empty `Data` class and add things to the observation list:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = toast.Data()\n", + "\n", + "print(data)\n", + "\n", + "print(data.obs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Obviously this `Data` object has no observations yet. We'll fix that in the next section!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Processing Model\n", + "\n", + "The `Operator` class defines the interfaces for operators working on data. Each operator constructor takes only keyword arguments, and these keyword arguments are stored as class attributes in the instance. Each operator has methods that describe the observation dictionary keys it requires for input and which keys it provides as output. An operator has an `exec()` method that works with `Data` objects. We will start by looking at the `SimSatellite` operator to simulate fake telescope scan strategies for a generic satellite. We can always see the options and default values by using the standard help function or the '?' 
command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "help(ops.SimSatellite)\n", + "\n", + "?ops.SimSatellite" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can instantiate a class directly by overriding some defaults:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simsat = ops.SimSatellite(\n", + " n_observation=2, \n", + " observation_time=(5 * u.minute),\n", + ")\n", + "\n", + "print(simsat)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After the operator is constructed, the parameters can be changed directly. For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simsat.telescope = telescope\n", + "simsat.n_observation = 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(simsat)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And now we have an `Operator` that is ready to use. This particular operator creates observations from scratch with telescope properties generated and stored. We can create an empty `Data` object and then run this operator on it:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = toast.Data()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simsat.exec(data)\n", + "simsat.finalize(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data.info()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"There are {} observations\".format(len(data.obs)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(data.obs[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for ob in data.obs:\n", + " print(ob.times[:5])\n", + " print(ob.boresight_radec[:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "So we see that our `SimSatellite` operator has created just one observation of 360 samples in the `Data` object. We can feed this tiny dataset to further operators to simulate signals or process the data. Let's now simulate some noise timestreams for our detectors. First we need to create a \"noise model\" for our detectors. 
We can bootstrap this process by making a noise model from the nominal detector properties in the focalplane:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ops.DefaultNoiseModel.help()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "noise_model_config = ops.DefaultNoiseModel.defaults()\n", + "print(noise_model_config)\n", + "\n", + "noise_model = ops.DefaultNoiseModel(noise_model_config)\n", + "noise_model.exec(data)\n", + "noise_model.finalize(data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we are ready to use the `SimNoise` operator to simulate some timestreams:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ops.SimNoise.help()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this case, we can just use all the default options. It assumes the default noise model and if we don't specify the `out` key this operator just accumulates to the default detector data (\"SIGNAL\")." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "noise_config = ops.SimNoise.defaults()\n", + "print(noise_config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the operator\n", + "\n", + "sim_noise = ops.SimNoise(noise_config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run it on the data\n", + "\n", + "sim_noise.exec(data)\n", + "sim_noise.finalize(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data.info()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that the observation now has some signal. Let's look at that:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# print(data.obs[0].signal())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Just look at few samples from one detector in the first observation\n", + "\n", + "print(data.obs[0].signal[\"D1A\"][:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pipeline Operator\n", + "\n", + "There is a special Operator class called `Pipeline` which serves as a way to group other operators together and run them in sequence (possibly running them on only a few detectors at a time). The default is to run the list of operators on the full `Data` object in one shot. The Pipeline class has the usual way of getting the defaults:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ops.Pipeline.help()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll see more about this Operator below." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuration Files\n", + "\n", + "We saw above how operators are constructed with a dictionary of parameters. **You can do everything by passing parameters when constructing operators**. 
Configuration files are completely optional, but they do allow easy sharing of complicated pipeline setups.\n",
+    "\n",
+    "These parameters can be loaded from one or more files and used to automatically construct operators for use. When doing this, each instance of an operator is given a \"name\" that can be used to reference it later. This way you can have multiple operators of the same class doing different things within your pipeline. If you have a script where you know which operators you are going to be using, you can get the defaults for the whole list at once:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pipe_ops = {\n",
+    "    \"sim_satellite\": ops.SimSatellite,\n",
+    "    \"noise_model\": ops.DefaultNoiseModel,\n",
+    "    \"sim_noise\": ops.SimNoise,\n",
+    "    \"pointing\": ops.PointingHealpix\n",
+    "}\n",
+    "\n",
+    "conf = default_config(operators=pipe_ops)\n",
+    "\n",
+    "print(conf)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can dump this to a file and look at it:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tmpdir = tempfile.mkdtemp()\n",
+    "conf_file = os.path.join(tmpdir, \"test.toml\")\n",
+    "\n",
+    "dump_config(conf_file, conf)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    " !cat {conf_file}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "... and we can also load it back in:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "newconf = load_config(conf_file)\n",
+    "\n",
+    "print(newconf)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "What if we wanted to add a Pipeline to this configuration that references the names of the other operators to use? We can do that using a special syntax which consists of `@config:` followed by a UNIX-style path to the object we are referencing. For example:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get the default Pipeline config\n",
+    "\n",
+    "sim_pipe_config = ops.Pipeline.defaults()\n",
+    "print(sim_pipe_config)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Add references to the operators\n",
+    "\n",
+    "sim_pipe_config[\"operators\"] = [\n",
+    "    \"@config:/operators/sim_satellite\",\n",
+    "    \"@config:/operators/noise_model\",\n",
+    "    \"@config:/operators/sim_noise\",\n",
+    "    \"@config:/operators/pointing\",\n",
+    "]\n",
+    "\n",
+    "# Add the pipeline config to the main config\n",
+    "\n",
+    "newconf[\"operators\"][\"sim_pipe\"] = sim_pipe_config\n",
+    "\n",
+    "print(newconf)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we could dump this to a file for later use. What if we wanted to go and create operators from this? We could loop through each key in the \"operators\" dictionary and instantiate the class with the config values. However, there is a helper function that does this. Before doing that we need to add a Telescope to this config for the satellite simulation. Normally this would be done by some experiment-specific script that would create a more custom telescope / focalplane."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "newconf[\"operators\"][\"sim_satellite\"][\"telescope\"] = telescope" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now instantiate all the operators in one go:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run = create(newconf)\n", + "\n", + "print(run)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Although the result looks similar, look more closely. Our dictionary of configuration options is now actually a dictionary of instantiated classes. We can now run these operators directly:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = toast.Data()\n", + "\n", + "# Run the Pipeline operator, which in turn runs 2 other operators\n", + "\n", + "run[\"operators\"][\"sim_pipe\"].exec(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data.info()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Print the first 5 samples of one detector in the first observation.\n", + "\n", + "print(data.obs[0].signal[\"D0A\"][:5])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(data.obs[0][\"weights\"][\"D0A\"][:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Advanced Topics\n", + "\n", + "The previous sections covered the `Observation` container and its interfaces, and how to create and run Operators on a `Data` object containing a list of observations. The new data model has some aspects that improve our situation on larger runs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Memory Use\n", + "\n", + "Earlier we saw how the MPI shared memory objects created in an Observation are used to store things that are common to all detectors (boresight pointing, telescope velocity, etc). These quantities have defaults for the shape, dtype, **and** the communicator used. In the case of these common properties, the \"grid column communicator\" is used. This includes all processes in the observation that share a common slice of time. The result is that only one copy of these common data objects exist on each node, regardless of how many processes are running on the node.\n", + "\n", + "However, we can create completely custom shared memory objects. Imagine that every single process needed some common telescope data to be able to work with its local signal. We could create a shared object on the group communicator used for the whole observation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "samples = 10\n", + "\n", + "obs = Observation(telescope, name=\"2020-07-31_A\", samples=samples)\n", + "\n", + "# This is the same for every process, regardless of location in the process grid\n", + "\n", + "obs.create_shared_data(\n", + " \"same_for_every_process\", \n", + " shape=(100, 100), \n", + " dtype=np.float64, \n", + " comm=obs.mpicomm\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In another example, suppose that we had some detector-specific quantities (beams, bandpasses, etc) shared by all processes with data from a given detector. 
We can store that in shared memory using the \"grid row communicator\" of the process grid, so that we only have one copy of those products per node:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# This is the same for every process along a row of the grid\n", + "# so these processes all have the same detectors.\n", + "\n", + "obs.create_shared_data(\n", + " \"detector_aux_data\", \n", + " shape=(len(obs.local_detectors), 100), \n", + " dtype=np.float64, \n", + " comm=obs.grid_comm_row\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The use of MPI shared memory should greatly reduce our memory footprint when running many MPI processes per node." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Processing Subsets of Detectors\n", + "\n", + "The `Pipeline` operator is used to chain other operators together and can internally feed data to those sub-operators in sets of detectors. This is a work in progress, but the workflow code that creates the `Pipeline` will be able to specify sets of detectors to process at once. This set will be different on different processes. The special strings \"ALL\" and \"SINGLE\" are used to either work with all detectors in one shot (the current toast default) or to loop over detectors individually, running all operators on those before moving on to the next." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Accelerator Use\n", + "\n", + "This covers planned features...\n", + "\n", + "For each supported architecture, if all operators in a pipeline support that hardware then the pipeline can use observation methods to copy select data to the accelerator at the beginning and back from the accelerator at the end. Operators individually have methods that specify the observation keys they \"require\" and also the observation keys they \"provide\". This allows logic in the pipeline operator to determine which intermediate data products of the operators are only on the accelerator and do not need to be moved back to the host system. A Pipeline running on an accelerator will likely process only a few detectors at a time due to memory constraints.\n", + "\n", + "Observations already make use of MPI shared memory that is replicated across nodes. Each node will have some number of accelerators. We can assign each process to a particular accelerator and compute the minimal set of shared memory objects that need to be staged to each accelerator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pipelines/toast_pixel_comm.py b/pipelines/toast_pixel_comm.py new file mode 100644 index 000000000..7294769ed --- /dev/null +++ b/pipelines/toast_pixel_comm.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2020-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
+ +""" +Distributed map communication tests. +""" + +import os +import sys + +import argparse + +import traceback + +import numpy as np + +import healpy as hp + +import toast + +from toast.mpi import get_world, Comm + +from toast.dist import Data + +from toast.utils import Logger, Environment + +from toast.timing import Timer, GlobalTimers, gather_timers + +from toast.timing import dump as dump_timing + +from toast import dump_config, parse_config, create + +from toast.pixels import PixelDistribution, PixelData + +from toast.pixels_io import write_healpix_fits + +from toast import future_ops as ops + +from toast.future_ops.sim_focalplane import fake_hexagon_focalplane + +from toast.instrument import Telescope + + +def main(): + env = Environment.get() + log = Logger.get() + + gt = GlobalTimers.get() + gt.start("toast_pixel_comm (total)") + + mpiworld, procs, rank = get_world() + + # The operators used in this script: + operators = {"sim_satellite": ops.SimSatellite, "pointing": ops.PointingHealpix} + + # Argument parsing + parser = argparse.ArgumentParser( + description="TOAST distributed map communication tests." + ) + + parser.add_argument( + "--focalplane_pixels", + required=False, + type=int, + default=1, + help="Number of focalplane pixels", + ) + + parser.add_argument( + "--group_size", + required=False, + type=int, + default=procs, + help="Size of a process group assigned to an observation", + ) + + parser.add_argument( + "--comm_mb", + required=False, + type=int, + default=10, + help="Size in MB of allreduce buffer", + ) + + config, argvars = parse_config(parser, operators=operators) + + # Communicator + comm = Comm(world=mpiworld, groupsize=argvars["group_size"]) + + # Make a fake focalplane and telescope + focalplane = fake_hexagon_focalplane( + argvars["focalplane_pixels"], + 10.0, + samplerate=10.0, + epsilon=0.0, + net=1.0, + fmin=1.0e-5, + alpha=1.0, + fknee=0.05, + ) + + config["operators"]["sim_satellite"]["telescope"] = Telescope( + name="fake", focalplane=focalplane + ) + + # Specify where to store the pixel distribution + config["operators"]["pointing"]["create_dist"] = "pixel_dist" + + # Log the config that was actually used at runtime. + out = "pixel_comm_config_log.toml" + dump_config(out, config) + + # Instantiate our operators + run = create(config) + + # Put our operators into a pipeline running all detectors at once. + pipe_opts = ops.Pipeline.defaults() + pipe_opts["detector_sets"] = "ALL" + pipe_opts["operators"] = [ + run["operators"][x] for x in ["sim_satellite", "pointing"] + ] + + pipe = ops.Pipeline(pipe_opts) + + # Start with empty data + data = toast.Data(comm=comm) + + # Run the pipeline + pipe.exec(data) + pipe.finalize(data) + + # print(data) + + # Get the pixel distribution from the Data object + pixdist = data["pixel_dist"] + + # print(pixdist) + + # Output file root + outroot = "pixcomm_nproc-{:04d}_gsize-{:04d}_nobs-{:03d}_ndet-{:03d}_nside-{:04d}_nsub-{:03d}".format( + procs, + argvars["group_size"], + config["operators"]["sim_satellite"]["n_observation"], + 2 * argvars["focalplane_pixels"], + config["operators"]["pointing"]["nside"], + config["operators"]["pointing"]["nside_submap"], + ) + + # Print out the total hit map and also the hitmap on rank zero. 
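+    # Each process only stores its locally hit submaps, so the global pixel
+    # indices from the pointing operator must be translated into offsets in
+    # the flat-packed local buffer before accumulating.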
+ hits = PixelData(pixdist, dtype=np.int32, n_value=1) + hview = hits.raw.array() + for obs in data.obs: + for det in obs.local_detectors: + global_pixels = obs["pixels"][det] + # We can do this since n_value == 1 + local_pixels = pixdist.global_pixel_to_local(global_pixels) + hview[local_pixels] += 1 + + if rank == 0: + fhits = hits.storage_class(pixdist.n_pix) + fview = fhits.array() + for lc, sm in enumerate(pixdist.local_submaps): + offset = sm * pixdist.n_pix_submap + loffset = lc * pixdist.n_pix_submap + fview[offset : offset + pixdist.n_pix_submap] = hits.raw[ + loffset : loffset + pixdist.n_pix_submap + ] + outfile = "{}_hits-rank0.fits".format(outroot) + if os.path.isfile(outfile): + os.remove(outfile) + hp.write_map( + outfile, + fview, + dtype=np.int32, + fits_IDL=False, + nest=config["operators"]["pointing"]["nest"], + ) + del fview + fhits.clear() + del fhits + + hits.sync_allreduce() + + outfile = "{}_hits.fits".format(outroot) + write_healpix_fits(hits, outfile, nest=config["operators"]["pointing"]["nest"]) + + # Create some IQU maps with fake local data + pixdata = PixelData(pixdist, dtype=np.float64, n_value=3) + + # print(pixdata) + + pixdata.raw[:] = np.random.uniform(0.0, 1.0, len(pixdata.raw)) + + # Time the different sync techniques + + niter = 20 + + allreduce_seconds = None + alltoallv_seconds = None + tm = Timer() + + if mpiworld is not None: + mpiworld.barrier() + tm.clear() + tm.start() + gt.start("SYNC_ALLREDUCE") + + cbytes = argvars["comm_mb"]*1000000 + for i in range(niter): + pixdata.sync_allreduce(comm_bytes=cbytes) + + if mpiworld is not None: + mpiworld.barrier() + tm.stop() + gt.stop("SYNC_ALLREDUCE") + + allreduce_seconds = tm.seconds() / niter + msg = "Allreduce average time = {:0.2f} seconds".format(allreduce_seconds) + if rank == 0: + print(msg) + + if mpiworld is not None: + mpiworld.barrier() + tm.clear() + tm.start() + gt.start("SYNC_ALLTOALLV") + + for i in range(niter): + pixdata.sync_alltoallv() + + if mpiworld is not None: + mpiworld.barrier() + tm.stop() + gt.stop("SYNC_ALLTOALLV") + + alltoallv_seconds = tm.seconds() / niter + msg = "Alltoallv average time = {:0.2f} seconds".format(alltoallv_seconds) + if rank == 0: + print(msg) + + gt.stop_all() + alltimers = gather_timers(comm=mpiworld) + if comm.world_rank == 0: + dump_timing(alltimers, "{}_timing".format(outroot)) + + return + + +if __name__ == "__main__": + try: + main() + except Exception: + # We have an unhandled exception on at least one process. Print a stack + # trace for this process and then abort so that all processes terminate. + mpiworld, procs, rank = get_world() + if procs == 1: + raise + exc_type, exc_value, exc_traceback = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_traceback) + lines = ["Proc {}: {}".format(rank, x) for x in lines] + print("".join(lines), flush=True) + if mpiworld is not None: + mpiworld.Abort() diff --git a/setup.py b/setup.py index 82ce375b5..dad27b98a 100644 --- a/setup.py +++ b/setup.py @@ -229,8 +229,11 @@ def readme(): conf["python_requires"] = ">=3.6.0" conf["install_requires"] = [ "cmake", + "tomlkit", + "traitlets>=5.0", "numpy", "scipy", + "pshmem", "healpy", "matplotlib", "ephem", diff --git a/src/libtoast/include/toast/map_pixels.hpp b/src/libtoast/include/toast/map_pixels.hpp index fb30f69a1..34981c43a 100644 --- a/src/libtoast/include/toast/map_pixels.hpp +++ b/src/libtoast/include/toast/map_pixels.hpp @@ -22,22 +22,23 @@ void global_to_local(size_t nsamp, // memory buffers were aligned. 
That could be ensured with care in the // calling code. To be revisited if this code is ever the bottleneck. - #pragma omp parallel for default(shared) schedule(static, 64) + #pragma omp parallel for default(shared) schedule(static) for (size_t i = 0; i < nsamp; ++i) { - T pixel = global_pixels[i]; - T submap = 0; - if (pixel < 0) { - pixel = -1; + if (global_pixels[i] < 0) { + local_submaps[i] = -1; + local_pixels[i] = -1; } else { - submap = static_cast ( - static_cast (pixel) * npix_submap_inv + local_pixels[i] = global_pixels[i] % npix_submap; + local_submaps[i] = static_cast ( + global2local[ + static_cast ( + static_cast (global_pixels[i]) * npix_submap_inv + ) + ] ); - pixel -= submap * static_cast (npix_submap); } - local_pixels[i] = pixel; - submap = static_cast (global2local[submap]); - local_submaps[i] = submap; } + return; } } diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 7c088d3dd..04e235393 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -82,9 +82,19 @@ install(FILES utils.py mpi.py timing.py + traits.py cache.py + config.py + pixels.py + pixels_io.py + cuda.py dist.py - op.py + data.py + intervals.py + instrument.py + instrument_sim.py + observation.py + operator.py vis.py rng.py qarray.py @@ -103,4 +113,4 @@ add_subdirectory(map) add_subdirectory(todmap) add_subdirectory(fod) add_subdirectory(pipeline_tools) -add_subdirectory(pshmem) +add_subdirectory(future_ops) diff --git a/src/toast/__init__.py b/src/toast/__init__.py index fab157f17..4c28b0e17 100644 --- a/src/toast/__init__.py +++ b/src/toast/__init__.py @@ -5,9 +5,35 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. # -"""Time Ordered Astrophysics Scalable Tools (TOAST) is a software package +""" +Time Ordered Astrophysics Scalable Tools (TOAST) is a software package designed to allow the processing of data from telescopes that acquire data as timestreams (rather than images). + +Runtime behavior of this package can be controlled by setting some +environment variables (before importing the package): + +TOAST_LOGLEVEL= + * Possible values are "VERBOSE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL". + Default is "INFO". + * Controls logging of both C++ and Python code. + +TOAST_FUNCTIME= + * Any non-empty value will enable python function timers in many parts of the code + +TOAST_TOD_BUFFER= + * Number of elements to buffer in code where many intermediate timestream + products are created. Default is 1048576. + +OMP_NUM_THREADS= + * Toast uses OpenMP threading in several places and the concurrency is set by the + usual environment variable. + +MPI_DISABLE= + * Any non-empty value will disable a try block that looks for mpi4py. Needed on + some systems where mpi4py is available but does not work. + * The same variable also controls the `pshmem` package used by toast. 
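+
+For example, an illustrative shell session (not part of the package) that
+selects verbose logging and four OpenMP threads before importing toast:
+
+    export TOAST_LOGLEVEL=VERBOSE
+    export OMP_NUM_THREADS=4
+    python -c "import toast"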
+ """ import sys import os @@ -40,8 +66,22 @@ # Namespace imports from .mpi import Comm -from .dist import Data, distribute_uniform, distribute_discrete, distribute_samples +from .timing import Timer, GlobalTimers -from .op import Operator +from .intervals import Interval + +from .observation import Observation + +from .data import Data + +from .config import load_config + +from .operator import Operator + +from .instrument import Telescope, Focalplane, Site + +from .instrument_sim import fake_hexagon_focalplane from .weather import Weather + +from .pixels import PixelDistribution, PixelData diff --git a/src/toast/_libtoast.hpp b/src/toast/_libtoast.hpp index 40b465864..f30b7643c 100644 --- a/src/toast/_libtoast.hpp +++ b/src/toast/_libtoast.hpp @@ -72,6 +72,8 @@ void register_aligned(py::module & m, char const * name) { .def("__iter__", [](C & self) { return py::make_iterator(self.begin(), self.end()); }, py::keep_alive <0, 1>()) + + // Set and get individual elements .def("__setitem__", [](C & self, typename C::size_type i, const typename C::value_type & t) { @@ -87,6 +89,8 @@ void register_aligned(py::module & m, char const * name) { } return self[i]; }) + + // Set and get a slice .def("__setitem__", [](C & self, py::slice slice, py::buffer other) { size_t start, stop, step, slicelength; @@ -109,6 +113,18 @@ void register_aligned(py::module & m, char const * name) { start += step; } }) + .def("__setitem__", + [](C & self, py::slice slice, const typename C::value_type & t) { + size_t start, stop, step, slicelength; + if (!slice.compute(self.size(), &start, &stop, &step, + &slicelength)) { + throw py::error_already_set(); + } + for (size_t i = 0; i < slicelength; ++i) { + self[start] = t; + start += step; + } + }) .def("__getitem__", [](C & self, py::slice slice) { size_t start, stop, step, slicelength; @@ -123,6 +139,42 @@ void register_aligned(py::module & m, char const * name) { } return ret; }) + + // Set and get explicit indices + .def("__setitem__", + [](C & self, py::array_t indices, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + + if (indices.size() != info.size) { + throw std::runtime_error( + "Left and right hand indexed assignment have different sizes!"); + } + + auto * dat = indices.data(); + + for (size_t i = 0; i < info.size; ++i) { + self[dat[i]] = raw[i]; + } + }) + .def("__setitem__", + [](C & self, py::array_t indices, const typename C::value_type & t) { + auto * dat = indices.data(); + for (size_t i = 0; i < indices.size(); ++i) { + self[dat[i]] = t; + } + }) + .def("__getitem__", + [](C & self, py::array_t indices) { + auto * dat = indices.data(); + std::unique_ptr ret(new C(indices.size())); + for (size_t i = 0; i < indices.size(); ++i) { + (*ret)[i] = self[dat[i]]; + } + return ret; + }) .def("__lt__", [](const C & self, typename C::value_type val) { py::array_t ret(self.size()); auto result = ret.mutable_data(); @@ -171,6 +223,148 @@ void register_aligned(py::module & m, char const * name) { } return ret; }) + + // Arithmetic + .def("__iadd__", + [](C & self, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + + if (self.size() != info.size) { + throw std::runtime_error( + "Object and operand have different sizes!"); + } + + for (size_t i = 0; i < info.size; ++i) { + self[i] += raw[i]; + } + }) + .def("__iadd__", + [](C & self, typename C::value_type val) 
{ + for (size_t i = 0; i < self.size(); ++i) { + self[i] += val; + } + }) + .def("__isub__", + [](C & self, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + + if (self.size() != info.size) { + throw std::runtime_error( + "Object and operand have different sizes!"); + } + + for (size_t i = 0; i < info.size; ++i) { + self[i] -= raw[i]; + } + }) + .def("__isub__", + [](C & self, typename C::value_type val) { + for (size_t i = 0; i < self.size(); ++i) { + self[i] -= val; + } + }) + .def("__imul__", + [](C & self, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + + if (self.size() != info.size) { + throw std::runtime_error( + "Object and operand have different sizes!"); + } + + for (size_t i = 0; i < info.size; ++i) { + self[i] *= raw[i]; + } + }) + .def("__imul__", + [](C & self, typename C::value_type val) { + for (size_t i = 0; i < self.size(); ++i) { + self[i] *= val; + } + }) + .def("__add__", + [](C & self, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + if (self.size() != info.size) { + throw std::runtime_error( + "Object and operand have different sizes!"); + } + std::unique_ptr ret(new C(self)); + for (size_t i = 0; i < info.size; ++i) { + (*ret)[i] += raw[i]; + } + return ret; + }) + .def("__add__", + [](C & self, typename C::value_type val) { + std::unique_ptr ret(new C(self)); + for (size_t i = 0; i < self.size(); ++i) { + (*ret)[i] += val; + } + return ret; + }) + .def("__sub__", + [](C & self, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + if (self.size() != info.size) { + throw std::runtime_error( + "Object and operand have different sizes!"); + } + std::unique_ptr ret(new C(self)); + for (size_t i = 0; i < info.size; ++i) { + (*ret)[i] -= raw[i]; + } + return ret; + }) + .def("__sub__", + [](C & self, typename C::value_type val) { + std::unique_ptr ret(new C(self)); + for (size_t i = 0; i < self.size(); ++i) { + (*ret)[i] -= val; + } + return ret; + }) + .def("__mul__", + [](C & self, py::buffer other) { + pybuffer_check_1D (other); + py::buffer_info info = other.request(); + typename C::value_type * raw = + reinterpret_cast (info.ptr); + if (self.size() != info.size) { + throw std::runtime_error( + "Object and operand have different sizes!"); + } + std::unique_ptr ret(new C(self)); + for (size_t i = 0; i < info.size; ++i) { + (*ret)[i] *= raw[i]; + } + return ret; + }) + .def("__mul__", + [](C & self, typename C::value_type val) { + std::unique_ptr ret(new C(self)); + for (size_t i = 0; i < self.size(); ++i) { + (*ret)[i] *= val; + } + return ret; + }) + + // string representation .def("__repr__", [name](C const & self) { size_t npre = 1; diff --git a/src/toast/_libtoast_tod_pointing.cpp b/src/toast/_libtoast_tod_pointing.cpp index 65267dc45..1e333404d 100644 --- a/src/toast/_libtoast_tod_pointing.cpp +++ b/src/toast/_libtoast_tod_pointing.cpp @@ -10,13 +10,11 @@ void init_tod_pointing(py::module & m) { m.def("pointing_matrix_healpix", [](toast::HealpixPixels const & hpix, bool nest, double eps, double cal, std::string const & mode, py::buffer pdata, py::object hwpang, - py::buffer flags, py::buffer pixels, py::buffer weights) { + py::object flags, 
py::buffer pixels, py::buffer weights) {
              pybuffer_check_1D <double> (pdata);
-             pybuffer_check_1D <uint8_t> (flags);
              pybuffer_check_1D <double> (weights);
              pybuffer_check_1D <int64_t> (pixels);
              py::buffer_info info_pdata = pdata.request();
-             py::buffer_info info_flags = flags.request();
              py::buffer_info info_pixels = pixels.request();
              py::buffer_info info_weights = weights.request();
              size_t n = (size_t)(info_pdata.size / 4);
@@ -24,8 +22,7 @@ void init_tod_pointing(py::module & m) {
              if (mode.compare("IQU") == 0) {
                  nw = (size_t)(info_weights.size / 3);
              }
-             if ((info_flags.size != n) ||
-                 (info_pixels.size != n) || (nw != n)) {
+             if ((info_pixels.size != n) || (nw != n)) {
                  auto log = toast::Logger::get();
                  std::ostringstream o;
                  o << "Buffer sizes are not consistent.";
@@ -33,9 +30,22 @@ void init_tod_pointing(py::module & m) {
                  throw std::runtime_error(o.str().c_str());
              }
              double * rawpdata = reinterpret_cast <double *> (info_pdata.ptr);
-             uint8_t * rawflags = reinterpret_cast <uint8_t *> (info_flags.ptr);
              double * rawweights = reinterpret_cast <double *> (info_weights.ptr);
              int64_t * rawpixels = reinterpret_cast <int64_t *> (info_pixels.ptr);
+             uint8_t * rawflags = NULL;
+             if (!flags.is_none()) {
+                 auto flagbuf = py::cast <py::buffer> (flags);
+                 pybuffer_check_1D <uint8_t> (flagbuf);
+                 py::buffer_info info_flags = flagbuf.request();
+                 if (info_flags.size != n) {
+                     auto log = toast::Logger::get();
+                     std::ostringstream o;
+                     o << "Flag buffer size is not consistent.";
+                     log.error(o.str().c_str());
+                     throw std::runtime_error(o.str().c_str());
+                 }
+                 rawflags = reinterpret_cast <uint8_t *> (info_flags.ptr);
+             }
              double * rawhwpang = NULL;
              if (!hwpang.is_none()) {
                  auto hwpbuf = py::cast <py::buffer> (hwpang);
diff --git a/src/toast/config.py b/src/toast/config.py
new file mode 100644
index 000000000..61f69a22f
--- /dev/null
+++ b/src/toast/config.py
@@ -0,0 +1,572 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved.  Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import re
+
+import copy
+
+from collections import OrderedDict
+from collections.abc import MutableMapping
+
+import json
+
+import numpy as np
+
+import tomlkit
+from tomlkit import comment, document, nl, table, loads, dumps
+
+from astropy import units as u
+
+from .utils import Environment, Logger
+
+from .instrument import Focalplane, Telescope
+from . import instrument
+
+from .operator import Operator
+
+from .traits import TraitConfig, build_config, add_config_args, args_update_config
+
+from . import future_ops as ops
+
+
+def parse_config(parser, operators=list()):
+    """Load command line arguments associated with object properties.
+
+    This function:
+
+        * Adds a "--config" option to the parser which accepts multiple config file
+          paths to load.
+
+        * Adds arguments for all object parameters in the defaults for the specified
+          classes.
+
+        * Builds a config dictionary starting from the defaults, updating these using
+          values from any config files, and then applies any overrides from the
+          commandline.
+
+    Args:
+        parser (ArgumentParser): The argparse parser.
+        operators (list): The operator classes to add to the commandline.  Note that
+            if these are classes, then the commandline names will be the class names.
+            If you pass a list of instances with the name attribute set, then the
+            commandline names will use these.
+
+    Returns:
+        (tuple): The config dictionary and a namespace containing the remaining
+            (non-operator) commandline arguments.
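+
+    Example (a minimal sketch; the operator class and option names here are
+    illustrative, mirroring the dictionary form used by the example script):
+
+        parser = argparse.ArgumentParser(description="My workflow")
+        operators = {"sim_satellite": ops.SimSatellite}
+        config, remaining = parse_config(parser, operators=operators)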
+ + """ + + # The default configuration + defaults = build_config(operators) + + # Add commandline overrides for operators + add_config_args(parser, defaults, "operators", ignore=["API"]) + + # Add an option to load one or more config files. These should have compatible + # names for the operators used in defaults. + parser.add_argument( + "--config", + type=str, + required=False, + nargs="+", + help="One or more input config files.", + ) + + # Parse commandline. + args = parser.parse_args() + + # Load any config files. This overrides default values with config file contents. + config = copy.deepcopy(defaults) + if args.config is not None: + for conf in args.config: + config = load_config(conf, input=config) + + # Parse operator commandline options. These override any config file or default + # values. + remaining = args_update_config(args, config, defaults, "operators") + + # Remove the "config" option we created in this function + del remaining.config + + return config, remaining + + +def _merge_config(loaded, original): + for section, objs in loaded.items(): + if section in original.keys(): + # We have this section + for objname, objprops in objs.items(): + if objname not in original[section]: + # This is a new object + original[section][objname] = objprops + else: + for k, v in objprops.items(): + original[section][objname][k] = v + else: + # A new section + original[section] = objs + + +def _load_toml_trait(tbl): + result = OrderedDict() + for k in tbl.keys(): + if k == "class": + result[k] = tbl[k] + elif isinstance(tbl[k], str): + if tbl[k] == "None": + # Copy None values. There is no way to determine the type in this case. + # This is just a feature of how we are constructing the TOML files for + # ease of use, rather than dumping the full trait info as sub-tables. + # In practice these parameters will be ignored when constructing + # TraitConfig objects and the defaults will be used anyway. + result[k] = OrderedDict() + result[k]["value"] = tbl[k] + result[k]["type"] = "unknown" + result[k]["unit"] = "None" + else: + result[k] = OrderedDict() + # Is this string actually a Quantity? We try to convert the first + # element of the string to a float and the remainder to a Unit. + parts = tbl[k].split() + vstr = parts.pop(0) + ustr = " ".join(parts) + try: + v = float(vstr) + unit = u.Unit(ustr) + result[k]["value"] = "{:0.14e}".format(v) + result[k]["type"] = "Quantity" + result[k]["unit"] = str(unit) + except Exception: + # Just a regular string + result[k]["value"] = tbl[k] + result[k]["type"] = "str" + result[k]["unit"] = "None" + elif isinstance(tbl[k], bool): + result[k] = OrderedDict() + if tbl[k]: + result[k]["value"] = "True" + else: + result[k]["value"] = "False" + result[k]["type"] = "bool" + result[k]["unit"] = "None" + elif isinstance(tbl[k], int): + result[k] = OrderedDict() + result[k]["value"] = "{}".format(tbl[k]) + result[k]["type"] = "int" + result[k]["unit"] = "None" + elif isinstance(tbl[k], float): + result[k] = OrderedDict() + result[k]["value"] = "{:0.14e}".format(tbl[k]) + result[k]["type"] = "float" + result[k]["unit"] = "None" + return result + + +def load_toml(file, input=None): + """Load a TOML config file. + + This loads the document into a config dictionary. If input is specified, the file + contents are merged into this dictionary. + + Args: + file (str): The file to load. + input (dict): Append to this dictionary. + + Returns: + (dict): The result. 
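+
+    A trait table in such a file looks like this (a hypothetical operator
+    entry, shown only to illustrate the layout parsed below):
+
+        [operators.sim_satellite]
+        class = "toast.future_ops.SimSatellite"
+        n_observation = 2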
+
+    """
+    raw = None
+    with open(file, "r") as f:
+        raw = loads(f.read())
+
+    # Convert this TOML document into a dictionary
+
+    def convert_node(raw_root, conf_root):
+        """Helper function to recursively convert tables"""
+        if isinstance(
+            raw_root, (tomlkit.toml_document.TOMLDocument, tomlkit.items.Table)
+        ):
+            for k in raw_root.keys():
+                try:
+                    subkeys = raw_root[k].keys()
+                    # This element is table-like.
+                    if "class" in subkeys:
+                        conf_root[k] = _load_toml_trait(raw_root[k])
+                    else:
+                        # This is just a dictionary
+                        conf_root[k] = OrderedDict()
+                        convert_node(raw_root[k], conf_root[k])
+                except:
+                    # This element is not a sub-table, just copy.
+                    conf_root[k] = raw_root[k]
+        else:
+            raise RuntimeError("Cannot convert TOML node {}".format(raw_root))
+
+    raw_config = OrderedDict()
+    convert_node(raw, raw_config)
+
+    if input is None:
+        return raw_config
+
+    # We need to merge results.
+    _merge_config(raw_config, input)
+
+    return input
+
+
+def _dump_toml_trait(tbl, indent, name, value, unit, typ, help):
+    if typ == "bool":
+        # Bools seem to have an issue adding comments.  To work around this, we
+        # add the value as a string, add the comment, and then set it to
+        # a real bool.
+        tbl.add(name, "temp")
+        if help is not None:
+            tbl[name].comment(help)
+        tbl[name].indent(indent)
+        if value == "None":
+            tbl[name] = "None"
+        elif value == "True":
+            tbl[name] = True
+        else:
+            tbl[name] = False
+    else:
+        if typ == "Quantity":
+            qval = "None"
+            if value != "None":
+                qval = "{} {}".format(value, unit)
+            tbl.add(name, qval)
+        elif typ in ["list", "set", "tuple"]:
+            val = "None"
+            if value != "None":
+                val = list(value)
+            tbl.add(name, val)
+        elif typ == "dict":
+            val = "None"
+            if value != "None":
+                val = table()
+                subindent = indent + 2
+                for k, v in value.items():
+                    val.add(k, v)
+                    val[k].indent(subindent)
+            tbl.add(name, val)
+        elif typ == "int":
+            val = "None"
+            if value != "None":
+                val = int(value)
+            tbl.add(name, val)
+        elif typ == "float":
+            val = "None"
+            if value != "None":
+                val = float(value)
+            tbl.add(name, val)
+        else:
+            # Just leave the string representation.
+            tbl.add(name, value)
+        if help is not None:
+            tbl[name].comment(help)
+        tbl[name].indent(indent)
+
+
+def dump_toml(file, conf):
+    """Dump a configuration to a TOML file.
+
+    This writes a config dictionary to a TOML file.
+
+    Args:
+        file (str): The file to write.
+        conf (dict): The configuration to dump.
+
+    Returns:
+        None
+
+    """
+    env = Environment.get()
+    doc = document()
+
+    doc.add(comment("TOAST config"))
+    doc.add(comment("Generated with version {}".format(env.version())))
+
+    def convert_node(conf_root, table_root, indent_size):
+        """Helper function to recursively convert dictionaries to tables"""
+        if isinstance(conf_root, (dict, OrderedDict)):
+            for k in list(conf_root.keys()):
+                if isinstance(conf_root[k], (dict, OrderedDict)):
+                    if "value" in conf_root[k] and "type" in conf_root[k]:
+                        # this is a trait
+                        unit = None
+                        if "unit" in conf_root[k]:
+                            unit = conf_root[k]["unit"]
+                        help = None
+                        if "help" in conf_root[k]:
+                            help = conf_root[k]["help"]
+                        _dump_toml_trait(
+                            table_root,
+                            indent_size,
+                            k,
+                            conf_root[k]["value"],
+                            unit,
+                            conf_root[k]["type"],
+                            help,
+                        )
+                    else:
+                        # descend tree
+                        table_root[k] = table()
+                        convert_node(conf_root[k], table_root[k], indent_size + 2)
+                else:
+                    table_root.add(k, conf_root[k])
+                    table_root[k].indent(indent_size)
+        else:
+            raise RuntimeError("Cannot convert config node {}".format(conf_root))
+
+    # Convert all top-level sections from the config dictionary into a TOML table.
+    convert_node(conf, doc, 0)
+
+    with open(file, "w") as f:
+        f.write(dumps(doc))
+
+
+def load_json(file, input=None):
+    """Load a JSON config file.
+
+    This loads the document into a config dictionary.  If input is specified, the file
+    contents are merged into this dictionary.
+
+    Args:
+        file (str): The file to load.
+        input (dict): Append to this dictionary.
+
+    Returns:
+        (dict): The result.
+
+    """
+    raw = None
+    with open(file, "r") as f:
+        raw = json.load(f)
+
+    if input is None:
+        return raw
+
+    # We need to merge results.
+    _merge_config(raw, input)
+
+    return input
+
+
+def dump_json(file, conf):
+    """Dump a configuration to a JSON file.
+
+    This writes a config dictionary to a JSON file.
+
+    Args:
+        file (str): The file to write.
+        conf (dict): The configuration to dump.
+
+    Returns:
+        None
+
+    """
+    env = Environment.get()
+    versioned = OrderedDict()
+    versioned["version"] = env.version()
+    versioned.update(conf)
+
+    with open(file, "w") as f:
+        json.dump(versioned, f, indent=2)
+
+
+def load_config(file, input=None):
+    """Load a config file in a supported format.
+
+    This loads the document into a config dictionary.  If input is specified, the file
+    contents are merged into this dictionary.
+
+    Args:
+        file (str): The file to load.
+        input (dict): Append to this dictionary.
+
+    Returns:
+        (dict): The result.
+
+    """
+    ret = None
+    try:
+        ret = load_json(file, input=input)
+    except Exception:
+        ret = load_toml(file, input=input)
+    return ret
+
+
+def create(conf):
+    """Instantiate classes in a configuration.
+
+    This iteratively instantiates classes defined in the configuration, replacing
+    object names with references to those objects.  References to other objects in the
+    config are specified with the string '@config:' followed by a UNIX-style "path"
+    where each element of the path is a dictionary key in the config.  For example:
+
+        @config:/operators/pointing
+
+    would reference an object at conf["operators"]["pointing"].  Object references like
+    this only work if the target of the reference is a built-in type (str, float, int,
+    etc) or a class derived from TraitConfig.
+
+    Args:
+        conf (dict): the configuration
+
+    Returns:
+        (dict): The dictionary of instantiated classes
+
+    """
+    log = Logger.get()
+    ref_prefix = "@config:"
+    ref_pat = re.compile("^{}/(.*)".format(ref_prefix))
+
+    # Helper functions
+
+    def get_node(tree, cursor):
+        node = None
+        try:
+            node = tree
+            for c in cursor:
+                parent = node
+                node = parent[c]
+            # We found it!
+        except:
+            node = None
+        return node
+
+    def find_object_ref(top, name):
+        """
+        Return the same string if no match, None if matched but nonexistent, or
+        the object itself.
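+
+        For example, find_object_ref(top, "@config:/operators/pointing")
+        returns top["operators"]["pointing"] when that node exists.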
+ """ + found = name + mat = ref_pat.match(name) + if mat is not None: + # See if the referenced object exists + path = mat.group(1) + path_keys = path.split("/") + found = get_node(top, path_keys) + return found + + def parse_tree(in_tree, out_tree, cursor): + unresolved = 0 + # print("PARSE ------------------------") + + # The node at this cursor location + # print("PARSE fetching node at cursor {}".format(cursor)) + in_node = get_node(in_tree, cursor) + + # print("PARSE at input {} got node {}".format(cursor, in_node)) + + # The output parent node + parent_cursor = list(cursor) + node_name = parent_cursor.pop() + out_parent = get_node(out_tree, parent_cursor) + # print("PARSE at output parent {} got node {}".format(parent_cursor, out_parent)) + + # The output node + node_type = type(in_node) + out_parent[node_name] = node_type() + + # In terms of this function, "nodes" are always dictionary-like + for child_key, child_val in in_node.items(): + if isinstance(child_val, str): + # print("PARSE child value {} is a string".format(child_val)) + # See if this string is an object reference and try to resolve it. + check = find_object_ref(out_tree, child_val) + if check is None: + unresolved += 1 + out_parent[node_name][child_key] = child_val + else: + out_parent[node_name][child_key] = check + else: + is_dict = None + try: + subkeys = child_val.keys() + # Ok, this child is like a dictionary + is_dict = True + except: + is_dict = False + if is_dict: + child_cursor = list(cursor) + child_cursor.append(child_key) + # print( + # "PARSE child value {} is a dict, descend with cursor {}".format( + # child_val, child_cursor + # ) + # ) + unresolved += parse_tree(in_tree, out_tree, child_cursor) + else: + # Not a dictionary + try: + _ = len(child_val) + out_parent[node_name][child_key] = [ + None for x in range(len(child_val)) + ] + + for elem in range(len(child_val)): + found = find_object_ref(out_tree, child_val[elem]) + if found is None: + unresolved += 1 + out_parent[node_name][child_key][elem] = child_val[elem] + else: + out_parent[node_name][child_key][elem] = found + # print("PARSE child value {} is a list".format(child_val)) + except: + # Not a list / array, just leave it alone + # print("PARSE child value {} is not modified".format(child_val)) + out_parent[node_name][child_key] = child_val + + # If this node is an object and all refs exist, then create it. Otherwise + # leave it alone. + # print( + # "PARSE unresolved = {}, out_parent[{}] has class? {}".format( + # unresolved, node_name, ("class" in out_parent[node_name]) + # ) + # ) + if unresolved == 0 and "class" in out_parent[node_name]: + # We have a TraitConfig object with all references resolved. + # Instantiate it. 
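+            # (A sketch of the expectation here:  TraitConfig.from_config()
+            # looks up the "class" entry and constructs that class using the
+            # resolved trait values in this sub-dictionary.)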
+ # print("PARSE creating TraitConfig {}".format(node_name)) + obj = TraitConfig.from_config(node_name, out_parent[node_name]) + # print("PARSE instantiated {}".format(obj)) + out_parent[node_name] = obj + + # print("PARSE VERIFY parent[{}] = {}".format(node_name, out_parent[node_name])) + # print("PARSE out_tree now:\n", out_tree, "\n--------------") + return unresolved + + # Iteratively instantiate objects + + out = OrderedDict() + + done = False + last_unresolved = None + + it = 0 + while not done: + # print("PARSE iter ", it) + done = True + unresolved = 0 + for sect in list(conf.keys()): + # print("PARSE examine ", sect, "-->", type(conf[sect])) + if not isinstance(conf[sect], (dict, OrderedDict)): + continue + out[sect] = OrderedDict() + # print("PARSE section ", sect) + unresolved += parse_tree(conf, out, [sect]) + + if last_unresolved is not None: + if unresolved == last_unresolved: + msg = "Cannot resolve all references in the configuration" + log.error(msg) + raise RuntimeError(msg) + last_unresolved = unresolved + if unresolved > 0: + done = False + it += 1 + + return out diff --git a/src/toast/cuda.py b/src/toast/cuda.py new file mode 100644 index 000000000..6cf0871f2 --- /dev/null +++ b/src/toast/cuda.py @@ -0,0 +1,110 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + + +from .utils import Logger + +# Detect whether we are using pyCUDA + +use_pycuda = None +cuda = None +cuda_devices = 0 + +if use_pycuda is None: + try: + import pycuda.driver as cuda + + cuda.init() + n_dev = cuda.Device.count() + if n_dev > 0: + use_pycuda = True + cuda_devices = n_dev + else: + use_pycuda = False + except: + use_pycuda = False + + +class AcceleratorCuda(object): + """Class storing the device properties, context, and streams for one process. + """ + + def __init__(self, device_index): + self._device_index = device_index + self._device = cuda.Device(device_index) + self._device_attr = self._device.get_attributes() + self._context = self._device.make_context() + self._streams = dict() + + def close(self): + """Explicitly shut down the context and streams. + """ + if hasattr(self, "_streams") and self._streams is not None: + for k, v in self._streams.items(): + pass + self._streams = None + if hasattr(self, "_context") and self._context is not None: + self._context.pop() + self._context = None + + def __del__(self): + self.close() + + @property + def device(self): + """The cuda.Device + """ + return self._device + + @property + def device_index(self): + """The cuda.Device index + """ + return self._device_index + + @property + def device_attr(self): + """The cuda.Device attributes + """ + return self._device_attr + + @property + def context(self): + """The Context on this device + """ + return self._context + + def get_stream(self, name): + """Get the stream with the specified name. + + This creates the stream if it does not exist. + + Args: + name (str): The name of the stream. + + Returns: + (Stream): The cuda stream. + + """ + if name not in self._streams: + self._streams[name] = cuda.Stream() + + return self._streams[name] + + def del_stream(self, name): + """Delete the specified stream. + + This performs a sync on the stream and then removes it. + + Args: + name (str): The name of the stream. 
+
+        Returns:
+            None
+
+        """
+        if name not in self._streams:
+            return
+        self._streams[name].synchronize()
+        del self._streams[name]
diff --git a/src/toast/data.py b/src/toast/data.py
new file mode 100644
index 000000000..071175cb3
--- /dev/null
+++ b/src/toast/data.py
@@ -0,0 +1,322 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved.  Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+from collections.abc import MutableMapping
+
+import numpy as np
+
+from .mpi import Comm
+
+
+class Data(MutableMapping):
+    """Class which represents distributed data.
+
+    A Data object contains a list of observations assigned to
+    each process group in the Comm.
+
+    Args:
+        comm (:class:`toast.Comm`): the toast Comm class for distributing the data.
+
+    """
+
+    def __init__(self, comm=Comm()):
+        self._comm = comm
+        self.obs = []
+        """The list of observations.
+        """
+        self._internal = dict()
+
+    def __getitem__(self, key):
+        return self._internal[key]
+
+    def __delitem__(self, key):
+        del self._internal[key]
+
+    def __setitem__(self, key, value):
+        self._internal[key] = value
+
+    def __iter__(self):
+        return iter(self._internal)
+
+    def __len__(self):
+        return len(self._internal)
+
+    def __repr__(self):
+        val = "<Data with {} Observations:\n".format(len(self.obs))
+        for ob in self.obs:
+            val = "{}{}\n".format(val, ob)
+        val = "{}Metadata:\n{}\n>".format(val, self._internal)
+        return val
+
+    @property
+    def comm(self):
+        """The toast.Comm over which the data is distributed."""
+        return self._comm
+
+    def clear(self):
+        """Clear the list of observations."""
+        for ob in self.obs:
+            ob.clear()
+        return
+
+    def info(self, handle=None, flag_mask=255, common_flag_mask=255, intervals=None):
+        """Print information about the distributed data.
+
+        Information is written to the specified file handle.  Only the rank 0
+        process writes.  Optional flag masks are used when computing the
+        number of good samples.
+
+        Args:
+            handle (descriptor): file descriptor supporting the write()
+                method.  If None, use print().
+            flag_mask (int): bit mask to use when computing the number of
+                good detector samples.
+            common_flag_mask (int): bit mask to use when computing the
+                number of good telescope pointings.
+            intervals (str): optional name of an intervals object to print
+                from each observation.
+
+        Returns:
+            None
+
+        """
+        # Each process group gathers their output
+
+        groupstr = ""
+        procstr = ""
+
+        gcomm = self._comm.comm_group
+        wcomm = self._comm.comm_world
+        rcomm = self._comm.comm_rank
+
+        if wcomm is None:
+            msg = "Data distributed over a single process (no MPI)"
+            if handle is None:
+                print(msg, flush=True)
+            else:
+                handle.write(msg)
+        else:
+            if wcomm.rank == 0:
+                msg = "Data distributed over {} processes in {} groups\n".format(
+                    self._comm.world_size, self._comm.ngroups
+                )
+                if handle is None:
+                    print(msg, flush=True)
+                else:
+                    handle.write(msg)
+
+        def _get_optional(k, dt):
+            if k in dt:
+                return dt[k]
+            else:
+                return None
+
+        for ob in self.obs:
+            id = ob["id"]
+            tod = _get_optional("tod", ob)
+            intrvl = None
+            if intervals is not None:
+                intrvl = _get_optional(intervals, ob)
+
+            if self._comm.group_rank == 0:
+                groupstr = "observation {}:\n".format(id)
+                for ko in sorted(ob.keys()):
+                    groupstr = "{}  key {}\n".format(groupstr, ko)
+                if tod is not None:
+                    groupstr = "{}  {} total samples, {} detectors\n".format(
+                        groupstr, tod.total_samples, len(tod.detectors)
+                    )
+                if intrvl is not None:
+                    groupstr = "{}  {} intervals:\n".format(groupstr, len(intrvl))
+                    for it in intrvl:
+                        groupstr = "{}    {} --> {} ({} --> {})\n".format(
+                            groupstr, it.first, it.last, it.start, it.stop
+                        )
+
+            # rank zero of the group will print general information,
+            # and each process will get its statistics.
+
+            procstr = "  proc {}\n".format(self._comm.group_rank)
+            if tod is not None:
+                offset, nsamp = tod.local_samples
+                dets = tod.local_dets
+
+                my_chunks = 1
+                if tod.local_chunks is not None:
+                    my_chunks = tod.local_chunks[1]
+                procstr = "{}    sample range {} --> {} in {} chunks:\n".format(
+                    procstr, offset, (offset + nsamp - 1), my_chunks
+                )
+
+                if tod.local_chunks is not None:
+                    chkoff = tod.local_samples[0]
+                    for chk in range(tod.local_chunks[1]):
+                        abschk = tod.local_chunks[0] + chk
+                        chkstart = chkoff
+                        chkstop = chkstart + tod.total_chunks[abschk] - 1
+                        procstr = "{}      {} --> {}\n".format(
+                            procstr, chkstart, chkstop
+                        )
+                        chkoff += tod.total_chunks[abschk]
+
+                if nsamp > 0:
+                    stamps = tod.local_times()
+
+                    procstr = "{}    timestamps {} --> {}\n".format(
+                        procstr, stamps[0], stamps[-1]
+                    )
+
+                    common = tod.local_common_flags()
+                    for dt in dets:
+                        procstr = "{}    det {}:\n".format(procstr, dt)
+
+                        pdata = tod.local_pointing(dt)
+
+                        procstr = (
+                            "{}      pntg [{:.3e} {:.3e} {:.3e} {:.3e}] "
+                            "--> [{:.3e} {:.3e} {:.3e} {:.3e}]\n".format(
+                                procstr,
+                                pdata[0, 0],
+                                pdata[0, 1],
+                                pdata[0, 2],
+                                pdata[0, 3],
+                                pdata[-1, 0],
+                                pdata[-1, 1],
+                                pdata[-1, 2],
+                                pdata[-1, 3],
+                            )
+                        )
+
+                        data = tod.local_signal(dt)
+                        flags = tod.local_flags(dt)
+                        procstr = "{}      {:.3e} ({}) --> {:.3e} ({})\n".format(
+                            procstr, data[0], flags[0], data[-1], flags[-1]
+                        )
+                        good = np.where(
+                            ((flags & flag_mask) | (common & common_flag_mask)) == 0
+                        )[0]
+                        procstr = "{}        {} good samples\n".format(
+                            procstr, len(good)
+                        )
+                        try:
+                            min = np.min(data[good])
+                            max = np.max(data[good])
+                            mean = np.mean(data[good])
+                            rms = np.std(data[good])
+                            procstr = (
+                                "{}        min = {:.4e}, max = {:.4e},"
+                                " mean = {:.4e}, rms = {:.4e}\n".format(
+                                    procstr, min, max, mean, rms
+                                )
+                            )
+                        except FloatingPointError:
+                            procstr = (
+                                "{}        min = N/A, max = N/A, "
+                                "mean = N/A, rms = N/A\n".format(procstr)
+                            )
+
+                    for cname in tod.cache.keys():
+                        procstr = "{}    cache {}:\n".format(procstr, cname)
+                        ref = tod.cache.reference(cname)
+                        min = np.min(ref)
+                        max = np.max(ref)
+                        mean = np.mean(ref)
+                        rms = np.std(ref)
+                        procstr = (
+                            "{}        min = {:.4e}, max = {:.4e}, "
+                            "mean = {:.4e}, rms = {:.4e}\n".format(
+                                procstr, min, max, mean, rms
+                            )
+                        )
+
+            recvstr = ""
+            if self._comm.group_rank == 0:
+                groupstr = "{}{}".format(groupstr, procstr)
+            if gcomm is not None:
+                for p in range(1, self._comm.group_size):
+                    if gcomm.rank == 0:
+                        recvstr = gcomm.recv(source=p, tag=p)
+                        groupstr = "{}{}".format(groupstr, recvstr)
+                    elif p == gcomm.rank:
+                        gcomm.send(procstr, dest=0, tag=p)
+                    gcomm.barrier()
+
+        # the world rank 0 process collects output from all groups and
+        # writes to the handle
+
+        recvgrp = ""
+        if self._comm.world_rank == 0:
+            if handle is None:
+                print(groupstr, flush=True)
+            else:
+                handle.write(groupstr)
+        if wcomm is not None:
+            for g in range(1, self._comm.ngroups):
+                if wcomm.rank == 0:
+                    recvgrp = rcomm.recv(source=g, tag=g)
+                    if handle is None:
+                        print(recvgrp, flush=True)
+                    else:
+                        handle.write(recvgrp)
+                elif g == self._comm.group:
+                    if gcomm.rank == 0:
+                        rcomm.send(groupstr, dest=0, tag=g)
+            wcomm.barrier()
+        return
+
+    def split(self, key):
+        """Split the Data object.
+
+        Split the Data object based on the value of `key` in the
+        observation dictionary.
+
+        Args:
+            key (str): Observation key to use.
+
+        Returns:
+            List of 2-tuples of the form (value, data)
+
+        """
+        # Build a superset of all values
+        values = set()
+        for obs in self.obs:
+            if key not in obs:
+                raise RuntimeError(
+                    'Cannot split data by "{}".  Key is not '
+                    "defined for all observations.".format(key)
+                )
+            values.add(obs[key])
+        all_values = None
+        if self._comm.comm_world is None:
+            all_values = [values]
+        else:
+            all_values = self._comm.comm_world.allgather(values)
+        for vals in all_values:
+            values = values.union(vals)
+
+        # Order the values alphabetically.
+        values = sorted(list(values))
+
+        # Split the data
+        datasplit = []
+        for value in values:
+            new_data = Data(comm=self._comm)
+            for obs in self.obs:
+                if obs[key] == value:
+                    new_data.obs.append(obs)
+            datasplit.append((value, new_data))
+
+        return datasplit
diff --git a/src/toast/dist.py b/src/toast/dist.py
index e296a3e0d..67c0d6ef5 100644
--- a/src/toast/dist.py
+++ b/src/toast/dist.py
@@ -2,6 +2,8 @@
 # All rights reserved.  Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
 
+from collections.abc import MutableMapping
+
 import numpy as np
 
 from .mpi import Comm
@@ -171,58 +173,48 @@ def distribute_uniform(totalsize, groups, breaks=None):
 
 
 def distribute_samples(
-    mpicomm,
-    detectors,
-    samples,
-    detranks=1,
-    detbreaks=None,
-    sampsizes=None,
-    sampbreaks=None,
+    mpicomm, detectors, samples, detranks=1, detsets=None, sampsets=None
 ):
     """Distribute data by detector and sample.
 
-    Given a list of detectors and some number of samples, distribute
-    the data in a load balanced way.  Optionally account for constraints
-    on this distribution.  The samples may be grouped by indivisible
-    chunks, and there may be forced breaks in the distribution in both
-    the detector and chunk directions.
+    Given a list of detectors and some number of samples, distribute the data in a load
+    balanced way.  Optionally account for constraints on this distribution.  Both the
+    detectors and the samples may be arranged into "sets" that must not be split
+    between processes.
 
     samples -->   +--------------+--------------
-              /   | sampsize[0]  | sampsize[1] ...
+              /   | sampset[0]   | sampset[1] ...
   detrank = 0     +--------------+--------------
-              \   | sampsize[0]  | sampsize[1] ...
+              \   | sampset[0]   | sampset[1] ...
                   +--------------+--------------
-              /   | sampsize[0]  | sampsize[1] ...
+              /   | sampset[0]   | sampset[1] ...
  detrank = 1      +--------------+--------------
-              \   | sampsize[0]  | sampsize[1] ...
+              \   | sampset[0]   | sampset[1] ...
                   +--------------+--------------
                   |   ...
 
     Args:
-        mpicomm (mpi4py.MPI.Comm): the MPI communicator over which the
-            data is distributed.  If None, then all data is assigned to a
-            single process.
+        mpicomm (mpi4py.MPI.Comm): the MPI communicator over which the data is
+            distributed.  If None, then all data is assigned to a single process.
         detectors (list): The list of detector names.
         samples (int): The total number of samples.
-        detranks (int): The dimension of the process grid in the detector
-            direction.  The MPI communicator size must be evenly divisible
-            by this number.
-        detbreaks (list): Optional list of hard breaks in the detector
-            distribution.
-        sampsizes (list): Optional list of sample chunk sizes which
-            cannot be split.
-        sampbreaks (list): Optional list of hard breaks in the sample
-            distribution.
+        detranks (int): The dimension of the process grid in the detector direction.
+            The MPI communicator size must be evenly divisible by this number.
+        detsets (list): Optional list of lists of detectors that must not be split up
+            between process rows.
+        sampsets (list): Optional list of lists of sample chunks that must not be
+            split up between process columns.
 
     Returns:
-        tuple of lists: the 3 lists returned contain information about
-            the detector distribution, the sample distribution, and the chunk
-            distribution.  The first list has one entry for each detrank and
-            contains the list of detectors for that row of the process grid.
-            The second list contains tuples of (first sample, N samples) for
-            each column of the process grid.  The third list contains tuples
-            of (first chunk, N chunks) for each column of the process grid.
+        (tuple): The 4 lists returned describe the detector, detector set,
+            sample, and chunk distributions.  The first list has one entry per
+            detrank and contains the detectors for that row of the process grid.
+            The second list contains (first set, N sets) tuples of the detector
+            sets for each row, and is None if the input detsets was None.  The
+            third list contains (first sample, N samples) tuples for each column
+            of the process grid.  The fourth list contains (first chunk,
+            N chunks) tuples for each column, and is None if the input sampsets
+            was None.
 
     """
     nproc = 1
@@ -238,312 +230,51 @@ def distribute_samples(
     # Compute the other dimension of the process grid.
     sampranks = nproc // detranks
 
-    # Distribute detectors uniformly, but respecting forced breaks in the
-    # grouping specified by the calling code.
-
-    dist_detsindx = distribute_uniform(len(detectors), detranks, breaks=detbreaks)
-    dist_dets = [detectors[d[0] : d[0] + d[1]] for d in dist_detsindx]
+    # Distribute detectors either by set or uniformly.
 
-    # Distribute samples using both the chunking and the forced breaks
+    dist_dets = None
+    dist_detsets = None
 
-    if sampsizes is not None:
-        dist_sizes = distribute_discrete(sampsizes, sampranks, breaks=sampbreaks)
-        dist_samples = []
-        off = 0
-        for ds in dist_sizes:
-            cursamp = np.sum(sampsizes[ds[0] : ds[0] + ds[1]])
-            dist_samples.append((off, cursamp))
-            off += cursamp
+    if detsets is None:
+        # Uniform distribution
+        dist_detsindx = distribute_uniform(len(detectors), detranks)
+        dist_dets = [detectors[d[0] : d[0] + d[1]] for d in dist_detsindx]
     else:
-        dist_samples = distribute_uniform(samples, sampranks, breaks=sampbreaks)
-        dist_sizes = [(x, 1) for x in range(sampranks)]
-
-    return (dist_dets, dist_samples, dist_sizes)
-
-
-class Data(object):
-    """Class which represents distributed data
-
-    A Data object contains a list of observations assigned to
-    each process group in the Comm.
-
-    Args:
-        comm (:class:`toast.Comm`): the toast Comm class for distributing the data.
-
-    """
-
-    def __init__(self, comm=Comm()):
-        self._comm = comm
-        self.obs = []
-        """The list of observations.
- """ - self._metadata = {} - - def __contains__(self, key): - return key in self._metadata - - def __getitem__(self, key): - return self._metadata[key] - - def __setitem__(self, key, value): - self._metadata[key] = value - - @property - def comm(self): - """The toast.Comm over which the data is distributed.""" - return self._comm - - def clear(self): - """Clear the list of observations.""" - for ob in self.obs: - ob.clear() - return - - def info(self, handle=None, flag_mask=255, common_flag_mask=255, intervals=None): - """Print information about the distributed data. - - Information is written to the specified file handle. Only the rank 0 - process writes. Optional flag masks are used when computing the - number of good samples. - - Args: - handle (descriptor): file descriptor supporting the write() - method. If None, use print(). - flag_mask (int): bit mask to use when computing the number of - good detector samples. - common_flag_mask (int): bit mask to use when computing the - number of good telescope pointings. - intervals (str): optional name of an intervals object to print - from each observation. - - Returns: - None - - """ - # Each process group gathers their output - - groupstr = "" - procstr = "" - - gcomm = self._comm.comm_group - wcomm = self._comm.comm_world - rcomm = self._comm.comm_rank - - if wcomm is None: - msg = "Data distributed over a single process (no MPI)" - if handle is None: - print(msg, flush=True) - else: - handle.write(msg) - else: - if wcomm.rank == 0: - msg = "Data distributed over {} processes in {} groups\n".format( - self._comm.world_size, self._comm.ngroups - ) - if handle is None: - print(msg, flush=True) - else: - handle.write(msg) - - def _get_optional(k, dt): - if k in dt: - return dt[k] - else: - return None - - for ob in self.obs: - id = ob["id"] - tod = _get_optional("tod", ob) - intrvl = None - if intervals is not None: - _get_optional(intervals, ob) - - if self._comm.group_rank == 0: - groupstr = "observation {}:\n".format(id) - for ko in sorted(ob.keys()): - groupstr = "{} key {}\n".format(groupstr, ko) - if tod is not None: - groupstr = "{} {} total samples, {} detectors\n".format( - groupstr, tod.total_samples, len(tod.detectors) - ) - if intrvl is not None: - groupstr = "{} {} intervals:\n".format(groupstr, len(intrvl)) - for it in intrvl: - groupstr = "{} {} --> {} ({} --> {})\n".format( - groupstr, it.first, it.last, it.start, it.stop - ) - - # rank zero of the group will print general information, - # and each process will get its statistics. 
- - procstr = " proc {}\n".format(self._comm.group_rank) - if tod is not None: - offset, nsamp = tod.local_samples - dets = tod.local_dets - - my_chunks = 1 - if tod.local_chunks is not None: - my_chunks = tod.local_chunks[1] - procstr = "{} sample range {} --> {} in {} chunks:\n".format( - procstr, offset, (offset + nsamp - 1), my_chunks - ) - - if tod.local_chunks is not None: - chkoff = tod.local_samples[0] - for chk in range(tod.local_chunks[1]): - abschk = tod.local_chunks[0] + chk - chkstart = chkoff - chkstop = chkstart + tod.total_chunks[abschk] - 1 - procstr = "{} {} --> {}\n".format( - procstr, chkstart, chkstop - ) - chkoff += tod.total_chunks[abschk] - - if nsamp > 0: - stamps = tod.local_times() - - procstr = "{} timestamps {} --> {}\n".format( - procstr, stamps[0], stamps[-1] - ) - - common = tod.local_common_flags() - for dt in dets: - procstr = "{} det {}:\n".format(procstr, dt) - - pdata = tod.local_pointing(dt) - - procstr = ( - "{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] " - "--> [{:.3e} {:.3e} {:.3e} {:.3e}]\n".format( - procstr, - pdata[0, 0], - pdata[0, 1], - pdata[0, 2], - pdata[0, 3], - pdata[-1, 0], - pdata[-1, 1], - pdata[-1, 2], - pdata[-1, 3], - ) - ) - - data = tod.local_signal(dt) - flags = tod.local_flags(dt) - procstr = "{} {:.3e} ({}) --> {:.3e} ({})\n".format( - procstr, data[0], flags[0], data[-1], flags[-1] - ) - good = np.where( - ((flags & flag_mask) | (common & common_flag_mask)) == 0 - )[0] - procstr = "{} {} good samples\n".format( - procstr, len(good) - ) - try: - min = np.min(data[good]) - max = np.max(data[good]) - mean = np.mean(data[good]) - rms = np.std(data[good]) - procstr = ( - "{} min = {:.4e}, max = {:.4e}," - " mean = {:.4e}, rms = {:.4e}\n".format( - procstr, min, max, mean, rms - ) - ) - except FloatingPointError: - procstr = ( - "{} min = N/A, max = N/A, " - "mean = N/A, rms = N/A\n".format(procstr) - ) - - for cname in tod.cache.keys(): - procstr = "{} cache {}:\n".format(procstr, cname) - ref = tod.cache.reference(cname) - min = np.min(ref) - max = np.max(ref) - mean = np.mean(ref) - rms = np.std(ref) - procstr = ( - "{} min = {:.4e}, max = {:.4e}, " - "mean = {:.4e}, rms = {:.4e}\n".format( - procstr, min, max, mean, rms - ) - ) - - recvstr = "" - if self._comm.group_rank == 0: - groupstr = "{}{}".format(groupstr, procstr) - if gcomm is not None: - for p in range(1, self._comm.group_size): - if gcomm.rank == 0: - recvstr = gcomm.recv(source=p, tag=p) - groupstr = "{}{}".format(groupstr, recvstr) - elif p == gcomm.rank: - gcomm.send(procstr, dest=0, tag=p) - gcomm.barrier() - - # the world rank 0 process collects output from all groups and - # writes to the handle - - recvgrp = "" - if self._comm.world_rank == 0: - if handle is None: - print(groupstr, flush=True) - else: - handle.write(groupstr) - if wcomm is not None: - for g in range(1, self._comm.ngroups): - if wcomm.rank == 0: - recvgrp = rcomm.recv(source=g, tag=g) - if handle is None: - print(recvgrp, flush=True) - else: - handle.write(recvgrp) - elif g == self._comm.group: - if gcomm.rank == 0: - rcomm.send(groupstr, dest=0, tag=g) - wcomm.barrier() - return - - def split(self, key): - """Split the Data object. - - Split the Data object based on the value of `key` in the - observation dictionary. - - Args: - key(str) : Observation key to use. - - Returns: - List of 2-tuples of the form (value, data) - - """ - # Build a superset of all values - values = set() - for obs in self.obs: - if key not in obs: - raise RuntimeError( - 'Cannot split data by "{}". 
Key is not ' - "defined for all observations.".format(key) - ) - values.add(obs[key]) - all_values = None - if self._comm.comm_world is None: - all_values = [values] - else: - all_values = self._comm.comm_world.allgather(values) - for vals in all_values: - values = values.union(vals) - - # Order the values alphabetically. - values = sorted(list(values)) - - # Split the data - datasplit = [] - for value in values: - new_data = Data(comm=self._comm) - for obs in self.obs: - if obs[key] == value: - new_data.obs.append(obs) - datasplit.append((value, new_data)) - - return datasplit + # Distribute by det set + detsizes = [len(x) for x in detsets] + dist_detsets = distribute_discrete(detsizes, detranks) + dist_dets = list() + for set_off, n_set in dist_detsets: + cur = list() + for ds in range(n_set): + cur.extend(detsets[set_off + ds]) + dist_dets.append(cur) + + # Distribute samples either uniformly or by set. + + dist_samples = None + dist_chunks = None + + if sampsets is None: + dist_samples = distribute_uniform(samples, sampranks) + dist_chunks = None + else: + sampsetsizes = [np.sum(x) for x in sampsets] + dist_sampsets = distribute_discrete(sampsetsizes, sampranks) + dist_chunks = list() + dist_samples = list() + samp_off = 0 + chunk_off = 0 + for set_off, n_set in dist_sampsets: + setsamp = 0 + setchunk = 0 + for ds in range(n_set): + sset = sampsets[set_off + ds] # One sample set + setsamp += np.sum(sset) + setchunk += len(sset) + dist_chunks.append((chunk_off, setchunk)) + dist_samples.append((samp_off, setsamp)) + samp_off += setsamp + chunk_off += setchunk + + return (dist_dets, dist_detsets, dist_samples, dist_chunks) diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/future_ops/CMakeLists.txt new file mode 100644 index 000000000..67f35ac76 --- /dev/null +++ b/src/toast/future_ops/CMakeLists.txt @@ -0,0 +1,13 @@ + +# Install the python files + +install(FILES + __init__.py + pipeline.py + sim_hwp.py + sim_tod_noise.py + sim_satellite.py + noise_model.py + pointing_healpix.py + DESTINATION ${PYTHON_SITE}/toast/future_ops +) diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py new file mode 100644 index 000000000..9f6cf4831 --- /dev/null +++ b/src/toast/future_ops/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +# import functions into our public API + +# from .pipeline import Pipeline + +from .sim_satellite import SimSatellite + +# from .sim_tod_noise import SimNoise + +from .noise_model import DefaultNoiseModel + +# from .pointing_healpix import PointingHealpix diff --git a/src/toast/future_ops/mapmaker.py b/src/toast/future_ops/mapmaker.py new file mode 100644 index 000000000..cf584c1bc --- /dev/null +++ b/src/toast/future_ops/mapmaker.py @@ -0,0 +1,1711 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
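+
+# Overview (a hedged summary of the classes below):  templates such as the
+# offset and subharmonic classes model low frequency noise as signal = F.a;
+# the solver projects data onto the templates (a += F^T.signal), optionally
+# adds a noise prior (C_a^{-1}.a), and applies per-template preconditioners
+# (a' = M^{-1}.a) when iterating for the amplitudes.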
+ +from collections import OrderedDict +import os +import sys + +import numpy as np +import scipy.linalg +import scipy.signal + +from ..operator import Operator +from ..mpi import MPI + +from ..timing import gather_timers, GlobalTimers, function_timer, Timer +from ..utils import Logger, Environment +from .sim_det_map import OpSimScan +from .todmap_math import OpAccumDiag, OpScanScale, OpScanMask +from ..tod import OpCacheClear, OpCacheCopy, OpCacheInit, OpFlagsApply, OpFlagGaps +from ..map import covariance_apply, covariance_invert, DistPixels, covariance_rcond + +from .._libtoast import add_offsets_to_signal, project_signal_offsets + +temporary_names = set() + + +def get_temporary_name(): + i = 0 + while True: + name = "temporary{:03}".format(i) + if name not in temporary_names: + break + i += 1 + temporary_names.add(name) + return name + + +def free_temporary_name(name): + temporary_names.remove(name) + + +class TOASTMatrix: + def apply(self, vector, inplace=False): + """ Every TOASTMatrix can apply itself to a distributed vectors + of signal, map or template offsets as is appropriate. + """ + raise NotImplementedError("Virtual apply not implemented in derived class") + + def apply_transpose(self, vector, inplace=False): + """ Every TOASTMatrix can apply itself to a distributed vectors + of signal, map or template offsets as is appropriate. + """ + raise NotImplementedError( + "Virtual apply_transpose not implemented in derived class" + ) + + +class TOASTVector: + def dot(self, other): + raise NotImplementedError("Virtual dot not implemented in derived class") + + +class UnitMatrix(TOASTMatrix): + def apply(self, vector, inplace=False): + if inplace: + outvec = vector + else: + outvec = vector.copy() + return outvec + + +class TODTemplate: + """ Parent class for all templates that can be registered with + TemplateMatrix + """ + + name = None + namplitude = 0 + comm = None + + def __init___(self, *args, **kwargs): + raise NotImplementedError("Derived class must implement __init__()") + + def add_to_signal(self, signal, amplitudes): + """ signal += F.a + """ + raise NotImplementedError("Derived class must implement add_to_signal()") + + def project_signal(self, signal, amplitudes): + """ a += F^T.signal + """ + raise NotImplementedError("Derived class must implement project_signal()") + + def add_prior(self, amplitudes_in, amplitudes_out): + """ a' += C_a^{-1}.a + """ + # Not all TODTemplates implement the prior + return + + def apply_precond(self, amplitudes_in, amplitudes_out): + """ a' = M^{-1}.a + """ + raise NotImplementedError("Derived class must implement apply_precond()") + + +class SubharmonicTemplate(TODTemplate): + """ This class represents sub-harmonic noise fluctuations. + + Sub-harmonic means that the characteristic frequency of the noise + modes is lower than 1/T where T is the length of the interval + being fitted. 
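+
+    For example, order = 1 fits an offset and a linear drift (Legendre
+    polynomials P0 and P1) to every interval and every detector.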
+ """ + + name = "subharmonic" + + def __init__( + self, + data, + detweights, + order=1, + intervals=None, + common_flags=None, + common_flag_mask=1, + flags=None, + flag_mask=1, + ): + self.data = data + self.detweights = detweights + self.order = order + self.intervals = intervals + self.common_flags = common_flags + self.common_flag_mask = common_flag_mask + self.flags = flags + self.flag_mask = flag_mask + self._last_nsamp = None + self._last_templates = None + self.get_steps_and_preconditioner() + + def get_steps_and_preconditioner(self): + """ Assign each template an amplitude + """ + self.templates = [] + self.slices = [] + self.preconditioners = [] + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + common_flags = tod.local_common_flags(self.common_flags) + common_flags = (common_flags & self.common_flag_mask) != 0 + if (self.intervals is not None) and (self.intervals in obs): + intervals = obs[self.intervals] + else: + intervals = None + local_intervals = tod.local_intervals(intervals) + slices = {} # this observation + preconditioners = {} # this observation + for ival in local_intervals: + todslice = slice(ival.first, ival.last + 1) + for idet, det in enumerate(tod.local_dets): + ind = slice(self.namplitude, self.namplitude + self.order + 1) + self.templates.append([ind, iobs, det, todslice]) + self.namplitude += self.order + 1 + preconditioner = self._get_preconditioner( + det, tod, todslice, common_flags, self.detweights[iobs][det] + ) + if det not in preconditioners: + preconditioners[det] = [] + slices[det] = [] + preconditioners[det].append(preconditioner) + slices[det].append(ind) + self.slices.append(slices) + self.preconditioners.append(preconditioners) + return + + def _get_preconditioner(self, det, tod, todslice, common_flags, detweight): + """ Calculate the preconditioner for the given interval and detector + """ + flags = tod.local_flags(det, self.flags)[todslice] + good = (flags & self.flag_mask) == 0 + good[common_flags[todslice]] = False + norder = self.order + 1 + preconditioner = np.zeros([norder, norder]) + templates = self._get_templates(todslice) + for row in range(norder): + for col in range(row, norder): + preconditioner[row, col] = np.dot( + templates[row][good], templates[col][good] + ) + preconditioner[row, col] *= detweight + if row != col: + preconditioner[col, row] = preconditioner[row, col] + preconditioner = np.linalg.inv(preconditioner) + return preconditioner + + def add_to_signal(self, signal, amplitudes): + subharmonic_amplitudes = amplitudes[self.name] + for ibase, (ind, iobs, det, todslice) in enumerate(self.templates): + templates = self._get_templates(todslice) + amps = subharmonic_amplitudes[ind] + for template, amplitude in zip(templates, amps): + signal[iobs, det, todslice] += template * amplitude + return + + def _get_templates(self, todslice): + """ Develop hierarchy of subharmonic modes matching the given length + + The basis functions are (orthogonal) Legendre polynomials + """ + nsamp = todslice.stop - todslice.start + if nsamp != self._last_nsamp: + templates = np.zeros([self.order + 1, nsamp]) + r = np.linspace(-1, 1, nsamp) + for order in range(self.order + 1): + if order == 0: + templates[order] = 1 + elif order == 1: + templates[order] = r + else: + templates[order] = ( + (2 * order - 1) * r * templates[order - 1] + - (order - 1) * templates[order - 2] + ) / order + self._last_nsamp = nsamp + self._last_templates = templates + return self._last_templates + + def project_signal(self, signal, amplitudes): + 
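+        """ a += F^T.signal
+        """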
subharmonic_amplitudes = amplitudes[self.name] + for ibase, (ind, iobs, det, todslice) in enumerate(self.templates): + templates = self._get_templates(todslice) + amps = subharmonic_amplitudes[ind] + for order, template in enumerate(templates): + amps[order] = np.dot(signal[iobs, det, todslice], template) + pass + + def apply_precond(self, amplitudes_in, amplitudes_out): + """ Standard diagonal preconditioner accounting for the fact that + the templates are not orthogonal in the presence of flagging and masking + """ + subharmonic_amplitudes_in = amplitudes_in[self.name] + subharmonic_amplitudes_out = amplitudes_out[self.name] + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + slices = self.slices[iobs][det] + preconditioners = self.preconditioners[iobs][det] + for ind, preconditioner in zip(slices, preconditioners): + subharmonic_amplitudes_out[ind] = np.dot( + preconditioner, subharmonic_amplitudes_in[ind] + ) + return + + +class OffsetTemplate(TODTemplate): + """ This class represents noise fluctuations as a step function + """ + + name = "offset" + + def __init__( + self, + data, + detweights, + step_length=1000000, + intervals=None, + use_noise_prior=True, + common_flags=None, + common_flag_mask=1, + flags=None, + flag_mask=1, + precond_width=20, + ): + self.data = data + self.detweights = detweights + self.step_length = step_length + self.intervals = intervals + self.common_flags = common_flags + self.common_flag_mask = common_flag_mask + self.flags = flags + self.flag_mask = flag_mask + self.precond_width = precond_width + self.get_steps() + self.use_noise_prior = use_noise_prior + if self.use_noise_prior: + self.get_filters_and_preconditioners() + return + + @function_timer + def get_filters_and_preconditioners(self): + """ Compute and store the filter and associated preconditioner + for every detector and every observation + """ + log = Logger.get() + self.filters = [] # all observations + self.preconditioners = [] # all observations + for iobs, obs in enumerate(self.data.obs): + if "noise" not in obs: + # If the observations do not include noise PSD:s, we + # we cannot build filters. + if len(self.filters) > 0: + log.warning( + 'Observation "{}" does not have noise information' + "".format(obs["name"]) + ) + continue + tod = obs["tod"] + # Determine the binning for the noise prior + times = tod.local_times() + dtime = np.amin(np.diff(times)) + fsample = 1 / dtime + obstime = times[-1] - times[0] + tbase = self.step_length + fbase = 1 / tbase + powmin = np.floor(np.log10(1 / obstime)) - 1 + powmax = min(np.ceil(np.log10(1 / tbase)) + 2, fsample) + freq = np.logspace(powmin, powmax, 1000) + # Now build the filter for each detector + noise = obs["noise"] + noisefilters = {} # this observation + preconditioners = {} # this observation + for det in tod.local_dets: + offset_psd = self._get_offset_psd(noise, freq, det) + # Store real space filters for every interval and every detector. 
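+                # (The filter is 1/offset_psd transformed back to lag space;
+                # the preconditioner approximates the inverse of the offset
+                # system, either as C_a or via a banded Cholesky factor.)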
+ ( + noisefilters[det], + preconditioners[det], + ) = self._get_noisefilter_and_preconditioner( + freq, offset_psd, self.offset_slices[iobs][det] + ) + self.filters.append(noisefilters) + self.preconditioners.append(preconditioners) + return + + @function_timer + def _get_offset_psd(self, noise, freq, det): + psdfreq = noise.freq(det) + psd = noise.psd(det) + rate = noise.rate(det) + # Remove the white noise component from the PSD + psd = psd.copy() * np.sqrt(rate) + psd -= np.amin(psd[psdfreq > 1.0]) + psd[psd < 1e-30] = 1e-30 + + # The calculation of `offset_psd` is from Keihänen, E. et al: + # "Making CMB temperature and polarization maps with Madam", + # A&A 510:A57, 2010 + logfreq = np.log(psdfreq) + logpsd = np.log(psd) + + def interpolate_psd(x): + result = np.zeros(x.size) + good = np.abs(x) > 1e-10 + logx = np.log(np.abs(x[good])) + logresult = np.interp(logx, logfreq, logpsd) + result[good] = np.exp(logresult) + return result + + def g(x): + bad = np.abs(x) < 1e-10 + good = np.logical_not(bad) + arg = np.pi * x[good] + result = bad.astype(np.float64) + result[good] = (np.sin(arg) / arg) ** 2 + return result + + tbase = self.step_length + fbase = 1 / tbase + offset_psd = interpolate_psd(freq) * g(freq * tbase) + for m in range(1, 2): + offset_psd += interpolate_psd(freq + m * fbase) * g(freq * tbase + m) + offset_psd += interpolate_psd(freq - m * fbase) * g(freq * tbase - m) + offset_psd *= fbase + return offset_psd + + @function_timer + def _get_noisefilter_and_preconditioner(self, freq, offset_psd, offset_slices): + logfreq = np.log(freq) + logpsd = np.log(offset_psd) + logfilter = np.log(1 / offset_psd) + + def interpolate(x, psd): + result = np.zeros(x.size) + good = np.abs(x) > 1e-10 + logx = np.log(np.abs(x[good])) + logresult = np.interp(logx, logfreq, psd) + result[good] = np.exp(logresult) + return result + + def truncate(noisefilter, lim=1e-4): + icenter = noisefilter.size // 2 + ind = np.abs(noisefilter[:icenter]) > np.abs(noisefilter[0]) * lim + icut = np.argwhere(ind)[-1][0] + if icut % 2 == 0: + icut += 1 + noisefilter = np.roll(noisefilter, icenter) + noisefilter = noisefilter[icenter - icut : icenter + icut + 1] + return noisefilter + + noisefilters = [] + preconditioners = [] + for offset_slice, sigmasqs in offset_slices: + nstep = offset_slice.stop - offset_slice.start + filterlen = nstep * 2 + 1 + filterfreq = np.fft.rfftfreq(filterlen, self.step_length) + noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter))) + noisefilters.append(noisefilter) + # Build the band-diagonal preconditioner + if self.precond_width <= 1: + # Compute C_a prior + preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd))) + else: + # Compute Cholesky decomposition prior + wband = min(self.precond_width, noisefilter.size // 2) + precond_width = max(wband, min(self.precond_width, nstep)) + icenter = noisefilter.size // 2 + preconditioner = np.zeros([precond_width, nstep], dtype=np.float64) + preconditioner[0] = sigmasqs + preconditioner[:wband, :] += np.repeat( + noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1 + ) + lower = True + scipy.linalg.cholesky_banded( + preconditioner, overwrite_ab=True, lower=lower, check_finite=True + ) + preconditioners.append((preconditioner, lower)) + return noisefilters, preconditioners + + @function_timer + def get_steps(self): + """ Divide each interval into offset steps + """ + self.offset_templates = [] + self.offset_slices = [] # slices in all observations + for iobs, obs in enumerate(self.data.obs): + tod 
= obs["tod"] + common_flags = tod.local_common_flags(self.common_flags) + common_flags = (common_flags & self.common_flag_mask) != 0 + if (self.intervals is not None) and (self.intervals in obs): + intervals = obs[self.intervals] + else: + intervals = None + local_intervals = tod.local_intervals(intervals) + times = tod.local_times() + offset_slices = {} # slices in this observation + for ival in local_intervals: + length = times[ival.last] - times[ival.first] + nbase = int(np.ceil(length / self.step_length)) + # Divide the interval into steps, allowing for irregular sampling + todslices = [] + start_times = np.arange(nbase) * self.step_length + ival.start + start_indices = np.searchsorted(times, start_times) + stop_indices = np.hstack([start_indices[1:], [ival.last]]) + todslices = [] + for istart, istop in zip(start_indices, stop_indices): + todslices.append(slice(istart, istop)) + for idet, det in enumerate(tod.local_dets): + istart = self.namplitude + sigmasqs = [] + for todslice in todslices: + sigmasq = self._get_sigmasq( + tod, det, todslice, common_flags, self.detweights[iobs][det] + ) + # Register the baseline offset + self.offset_templates.append( + [self.namplitude, iobs, det, todslice, sigmasq] + ) + sigmasqs.append(sigmasq) + self.namplitude += 1 + # Keep a record of ranges of offsets that correspond + # to one detector and one interval. + # This is the domain we apply the noise filter in. + if det not in offset_slices: + offset_slices[det] = [] + offset_slices[det].append( + (slice(istart, self.namplitude), sigmasqs) + ) + self.offset_slices.append(offset_slices) + return + + @function_timer + def _get_sigmasq(self, tod, det, todslice, common_flags, detweight): + """ calculate a rough estimate of the baseline variance + for diagonal preconditioner + """ + flags = tod.local_flags(det, self.flags)[todslice] + good = (flags & self.flag_mask) == 0 + good[common_flags[todslice]] = False + ngood = np.sum(good) + sigmasq = 1 + if detweight != 0: + sigmasq /= detweight + if ngood != 0: + sigmasq /= ngood + return sigmasq + + @function_timer + def add_to_signal(self, signal, amplitudes): + offset_amplitudes = amplitudes[self.name] + last_obs = None + last_det = None + last_ref = None + todslices = [] + itemplates = [] + for itemplate, iobs, det, todslice, sigmasq in self.offset_templates: + if iobs != last_obs or det != last_det: + if len(todslices) != 0: + add_offsets_to_signal( + last_ref, todslices, offset_amplitudes, np.array(itemplates) + ) + todslices = [] + itemplates = [] + last_obs = iobs + last_det = det + last_ref = signal[iobs, det, :] + todslices.append(todslice) + itemplates.append(itemplate) + if len(todslices) != 0: + add_offsets_to_signal( + last_ref, todslices, offset_amplitudes, np.array(itemplates) + ) + return + + @function_timer + def project_signal(self, signal, amplitudes): + offset_amplitudes = amplitudes[self.name] + last_obs = None + last_det = None + last_ref = None + todslices = [] + itemplates = [] + for itemplate, iobs, det, todslice, sqsigma in self.offset_templates: + if iobs != last_obs or det != last_det: + if len(todslices) != 0: + project_signal_offsets( + last_ref, todslices, offset_amplitudes, np.array(itemplates) + ) + todslices = [] + itemplates = [] + last_obs = iobs + last_det = det + last_ref = signal[iobs, det, :] + todslices.append(todslice) + itemplates.append(itemplate) + if len(todslices) != 0: + project_signal_offsets( + last_ref, todslices, offset_amplitudes, np.array(itemplates) + ) + return + + @function_timer + def add_prior(self, 
amplitudes_in, amplitudes_out): + if not self.use_noise_prior: + return + offset_amplitudes_in = amplitudes_in[self.name] + offset_amplitudes_out = amplitudes_out[self.name] + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + slices = self.offset_slices[iobs][det] + filters = self.filters[iobs][det] + for (offsetslice, sigmasqs), noisefilter in zip(slices, filters): + amps_in = offset_amplitudes_in[offsetslice] + # scipy.signal.convolve will use either `convolve` or `fftconvolve` + # depending on the size of the inputs + amps_out = scipy.signal.convolve(amps_in, noisefilter, mode="same") + offset_amplitudes_out[offsetslice] += amps_out + return + + @function_timer + def apply_precond(self, amplitudes_in, amplitudes_out): + offset_amplitudes_in = amplitudes_in[self.name] + offset_amplitudes_out = amplitudes_out[self.name] + if self.use_noise_prior: + # C_a preconditioner + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + slices = self.offset_slices[iobs][det] + preconditioners = self.preconditioners[iobs][det] + for (offsetslice, sigmasqs), preconditioner in zip( + slices, preconditioners + ): + amps_in = offset_amplitudes_in[offsetslice] + if self.precond_width <= 1: + # Use C_a prior + # scipy.signal.convolve will use either `convolve` or `fftconvolve` + # depending on the size of the inputs + amps_out = scipy.signal.convolve( + amps_in, preconditioner, mode="same" + ) + else: + # Use pre-computed Cholesky decomposition + amps_out = scipy.linalg.cho_solve_banded( + preconditioner, + amps_in, + overwrite_b=False, + check_finite=True, + ) + offset_amplitudes_out[offsetslice] = amps_out + else: + # Diagonal preconditioner + offset_amplitudes_out[:] = offset_amplitudes_in + for itemplate, iobs, det, todslice, sigmasq in self.offset_templates: + offset_amplitudes_out[itemplate] *= sigmasq + return + + +class TemplateMatrix(TOASTMatrix): + def __init__(self, data, comm, templates=None): + """ Initialize the template matrix with a given baseline length + """ + self.data = data + self.comm = comm + self.templates = [] + for template in templates: + self.register_template(template) + return + + @function_timer + def register_template(self, template): + """ Add template to the list of templates to fit + """ + self.templates.append(template) + + @function_timer + def apply(self, amplitudes): + """ Compute and return y = F.a + """ + new_signal = self.zero_signal() + for template in self.templates: + template.add_to_signal(new_signal, amplitudes) + return new_signal + + @function_timer + def apply_transpose(self, signal): + """ Compute and return a = F^T.y + """ + new_amplitudes = self.zero_amplitudes() + for template in self.templates: + template.project_signal(signal, new_amplitudes) + return new_amplitudes + + @function_timer + def add_prior(self, amplitudes, new_amplitudes): + """ Compute a' += C_a^{-1}.a + """ + for template in self.templates: + template.add_prior(amplitudes, new_amplitudes) + return + + @function_timer + def apply_precond(self, amplitudes): + """ Compute a' = M^{-1}.a + """ + new_amplitudes = self.zero_amplitudes() + for template in self.templates: + template.apply_precond(amplitudes, new_amplitudes) + return new_amplitudes + + @function_timer + def zero_amplitudes(self): + """ Return a null amplitudes object + """ + new_amplitudes = TemplateAmplitudes(self.templates, self.comm) + return new_amplitudes + + @function_timer + def zero_signal(self): + """ Return a distributed vector of signal set 
to zero.
+
+        The zero signal object will use the same TOD objects but different cache prefix
+        """
+        new_signal = Signal(self.data, temporary=True, init_val=0)
+        return new_signal
+
+    @function_timer
+    def clean_signal(self, signal, amplitudes, in_place=True):
+        """ Clean the given distributed signal vector by subtracting
+        the templates multiplied by the given amplitudes.
+        """
+        if in_place:
+            outsignal = signal
+        else:
+            outsignal = signal.copy()
+        template_tod = self.apply(amplitudes)
+        outsignal -= template_tod
+        return outsignal
+
+
+class TemplateAmplitudes(TOASTVector):
+    """ TemplateAmplitudes objects hold local and shared template amplitudes
+    """
+
+    def __init__(self, templates, comm):
+        self.comm = comm
+        self.amplitudes = OrderedDict()
+        self.comms = OrderedDict()
+        for template in templates:
+            self.amplitudes[template.name] = np.zeros(template.namplitude)
+            self.comms[template.name] = template.comm
+        return
+
+    @function_timer
+    def __str__(self):
+        result = "template amplitudes:"
+        for name, values in self.amplitudes.items():
+            result += '\n"{}" : \n{}'.format(name, values)
+        return result
+
+    @function_timer
+    def dot(self, other):
+        """ Compute the dot product between the two amplitude vectors
+        """
+        total = 0
+        for name, values in self.amplitudes.items():
+            dp = np.dot(values, other.amplitudes[name])
+            comm = self.comms[name]
+            if comm is not None:
+                dp = comm.reduce(dp, op=MPI.SUM)
+                if comm.rank != 0:
+                    dp = 0
+            total += dp
+        if self.comm is not None:
+            total = self.comm.allreduce(total, op=MPI.SUM)
+        return total
+
+    @function_timer
+    def __getitem__(self, key):
+        return self.amplitudes[key]
+
+    @function_timer
+    def __setitem__(self, key, value):
+        self.amplitudes[key][:] = value
+        return
+
+    @function_timer
+    def copy(self):
+        new_amplitudes = TemplateAmplitudes([], self.comm)
+        for name, values in self.amplitudes.items():
+            new_amplitudes.amplitudes[name] = self.amplitudes[name].copy()
+            new_amplitudes.comms[name] = self.comms[name]
+        return new_amplitudes
+
+    @function_timer
+    def __iadd__(self, other):
+        """ Add the provided amplitudes to this one
+        """
+        if isinstance(other, TemplateAmplitudes):
+            for name, values in self.amplitudes.items():
+                values += other.amplitudes[name]
+        else:
+            for name, values in self.amplitudes.items():
+                values += other
+        return self
+
+    @function_timer
+    def __isub__(self, other):
+        """ Subtract the provided amplitudes from this one
+        """
+        if isinstance(other, TemplateAmplitudes):
+            for name, values in self.amplitudes.items():
+                values -= other.amplitudes[name]
+        else:
+            for name, values in self.amplitudes.items():
+                values -= other
+        return self
+
+    @function_timer
+    def __imul__(self, other):
+        """ Scale the amplitudes
+        """
+        for name, values in self.amplitudes.items():
+            values *= other
+        return self
+
+    @function_timer
+    def __itruediv__(self,
other): + """ Divide the amplitudes + """ + for name, values in self.amplitudes.items(): + values /= other + return self + + +class TemplateCovariance(TOASTMatrix): + def __init__(self): + pass + + +class ProjectionMatrix(TOASTMatrix): + """ Projection matrix: + Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} + = I - P B, + where + `P` is the pointing matrix + `N` is the noise matrix and + `B` is the binning operator + """ + + def __init__( + self, + data, + comm, + detweights, + nnz, + white_noise_cov_matrix, + common_flag_mask=1, + flag_mask=1, + ): + self.data = data + self.comm = comm + self.detweights = detweights + self.dist_map = DistPixels(data, comm=self.comm, nnz=nnz, dtype=np.float64) + self.white_noise_cov_matrix = white_noise_cov_matrix + self.common_flag_mask = common_flag_mask + self.flag_mask = flag_mask + + @function_timer + def apply(self, signal): + """ Return Z.y + """ + self.bin_map(signal.name) + new_signal = signal.copy() + scanned_signal = Signal(self.data, temporary=True, init_val=0) + self.scan_map(scanned_signal.name) + new_signal -= scanned_signal + return new_signal + + @function_timer + def bin_map(self, name): + if self.dist_map.data is not None: + self.dist_map.data.fill(0.0) + # FIXME: OpAccumDiag should support separate detweights for each observation + build_dist_map = OpAccumDiag( + zmap=self.dist_map, + name=name, + detweights=self.detweights[0], + common_flag_mask=self.common_flag_mask, + flag_mask=self.flag_mask, + ) + build_dist_map.exec(self.data) + self.dist_map.allreduce() + covariance_apply(self.white_noise_cov_matrix, self.dist_map) + return + + @function_timer + def scan_map(self, name): + scansim = OpSimScan(distmap=self.dist_map, out=name) + scansim.exec(self.data) + return + + +class NoiseMatrix(TOASTMatrix): + def __init__( + self, comm, detweights, weightmap=None, common_flag_mask=1, flag_mask=1 + ): + self.comm = comm + self.detweights = detweights + self.weightmap = weightmap + self.common_flag_mask = common_flag_mask + self.flag_mask = flag_mask + + @function_timer + def apply(self, signal, in_place=False): + """ Multiplies the signal with N^{-1}. + + Note that the quality flags cause the corresponding diagonal + elements of N^{-1} to be zero. + """ + if in_place: + new_signal = signal + else: + new_signal = signal.copy() + for iobs, detweights in enumerate(self.detweights): + for det, detweight in detweights.items(): + new_signal[iobs, det, :] *= detweight + # Set flagged samples to zero + new_signal.apply_flags(self.common_flag_mask, self.flag_mask) + # Scale the signal with the weight map + new_signal.apply_weightmap(self.weightmap) + return new_signal + + def apply_transpose(self, signal): + # Symmetric matrix + return self.apply(signal) + + +class PointingMatrix(TOASTMatrix): + def __init__(self): + pass + + +class Signal(TOASTVector): + """ Signal class wraps the TOAST data object but represents only + one cached signal flavor. 
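+
+    Signal instances behave like distributed vectors over all local detectors.
+    A minimal sketch (hypothetical flavor name, assuming a populated
+    toast.Data instance in ``data``):
+
+        signal = Signal(data, name="tod_signal")
+        cleaned = signal.copy()               # independent temporary copy
+        cleaned -= signal                     # per-detector arithmetic
+        chunk = cleaned[0, "det_01", 0:100]   # (obs index, det name, samples)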
+ """ + + def __init__(self, data, name=None, init_val=None, temporary=False): + self.data = data + self.temporary = temporary + if self.temporary: + self.name = get_temporary_name() + else: + self.name = name + if init_val is not None: + cacheinit = OpCacheInit(name=self.name, init_val=init_val) + cacheinit.exec(data) + return + + def __del__(self): + if self.temporary: + cacheclear = OpCacheClear(self.name) + cacheclear.exec(self.data) + free_temporary_name(self.name) + return + + @function_timer + def apply_flags(self, common_flag_mask, flag_mask): + """ Set the signal at flagged samples to zero + """ + flags_apply = OpFlagsApply( + name=self.name, common_flag_mask=common_flag_mask, flag_mask=flag_mask + ) + flags_apply.exec(self.data) + return + + @function_timer + def apply_weightmap(self, weightmap): + """ Scale the signal with the provided weight map + """ + if weightmap is None: + return + scanscale = OpScanScale(distmap=weightmap, name=self.name) + scanscale.exec(self.data) + return + + @function_timer + def copy(self): + """ Return a new Signal object with independent copies of the + signal vectors. + """ + new_signal = Signal(self.data, temporary=True) + copysignal = OpCacheCopy(self.name, new_signal.name, force=True) + copysignal.exec(self.data) + return new_signal + + @function_timer + def __getitem__(self, key): + """ Return a reference to a slice of TOD cache + """ + iobs, det, todslice = key + tod = self.data.obs[iobs]["tod"] + return tod.local_signal(det, self.name)[todslice] + + @function_timer + def __setitem__(self, key, value): + """ Set slice of TOD cache + """ + iobs, det, todslice = key + tod = self.data.obs[iobs]["tod"] + tod.local_signal(det, self.name)[todslice] = value + return + + @function_timer + def __iadd__(self, other): + """ Add the provided Signal object to this one + """ + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + if isinstance(other, Signal): + self[iobs, det, :] += other[iobs, det, :] + else: + self[iobs, det, :] += other + return self + + @function_timer + def __isub__(self, other): + """ Subtract the provided Signal object from this one + """ + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + if isinstance(other, Signal): + self[iobs, det, :] -= other[iobs, det, :] + else: + self[iobs, det, :] -= other + return self + + @function_timer + def __imul__(self, other): + """ Scale the signal + """ + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + self[iobs, det, :] *= other + return self + + @function_timer + def __itruediv__(self, other): + """ Divide the signal + """ + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + for det in tod.local_dets: + self[iobs, det, :] /= other + return self + + +class PCGSolver: + """ Solves `x` in A.x = b + """ + + def __init__( + self, + comm, + templates, + noise, + projection, + signal, + niter_min=3, + niter_max=100, + convergence_limit=1e-12, + ): + self.comm = comm + if comm is None: + self.rank = 0 + else: + self.rank = comm.rank + self.templates = templates + self.noise = noise + self.projection = projection + self.signal = signal + self.niter_min = niter_min + self.niter_max = niter_max + self.convergence_limit = convergence_limit + + self.rhs = self.templates.apply_transpose( + self.noise.apply(self.projection.apply(self.signal)) + ) + # print("RHS {}: {}".format(self.signal.name, self.rhs)) # DEBUG + return + + @function_timer + def apply_lhs(self, 
amplitudes): + """ Return A.x + """ + new_amplitudes = self.templates.apply_transpose( + self.noise.apply(self.projection.apply(self.templates.apply(amplitudes))) + ) + self.templates.add_prior(amplitudes, new_amplitudes) + return new_amplitudes + + @function_timer + def solve(self): + """ Standard issue PCG solution of A.x = b + + Returns: + x : the least squares solution + """ + log = Logger.get() + timer0 = Timer() + timer0.start() + timer = Timer() + timer.start() + # Initial guess is zero amplitudes + guess = self.templates.zero_amplitudes() + # print("guess:", guess) # DEBUG + # print("RHS:", self.rhs) # DEBUG + residual = self.rhs.copy() + # print("residual(1):", residual) # DEBUG + residual -= self.apply_lhs(guess) + # print("residual(2):", residual) # DEBUG + precond_residual = self.templates.apply_precond(residual) + proposal = precond_residual.copy() + sqsum = precond_residual.dot(residual) + init_sqsum, best_sqsum, last_best = sqsum, sqsum, sqsum + if self.rank == 0: + log.info("Initial residual: {}".format(init_sqsum)) + # Iterate to convergence + for iiter in range(self.niter_max): + if not np.isfinite(sqsum): + raise RuntimeError("Residual is not finite") + alpha = sqsum + alpha /= proposal.dot(self.apply_lhs(proposal)) + alpha_proposal = proposal.copy() + alpha_proposal *= alpha + guess += alpha_proposal + residual -= self.apply_lhs(alpha_proposal) + del alpha_proposal + # Prepare for next iteration + precond_residual = self.templates.apply_precond(residual) + beta = 1 / sqsum + # Check for convergence + sqsum = precond_residual.dot(residual) + if self.rank == 0: + timer.report_clear( + "Iter = {:4} relative residual: {:12.4e}".format( + iiter, sqsum / init_sqsum + ) + ) + if sqsum < init_sqsum * self.convergence_limit or sqsum < 1e-30: + if self.rank == 0: + timer0.report_clear( + "PCG converged after {} iterations".format(iiter) + ) + break + best_sqsum = min(sqsum, best_sqsum) + if iiter % 10 == 0 and iiter >= self.niter_min: + if last_best < best_sqsum * 2: + if self.rank == 0: + timer0.report_clear( + "PCG stalled after {} iterations".format(iiter) + ) + break + last_best = best_sqsum + # Select the next direction + beta *= sqsum + proposal *= beta + proposal += precond_residual + log.info("{} : Solution: {}".format(self.rank, guess)) # DEBUG + return guess + + +class OpMapMaker(Operator): + + # Choose one bit in the common flags for storing gap information + gap_bit = 2 ** 7 + # Choose one bit in the quality flags for storing processing mask + mask_bit = 2 ** 7 + + def __init__( + self, + nside=64, + nnz=3, + name=None, + outdir="out", + outprefix="", + write_hits=True, + zip_maps=False, + write_wcov_inv=True, + write_wcov=True, + write_binned=True, + write_destriped=True, + write_rcond=True, + rcond_limit=1e-3, + baseline_length=100000, + maskfile=None, + weightmapfile=None, + common_flag_mask=1, + flag_mask=1, + intervals="intervals", + subharmonic_order=None, + iter_min=3, + iter_max=100, + use_noise_prior=True, + precond_width=20, + pixels="pixels", + ): + self.nside = nside + self.npix = 12 * self.nside ** 2 + self.name = name + self.nnz = nnz + self.ncov = self.nnz * (self.nnz + 1) // 2 + self.outdir = outdir + self.outprefix = outprefix + self.write_hits = write_hits + self.zip_maps = zip_maps + self.write_wcov_inv = write_wcov_inv + self.write_wcov = write_wcov + self.write_binned = write_binned + self.write_destriped = write_destriped + self.write_rcond = write_rcond + self.rcond_limit = rcond_limit + self.baseline_length = baseline_length + self.maskfile = 
maskfile + self.weightmap = None + self.weightmapfile = weightmapfile + self.common_flag_mask = common_flag_mask + self.flag_mask = flag_mask + self.intervals = intervals + self.subharmonic_order = subharmonic_order + self.iter_min = iter_min + self.iter_max = iter_max + self.use_noise_prior = use_noise_prior + self.precond_width = precond_width + self.pixels = pixels + + def report_timing(self): + # gt.stop_all() + all_timers = gather_timers(comm=self.comm) + names = OrderedDict() + names["OpMapMaker.exec"] = OrderedDict( + [ + ("OpMapMaker.flag_gaps", None), + ("OpMapMaker.get_detweights", None), + ("OpMapMaker.initialize_binning", None), + ("OpMapMaker.bin_map", None), + ("OpMapMaker.load_mask", None), + ("OpMapMaker.load_weightmap", None), + ("OpMapMaker.get_templatematrix", None), + ("OpMapMaker.get_noisematrix", None), + ("OpMapMaker.get_projectionmatrix", None), + ("OpMapMaker.get_solver", None), + ( + "PCGSolver.solve", + OrderedDict( + [ + ("TemplateMatrix.zero_amplitudes", None), + ("PCGSolver.apply_lhs", None), + ("TemplateMatrix.apply_precond", None), + ] + ), + ), + ("TemplateMatrix.clean_signal", None), + ] + ) + names["OpMapMaker.exec"]["PCGSolver.solve"][ + "PCGSolver.apply_lhs" + ] = OrderedDict( + [ + ( + "TemplateMatrix.apply_transpose", + OrderedDict( + [ + ("OffsetTemplate.project_signal", None), + ("SubharmonicTemplate.project_signal", None), + ] + ), + ), + ("NoiseMatrix.apply", None), + ( + "ProjectionMatrix.apply", + OrderedDict( + [ + ( + "ProjectionMatrix.bin_map", + OrderedDict( + [ + ( + "OpAccumDiag.exec", + OrderedDict( + [ + ( + "OpAccumDiag.exec.apply_flags", + None, + ), + ( + "OpAccumDiag.exec.global_to_local", + None, + ), + ("cov_accum_zmap", None), + ] + ), + ), + ("covariance_apply", None), + ] + ), + ), + ( + "ProjectionMatrix.scan_map", + OrderedDict( + [ + ( + "OpSimScan.exec", + OrderedDict( + [ + ( + "OpSimScan.exec.global_to_local", + None, + ), + ("OpSimScan.exec.scan_map", None), + ] + ), + ) + ] + ), + ), + ] + ), + ), + ( + "TemplateMatrix.apply", + OrderedDict( + [ + ("OffsetTemplate.add_to_signal", None), + ("SubharmonicTemplate.add_to_signal", None), + ] + ), + ), + ("TemplateMatrix.add_prior", None), + ] + ) + if self.rank == 0: + print("all_timers:", all_timers) # DEBUG + + def report_line(name, indent): + full_name = name + if full_name not in all_timers: + full_name += " (function_timer)" + if full_name not in all_timers: + return + t = all_timers[full_name]["time_max"] + print(indent, "{:.<60}{:8.1f}".format(name, t)) + return + + def report(names, indent): + if names is None: + return + if isinstance(names, str): + report_line(names, indent) + else: + for name, entries in names.items(): + report_line(name, indent) + report(entries, " " * 8 + indent) + + report(names, "-") + print(flush=True) + return + + @function_timer + def get_noisematrix(self, data): + timer = Timer() + timer.start() + noise = NoiseMatrix( + self.comm, + self.detweights, + self.weightmap, + common_flag_mask=(self.common_flag_mask | self.gap_bit), + flag_mask=(self.flag_mask | self.mask_bit), + ) + if self.rank == 0: + timer.report_clear("Initialize projection matrix") + return noise + + @function_timer + def get_projectionmatrix(self, data): + timer = Timer() + timer.start() + projection = ProjectionMatrix( + data, + self.comm, + self.detweights, + self.nnz, + self.white_noise_cov_matrix, + common_flag_mask=(self.common_flag_mask | self.gap_bit), + # Do not add mask_bit here since it is not + # included in the white noise matrices + flag_mask=self.flag_mask, + ) 
+ if self.rank == 0: + timer.report_clear("Initialize projection matrix") + return projection + + @function_timer + def get_templatematrix(self, data): + timer = Timer() + timer.start() + log = Logger.get() + templatelist = [] + if self.baseline_length is not None: + log.info( + "Initializing offset template, step_length = {}".format( + self.baseline_length + ) + ) + templatelist.append( + OffsetTemplate( + data, + self.detweights, + step_length=self.baseline_length, + intervals=self.intervals, + common_flag_mask=(self.common_flag_mask | self.gap_bit), + flag_mask=(self.flag_mask | self.mask_bit), + use_noise_prior=self.use_noise_prior, + precond_width=self.precond_width, + ) + ) + if self.subharmonic_order is not None: + log.info( + "Initializing subharmonic template, order = {}".format( + self.subharmonic_order + ) + ) + templatelist.append( + SubharmonicTemplate( + data, + self.detweights, + order=self.subharmonic_order, + intervals=self.intervals, + common_flag_mask=(self.common_flag_mask | self.gap_bit), + flag_mask=(self.flag_mask | self.mask_bit), + ) + ) + if len(templatelist) == 0: + if self.rank == 0: + log.info("No templates to fit, no destriping done.") + templates = None + else: + templates = TemplateMatrix(data, self.comm, templatelist) + if self.rank == 0: + timer.report_clear("Initialize templates") + return templates + + @function_timer + def get_solver(self, data, templates, noise, projection, signal): + timer = Timer() + timer.start() + solver = PCGSolver( + self.comm, + templates, + noise, + projection, + signal, + niter_min=self.iter_min, + niter_max=self.iter_max, + ) + if self.rank == 0: + timer.report_clear("Initialize PCG solver") + return solver + + @function_timer + def load_mask(self, data): + """ Load processing mask and generate appropriate flag bits + """ + if self.maskfile is None: + return + log = Logger.get() + timer = Timer() + timer.start() + if self.rank == 0 and not os.path.isfile(self.maskfile): + raise RuntimeError( + "Processing mask does not exist: {}".format(self.maskfile) + ) + distmap = DistPixels(data, comm=self.comm, nnz=1, dtype=np.float32) + distmap.read_healpix_fits(self.maskfile) + if self.rank == 0: + timer.report_clear("Read processing mask from {}".format(self.maskfile)) + + scanmask = OpScanMask(distmap=distmap, flagmask=self.mask_bit) + scanmask.exec(data) + + if self.rank == 0: + timer.report_clear("Apply processing mask") + + return + + @function_timer + def load_weightmap(self, data): + """ Load weight map + """ + if self.weightmapfile is None: + return + log = Logger.get() + timer = Timer() + timer.start() + if self.rank == 0 and not os.path.isfile(self.weightmapfile): + raise RuntimeError( + "Weight map does not exist: {}".format(self.weightmapfile) + ) + self.weightmap = DistPixels(data, comm=self.comm, nnz=1, dtype=np.float32) + self.weightmap.read_healpix_fits(self.weightmapfile) + if self.rank == 0: + timer.report_clear("Read weight map from {}".format(self.weightmapfile)) + return + + @function_timer + def exec(self, data, comm=None): + log = Logger.get() + timer = Timer() + + # Initialize objects + if comm is None: + self.comm = data.comm.comm_world + else: + self.comm = comm + if self.comm is None: + self.rank = 0 + else: + self.rank = self.comm.rank + self.flag_gaps(data) + self.get_detweights(data) + self.initialize_binning(data) + if self.write_binned: + self.bin_map(data, "binned") + self.load_mask(data) + self.load_weightmap(data) + + # Solve template amplitudes + + templates = self.get_templatematrix(data) + if 
templates is None: + return + noise = self.get_noisematrix(data) + projection = self.get_projectionmatrix(data) + signal = Signal(data, name=self.name) + solver = self.get_solver(data, templates, noise, projection, signal) + timer.start() + amplitudes = solver.solve() + if self.rank == 0: + timer.report_clear("Solve amplitudes") + + # Clean TOD + templates.clean_signal(signal, amplitudes) + if self.rank == 0: + timer.report_clear("Clean TOD") + + if self.write_destriped: + self.bin_map(data, "destriped") + + return + + @function_timer + def flag_gaps(self, data): + """ Add flag bits between the intervals + """ + timer = Timer() + timer.start() + flag_gaps = OpFlagGaps(common_flag_value=self.gap_bit, intervals=self.intervals) + flag_gaps.exec(data) + if self.rank == 0: + timer.report_clear("Flag gaps") + return + + @function_timer + def bin_map(self, data, suffix): + log = Logger.get() + timer = Timer() + + dist_map = DistPixels(data, comm=self.comm, nnz=self.nnz, dtype=np.float64) + if dist_map.data is not None: + dist_map.data.fill(0.0) + # FIXME: OpAccumDiag should support separate detweights for each observation + build_dist_map = OpAccumDiag( + zmap=dist_map, + name=self.name, + detweights=self.detweights[0], + common_flag_mask=(self.common_flag_mask | self.gap_bit), + flag_mask=self.flag_mask, + ) + build_dist_map.exec(data) + dist_map.allreduce() + if self.rank == 0: + timer.report_clear(" Build noise-weighted map") + + covariance_apply(self.white_noise_cov_matrix, dist_map) + if self.rank == 0: + timer.report_clear(" Apply noise covariance") + + fname = os.path.join(self.outdir, self.outprefix + suffix + ".fits") + if self.zip_maps: + fname += ".gz" + dist_map.write_healpix_fits(fname) + if self.rank == 0: + timer.report_clear(" Write map to {}".format(fname)) + + return + + @function_timer + def get_detweights(self, data): + """ Each observation will have its own detweight dictionary + """ + timer = Timer() + timer.start() + self.detweights = [] + for obs in data.obs: + tod = obs["tod"] + if "noise" in obs: + noise = obs["noise"] + else: + noise = None + detweights = {} + for det in tod.local_dets: + if noise is None: + noisevar = 1 + else: + # Determine an approximate white noise level, + # accounting for the fact that the PSD may have a + # transfer function roll-off near Nyquist + freq = noise.freq(det) + psd = noise.psd(det) + rate = noise.rate(det) + ind = np.logical_and(freq > rate * 0.2, freq < rate * 0.4) + noisevar = np.median(psd[ind]) + detweights[det] = 1 / noisevar + self.detweights.append(detweights) + if self.rank == 0: + timer.report_clear("Get detector weights") + return + + @function_timer + def initialize_binning(self, data): + log = Logger.get() + timer = Timer() + timer.start() + + if self.rank == 0: + os.makedirs(self.outdir, exist_ok=True) + + self.white_noise_cov_matrix = DistPixels( + data, comm=self.comm, nnz=self.ncov, dtype=np.float64 + ) + if self.white_noise_cov_matrix.data is not None: + self.white_noise_cov_matrix.data.fill(0.0) + + hits = DistPixels(data, comm=self.comm, nnz=1, dtype=np.int64) + if hits.data is not None: + hits.data.fill(0) + + # compute the hits and covariance once, since the pointing and noise + # weights are fixed. 
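+        # For nnz = 3 (IQU), each pixel of the inverse white noise covariance
+        # holds the ncov = 3 * (3 + 1) / 2 = 6 unique elements of a symmetric
+        # 3x3 block: the detweight-weighted outer products of the Stokes
+        # weight vectors, accumulated over all samples hitting that pixel.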
+ # FIXME: OpAccumDiag should support separate weights for each observation + + build_wcov = OpAccumDiag( + detweights=self.detweights[0], + invnpp=self.white_noise_cov_matrix, + hits=hits, + common_flag_mask=(self.common_flag_mask | self.gap_bit), + flag_mask=self.flag_mask, + ) + build_wcov.exec(data) + + if self.comm is not None: + self.comm.Barrier() + if self.rank == 0: + timer.report_clear("Accumulate N_pp'^1") + + self.white_noise_cov_matrix.allreduce() + + if self.comm is not None: + self.comm.Barrier() + if self.rank == 0: + timer.report_clear("All reduce N_pp'^1") + + if self.write_hits: + hits.allreduce() + fname = os.path.join(self.outdir, self.outprefix + "hits.fits") + if self.zip_maps: + fname += ".gz" + hits.write_healpix_fits(fname) + if self.rank == 0: + log.info("Wrote hits to {}".format(fname)) + if self.rank == 0: + timer.report_clear("Write hits") + + if self.write_wcov_inv: + fname = os.path.join(self.outdir, self.outprefix + "invnpp.fits") + if self.zip_maps: + fname += ".gz" + self.white_noise_cov_matrix.write_healpix_fits(fname) + if self.rank == 0: + log.info("Wrote inverse white noise covariance to {}".format(fname)) + if self.rank == 0: + timer.report_clear("Write N_pp'^1") + + if self.write_rcond: + # Reciprocal condition numbers + rcond = covariance_rcond(self.white_noise_cov_matrix) + if self.rank == 0: + timer.report_clear("Compute reciprocal condition numbers") + fname = os.path.join(self.outdir, self.outprefix + "rcond.fits") + if self.zip_maps: + fname += ".gz" + rcond.write_healpix_fits(fname) + if self.rank == 0: + log.info("Wrote reciprocal condition numbers to {}".format(fname)) + if self.rank == 0: + timer.report_clear("Write rcond") + + # Invert the white noise covariance in each pixel + covariance_invert(self.white_noise_cov_matrix, self.rcond_limit) + if self.rank == 0: + timer.report_clear("Invert N_pp'^1") + + if self.write_wcov: + fname = os.path.join(self.outdir, self.outprefix + "npp.fits") + if self.zip_maps: + fname += ".gz" + self.white_noise_cov_matrix.write_healpix_fits(fname) + if self.rank == 0: + log.info("Wrote white noise covariance to {}".format(fname)) + if self.rank == 0: + timer.report_clear("Write N_pp'") + + return diff --git a/src/toast/future_ops/noise_model.py b/src/toast/future_ops/noise_model.py new file mode 100644 index 000000000..9e8c1bfb1 --- /dev/null +++ b/src/toast/future_ops/noise_model.py @@ -0,0 +1,91 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +import traitlets + +from ..utils import Environment, Logger + +from ..timing import function_timer, Timer + +from ..tod import AnalyticNoise + +from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity + +from ..operator import Operator + + +@trait_docs +class DefaultNoiseModel(Operator): + """Create a default noise model from focalplane parameters. + + A noise model is used by other operations such as simulating noise timestreams + and also map making. This operator uses the detector properties from the + focalplane in each observation to create a simple AnalyticNoise model. 
+ + """ + + # Class traits + + API = traitlets.Int(0, help="Internal interface version for this operator") + + noisekey = traitlets.Unicode( + "noise", help="The observation key to use when storing the noise model" + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + comm = data.comm + + for obs in data.obs: + # Get the detectors we are using for this observation + dets = obs.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + + # The focalplane for this observation + focalplane = obs.telescope.focalplane + + # Every process has a copy of the focalplane, and every process may want + # the noise model for all detectors (not just our local detectors). + # So we simply have every process generate the same noise model locally. + + fmin = {} + fknee = {} + alpha = {} + NET = {} + rates = {} + for d in dets: + rates[d] = focalplane.sample_rate + fmin[d] = focalplane[d]["fmin"] + fknee[d] = focalplane[d]["fknee"] + alpha[d] = focalplane[d]["alpha"] + NET[d] = focalplane[d]["NET"] + + noise = AnalyticNoise( + rate=rates, fmin=fmin, detectors=dets, fknee=fknee, alpha=alpha, NET=NET + ) + + obs[self.noisekey] = noise + + return + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + return list() + + def _provides(self): + prov = [self.noisekey] + return prov + + def _accelerators(self): + return list() diff --git a/src/toast/future_ops/pipeline.py b/src/toast/future_ops/pipeline.py new file mode 100644 index 000000000..5228359a1 --- /dev/null +++ b/src/toast/future_ops/pipeline.py @@ -0,0 +1,144 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, List + +from ..operator import Operator + + +class Pipeline(Operator): + """Class representing a sequence of Operators.""" + + # Class traits + + API = traitlets.Int(0, help="Internal interface version for this operator") + + operators = List(allow_none=True, help="List of Operator instances to run.") + + detector_sets = List( + ["ALL"], + help="List of detector sets. 
+    )
+
+    @traitlets.validate("detector_sets")
+    def _check_detsets(self, proposal):
+        detsets = proposal["value"]
+        if len(detsets) == 0:
+            raise traitlets.TraitError(
+                "detector_sets must be a list with at least one entry ('ALL' and 'SINGLE' are valid entries)"
+            )
+        for dset in detsets:
+            if (dset != "ALL") and (dset != "SINGLE"):
+                # Not a built-in name, must be an actual list of detectors
+                if isinstance(dset, str) or len(dset) == 0:
+                    raise traitlets.TraitError(
+                        "A detector set must be a list of detectors or 'ALL' / 'SINGLE'"
+                    )
+                for d in dset:
+                    if not isinstance(d, str):
+                        raise traitlets.TraitError(
+                            "Each element of a det set should be a detector name"
+                        )
+        return detsets
+
+    @traitlets.validate("operators")
+    def _check_operators(self, proposal):
+        ops = proposal["value"]
+        if ops is None:
+            return ops
+        for op in ops:
+            if not isinstance(op, Operator):
+                raise traitlets.TraitError(
+                    "operators must be a list of Operator instances or None"
+                )
+        return ops
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+
+        acc = self.accelerators()
+
+        if "CUDA" in acc:
+            # All our operators support CUDA. Stage any required data
+            pass
+
+        if detectors is not None:
+            msg = "Use the 'detector_sets' option to control a Pipeline"
+            log.error(msg)
+            raise RuntimeError(msg)
+
+        for dset in self.detector_sets:
+            if dset == "ALL":
+                for op in self.operators:
+                    op.exec(data)
+            elif dset == "SINGLE":
+                # We are running one detector at a time
+                raise NotImplementedError("SINGLE detectors not implemented yet")
+            else:
+                # We are running sets of detectors at once. We first go through all
+                # observations and find the set of detectors used by each row of the
+                # process grid.
+                raise NotImplementedError("detector sets not implemented yet")
+
+        # Copy to / from accelerator...
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        if self.operators is not None:
+            for op in self.operators:
+                op.finalize(data)
+
+    def _requires(self):
+        # Work through the operator list in reverse order and prune intermediate
+        # products.  requires() and provides() return lists of keys, so convert
+        # to sets before taking unions / differences.
+        if self.operators is None:
+            return dict()
+        keys = ["meta", "detdata", "shared", "intervals"]
+        req = {x: set() for x in keys}
+        for op in reversed(self.operators):
+            oreq = op.requires()
+            oprov = op.provides()
+            for k in keys:
+                req[k] |= set(oreq[k])
+                req[k] -= set(oprov[k])
+        for k in keys:
+            req[k] = list(req[k])
+        return req
+
+    def _provides(self):
+        # Work through the operator list and prune intermediate products.
+        if self.operators is None:
+            return dict()
+        keys = ["meta", "detdata", "shared", "intervals"]
+        prov = {x: set() for x in keys}
+        for op in self.operators:
+            oreq = op.requires()
+            oprov = op.provides()
+            for k in keys:
+                prov[k] |= set(oprov[k])
+                prov[k] -= set(oreq[k])
+        for k in keys:
+            prov[k] = list(prov[k])
+        return prov
+
+    def _accelerators(self):
+        # This is just the intersection of results from all operators in our list.
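+        # For example, if one operator reports ["CUDA"] and another reports
+        # an empty list, the Pipeline as a whole reports an empty list.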
+ if self.operators is None: + return list() + acc = set() + for op in self.operators: + for support in op.accelerators(): + acc.add(support) + for op in self.operators: + supported = op.accelerators() + for a in list(acc): + if a not in supported: + acc.remove(a) + return list(acc) diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/future_ops/pointing_healpix.py new file mode 100644 index 000000000..b774702f4 --- /dev/null +++ b/src/toast/future_ops/pointing_healpix.py @@ -0,0 +1,367 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +from ..utils import Environment, Logger + +from ..healpix import HealpixPixels + +from ..operator import Operator + +from ..config import ObjectConfig + +from ..timing import function_timer + +from .. import qarray as qa + +from ..pixels import PixelDistribution + +from .._libtoast import pointing_matrix_healpix + + +class PointingHealpix(Operator): + """Operator which generates I/Q/U healpix pointing weights. + + Given the individual detector pointing, this computes the pointing weights + assuming that the detector is a linear polarizer followed by a total + power measurement. An optional dictionary of pointing weight calibration factors + may be specified for each observation. + + For each observation, the cross-polar response for every detector is obtained from + the Focalplane, and if a HWP angle timestream exists, then a perfect HWP Mueller + matrix is included in the response. + + The timestream model is then (see Jones, et al, 2006): + + .. math:: + d = cal \\left[\\frac{(1+eps)}{2} I + \\frac{(1-eps)}{2} \\left[Q \\cos{2a} + U \\sin{2a}\\right]\\right] + + Or, if a HWP is included in the response with time varying angle "w", then + the total response is: + + .. math:: + d = cal \\left[\\frac{(1+eps)}{2} I + \\frac{(1-eps)}{2} \\left[Q \\cos{2a+4w} + U \\sin{2a+4w}\\right]\\right] + + Args: + config (dict): Configuration parameters. + + """ + + def __init__(self, config): + super().__init__(config) + self._parse() + + # Initialize the healpix pixels object + self.hpix = HealpixPixels(self.config["nside"]) + + self._nnz = 1 + if self.config["mode"] == "IQU": + self._nnz = 3 + + self._n_pix = 12 * self.config["nside"] ** 2 + self._n_pix_submap = 12 * self.config["nside_submap"] ** 2 + self._n_submap = (self.config["nside"] // self.config["nside_submap"]) ** 2 + + self._local_submaps = None + if self.config["create_dist"] is not None: + self._local_submaps = np.zeros(self._n_submap, dtype=np.bool) + + @classmethod + def defaults(cls): + """(Class method) Return options supported by the operator and their defaults. + + This returns an ObjectConfig instance, and each entry should have a help + string. + + Returns: + (ObjectConfig): The options. 
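+
+        A typical workflow might adjust a few entries before constructing the
+        operator (a sketch; ObjectConfig supports item assignment, which the
+        operator's own _parse() method relies on):
+
+            opts = PointingHealpix.defaults()
+            opts["nside"] = 512
+            opts["mode"] = "IQU"
+            pointing = PointingHealpix(opts)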
+ + """ + opts = ObjectConfig() + + opts.add("class", "toast.future_ops.PointingHealpix", "The class name") + + opts.add("API", 0, "(Internal interface version for this operator)") + + opts.add("pixels", "pixels", "The observation name of the output pixels") + + opts.add("weights", "weights", "The observation name of the output weights") + + opts.add( + "quats", + None, + "If not None, save detector quaternions to this name (for debugging)", + ) + + opts.add("nside", 64, "The NSIDE resolution") + + opts.add("nside_submap", 16, "The submap resolution") + + opts.add("nest", False, "If True, use NESTED ordering instead of RING") + + opts.add("mode", "I", "The Stokes weights to generate (I or IQU)") + + opts.add("flags", None, "Optional common timestream flags to apply") + + opts.add("flag_mask", 0, "Bit mask value for optional flagging") + + opts.add( + "create_dist", + None, + "Create the submap distribution for all detectors and store in the Data key specified", + ) + + opts.add("single_precision", False, "If True, use 32bit int / float in output") + + opts.add( + "cal", + None, + "The observation key with a dictionary of pointing weight calibration for each det", + ) + + return opts + + def _parse(self): + log = Logger.get() + if self.config["nside_submap"] >= self.config["nside"]: + newsub = self.config["nside"] // 4 + if newsub == 0: + newsub = 1 + log.warning("nside_submap >= nside, setting to {}".format(newsub)) + self.config["nside_submap"] = newsub + if self.config["mode"] not in ["I", "IQU"]: + msg = "Invalide mode '{}', allowed values are 'I' and 'IQU'".format( + self.config["mode"] + ) + log.error(msg) + raise RuntimeError(msg) + + @function_timer + def exec(self, data, detectors=None): + """Create pixels and weights. + + This iterates over all observations and specified detectors, and creates + the pixel and weight arrays representing the pointing matrix. Data is stored + in newly created DetectorData members of each observation. + + The locally hit submaps are optionally computed. This is typically only done + when initially computing the pointing for all detectors. + + Args: + data (toast.Data): The distributed data. + detectors (list): A list of detector names or indices. If None, this + indicates a list of all detectors. + + Returns: + None + + """ + env = Environment.get() + log = Logger.get() + + # We do the calculation over buffers of timestream samples to reduce memory + # overhead from temporary arrays. 
+        tod_buffer_length = env.tod_buffer_length()
+
+        for obs in data.obs:
+            # Get the detectors we are using for this observation
+            dets = obs.select_local_detectors(detectors)
+            if len(dets) == 0:
+                # Nothing to do for this observation
+                continue
+
+            # The number of samples on this process
+            n_samp = obs.local_samples[1]
+
+            # See if we have a HWP angle
+            hwpang = None
+            try:
+                hwpang = obs.hwp_angle
+            except KeyError:
+                if obs.mpicomm is None or obs.mpicomm.rank == 0:
+                    msg = "Observation {} has no HWP angle - not including in response".format(
+                        obs.name
+                    )
+                    log.verbose(msg)
+
+            # Get the flags if needed.  Use a copy so that we do not modify
+            # the cached flags in place.
+            flags = None
+            if self.config["flags"] is not None:
+                flags = obs.get_common_flags(keyname=self.config["flags"])
+                flags = flags & self.config["flag_mask"]
+
+            # Boresight pointing quaternions
+            boresight = obs.boresight_radec
+
+            # Focalplane for this observation
+            focalplane = obs.telescope.focalplane
+
+            # Optional calibration
+            cal = None
+            if self.config["cal"] is not None:
+                cal = obs[self.config["cal"]]
+
+            # Create output data for the pixels, weights and optionally the
+            # detector quaternions.
+
+            if self.config["single_precision"]:
+                obs.create_detector_data(
+                    self.config["pixels"],
+                    shape=(n_samp,),
+                    dtype=np.int32,
+                    detectors=dets,
+                )
+                obs.create_detector_data(
+                    self.config["weights"],
+                    shape=(n_samp, self._nnz),
+                    dtype=np.float32,
+                    detectors=dets,
+                )
+            else:
+                obs.create_detector_data(
+                    self.config["pixels"],
+                    shape=(n_samp,),
+                    dtype=np.int64,
+                    detectors=dets,
+                )
+                obs.create_detector_data(
+                    self.config["weights"],
+                    shape=(n_samp, self._nnz),
+                    dtype=np.float64,
+                    detectors=dets,
+                )
+
+            if self.config["quats"] is not None:
+                obs.create_detector_data(
+                    self.config["quats"],
+                    shape=(n_samp, 4),
+                    dtype=np.float64,
+                    detectors=dets,
+                )
+
+            for det in dets:
+                props = focalplane[det]
+
+                # Get the cross polar response from the focalplane
+                epsilon = 0.0
+                if "pol_leakage" in props:
+                    epsilon = props["pol_leakage"]
+
+                # Detector quaternion offset from the boresight
+                detquat = props["quat"]
+
+                # Timestream of detector quaternions
+                quats = qa.mult(boresight, detquat)
+                if self.config["quats"] is not None:
+                    obs[self.config["quats"]][det][:] = quats
+
+                # Cal for this detector
+                dcal = 1.0
+                if cal is not None:
+                    dcal = cal[det]
+
+                # Buffered pointing calculation
+                buf_off = 0
+                buf_n = tod_buffer_length
+                while buf_off < n_samp:
+                    if buf_off + buf_n > n_samp:
+                        buf_n = n_samp - buf_off
+                    bslice = slice(buf_off, buf_off + buf_n)
+
+                    # This buffer of detector quaternions
+                    detp = quats[bslice, :].reshape(-1)
+
+                    # Buffer of HWP angle
+                    hslice = None
+                    if hwpang is not None:
+                        hslice = hwpang[bslice].reshape(-1)
+
+                    # Buffer of flags
+                    fslice = None
+                    if flags is not None:
+                        fslice = flags[bslice].reshape(-1)
+
+                    # Pixel and weight buffers
+                    pxslice = obs[self.config["pixels"]][det][bslice].reshape(-1)
+                    wtslice = obs[self.config["weights"]][det][bslice].reshape(-1)
+
+                    # The kernel computes into 64bit buffers; for single
+                    # precision output we use temporaries and convert after.
+                    pbuf = pxslice
+                    wbuf = wtslice
+                    if self.config["single_precision"]:
+                        pbuf = np.zeros(len(pxslice), dtype=np.int64)
+                        wbuf = np.zeros(len(wtslice), dtype=np.float64)
+
+                    pointing_matrix_healpix(
+                        self.hpix,
+                        self.config["nest"],
+                        epsilon,
+                        dcal,
+                        self.config["mode"],
+                        detp,
+                        hslice,
+                        fslice,
+                        pbuf,
+                        wbuf,
+                    )
+
+                    if self.config["single_precision"]:
+                        pxslice[:] = pbuf.astype(np.int32)
+                        wtslice[:] = wbuf.astype(np.float32)
+
+                    buf_off += buf_n
+
+                if self.config["create_dist"] is not None:
+                    self._local_submaps[
+                        obs[self.config["pixels"]][det] // self._n_pix_submap
+                    ] = True
return + + def finalize(self, data): + """Perform any final operations / communication. + + Args: + data (toast.Data): The distributed data. + + Returns: + (PixelDistribution): Return the final submap distribution or None. + + """ + # Optionally return the submap distribution + if self.config["create_dist"] is not None: + submaps = None + if self.config["single_precision"]: + submaps = np.arange(self._n_submap, dtype=np.int32)[self._local_submaps] + else: + submaps = np.arange(self._n_submap, dtype=np.int64)[self._local_submaps] + data[self.config["create_dist"]] = PixelDistribution( + n_pix=self._n_pix, + n_submap=self._n_submap, + local_submaps=submaps, + comm=data.comm.comm_world, + ) + return + + def requires(self): + """List of Observation keys directly used by this Operator. + """ + req = ["BORESIGHT_RADEC", "HWP_ANGLE"] + if self.config["flags"] is not None: + req.append(self.config["flags"]) + if self.config["cal"] is not None: + req.append(self.config["cal"]) + return req + + def provides(self): + """List of Observation keys generated by this Operator. + """ + prov = [self.config["pixels"], self.config["weights"]] + if self.config["quats"] is not None: + prov.append(self.config["quats"]) + return prov + + def accelerators(self): + """List of accelerators supported by this Operator. + """ + return list() diff --git a/src/toast/future_ops/sim_ground.py b/src/toast/future_ops/sim_ground.py new file mode 100644 index 000000000..1c4d0295a --- /dev/null +++ b/src/toast/future_ops/sim_ground.py @@ -0,0 +1,287 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +from scipy.constants import degree + +import healpy as hp + +from .. import qarray as qa + +from ..utils import Environment, name_UID, Logger, rate_from_times + +from ..dist import distribute_uniform + +from ..timing import function_timer, Timer + +from ..tod import Interval, TOD, regular_intervals, AnalyticNoise + +from ..operator import Operator + +from ..observation import Observation + +from ..config import ObjectConfig + +from ..instrument import Telescope + +from ..healpix import ang2vec + +from .sim_hwp import simulate_hwp_angle + + +class SimGround(Operator): + """Simulate a generic ground-based telescope. + + This uses an observing schedule to simulate observations of a ground based + telescope. + + Args: + config (dict): Configuration parameters. + + """ + + def __init__(self, config): + super().__init__(config) + self._parse() + + @classmethod + def defaults(cls): + """(Class method) Return options supported by the operator and their defaults. + + This returns an ObjectConfig instance, and each entry should have a help + string. + + Returns: + (ObjectConfig): The options. 
+ + """ + opts = ObjectConfig() + + opts.add("class", "toast.future_ops.SimGround", "The class name") + + opts.add("API", 0, "(Internal interface version for this operator)") + + opts.add("telescope", None, "This should be an instance of a Telescope") + + opts.add("start_time", 0.0, "The mission start time in seconds") + + opts.add("hwp_rpm", None, "The rate (in RPM) of the HWP rotation") + + opts.add( + "hwp_step_deg", None, "For stepped HWP, the angle in degrees of each step" + ) + + opts.add( + "hwp_step_time_m", + None, + "For stepped HWP, the time in minutes between steps", + ) + + boresight_angle = (0,) + firsttime = (0.0,) + rate = (100.0,) + site_lon = (0,) + site_lat = (0,) + site_alt = (0,) + el = (None,) + azmin = (None,) + azmax = (None,) + el_nod = (None,) + start_with_elnod = (True,) + end_with_elnod = (False,) + scanrate = (1,) + scanrate_el = (None,) + scan_accel = (0.1,) + scan_accel_el = (None,) + CES_start = (None,) + CES_stop = (None,) + el_min = (0,) + sun_angle_min = (90,) + sampsizes = (None,) + sampbreaks = (None,) + coord = ("C",) + report_timing = (True,) + hwprpm = (None,) + hwpstep = (None,) + hwpsteptime = (None,) + cosecant_modulation = (False,) + + return opts + + def _parse(self): + if "telescope" not in self.config: + raise RuntimeError("Satellite simulations require a telescope") + try: + dets = self.config["telescope"].focalplane.detectors + except: + raise RuntimeError("'telescope' option should be an instance of Telescope") + if "start_time" not in self.config: + self.config["start_time"] = 0.0 + if "observation_time_h" not in self.config: + raise RuntimeError("Time span of each observation must be specified") + if "gap_time_h" not in self.config: + self.config["gap_time_h"] = 0.0 + if "n_observation" not in self.config: + raise RuntimeError("Number of observations must be specified") + + def exec(self, data, detectors=None): + """Create observations containing simulated satellite pointing. + + Observations will be appended to the Data object. + + Args: + data (toast.Data): The distributed data. + detectors (list): A list of detector names or indices. If None, this + indicates a list of all detectors. + + Returns: + None + + """ + log = Logger.get() + focalplane = self.config["telescope"].focalplane + comm = data.comm + + # List of detectors in this pipeline + pipedets = list() + for d in focalplane.detectors: + if d not in detectors: + continue + pipedets.append(d) + + if comm.group_size > len(pipedets): + if comm.world_rank == 0: + log.error("process group is too large for the number of detectors") + comm.comm_world.Abort() + + # Distribute the observations uniformly among groups + + groupdist = distribute_uniform(self.config["n_observation"], comm.ngroups) + + # Compute global time and sample ranges of all observations + + obsrange = regular_intervals( + self.config["n_observation"], + self.config["start_time"], + 0, + focalplane.sample_rate, + 3600 * self.config["observation_time_h"], + 3600 * self.config["gap_time_h"], + ) + + # Every process group creates its observations + + group_firstobs = groupdist[comm.group][0] + group_numobs = groupdist[comm.group][1] + + for ob in range(group_firstobs, group_firstobs + group_numobs): + obname = "science_{:05d}".format(ob) + obs = Observation( + self.config["telescope"], + name=obname, + UID=name_UID(obname), + samples=obsrange[ob].samples, + detector_ranks=comm.group_size, + mpicomm=comm.comm_group, + ) + + # Create standard shared objects. 
+ + obs.create_times() + obs.create_common_flags() + + # Rank zero of each grid column creates the data + stamps = None + if obs.grid_comm_col is None or obs.grid_comm_col.rank == 0: + start_abs = obs.local_samples[0] + obsrange[ob].first + start_time = ( + obsrange[ob].start + float(start_abs) / focalplane.sample_rate + ) + stop_time = ( + start_time + float(obs.local_samples[1]) / focalplane.sample_rate + ) + stamps = np.linspace( + start_time, + stop_time, + num=obs.local_samples[1], + endpoint=False, + dtype=np.float64, + ) + obs.times().set(stamps, offset=(0,), fromrank=0) + + # Create boresight + start_abs = obs.local_samples[0] + obsrange[ob].first + degday = 360.0 / 365.25 + + q_prec = None + if obs.grid_comm_col is None or obs.grid_comm_col.rank == 0: + q_prec = slew_precession_axis( + first_samp=start_abs, + n_samp=obs.local_samples[1], + sample_rate=focalplane.sample_rate, + deg_day=degday, + ) + + satellite_scanning( + obs, + sample_offset=start_abs, + q_prec=q_prec, + spin_period_m=self.config["spin_period_m"], + spin_angle_deg=self.config["spin_angle_deg"], + prec_period_m=self.config["prec_period_m"], + prec_angle_deg=self.config["prec_angle_deg"], + ) + + # Set HWP angle + + simulate_hwp_angle( + obs, + obsrange[ob].start, + self.config["hwp_rpm"], + self.config["hwp_step_deg"], + self.config["hwp_step_time_m"], + ) + + data.obs.append(obs) + + return + + def finalize(self, data): + """Perform any final operations / communication. + + This calls the finalize() method on all operators in sequence. + + Args: + data (toast.Data): The distributed data. + + Returns: + None + + """ + return + + def requires(self): + """List of Observation keys directly used by this Operator. + """ + return list() + + def provides(self): + """List of Observation keys generated by this Operator. + """ + prov = [ + "TIMESTAMPS", + "BORESIGHT_RADEC", + "BORESIGHT_RESPONSE", + "COMMON_FLAGS", + "HWP_ANGLE", + "POSITION", + "VELOCITY", + ] + return prov + + def accelerators(self): + """List of accelerators supported by this Operator. + """ + return list() diff --git a/src/toast/future_ops/sim_hwp.py b/src/toast/future_ops/sim_hwp.py new file mode 100644 index 000000000..3086c84a8 --- /dev/null +++ b/src/toast/future_ops/sim_hwp.py @@ -0,0 +1,98 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +from ..timing import function_timer, Timer + + +@function_timer +def simulate_hwp_angle( + obs, obs_key, hwp_start_s, hwp_rpm, hwp_step_deg, hwp_step_time_m +): + """Simulate and store the HWP angle for one observation. + + Args: + obs (Observation): The observation to populate. + obs_key (str): The observation key for the HWP angle. + hwp_start_s (float): The mission starting time in seconds of the HWP rotation. + hwp_rpm (float): The HWP rotation rate in Revolutions Per Minute. + hwp_step_deg (float): The HWP step size in degrees. + hwp_step_time_m (float): The time in minutes between steps. + + Returns: + None + + """ + if hwp_rpm is None and hwp_step_deg is None: + # Nothing to do! 
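+        # (neither a continuously rotating nor a stepped HWP was requested)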
+        return
+
+    if (hwp_rpm is not None) and (hwp_step_deg is not None):
+        raise RuntimeError("choose either continuously rotating or stepped HWP")
+
+    if hwp_step_deg is not None and hwp_step_time_m is None:
+        raise RuntimeError("for a stepped HWP, you must specify the time between steps")
+
+    # Compute the effective sample rate from the timestamps
+    times = obs.times
+    dt = np.mean(times[1:] - times[:-1])
+    rate = 1.0 / dt
+
+    hwp_rate = None
+    hwp_step = None
+    hwp_step_time = None
+
+    if hwp_rpm is not None:
+        # convert to radians / second
+        hwp_rate = hwp_rpm * 2.0 * np.pi / 60.0
+
+    if hwp_step_deg is not None:
+        # convert to radians and seconds
+        hwp_step = hwp_step_deg * np.pi / 180.0
+        hwp_step_time = hwp_step_time_m * 60.0
+
+    first_samp, n_samp = obs.local_samples
+
+    obs.shared.create(
+        obs_key, shape=(n_samp,), dtype=np.float64, comm=obs.grid_comm_col
+    )
+
+    # Only the first process in each grid column simulates the common HWP angle
+
+    start_sample = int(hwp_start_s * rate)
+    hwp_angle = None
+
+    if obs.grid_comm_col is None or obs.grid_comm_col.rank == 0:
+        if hwp_rate is not None:
+            # continuous HWP
+            # HWP increment per sample is (hwp_rate / sample_rate)
+            hwpincr = hwp_rate / rate
+            startang = np.fmod((start_sample + first_samp) * hwpincr, 2 * np.pi)
+            hwp_angle = hwpincr * np.arange(n_samp, dtype=np.float64)
+            hwp_angle += startang
+        elif hwp_step is not None:
+            # stepped HWP
+            hwp_angle = np.ones(n_samp, dtype=np.float64)
+            stepsamples = int(hwp_step_time * rate)
+            wholesteps = int((start_sample + first_samp) / stepsamples)
+            remsamples = (start_sample + first_samp) - wholesteps * stepsamples
+            curang = np.fmod(wholesteps * hwp_step, 2 * np.pi)
+            curoff = 0
+            fill = remsamples
+            while curoff < n_samp:
+                if curoff + fill > n_samp:
+                    fill = n_samp - curoff
+                hwp_angle[curoff : curoff + fill] *= curang
+                curang += hwp_step
+                curoff += fill
+                fill = stepsamples
+
+    if hwp_angle is not None:
+        # Wrap the HWP angle to [0, 2*pi)
+        hwp_angle %= 2 * np.pi
+
+    obs.shared[obs_key].set(hwp_angle, offset=(0,), fromrank=0)
+
+    return
diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py
new file mode 100644
index 000000000..63f756521
--- /dev/null
+++ b/src/toast/future_ops/sim_satellite.py
@@ -0,0 +1,543 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import numpy as np
+
+from scipy.constants import degree
+
+import traitlets
+
+import healpy as hp
+
+from astropy import units as u
+
+from .. import qarray as qa
+
+from ..utils import Environment, name_UID, Logger, rate_from_times
+
+from ..dist import distribute_uniform
+
+from ..timing import function_timer, Timer
+
+from ..tod import Interval, TOD, regular_intervals, AnalyticNoise
+
+from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity
+
+from ..operator import Operator
+
+from ..observation import Observation
+
+from ..instrument import Telescope
+
+from ..healpix import ang2vec
+
+from .sim_hwp import simulate_hwp_angle
+
+
+@function_timer
+def slew_precession_axis(first_samp=0, n_samp=None, sample_rate=None, deg_day=None):
+    """Generate quaternions for a constantly slewing precession axis.
+
+    This constructs quaternions which rotate the Z coordinate axis into the
+    X/Y plane, and then slowly slew that axis.  This can be used to generate
+    quaternions for the precession axis used in satellite scanning
+    simulations.
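+
+    For example, slewing at 360 / 365.25 degrees per day keeps the precession
+    axis near the anti-sun direction over a full year; this is the rate used
+    by the SimSatellite operator below.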
+ + Args: + first_samp (int): The offset in samples from the start + of rotation. + n_samp (int): The number of samples to simulate. + sample_rate (float): The sampling rate in Hz. + deg_day (float): The rotation rate in degrees per day. + + """ + env = Environment.get() + tod_buffer_length = env.tod_buffer_length() + + zaxis = np.array([0.0, 0.0, 1.0]) + + # this is the increment in radians per sample + angincr = deg_day * (np.pi / 180.0) / (24.0 * 3600.0 * sample_rate) + + result = np.zeros((n_samp, 4), dtype=np.float64) + + # Compute the time-varying quaternions representing the rotation + # from the coordinate frame to the precession axis frame. The + # angle of rotation is fixed (PI/2), but the axis starts at the Y + # coordinate axis and sweeps. + + buf_off = 0 + buf_n = tod_buffer_length + while buf_off < n_samp: + if buf_off + buf_n > n_samp: + buf_n = n_samp - buf_off + bslice = slice(buf_off, buf_off + buf_n) + + satang = np.arange(buf_n, dtype=np.float64) + satang *= angincr + satang += angincr * (buf_off + first_samp) + # satang += angincr * firstsamp + (np.pi / 2) + + cang = np.cos(satang) + sang = np.sin(satang) + + # this is the time-varying rotation axis + sataxis = np.concatenate( + (cang.reshape(-1, 1), sang.reshape(-1, 1), np.zeros((buf_n, 1))), axis=1 + ) + + result[bslice, :] = qa.from_vectors( + np.tile(zaxis, buf_n).reshape(-1, 3), sataxis + ) + buf_off += buf_n + + return result + + +@function_timer +def satellite_scanning( + obs, + obs_key, + sample_offset=0, + q_prec=None, + spin_period_m=1.0, + spin_angle_deg=85.0, + prec_period_m=0.0, + prec_angle_deg=0.0, +): + """Generate boresight quaternions for a generic satellite. + + Given scan strategy parameters and the relevant angles + and rates, generate an array of quaternions representing + the rotation of the ecliptic coordinate axes to the + boresight. + + Args: + obs (Observation): The observation to populate. + obs_key (str): The observation shared key to create. + sample_offset (int): The global offset in samples from the start + of the mission. + q_prec (ndarray): If None (the default), then the + precession axis will be fixed along the + X axis. If a 1D array of size 4 is given, + This will be the fixed quaternion used + to rotate the Z coordinate axis to the + precession axis. If a 2D array of shape + (nsim, 4) is given, this is the time-varying + rotation of the Z axis to the precession axis. + spin_period_m (float): The period (in minutes) of the + rotation about the spin axis. + spin_angle_deg (float): The opening angle (in degrees) + of the boresight from the spin axis. + prec_period_m (float): The period (in minutes) of the + rotation about the precession axis. + prec_angle_deg (float): The opening angle (in degrees) + of the spin axis from the precession axis. 
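+
+    The boresight for each sample is built below as the quaternion product
+    satrot * precrot * precopen * spinrot * spinopen, applied right to left.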
+ + """ + env = Environment.get() + tod_buffer_length = env.tod_buffer_length() + + first_samp, n_samp = obs.local_samples + obs.shared.create(obs_key, shape=(n_samp, 4), dtype=np.float64, comm=obs.comm_col) + + # Temporary buffer + boresight = None + + # Only the first process in each grid column simulates the shared boresight data + + if obs.comm_col_rank == 0: + boresight = np.zeros((n_samp, 4), dtype=np.float64) + + # Compute effective sample rate + (sample_rate, dt, _, _, _) = rate_from_times(obs.shared["times"]) + + spin_rate = None + if spin_period_m > 0.0: + spin_rate = 1.0 / (60.0 * spin_period_m) + else: + spin_rate = 0.0 + spin_angle = spin_angle_deg * np.pi / 180.0 + + prec_rate = None + if prec_period_m > 0.0: + prec_rate = 1.0 / (60.0 * prec_period_m) + else: + prec_rate = 0.0 + prec_angle = prec_angle_deg * np.pi / 180.0 + + xaxis = np.array([1, 0, 0], dtype=np.float64) + yaxis = np.array([0, 1, 0], dtype=np.float64) + zaxis = np.array([0, 0, 1], dtype=np.float64) + + if q_prec is not None: + if (q_prec.shape[0] != n_samp) or (q_prec.shape[1] != 4): + raise RuntimeError("q_prec array has wrong dimensions") + + buf_off = 0 + buf_n = tod_buffer_length + while buf_off < n_samp: + if buf_off + buf_n > n_samp: + buf_n = n_samp - buf_off + bslice = slice(buf_off, buf_off + buf_n) + + satrot = np.empty((buf_n, 4), np.float64) + if q_prec is None: + # in this case, we just have a fixed precession axis, pointing + # along the ecliptic X axis. + satrot[:, :] = np.tile( + qa.rotation(np.array([0.0, 1.0, 0.0]), np.pi / 2), buf_n + ).reshape(-1, 4) + elif q_prec.flatten().shape[0] == 4: + # we have a fixed precession axis. + satrot[:, :] = np.tile(q_prec.flatten(), buf_n).reshape(-1, 4) + else: + # we have full vector of quaternions + satrot[:, :] = q_prec[bslice, :] + + # Time-varying rotation about precession axis. + # Increment per sample is + # (2pi radians) X (precrate) / (samplerate) + # Construct quaternion from axis / angle form. + + # print("satrot = ", satrot[-1]) + + precang = np.arange(buf_n, dtype=np.float64) + precang += float(buf_off + first_samp + sample_offset) + precang *= prec_rate / sample_rate + precang = 2.0 * np.pi * (precang - np.floor(precang)) + + cang = np.cos(0.5 * precang) + sang = np.sin(0.5 * precang) + + precaxis = np.multiply( + sang.reshape(-1, 1), np.tile(zaxis, buf_n).reshape(-1, 3) + ) + + precrot = np.concatenate((precaxis, cang.reshape(-1, 1)), axis=1) + + # Rotation which performs the precession opening angle + precopen = qa.rotation(np.array([1.0, 0.0, 0.0]), prec_angle) + + # Time-varying rotation about spin axis. Increment + # per sample is + # (2pi radians) X (spinrate) / (samplerate) + # Construct quaternion from axis / angle form. 
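+        # (for rotation axis z and angle a, the quaternion components are
+        # [sin(a/2) * z, cos(a/2)], which is what the concatenations below
+        # construct)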
+ + spinang = np.arange(buf_n, dtype=np.float64) + spinang += float(buf_off + first_samp + sample_offset) + spinang *= spin_rate / sample_rate + spinang = 2.0 * np.pi * (spinang - np.floor(spinang)) + + cang = np.cos(0.5 * spinang) + sang = np.sin(0.5 * spinang) + + spinaxis = np.multiply( + sang.reshape(-1, 1), np.tile(zaxis, buf_n).reshape(-1, 3) + ) + + spinrot = np.concatenate((spinaxis, cang.reshape(-1, 1)), axis=1) + + # Rotation which performs the spin axis opening angle + + spinopen = qa.rotation(np.array([1.0, 0.0, 0.0]), spin_angle) + + # compose final rotation + + boresight[bslice, :] = qa.mult( + satrot, qa.mult(precrot, qa.mult(precopen, qa.mult(spinrot, spinopen))) + ) + buf_off += buf_n + + obs.shared[obs_key].set(boresight, offset=(0, 0), fromrank=0) + + return + + +@trait_docs +class SimSatellite(Operator): + """Simulate a generic satellite motion. + + This simulates satellite pointing in regular intervals ("science scans") that + may have some gaps in between for cooler cycles or other events. The precession + axis (anti-sun direction) is continuously slewed. + + """ + + # Class traits + + API = traitlets.Int(0, help="Internal interface version for this operator") + + telescope = Instance( + klass=Telescope, allow_none=True, help="This must be an instance of a Telescope" + ) + + start_time = Quantity(0.0 * u.second, help="The mission start time") + + observation_time = Quantity(0.1 * u.hour, help="The time span for each observation") + + gap_time = Quantity(0.0 * u.hour, help="The gap between each observation") + + n_observation = Int(1, help="The number of observations to simulate") + + spin_period = Quantity( + 10.0 * u.minute, help="The period of the rotation about the spin axis" + ) + + spin_angle = Quantity( + 30.0 * u.degree, help="The opening angle of the boresight from the spin axis" + ) + + prec_period = Quantity( + 50.0 * u.minute, help="The period of the rotation about the precession axis" + ) + + prec_angle = Quantity( + 65.0 * u.degree, + help="The opening angle of the spin axis from the precession axis", + ) + + hwp_rpm = Float(None, allow_none=True, help="The rate (in RPM) of the HWP rotation") + + hwp_step = Quantity( + None, allow_none=True, help="For stepped HWP, the angle of each step" + ) + + hwp_step_time = Quantity( + None, allow_none=True, help="For stepped HWP, the time between steps" + ) + + distribute_time = Bool( + False, + help="Distribute observation data along the time axis rather than detector axis", + ) + + times = Unicode("times", help="Observation shared key for timestamps") + + flags = Unicode("flags", help="Observation shared key for common flags") + + hwp = Unicode("hwp_angle", help="Observation shared key for HWP angle") + + boresight = Unicode("boresight_radec", help="Observation shared key for boresight") + + position = Unicode("position", help="Observation shared key for position") + + velocity = Unicode("velocity", help="Observation shared key for velocity") + + @traitlets.validate("telescope") + def _check_telescope(self, proposal): + tele = proposal["value"] + if tele is not None: + try: + dets = tele.focalplane.detectors + except Exception: + raise traitlets.TraitError( + "telescope must be a Telescope instance with a focalplane" + ) + return tele + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._AU = 149597870.7 + self._radperday = 0.01720209895 + self._radpersec = self._radperday / 86400.0 + self._earthspeed = self._radpersec * self._AU + + def _exec(self, data, detectors=None, **kwargs): + log = 
Logger.get() + if self.telescope is None: + raise RuntimeError( + "The telescope attribute must be set before calling exec()" + ) + focalplane = self.telescope.focalplane + comm = data.comm + + # List of detectors in this pipeline + pipedets = None + if detectors is None: + pipedets = focalplane.detectors + else: + pipedets = list() + for det in focalplane.detectors: + if det in detectors: + pipedets.append(det) + + if comm.group_size > len(pipedets): + if comm.world_rank == 0: + log.error("process group is too large for the number of detectors") + comm.comm_world.Abort() + + # Distribute the observations uniformly among groups + + groupdist = distribute_uniform(self.n_observation, comm.ngroups) + + # Compute global time and sample ranges of all observations + + obsrange = regular_intervals( + self.n_observation, + self.start_time.to_value(u.second), + 0, + focalplane.sample_rate, + self.observation_time.to_value(u.second), + self.gap_time.to_value(u.second), + ) + + det_ranks = comm.group_size + if self.distribute_time: + det_ranks = 1 + + # Every process group creates its observations + + radinc = self._radpersec / focalplane.sample_rate + + group_firstobs = groupdist[comm.group][0] + group_numobs = groupdist[comm.group][1] + + for ob in range(group_firstobs, group_firstobs + group_numobs): + obname = "science_{:05d}".format(ob) + obs = Observation( + self.telescope, + obsrange[ob].samples, + name=obname, + UID=name_UID(obname), + comm=comm.comm_group, + process_rows=det_ranks, + ) + + # Create shared objects for timestamps, common flags, position, + # and velocity. + obs.shared.create( + self.times, + shape=(obs.n_local,), + dtype=np.float64, + comm=obs.comm_col, + ) + obs.shared.create( + self.flags, + shape=(obs.n_local,), + dtype=np.uint8, + comm=obs.comm_col, + ) + obs.shared.create( + self.position, + shape=(obs.n_local, 3), + dtype=np.float64, + comm=obs.comm_col, + ) + obs.shared.create( + self.velocity, + shape=(obs.n_local, 3), + dtype=np.float64, + comm=obs.comm_col, + ) + + # Rank zero of each grid column creates the data + stamps = None + position = None + velocity = None + if obs.comm_col_rank == 0: + start_abs = obs.offset + obsrange[ob].first + start_time = ( + obsrange[ob].start + float(start_abs) / focalplane.sample_rate + ) + stop_time = start_time + float(obs.n_local) / focalplane.sample_rate + stamps = np.linspace( + start_time, + stop_time, + num=obs.n_local, + endpoint=False, + dtype=np.float64, + ) + # For this simple class, assume that the Earth is located + # along the X axis at time == 0.0s. We also just use the + # mean values for distance and angular speed. Classes for + # real experiments should obviously use ephemeris data. 
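+                # The orbital phase advances linearly at radpersec; wrap it to
+                # [0, 2*pi) and place the satellite at AU * (cos, sin, 0), with
+                # the velocity 90 degrees ahead in phase.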
+                rad = np.fmod(
+                    (start_time - self.start_time.to_value(u.second)) * self._radpersec,
+                    2.0 * np.pi,
+                )
+                ang = radinc * np.arange(obs.n_local, dtype=np.float64) + rad
+                x = self._AU * np.cos(ang)
+                y = self._AU * np.sin(ang)
+                z = np.zeros_like(x)
+                position = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3))
+
+                ang = (
+                    radinc * np.arange(obs.n_local, dtype=np.float64)
+                    + rad
+                    + (0.5 * np.pi)
+                )
+                x = self._earthspeed * np.cos(ang)
+                y = self._earthspeed * np.sin(ang)
+                z = np.zeros_like(x)
+                velocity = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3))
+
+            obs.shared[self.times].set(stamps, offset=(0,), fromrank=0)
+            obs.shared[self.position].set(position, offset=(0, 0), fromrank=0)
+            obs.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0)
+
+            # Create boresight pointing
+            start_abs = obs.offset + obsrange[ob].first
+            degday = 360.0 / 365.25
+
+            q_prec = None
+            if obs.comm_col_rank == 0:
+                q_prec = slew_precession_axis(
+                    first_samp=start_abs,
+                    n_samp=obs.n_local,
+                    sample_rate=focalplane.sample_rate,
+                    deg_day=degday,
+                )
+
+            satellite_scanning(
+                obs,
+                self.boresight,
+                sample_offset=start_abs,
+                q_prec=q_prec,
+                spin_period_m=self.spin_period.to_value(u.minute),
+                spin_angle_deg=self.spin_angle.to_value(u.degree),
+                prec_period_m=self.prec_period.to_value(u.minute),
+                prec_angle_deg=self.prec_angle.to_value(u.degree),
+            )
+
+            # Set HWP angle
+
+            hwp_step_deg = None
+            hwp_step_time_m = None
+            if self.hwp_step is not None:
+                hwp_step_deg = self.hwp_step.to_value(u.degree)
+                hwp_step_time_m = self.hwp_step_time.to_value(u.minute)
+            simulate_hwp_angle(
+                obs,
+                self.hwp,
+                obsrange[ob].start,
+                self.hwp_rpm,
+                hwp_step_deg,
+                hwp_step_time_m,
+            )
+
+            data.obs.append(obs)
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        return
+
+    def _requires(self):
+        return dict()
+
+    def _provides(self):
+        return {
+            "shared": [
+                self.times,
+                self.flags,
+                self.boresight,
+                self.hwp,
+                self.position,
+                self.velocity,
+            ]
+        }
+
+    def _accelerators(self):
+        return list()
diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/future_ops/sim_tod_noise.py
new file mode 100644
index 000000000..986a25231
--- /dev/null
+++ b/src/toast/future_ops/sim_tod_noise.py
@@ -0,0 +1,222 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import numpy as np
+
+from ..timing import function_timer
+
+from ..fft import FFTPlanReal1DStore
+
+from ..tod.tod_math import sim_noise_timestream
+
+from ..operator import Operator
+
+from ..config import ObjectConfig
+
+from ..utils import rate_from_times, Logger
+
+
+class SimNoise(Operator):
+    """Operator which generates noise timestreams.
+
+    This passes through each observation and every process generates data
+    for its assigned samples.  The observation unique ID is used in the random
+    number generation.  The observation dictionary can optionally include a
+    'global_offset' member that might be useful if you are splitting
+    observations and want to enforce reproducibility of a given sample, even
+    when using different-sized observations.
+
+    Args:
+        config (dict): Configuration parameters.
+
+    """
+
+    def __init__(self, config):
+        super().__init__(config)
+        self._parse()
+        self._oversample = 2
+
+    @classmethod
+    def defaults(cls):
+        """(Class method) Return options supported by the operator and their defaults.
+ + This returns an ObjectConfig instance, and each entry should have a help + string. + + Returns: + (ObjectConfig): The options. + + """ + opts = ObjectConfig() + + opts.add("class", "toast.future_ops.SimNoise", "The class name") + + opts.add("API", 0, "(Internal interface version for this operator)") + + opts.add("out", None, "The name of the output signal") + + opts.add("realization", 0, "The realization index") + + opts.add("component", 0, "The component index") + + opts.add( + "noise", + "noise", + "The observation key containing the noise model to use for simulations", + ) + + return opts + + def _parse(self): + if self.config["realization"] < 0 or self.config["component"] < 0: + raise RuntimeError("realization and component indices should be positive") + if self.config["out"] is None: + self.config["out"] = "SIGNAL" + + @function_timer + def exec(self, data, detectors=None): + """Generate noise timestreams. + + This iterates over all observations and detectors and generates + the noise timestreams based on the noise object for the current + observation. + + Args: + data (toast.Data): The distributed data. + detectors (list): A list of detector names or indices. If None, this + indicates a list of all detectors. + + Raises: + KeyError: If an observation does not contain the noise or output + signal keys. + + """ + log = Logger.get() + for obs in data.obs: + # Get the detectors we are using for this observation + dets = obs.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + + # Unique observation ID + obsindx = obs.UID + + # FIXME: we should unify naming of UID / id. + telescope = obs.telescope.id + + # FIXME: Every observation has a set of timestamps. This global + # offset is specified separately so that opens the possibility for + # inconsistency. Perhaps the global_offset should be made a property + # of the Observation class? + global_offset = 0 + if "global_offset" in obs: + global_offset = obs["global_offset"] + + if self.config["noise"] not in obs: + msg = "Observation does not contain noise key '{}'".format( + self.config["noise"] + ) + log.error(msg) + raise KeyError(msg) + + nse = obs[self.config["noise"]] + + # Eventually we'll redistribute, to allow long correlations... + if obs.grid_size[1] != 1: + msg = "Noise simulation for process grids with multiple ranks in the sample direction not implemented" + log.error(msg) + raise NotImplementedError(msg) + + # The previous code verified that a single process has whole + # detectors within the observation. 
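+            # The generated samples are keyed on the realization, telescope ID,
+            # component, observation UID and the noise model stream index, so
+            # re-running with the same keys reproduces the same timestream.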
+ + # Create output if it does not exist + if self.config["out"] not in obs: + obs.create_detector_data( + self.config["out"], shape=(obs.local_samples[1],), dtype=np.float64 + ) + + (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(obs.times) + + for key in nse.keys: + # Check if noise matching this PSD key is needed + weight = 0.0 + for det in dets: + weight += np.abs(nse.weight(det, key)) + if weight == 0: + continue + + # Simulate the noise matching this key + nsedata = sim_noise_timestream( + self.config["realization"], + telescope, + self.config["component"], + obsindx, + nse.index(key), + rate, + obs.local_samples[0] + global_offset, + obs.local_samples[1], + self._oversample, + nse.freq(key), + nse.psd(key), + ) + + # Add the noise to all detectors that have nonzero weights + for det in dets: + weight = nse.weight(det, key) + if weight == 0: + continue + obs.get_signal(keyname=self.config["out"])[ + obs.local_samples[0] : obs.local_samples[0] + + obs.local_samples[1] + ] += (weight * nsedata) + + # Release the work space allocated in the FFT plan store. + # + # FIXME: the fact that we are doing this begs the question of why bother + # using the plan store at all? Only one plan per process, per FFT length + # should be created. The memory use of these plans should be small relative + # to the other timestream memory use except in the case where: + # + # 1. Each process only has a few detectors + # 2. There is a broad distribution of observation lengths. + # + # If we are in this regime frequently, we should just allocate / free each plan. + store = FFTPlanReal1DStore.get() + store.clear() + + return + + def finalize(self, data): + """Perform any final operations / communication. + + This calls the finalize() method on all operators in sequence. + + Args: + data (toast.Data): The distributed data. + + Returns: + None + + """ + return + + def requires(self): + """List of Observation keys directly used by this Operator. + """ + req = [self.config["noise"]] + return req + + def provides(self): + """List of Observation keys generated by this Operator. + """ + prov = list() + prov.append(self.config["out"]) + return prov + + def accelerators(self): + """List of accelerators supported by this Operator. + """ + return list() diff --git a/src/toast/instrument.py b/src/toast/instrument.py new file mode 100644 index 000000000..612af7776 --- /dev/null +++ b/src/toast/instrument.py @@ -0,0 +1,278 @@ +# Copyright (c) 2019-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os +import sys + +import numpy as np + +import tomlkit + +from .timing import function_timer, Timer + +from .tod import AnalyticNoise + +from .utils import Logger, Environment, name_UID + +from . import qarray + +# FIXME: This Focalplane class should be much more generic and able to hold common +# metadata as well as per-detector properties. We should inherit from MutableMapping +# to support full dictionary behavior and then have a special key for the +# detector-specific properties. Unfortunately, this will break the API, so should +# be done as part of the 3.0 transition. + + +class Focalplane(object): + """Class representing the focalplane for one observation. + + Args: + detector_data (dict): Dictionary of detector attributes, such + as detector quaternions and noise parameters. + radius_deg (float): force the radius of the focal plane. 
+            Otherwise it will be calculated from the detector offsets.
+        sample_rate (float): The common (nominal) sample rate for all detectors.
+        fname (str): Load the focalplane from this file.
+
+    """
+
+    def __init__(
+        self, detector_data=None, radius_deg=None, sample_rate=None, fname=None
+    ):
+        self.detector_data = None
+        self.sample_rate = None
+        self._radius = None
+        self._detweights = None
+        self._detquats = None
+        self._noise = None
+
+        if fname is not None:
+            raw = None
+            with open(fname, "r") as f:
+                raw = tomlkit.loads(f.read())
+            self.sample_rate = raw["sample_rate"]
+            if "radius" in raw:
+                self._radius = raw["radius"]
+            self.detector_data = raw["detector_data"]
+        else:
+            if detector_data is None:
+                raise RuntimeError(
+                    "If not loading from a file, must specify detector_data"
+                )
+            self.detector_data = detector_data
+            if sample_rate is None:
+                raise RuntimeError(
+                    "If not loading from a file, must specify sample_rate"
+                )
+            self.sample_rate = sample_rate
+            if radius_deg is not None:
+                self._radius = radius_deg
+
+        self._get_pol_angles()
+        self._get_pol_efficiency()
+
+    def _get_pol_angles(self):
+        """Get the detector polarization angles from the quaternions."""
+        for detname, detdata in self.detector_data.items():
+            if "pol_angle_deg" not in detdata and "pol_angle_rad" not in detdata:
+                quat = detdata["quat"]
+                psi = qarray.to_angles(quat)[2]
+                detdata["pol_angle_rad"] = psi
+        return
+
+    def _get_pol_efficiency(self):
+        """Get the polarization efficiency from polarization leakage, or vice versa."""
+        for detname, detdata in self.detector_data.items():
+            if "pol_leakage" in detdata and "pol_efficiency" not in detdata:
+                # Derive efficiency from leakage
+                epsilon = detdata["pol_leakage"]
+                eta = (1 - epsilon) / (1 + epsilon)
+                detdata["pol_efficiency"] = eta
+            elif "pol_leakage" not in detdata and "pol_efficiency" in detdata:
+                # Derive leakage from efficiency
+                eta = detdata["pol_efficiency"]
+                epsilon = (1 - eta) / (1 + eta)
+                detdata["pol_leakage"] = epsilon
+            elif "pol_leakage" not in detdata and "pol_efficiency" not in detdata:
+                # Assume a perfectly polarized detector
+                detdata["pol_efficiency"] = 1
+                detdata["pol_leakage"] = 0
+            else:
+                # Check that efficiency and leakage are consistent
+                epsilon = detdata["pol_leakage"]
+                eta = detdata["pol_efficiency"]
+                np.testing.assert_almost_equal(
+                    eta,
+                    (1 - epsilon) / (1 + epsilon),
+                    err_msg="inconsistent polarization leakage and efficiency",
+                )
+        return
+
+    def __contains__(self, key):
+        return key in self.detector_data
+
+    def __getitem__(self, key):
+        return self.detector_data[key]
+
+    def __setitem__(self, key, value):
+        self.detector_data[key] = value
+        if "UID" not in value:
+            self.detector_data[key]["UID"] = name_UID(key)
+
+    def reset_properties(self):
+        """Clear automatic properties so they will be re-generated."""
+        self._detweights = None
+        self._radius = None
+        self._detquats = None
+        self._noise = None
+
+    @property
+    def detectors(self):
+        return sorted(self.detector_data.keys())
+
+    def keys(self):
+        return self.detectors
+
+    @property
+    def detector_index(self):
+        return {name: props["UID"] for name, props in self.detector_data.items()}
+
+    @property
+    def detector_weights(self):
+        """Return the inverse noise variance weights [K_CMB^-2]."""
+        if self._detweights is None:
+            self._detweights = {}
+            for detname, detdata in self.detector_data.items():
+                net = detdata["NET"]
+                if "fsample" in detdata:
+                    fsample = detdata["fsample"]
+                else:
+                    fsample = self.sample_rate
+                detweight = 1.0 / (fsample * net ** 2)
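+                # NET is expressed in K * sqrt(s), so fsample * NET^2 has units
+                # of K^2 and the weight is in K^-2.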
+                self._detweights[detname] = detweight
+        return self._detweights
+
+    @property
+    def radius(self):
+        """The focal plane radius in degrees."""
+        if self._radius is None:
+            # Find the largest distance from the boresight
+            ZAXIS = np.array([0, 0, 1])
+            cosangs = []
+            for detname, detdata in self.detector_data.items():
+                quat = detdata["quat"]
+                vec = qarray.rotate(quat, ZAXIS)
+                cosangs.append(np.dot(ZAXIS, vec))
+            mincos = np.amin(cosangs)
+            self._radius = np.degrees(np.arccos(mincos))
+            # Add a very small margin to avoid numeric issues
+            # in the atmospheric simulation
+            self._radius *= 1.001
+        return self._radius
+
+    @property
+    def detector_quats(self):
+        if self._detquats is None:
+            self._detquats = {}
+            for detname, detdata in self.detector_data.items():
+                self._detquats[detname] = detdata["quat"]
+        return self._detquats
+
+    @property
+    def noise(self):
+        if self._noise is None:
+            fmin = {}
+            fknee = {}
+            alpha = {}
+            NET = {}
+            rates = {}
+            for detname in self.detectors:
+                detdata = self.detector_data[detname]
+                if "fsample" in detdata:
+                    rates[detname] = detdata["fsample"]
+                else:
+                    rates[detname] = self.sample_rate
+                fmin[detname] = detdata["fmin"]
+                fknee[detname] = detdata["fknee"]
+                alpha[detname] = detdata["alpha"]
+                NET[detname] = detdata["NET"]
+            self._noise = AnalyticNoise(
+                rate=rates,
+                fmin=fmin,
+                detectors=self.detectors,
+                fknee=fknee,
+                alpha=alpha,
+                NET=NET,
+            )
+        return self._noise
+
+    def __repr__(self):
+        value = "<Focalplane: {} detectors, sample_rate = {} Hz>".format(
+            len(self.detector_data), self.sample_rate
+        )
+        return value
+
+
+class Telescope(object):
+    """Class representing the telescope for one observation.
+
+    Args:
+        name (str): The name of the telescope.
+        id (int): The numeric ID of the telescope.
+        focalplane (Focalplane): The focalplane used for this observation.
+        lon (float): The observing site longitude.
+        lat (float): The observing site latitude.
+        alt (float): The observing site altitude.
+        weather (Weather): The weather conditions at the site.
+
+    """
+
+    def __init__(
+        self, name, id=0, focalplane=None, lon=None, lat=None, alt=None, weather=None
+    ):
+        self.name = name
+        self.id = id
+        self.focalplane = focalplane
+        self.lon = lon
+        self.lat = lat
+        self.alt = alt
+        self.weather = weather
+
+    def __repr__(self):
+        value = "<Telescope '{}': ID = {}, lon = {}, lat = {}, alt = {}, weather = {}>".format(
+            self.name, self.id, self.lon, self.lat, self.alt, self.weather
+        )
+        return value
diff --git a/src/toast/instrument_sim.py b/src/toast/instrument_sim.py
new file mode 100644
index 000000000..bab75ee6a
--- /dev/null
+++ b/src/toast/instrument_sim.py
@@ -0,0 +1,649 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import numpy as np
+
+from . import qarray as qa
+
+from .instrument import Focalplane
+
+
+def cartesian_to_quat(offsets):
+    """Convert cartesian angle offsets and rotation into quaternions.
+
+    Focalplane geometries are often described in terms of wafer locations or
+    separations given in simple X/Y angle offsets from a center point.
+    This helper function converts such parameters into a quaternion describing
+    the rotation.
+
+    Args:
+        offsets (list of arrays): each item of the list has 3 elements for
+            the X / Y angle offsets in degrees and the rotation in degrees
+            about the Z axis.
+
+    Returns:
+        (list): list of quaternions for each item in the input list.
+
+    """
+    centers = list()
+    zaxis = np.array([0, 0, 1], dtype=np.float64)
+    for off in offsets:
+        angrot = qa.rotation(zaxis, off[2] * np.pi / 180.0)
+        wx = off[0] * np.pi / 180.0
+        wy = off[1] * np.pi / 180.0
+        wz = np.sqrt(1.0 - (wx * wx + wy * wy))
+        wdir = np.array([wx, wy, wz])
+        posrot = qa.from_vectors(zaxis, wdir)
+        centers.append(qa.mult(posrot, angrot))
+    return centers
+
+
+def hex_nring(npix):
+    """
+    For a hexagonal layout with a given number of pixels, return the
+    number of rings.
+    """
+    test = npix - 1
+    nrings = 1
+    while (test - 6 * nrings) >= 0:
+        test -= 6 * nrings
+        nrings += 1
+    if test != 0:
+        raise RuntimeError(
+            "{} is not a valid number of pixels for a hexagonal layout".format(npix)
+        )
+    return nrings
+
+
+def hex_row_col(npix, pix):
+    """
+    For a hexagonal layout, indexed in a "spiral" scheme (see hex_layout),
+    this function returns the "row" and "column" of a pixel.
+ The row is zero along the main vertex-vertex axis, and is positive + or negative above / below this line of pixels. + """ + if pix >= npix: + raise ValueError("pixel value out of range") + test = npix - 1 + nrings = 1 + while (test - 6 * nrings) >= 0: + test -= 6 * nrings + nrings += 1 + if pix == 0: + row = 0 + col = nrings - 1 + else: + test = pix - 1 + ring = 1 + while (test - 6 * ring) >= 0: + test -= 6 * ring + ring += 1 + sector = int(test / ring) + steps = np.mod(test, ring) + coloff = nrings - ring - 1 + if sector == 0: + row = steps + col = coloff + 2 * ring - steps + elif sector == 1: + row = ring + col = coloff + ring - steps + elif sector == 2: + row = ring - steps + col = coloff + elif sector == 3: + row = -steps + col = coloff + elif sector == 4: + row = -ring + col = coloff + steps + elif sector == 5: + row = -ring + steps + col = coloff + ring + steps + return (row, col) + + +def hex_pol_angles_qu(npix, offset=0.0): + """Generates a vector of detector polarization angles. + + The returned angles can be used to construct a hexagonal detector layout. + This scheme alternates pixels between 0/90 and +/- 45 degrees. + + Args: + npix (int): the number of pixels locations in the hexagon. + offset (float): the constant angle offset in degrees to apply. + + Returns: + (array): The detector polarization angles. + + """ + pol = np.zeros(npix, dtype=np.float64) + for pix in range(npix): + # get the row / col of the pixel + row, col = hex_row_col(npix, pix) + if np.mod(col, 2) == 0: + pol[pix] = 0.0 + offset + else: + pol[pix] = 45.0 + offset + return pol + + +def hex_pol_angles_radial(npix, offset=0.0): + """Generates a vector of detector polarization angles. + + The returned angles can be used to construct a hexagonal detector layout. + This scheme orients the bolometer along the radial direction of the + hexagon. + + Args: + npix (int): the number of pixels locations in the hexagon. + offset (float): the constant angle offset in degrees to apply. + + Returns: + (array): The detector polarization angles. + + """ + sixty = np.pi / 3.0 + thirty = np.pi / 6.0 + pol = np.zeros(npix, dtype=np.float64) + pol[0] = 0.0 + for pix in range(1, npix): + # find ring for this pix + test = pix - 1 + ring = 1 + while (test - 6 * ring) >= 0: + test -= 6 * ring + ring += 1 + sectors = int(test / ring) + sectorsteps = np.mod(test, ring) + midline = 0.5 * np.sqrt(3) * float(ring) + edgedist = float(sectorsteps) - 0.5 * float(ring) + relang = np.arctan2(edgedist, midline) + pol[pix] = (sectors * sixty + thirty + relang) * 180.0 / np.pi + offset + return pol + + +def hex_layout( + npix, angwidth, prefix, suffix, pol, center=np.array([0, 0, 0, 1], dtype=np.float64) +): + """Return detectors in a hexagon layout. + + This maps the physical positions of pixels into angular positions + from the hexagon center. The X axis in the hexagon frame is along + the vertex-to-opposite-vertex direction. The Y axis is along + flat-to-opposite-flat direction. The origin is at the center of + the wafer. For example:: + + Y ^ O O O + | O O O O + | O O + O O + +--> X O O O O + O O O + + Each pixel is numbered 1..npix and each detector is named by the + prefix, the pixel number, and the suffix. The first pixel is at + the center, and then the pixels are numbered moving outward in rings. + + The extent of the hexagon is directly specified by the angwidth + parameter. These, along with the npix parameter, constrain the packing + locations of the pixel centers. + + Args: + npix (int): number of pixels packed onto wafer. 
+ angwidth (float): the angle (in degrees) subtended by the width. + prefix (str): the detector name prefix. + suffix (str): the detector name suffix. + pol (ndarray): 1D array of detector polarization angles. The + rotation is applied to the hexagon center prior to rotation + to the pixel location. + center (ndarray): quaternion offset of the center of the layout. + + Returns: + (dict) A dictionary keyed on detector name, with each value itself a + dictionary of detector properties. + + """ + zaxis = np.array([0, 0, 1], dtype=np.float64) + nullquat = np.array([0, 0, 0, 1], dtype=np.float64) + sixty = np.pi / 3.0 + thirty = np.pi / 6.0 + rtthree = np.sqrt(3.0) + rtthreebytwo = 0.5 * rtthree + + angwidth = angwidth * np.pi / 180.0 + + # compute the diameter (vertex to vertex width) + angdiameter = angwidth / np.cos(thirty) + + # find the angular packing size of one detector + test = npix - 1 + nrings = 1 + while (test - 6 * nrings) >= 0: + test -= 6 * nrings + nrings += 1 + pixdiam = angdiameter / (2 * nrings - 1) + + # convert pol vector to radians + pol *= np.pi / 180.0 + + # number of digits for pixel indexing + + ndigit = 0 + test = npix + while test > 0: + test = test // 10 + ndigit += 1 + + nameformat = "{{}}{{:0{}d}}{{}}".format(ndigit) + + # compute positions of all detectors + + dets = {} + + for pix in range(npix): + dname = nameformat.format(prefix, pix, suffix) + + polrot = qa.rotation(zaxis, pol[pix]) + + # center pixel has no offset + pixrot = nullquat + + if pix != 0: + # Not at the center, find ring for this pix + test = pix - 1 + ring = 1 + while (test - 6 * ring) >= 0: + test -= 6 * ring + ring += 1 + sectors = int(test / ring) + sectorsteps = np.mod(test, ring) + + # Convert angular steps around the ring into the angle and distance + # in polar coordinates. Each "sector" of 60 degrees is essentially + # an equilateral triangle, and each step is equally spaced along the + # the edge opposite the vertex: + # + # O + # O O (step 2) + # O O (step 1) + # X O O O (step 0) + # + # For a given ring, "R" (center is R=0), there are R steps along + # the sector edge. The line from the origin to the opposite edge + # that bisects this triangle has length R*sqrt(3)/2. For each + # equally-spaced step, we use the right triangle formed with this + # bisection line to compute the angle and radius within this sector. + + # the distance from the origin to the midpoint of the opposite side. + midline = rtthreebytwo * float(ring) + + # the distance along the opposite edge from the midpoint (positive + # or negative) + edgedist = float(sectorsteps) - 0.5 * float(ring) + + # the angle relative to the midpoint line (positive or negative) + relang = np.arctan2(edgedist, midline) + + # total angle is based on number of sectors we have and the angle + # within the final sector. + pixang = sectors * sixty + thirty + relang + + pixdist = rtthreebytwo * pixdiam * float(ring) / np.cos(relang) + + pixx = np.sin(pixdist) * np.cos(pixang) + pixy = np.sin(pixdist) * np.sin(pixang) + pixz = np.cos(pixdist) + pixdir = np.array([pixx, pixy, pixz], dtype=np.float64) + norm = np.sqrt(np.dot(pixdir, pixdir)) + pixdir /= norm + + pixrot = qa.from_vectors(zaxis, pixdir) + + dprops = {} + dprops["quat"] = qa.mult(center, qa.mult(pixrot, polrot)) + dprops["polangle_deg"] = pol[pix] + + dets[dname] = dprops + + return dets + + +def rhomb_dim(npix): + """ + For a rhombus layout, return the dimension of one side. 
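+
+    For example, a 16 pixel rhombus wafer has a side dimension of 4.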
+ """ + dim = int(np.sqrt(float(npix))) + if dim ** 2 != npix: + raise ValueError("number of pixels for a rhombus wafer must be square") + return dim + + +def rhomb_row_col(npix, pix): + """ + For a rhombus layout, indexed from top to bottom (see rhombus_layout), + this function returnes the "row" and "column" of a pixel. The column + starts at zero on the left hand side of a row. + """ + if pix >= npix: + raise ValueError("pixel value out of range") + dim = rhomb_dim(npix) + col = pix + rowcnt = 1 + row = 0 + while (col - rowcnt) >= 0: + col -= rowcnt + row += 1 + if row >= dim: + rowcnt -= 1 + else: + rowcnt += 1 + return (row, col) + + +def rhomb_pol_angles_qu(npix, offset=0.0): + """Generates a vector of detector polarization angles. + + The returned angles can be used to construct a rhombus detector layout. + This scheme alternates pixels between 0/90 and +/- 45 degrees. + + Args: + npix (int): the number of pixels locations in the rhombus. + offset (float): the constant angle offset in degrees to apply. + + Returns: + (array): The detector polarization angles. + + """ + pol = np.zeros(npix, dtype=np.float64) + for pix in range(npix): + # get the row / col of the pixel + row, col = rhomb_row_col(npix, pix) + if np.mod(col, 2) == 0: + pol[pix] = 45.0 + offset + else: + pol[pix] = 0.0 + offset + return pol + + +def rhombus_layout( + npix, + angwidth, + prefix, + suffix, + polang, + center=np.array([0, 0, 0, 1], dtype=np.float64), +): + """Return detectors in a rhombus layout. + + This particular rhombus geometry is essentially a third of a + hexagon. In other words the aspect ratio of the rhombus is + constrained to have the long dimension be sqrt(3) times the short + dimension. + + This function maps the physical positions of pixels into angular + positions from the rhombus center. The X axis is along the short + direction. The Y axis is along longer direction. The origin is + at the center of the rhombus. For example:: + + O + Y ^ O O + | O O O + | O O O O + +--> X O O O + O O + O + + Each pixel is numbered 1..npix and each detector is named by the + prefix, the pixel number, and the suffix. The first pixel is at the + "top", and then the pixels are numbered moving downward and left to + right. + + The extent of the rhombus is directly specified by the angwidth parameter. + This, along with the npix parameter, constrain the packing locations of + the pixel centers. + + Args: + npix (int): number of pixels packed onto wafer. + angwidth (float): the angle (in degrees) subtended by the short + dimension. + prefix (str): the detector name prefix. + suffix (str): the detector name suffix. + polang (ndarray): 1D array of detector polarization angles. The + rotation is applied to the hexagon center prior to rotation + to the pixel location. + center (ndarray): quaternion offset of the center of the layout. + + Returns: + (dict): A dictionary keyed on detector name, with each value itself a + dictionary of detector properties. 
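+
+        Each property dictionary contains at least the "quat" key giving the
+        detector quaternion relative to the focalplane center.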
+ + """ + zaxis = np.array([0, 0, 1], dtype=np.float64) + nullquat = np.array([0, 0, 0, 1], dtype=np.float64) + rtthree = np.sqrt(3.0) + + angwidth = angwidth * np.pi / 180.0 + dim = rhomb_dim(npix) + + # compute the height + angheight = rtthree * angwidth + + # find the angular packing size of one detector + pixdiam = angwidth / dim + + # convert pol vector to radians + pol = polang * np.pi / 180.0 + + # number of digits for pixel indexing + + ndigit = 0 + test = npix + while test > 0: + test = test // 10 + ndigit += 1 + + nameformat = "{{}}{{:0{}d}}{{}}".format(ndigit) + + # compute positions of all detectors + + dets = {} + + for pix in range(npix): + dname = nameformat.format(prefix, pix, suffix) + + polrot = qa.rotation(zaxis, pol[pix]) + + pixrow, pixcol = rhomb_row_col(npix, pix) + + rowang = 0.5 * rtthree * ((dim - 1) - pixrow) * pixdiam + relrow = pixrow + if pixrow >= dim: + relrow = (2 * dim - 2) - pixrow + colang = (float(pixcol) - float(relrow) / 2.0) * pixdiam + distang = np.sqrt(rowang ** 2 + colang ** 2) + zang = np.cos(distang) + pixdir = np.array([colang, rowang, zang], dtype=np.float64) + norm = np.sqrt(np.dot(pixdir, pixdir)) + pixdir /= norm + + pixrot = qa.from_vectors(zaxis, pixdir) + + dprops = {} + dprops["quat"] = qa.mult(center, qa.mult(pixrot, polrot)) + + dets[dname] = dprops + + return dets + + +def fake_hexagon_focalplane( + n_pix=7, + width_deg=5.0, + samplerate=1.0, + epsilon=0.0, + net=1.0, + fmin=0.0, + alpha=1.0, + fknee=0.05, +): + pol_A = hex_pol_angles_qu(n_pix, offset=0.0) + pol_B = hex_pol_angles_qu(n_pix, offset=90.0) + quat_A = hex_layout(n_pix, width_deg, "D", "A", pol_A) + quat_B = hex_layout(n_pix, width_deg, "D", "B", pol_B) + + det_data = dict(quat_A) + det_data.update(quat_B) + + nrings = hex_nring(n_pix) + detfwhm = 0.5 * 60.0 * width_deg / (2 * nrings - 1) + + for det in det_data.keys(): + det_data[det]["pol_leakage"] = epsilon + det_data[det]["fmin"] = fmin + det_data[det]["fknee"] = fknee + det_data[det]["alpha"] = alpha + det_data[det]["NET"] = net + det_data[det]["fwhm_arcmin"] = detfwhm + det_data[det]["fsample"] = samplerate + + return Focalplane(detector_data=det_data, sample_rate=samplerate) + + +def plot_focalplane( + dets, width, height, outfile, fwhm=None, facecolor=None, polcolor=None, labels=None +): + """Visualize a dictionary of detectors. + + This makes a simple plot of the detector positions on the projected + focalplane. + + To avoid python overhead in large MPI jobs, we place the matplotlib + import inside this function, so that it is only imported when the + function is actually called. + + If the detector dictionary contains a key "fwhm", that will be assumed + to be in arcminutes. Otherwise a nominal value is used. + + If the detector dictionary contains a key "viscolor", then that color + will be used. + + Args: + dets (dict): dictionary of detector quaternions. + width (float): width of plot in degrees. + height (float): height of plot in degrees. + outfile (str): output PNG path. If None, then matplotlib will be + used for inline plotting. + fwhm (dict): dictionary of detector beam FWHM in arcminutes, used + to draw the circles to scale. + facecolor (dict): dictionary of color values for the face of each + detector circle. + polcolor (dict): dictionary of color values for the polarization + arrows. + labels (dict): plot this text in the center of each pixel. + + Returns: + None + + """ + if outfile is not None: + import matplotlib + import warnings + + # Try to force matplotlib to not use any Xwindows backend. 
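+        # Selecting the Agg backend before importing pyplot keeps batch and
+        # MPI jobs from attempting to open a display.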
+ warnings.filterwarnings("ignore") + matplotlib.use("Agg") + import matplotlib.pyplot as plt + + xfigsize = int(width) + yfigsize = int(height) + figdpi = 100 + + # Compute the font size to use for detector labels + fontpix = 0.2 * figdpi + fontpt = int(0.75 * fontpix) + + fig = plt.figure(figsize=(xfigsize, yfigsize), dpi=figdpi) + ax = fig.add_subplot(1, 1, 1) + + half_width = 0.5 * width + half_height = 0.5 * height + ax.set_xlabel("Degrees", fontsize="large") + ax.set_ylabel("Degrees", fontsize="large") + ax.set_xlim([-half_width, half_width]) + ax.set_ylim([-half_height, half_height]) + + xaxis = np.array([1.0, 0.0, 0.0], dtype=np.float64) + yaxis = np.array([0.0, 1.0, 0.0], dtype=np.float64) + zaxis = np.array([0.0, 0.0, 1.0], dtype=np.float64) + + for d, quat in dets.items(): + + # radius in degrees + detradius = 0.5 * 5.0 / 60.0 + if fwhm is not None: + detradius = 0.5 * fwhm[d] / 60.0 + + # rotation from boresight + rdir = qa.rotate(quat, zaxis).flatten() + ang = np.arctan2(rdir[1], rdir[0]) + + orient = qa.rotate(quat, xaxis).flatten() + polang = np.arctan2(orient[1], orient[0]) + + mag = np.arccos(rdir[2]) * 180.0 / np.pi + xpos = mag * np.cos(ang) + ypos = mag * np.sin(ang) + + detface = "none" + if facecolor is not None: + detface = facecolor[d] + + circ = plt.Circle((xpos, ypos), radius=detradius, fc=detface, ec="k") + ax.add_artist(circ) + + ascale = 2.0 + + xtail = xpos - ascale * detradius * np.cos(polang) + ytail = ypos - ascale * detradius * np.sin(polang) + dx = ascale * 2.0 * detradius * np.cos(polang) + dy = ascale * 2.0 * detradius * np.sin(polang) + + detcolor = "black" + if polcolor is not None: + detcolor = polcolor[d] + + ax.arrow( + xtail, + ytail, + dx, + dy, + width=0.1 * detradius, + head_width=0.3 * detradius, + head_length=0.3 * detradius, + fc=detcolor, + ec=detcolor, + length_includes_head=True, + ) + + if labels is not None: + xsgn = 1.0 + if dx < 0.0: + xsgn = -1.0 + labeloff = 0.05 * xsgn * fontpix * len(labels[d]) / figdpi + ax.text( + (xtail + 1.1 * dx + labeloff), + (ytail + 1.1 * dy), + labels[d], + color="k", + fontsize=fontpt, + horizontalalignment="center", + verticalalignment="center", + bbox=dict(fc="w", ec="none", pad=1, alpha=1.0), + ) + + if outfile is None: + plt.show() + else: + plt.savefig(outfile) + plt.close() + return fig diff --git a/src/toast/intervals.py b/src/toast/intervals.py new file mode 100644 index 000000000..340bc7654 --- /dev/null +++ b/src/toast/intervals.py @@ -0,0 +1,426 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +from collections.abc import MutableMapping, Sequence + +import numpy as np + +from .timing import function_timer + + +class Interval(object): + """Class storing a single time and sample range. + + Args: + start (float): The start time of the interval in seconds. + stop (float): The stop time of the interval in seconds. + first (int): The first sample index of the interval. + last (int): The last sample index (inclusive) of the interval. 
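+
+    Example::
+
+        ival = Interval(start=10.0, stop=12.0, first=100, last=119)
+        ival.range    # 2.0 seconds
+        ival.samples  # 20 samples (both endpoints are included)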
+
+    """
+
+    def __init__(self, start=None, stop=None, first=None, last=None):
+        self._start = start
+        self._stop = stop
+        self._first = first
+        self._last = last
+
+    def __repr__(self):
+        return "<Interval {} - {} ({} - {})>".format(
+            self._start, self._stop, self._first, self._last
+        )
+
+    def __eq__(self, other):
+        if (
+            (other.first == self.first)
+            and (other.last == self.last)
+            and np.isclose(other.start, self.start)
+            and np.isclose(other.stop, self.stop)
+        ):
+            return True
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    @property
+    def start(self):
+        """(float): the start time of the interval."""
+        if self._start is None:
+            raise RuntimeError("Start time is not yet assigned")
+        return self._start
+
+    @start.setter
+    def start(self, val):
+        if val < 0.0:
+            raise ValueError("Negative start time is not valid")
+        self._start = val
+
+    @property
+    def stop(self):
+        """(float): the stop time of the interval."""
+        if self._stop is None:
+            raise RuntimeError("Stop time is not yet assigned")
+        return self._stop
+
+    @stop.setter
+    def stop(self, val):
+        if val < 0.0:
+            raise ValueError("Negative stop time is not valid")
+        self._stop = val
+
+    @property
+    def first(self):
+        """(int): the first sample of the interval."""
+        if self._first is None:
+            raise RuntimeError("First sample is not yet assigned")
+        return self._first
+
+    @first.setter
+    def first(self, val):
+        if val < 0:
+            raise ValueError("Negative first sample is not valid")
+        self._first = val
+
+    @property
+    def last(self):
+        """(int): the last sample (inclusive) of the interval."""
+        if self._last is None:
+            raise RuntimeError("Last sample is not yet assigned")
+        return self._last
+
+    @last.setter
+    def last(self, val):
+        if val < 0:
+            raise ValueError("Negative last sample is not valid")
+        self._last = val
+
+    @property
+    def range(self):
+        """(float): the number of seconds in the interval."""
+        b = self.start
+        e = self.stop
+        return e - b
+
+    @property
+    def samples(self):
+        """(int): the number of samples in the interval."""
+        b = self.first
+        e = self.last
+        return e - b + 1
+
+
+# NOTE: This class has basic (list-based) intersection and union support
+# (__and__, __or__).  If we ever get so many intervals in a list that this is a
+# performance bottleneck we could consider bringing in another external dependency
+# on the intervaltree package for faster operations.  However, such a problem
+# likely indicates that Intervals are not being used in their intended fashion
+# (for fewer, larger spans of time).
+
+
+class IntervalList(Sequence):
+    """A list of Intervals which supports logical operations.
+
+    Args:
+        timestamps (array): Array of sample times, required.
+        intervals (list): A list of Interval objects.
+        timespans (list): A list of tuples containing start and stop times.
+        samplespans (list): A list of tuples containing first and last (inclusive)
+            sample ranges.
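+
+        Exactly one of intervals, timespans, or samplespans must be provided.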
+ + """ + + def __init__(self, timestamps, intervals=None, timespans=None, samplespans=None): + self.timestamps = timestamps + self._internal = None + if intervals is not None: + if timespans is not None or samplespans is not None: + raise RuntimeError( + "If constructing from intervals, other spans should be None" + ) + self._internal = list(intervals) + else: + if timespans is not None: + if samplespans is not None: + raise RuntimeError( + "Cannot construct from both time and sample spans" + ) + if len(timespans) == 0: + self._internal = list() + else: + # Construct intervals from time ranges + start_indx = np.searchsorted( + timestamps, [x[0] for x in timespans], side="left" + ) + stop_indx = np.searchsorted( + timestamps, [x[1] for x in timespans], side="right" + ) + stop_indx -= 1 + self._internal = [ + Interval( + start=timestamps[x[0]], + stop=timestamps[x[1]], + first=x[0], + last=x[1], + ) + for x in zip(start_indx, stop_indx) + ] + else: + if samplespans is None: + raise RuntimeError( + "Must specify intervals, timespans, or samplespans" + ) + if len(samplespans) == 0: + self._internal = list() + else: + # Construct intervals from sample ranges + self._internal = [ + Interval( + start=timestamps[x[0]], + stop=timestamps[x[1]], + first=x[0], + last=x[1], + ) + for x in samplespans + ] + + def __getitem__(self, key): + return self._internal[key] + + def __contains__(self, item): + for ival in self._internal: + if ival == item: + return True + return False + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def __repr__(self): + s = "[" + if len(self._internal) > 1: + for it in self._internal[0:-1]: + s += str(it) + s += ", " + if len(self._internal) > 0: + s += str(self._internal[-1]) + s += "]" + return s + + def __eq__(self, other): + if len(self._internal) != len(other): + return False + if len(self.timestamps) != len(other.timestamps): + return False + if not np.isclose(self.timestamps[0], other.timestamps[0]) or not np.isclose( + self.timestamps[-1], other.timestamps[-1] + ): + return False + for s, o in zip(self._internal, other): + if s != o: + return False + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def simplify(self): + if len(self._internal) == 0: + return + propose = list() + first = self._internal[0].first + last = self._internal[0].last + for i in range(1, len(self._internal)): + cur_first = self._internal[i].first + cur_last = self._internal[i].last + if cur_first == last + 1: + # This interval is contiguous with the previous one + last = cur_last + else: + # There is a gap + propose.append( + Interval( + first=first, + last=last, + start=self.timestamps[first], + stop=self.timestamps[last], + ) + ) + first = cur_first + last = cur_last + propose.append( + Interval( + first=first, + last=last, + start=self.timestamps[first], + stop=self.timestamps[last], + ) + ) + if len(propose) < len(self._internal): + # Need to update + self._internal = propose + + def __invert__(self): + if len(self._internal) == 0: + return + neg = list() + # Handle range before first interval + if not np.isclose(self.timestamps[0], self._internal[0].start): + last = self._internal[0].first - 1 + neg.append( + Interval( + start=self.timestamps[0], + stop=self.timestamps[last], + first=0, + last=last, + ) + ) + for i in range(len(self._internal) - 1): + # Handle gaps between intervals + cur_last = self._internal[i].last + next_first = self._internal[i + 1].first + if next_first != cur_last + 1: + # There 
are some samples in between + neg.append( + Interval( + start=self.timestamps[cur_last + 1], + stop=self.timestamps[next_first - 1], + first=cur_last + 1, + last=next_first - 1, + ) + ) + # Handle range after last interval + if not np.isclose(self.timestamps[-1], self._internal[-1].stop): + first = self._internal[-1].last + 1 + neg.append( + Interval( + start=self.timestamps[first], + stop=self.timestamps[-1], + first=first, + last=len(self.timestamps) - 1, + ) + ) + return IntervalList(self.timestamps, intervals=neg) + + def __and__(self, other): + if len(self.timestamps) != len(other.timestamps): + raise RuntimeError( + "Cannot do AND operation on intervals with different timestamps" + ) + if not np.isclose(self.timestamps[0], other.timestamps[0]) or not np.isclose( + self.timestamps[-1], other.timestamps[-1] + ): + raise RuntimeError( + "Cannot do AND operation on intervals with different timestamps" + ) + if len(self._internal) == 0 or len(other) == 0: + return IntervalList(self.timestamps, intervals=list()) + result = list() + curself = 0 + curother = 0 + + # Walk both sequences, building up the intersection. + while (curself < len(self._internal)) and (curother < len(other)): + low = max(self._internal[curself].first, other[curother].first) + high = min(self._internal[curself].last, other[curother].last) + if low <= high: + result.append( + Interval( + first=low, + last=high, + start=self.timestamps[low], + stop=self.timestamps[high], + ) + ) + if self._internal[curself].last < other[curother].last: + curself += 1 + else: + curother += 1 + + result = IntervalList(self.timestamps, intervals=result) + result.simplify() + return result + + def __or__(self, other): + if len(self.timestamps) != len(other.timestamps): + raise RuntimeError( + "Cannot do OR operation on intervals with different timestamps" + ) + if not np.isclose(self.timestamps[0], other.timestamps[0]) or not np.isclose( + self.timestamps[-1], other.timestamps[-1] + ): + raise RuntimeError( + "Cannot do OR operation on intervals with different timestamps" + ) + if len(self._internal) == 0: + return IntervalList(self.timestamps, intervals=other) + elif len(other) == 0: + return IntervalList(self.timestamps, intervals=self._internal) + + result = list() + res_first = None + res_last = None + curself = 0 + curother = 0 + + # Walk both sequences, building up the largest contiguous chunks possible. 
+ done_self = False + done_other = False + while (not done_self) or (not done_other): + next = None + if done_self: + next = other[curother] + curother += 1 + elif done_other: + next = self._internal[curself] + curself += 1 + else: + if self._internal[curself].first < other[curother].first: + next = self._internal[curself] + curself += 1 + else: + next = other[curother] + curother += 1 + if curself >= len(self._internal): + done_self = True + if curother >= len(other): + done_other = True + + if res_first is None: + res_first = next.first + res_last = next.last + else: + if next.first <= res_last + 1: + # We overlap last interval + if next.last > res_last: + # This interval extends beyond the last interval + res_last = next.last + else: + # We have a break, close out previous interval and start a new one + result.append( + Interval( + first=res_first, + last=res_last, + start=self.timestamps[res_first], + stop=self.timestamps[res_last], + ) + ) + res_first = next.first + res_last = next.last + # Close out final interval + result.append( + Interval( + first=res_first, + last=res_last, + start=self.timestamps[res_first], + stop=self.timestamps[res_last], + ) + ) + + result = IntervalList(self.timestamps, intervals=result) + return result diff --git a/src/toast/map/cov.py b/src/toast/map/cov.py index 10ed958d5..be5021907 100644 --- a/src/toast/map/cov.py +++ b/src/toast/map/cov.py @@ -6,7 +6,7 @@ from ..timing import function_timer -from ..op import Operator +from ..operator import Operator from .._libtoast import cov_mult_diag, cov_apply_diag, cov_eigendecompose_diag diff --git a/src/toast/map/pixels.py b/src/toast/map/pixels.py index dcaee46fb..45760be90 100644 --- a/src/toast/map/pixels.py +++ b/src/toast/map/pixels.py @@ -12,7 +12,7 @@ import healpy as hp -from ..op import Operator +from ..operator import Operator from ..cache import Cache diff --git a/src/toast/mpi.py b/src/toast/mpi.py index 22dfbb0b9..1968f0d1b 100644 --- a/src/toast/mpi.py +++ b/src/toast/mpi.py @@ -3,14 +3,6 @@ # a BSD-style license that can be found in the LICENSE file. import os -import sys -import itertools - -import numpy as np - -from ._libtoast import Logger - -from .pshmem import MPIShared, MPILock use_mpi = None MPI = None @@ -42,6 +34,21 @@ log.info("mpi4py not found- using serial operations only") use_mpi = False +# We put other imports and checks for accelerators *after* the MPI check, since +# usually the MPI initialization is time sensitive and may timeout the job if it does +# not happen quickly enough. + +import sys +import itertools + +import numpy as np + +from pshmem import MPIShared, MPILock + +from .cuda import use_pycuda, cuda_devices, AcceleratorCuda + +from ._libtoast import Logger + def get_world(): """Retrieve the default world communicator and its properties. @@ -162,6 +169,24 @@ def __init__(self, world=None, groupsize=0): self._gcomm = self._wcomm.Split(self._group, self._grank) self._rcomm = self._wcomm.Split(self._grank, self._group) + # See if we are using CUDA and if so, determine which device each process will + # be using. + self._cuda = None + if use_pycuda: + if self._wcomm is None: + # We are not using MPI, so we will just use the first device + self._cuda = AcceleratorCuda(0) + else: + # How many processes are on this node? + nodecomm = self._wcomm.Split_type(MPI.COMM_TYPE_SHARED, 0) + nodeprocs = nodecomm.size + noderank = nodecomm.rank + # Assign this process to one of the GPUs. 
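The round-robin device assignment in this hunk can be exercised outside of toast. A minimal sketch, assuming mpi4py is available; `n_devices` is a stand-in for the `cuda_devices` count detected in `toast.cuda`, and no device is actually opened:

```python
# Standalone sketch of the node-local round-robin assignment used above.
# `n_devices` is a placeholder for the CUDA device count from toast.cuda.
from mpi4py import MPI


def assign_device(world_comm, n_devices):
    # Processes sharing a node get consecutive ranks on this communicator.
    nodecomm = world_comm.Split_type(MPI.COMM_TYPE_SHARED, 0)
    # Spread the node-local ranks across the available devices.
    return nodecomm.rank % n_devices


if __name__ == "__main__":
    dev = assign_device(MPI.COMM_WORLD, n_devices=4)
    print("world rank {} -> CUDA device {}".format(MPI.COMM_WORLD.rank, dev))
```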
+            # FIXME: Is it better for ranks to be spread across the devices
+            # or for contiguous ranks to be assigned to same device?
+            rank_dev = noderank % cuda_devices
+            self._cuda = AcceleratorCuda(rank_dev)
+
     @property
     def world_size(self):
         """The size of the world communicator."""
@@ -207,6 +232,11 @@ def comm_rank(self):
         """The communicator shared by processes with the same group_rank."""
         return self._rcomm
 
+    @property
+    def cuda(self):
+        """The CUDA device properties for this process."""
+        return self._cuda
+
     def __repr__(self):
         lines = [
             "  World MPI communicator = {}".format(self._wcomm),
@@ -217,4 +247,8 @@ def __repr__(self):
             "  World MPI size = {}".format(self._wsize),
             "  World MPI rank = {}".format(self._wrank),
             "  Group MPI communicator = {}".format(self._gcomm),
             "  Group MPI size = {}".format(self._gsize),
             "  Group MPI rank = {}".format(self._grank),
             "  Rank MPI communicator = {}".format(self._rcomm),
         ]
+        if self._cuda is None:
+            lines.append("  CUDA disabled")
+        else:
+            lines.append("  Using CUDA device {}".format(self._cuda.device_index))
         return "<toast.Comm\n{}\n>".format("\n".join(lines))
diff --git a/src/toast/observation.py b/src/toast/observation.py
new file mode 100644
index 000000000..0526b2b26
--- /dev/null
+++ b/src/toast/observation.py
@@ -0,0 +1,1509 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import sys
+
+import numbers
+
+from collections.abc import MutableMapping, Sequence
+
+import numpy as np
+
+from .mpi import MPI
+
+from .instrument import Telescope, Focalplane
+
+from .intervals import IntervalList
+
+from .dist import distribute_samples
+
+from .utils import (
+    Logger,
+    AlignedI8,
+    AlignedU8,
+    AlignedI16,
+    AlignedU16,
+    AlignedI32,
+    AlignedU32,
+    AlignedI64,
+    AlignedU64,
+    AlignedF32,
+    AlignedF64,
+    name_UID,
+)
+
+from pshmem import MPIShared
+
+from .cuda import use_pycuda
+
+
+class DetectorData(object):
+    """Class representing a logical collection of co-sampled detector data.
+
+    This class works like an array of detector data where the first dimension is the
+    number of detectors and the second dimension is the data for that detector. The
+    data for a particular detector may itself be multi-dimensional, with the first
+    dimension the number of samples.
+
+    The data in this container may be sliced by both detector indices and also by
+    detector name.
+
+    Example:
+        Imagine we have 3 detectors and each has 10 samples. We want to store a
+        4-element value at each sample using 4-byte floats. We would do::
+
+            detdata = DetectorData(["d01", "d02", "d03"], (10, 4), np.float32)
+
+        and then we can access the data for an individual detector either by index
+        or by name with::
+
+            detdata["d01"] = np.ones((10, 4), dtype=np.float32)
+            firstdet = detdata[0]
+
+        slicing by index and by a list of detectors is possible::
+
+            view = detdata[0:-1]
+            view = detdata[("d01", "d03")]
+
+    Args:
+        detectors (list): A list of detector names in exactly the order you wish.
+            This order is fixed for the life of the object.
+        shape (tuple): The shape of the data *for each detector*. The first element
+            of this shape should be the number of samples.
+        dtype (numpy.dtype): A numpy-compatible dtype for each element of the detector
+            data. The only supported types are 1, 2, 4, and 8 byte signed and unsigned
+            integers, 4 and 8 byte floating point numbers, and 4 and 8 byte complex
+            numbers.
+ + """ + + def __init__(self, detectors, shape, dtype): + log = Logger.get() + + self._detectors = detectors + if len(self._detectors) == 0: + msg = "You must specify a list of at least one detector name" + log.error(msg) + raise ValueError(msg) + + self._name2idx = {y: x for x, y in enumerate(self._detectors)} + + # construct a new dtype in case the parameter given is shortcut string + ttype = np.dtype(dtype) + + self._storage_class = None + if ttype.char == "b": + self._storage_class = AlignedI8 + elif ttype.char == "B": + self._storage_class = AlignedU8 + elif ttype.char == "h": + self._storage_class = AlignedI16 + elif ttype.char == "H": + self._storage_class = AlignedU16 + elif ttype.char == "i": + self._storage_class = AlignedI32 + elif ttype.char == "I": + self._storage_class = AlignedU32 + elif (ttype.char == "q") or (ttype.char == "l"): + self._storage_class = AlignedI64 + elif (ttype.char == "Q") or (ttype.char == "L"): + self._storage_class = AlignedU64 + elif ttype.char == "f": + self._storage_class = AlignedF32 + elif ttype.char == "d": + self._storage_class = AlignedF64 + elif ttype.char == "F": + raise NotImplementedError("No support yet for complex numbers") + elif ttype.char == "D": + raise NotImplementedError("No support yet for complex numbers") + else: + msg = "Unsupported data typecode '{}'".format(ttype.char) + log.error(msg) + raise ValueError(msg) + self._dtype = ttype + + # Verify that our shape contains only integral values + self._flatshape = len(self._detectors) + for d in shape: + if not isinstance(d, (int, np.integer)): + msg = "input shape contains non-integer values" + log.error(msg) + raise ValueError(msg) + self._flatshape *= d + + shp = [len(self._detectors)] + shp.extend(shape) + self._shape = tuple(shp) + self._raw = self._storage_class.zeros(self._flatshape) + self._data = self._raw.array().reshape(self._shape) + + @property + def detectors(self): + return list(self._detectors) + + def keys(self): + return list(self._detectors) + + @property + def dtype(self): + return self._dtype + + @property + def shape(self): + return self._shape + + @property + def detector_shape(self): + return tuple(self._shape[1:]) + + @property + def data(self): + return self._data + + def clear(self): + """Delete the underlying memory. + + This will forcibly delete the C-allocated memory and invalidate all python + references to this object. DO NOT CALL THIS unless you are sure all references + are no longer being used and you are about to delete the object. 
+ + """ + if hasattr(self, "_data"): + del self._data + if hasattr(self, "_raw"): + self._raw.clear() + del self._raw + + def __del__(self): + self.clear() + + def _det_axis_view(self, key): + if isinstance(key, (int, np.integer)): + # Just one detector by index + view = key + elif isinstance(key, str): + # Just one detector by name + view = self._name2idx[key] + elif isinstance(key, slice): + # We are slicing detectors by index + view = key + else: + # Assume that our key is at least iterable + try: + test = iter(key) + view = list() + for k in key: + view.append(self._name2idx[k]) + except TypeError: + log = Logger.get() + msg = "Detector indexing supports slice, int, string or iterable, not '{}'".format( + key + ) + log.error(msg) + raise TypeError(msg) + return view + + def _get_view(self, key): + if isinstance(key, tuple): + # We are slicing in both detector and sample dimensions + if len(key) > 2: + msg = "DetectorData has only 2 dimensions" + log.error(msg) + raise TypeError(msg) + if len(key) == 1: + # Only detector slice + return self._det_axis_view(key[0]) + else: + detview = self._det_axis_view(key[0]) + return detview, key[1] + else: + # Only detector slice + return self._det_axis_view(key) + + def __getitem__(self, key): + view = self._get_view(key) + return np.array(self._data[view], dtype=self._dtype, copy=False) + + def __delitem__(self, key): + raise NotImplementedError("Cannot delete individual elements") + return + + def __setitem__(self, key, value): + view = self._get_view(key) + self._data[view] = value + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._detectors) + + def __repr__(self): + val = " 1: + msg = "If passing in an existing data buffer, it should exist on " + "exactly one process (rank 0). All other ranks should pass in None." 
+ log.error(msg) + raise RuntimeError(msg) + + if shared_comm is None or shared_comm.rank == 0: + if copy_data == 1: + shared_shape = original.shape + shared_dtype = original.dtype + shared_dtype = np.dtype(shared_dtype) + + if shared_comm is not None: + shared_shape = shared_comm.bcast(shared_shape, root=0) + shared_dtype = shared_comm.bcast(shared_dtype, root=0) + + # Use defaults for shape and dtype if not set + if shared_shape is None: + shared_shape = (self.samples,) + if shared_dtype is None: + shared_dtype = np.float64 + + # Create the data object + self._internal[name] = MPIShared(shared_shape, shared_dtype, shared_comm) + + # Copy input data if given + if copy_data == 1: + self._internal[name].set(original, np.zeros_like(shared_shape), fromrank=0) + return + + # Shortcuts for creating standard data objects + + def create_times(self, name="times"): + self.create(name, shape=(self.samples,), dtype=np.float64, comm=self.comm_col) + + def create_flags(self, name="flags"): + self.create(name, shape=(self.samples,), dtype=np.uint8, comm=self.comm_col) + + def create_velocity(self, name="velocity"): + self.create(name, shape=(self.samples, 3), dtype=np.float64, comm=self.comm_col) + + def create_position(self, name="position"): + self.create(name, shape=(self.samples, 3), dtype=np.float64, comm=self.comm_col) + + def create_hwp_angle(self, name="hwp_angle"): + self.create(name, shape=(self.samples,), dtype=np.float64, comm=self.comm_col) + + def create_boresight_radec(self, name="boresight_radec"): + self.create(name, shape=(self.samples, 4), dtype=np.float64, comm=self.comm_col) + + def create_boresight_azel(self, name="boresight_azel"): + self.create(name, shape=(self.samples, 4), dtype=np.float64, comm=self.comm_col) + + def create_boresight_response(self, name="boresight_response"): + self.create( + name, shape=(self.samples, 16), dtype=np.float32, comm=self.comm_col + ) + + # Mapping methods + + def __getitem__(self, key): + return self._internal[key] + + def __delitem__(self, key): + self._internal[key].close() + del self._internal[key] + + def __setitem__(self, key, value): + self._internal[key] = value + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def clear(self): + for k in self._internal.keys(): + self._internal[k].close() + + def __del__(self): + if hasattr(self, "_internal"): + self.clear() + + def __repr__(self): + val = " 1: + for it in self.slices[0:-1]: + s += str(it) + s += ", " + if len(self.slices) > 0: + s += str(self.slices[-1]) + s += "]" + return s + + +class ViewMgr(MutableMapping): + """Class to manage views into observation data objects.""" + + def __init__(self, obj): + self.obj = obj + if not hasattr(obj, "_views"): + self.obj._views = dict() + + # Mapping methods + + def __getitem__(self, key): + if key not in self.obj._views: + # View does not yet exist, create it. 
+            if key not in self.obj.intervals:
+                raise KeyError(
+                    "Observation does not have interval list named '{}'".format(key)
+                )
+            self.obj._views[key] = View(self.obj, key)
+            # Register deleter callback
+            self.obj.intervals.register_delete_callback(key, self.__delitem__)
+        return self.obj._views[key]
+
+    def __delitem__(self, key):
+        del self.obj._views[key]
+
+    def __setitem__(self, key, value):
+        raise RuntimeError("Cannot set views directly - simply access them.")
+
+    def __iter__(self):
+        return iter(self.obj._views)
+
+    def __len__(self):
+        return len(self.obj._views)
+
+    def clear(self):
+        self.obj._views.clear()
+
+
+class ViewInterface(object):
+    """Descriptor class for accessing the views in an observation."""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def __get__(self, obj, cls=None):
+        if obj is None:
+            return self
+        else:
+            if not hasattr(obj, "_viewmgr"):
+                obj._viewmgr = ViewMgr(obj)
+            return obj._viewmgr
+
+    def __set__(self, obj, value):
+        raise AttributeError("Cannot reset the view interface")
+
+    def __delete__(self, obj):
+        raise AttributeError("Cannot delete the view interface")
+
+
+class DistDetSamp(object):
+    """Class used within an Observation to store the detector and sample distribution.
+
+    This is just a simple container for various properties of the distribution.
+
+    Args:
+        samples (int): The total number of samples.
+        detectors (list): The list of detector names.
+        sample_sets (list): The list of sample chunk sizes (or None).
+        detector_sets (list): The list of detector sets (or None).
+        comm (mpi4py.MPI.Comm): The observation communicator (or None).
+        process_rows (int): The size of the process grid in the detector direction.
+
+    """
+
+    def __init__(
+        self, samples, detectors, sample_sets, detector_sets, comm, process_rows
+    ):
+        log = Logger.get()
+
+        self.detectors = detectors
+        self.samples = samples
+        self.sample_sets = sample_sets
+        self.detector_sets = detector_sets
+        self.process_rows = process_rows
+
+        if self.samples is None or self.samples <= 0:
+            msg = "You must specify the number of samples as a positive integer"
+            log.error(msg)
+            raise RuntimeError(msg)
+
+        self.comm = comm
+        self.comm_size = 1
+        self.comm_rank = 0
+        if self.comm is not None:
+            self.comm_size = self.comm.size
+            self.comm_rank = self.comm.rank
+
+        if self.process_rows is None:
+            if self.comm is None:
+                # No MPI, default to 1
+                self.process_rows = 1
+            else:
+                # We have MPI, default to the size of the communicator
+                self.process_rows = self.comm.size
+
+        self.process_cols = 1
+        self.comm_row_size = 1
+        self.comm_row_rank = 0
+        self.comm_col_size = 1
+        self.comm_col_rank = 0
+        self.comm_row = None
+        self.comm_col = None
+
+        if self.comm is None:
+            if self.process_rows != 1:
+                msg = "MPI is disabled, so process_rows must equal 1"
+                log.error(msg)
+                raise RuntimeError(msg)
+        else:
+            if comm.size % self.process_rows != 0:
+                msg = "The number of process_rows ({}) does not divide evenly into the communicator size ({})".format(
+                    self.process_rows, comm.size
+                )
+                log.error(msg)
+                raise RuntimeError(msg)
+            self.process_cols = comm.size // self.process_rows
+            self.comm_col_rank = comm.rank // self.process_cols
+            self.comm_row_rank = comm.rank % self.process_cols
+
+            # Split the main communicator into process row and column
+            # communicators.
+
+            if self.process_cols == 1:
+                self.comm_row = MPI.COMM_SELF
+            else:
+                self.comm_row = self.comm.Split(self.comm_col_rank, self.comm_row_rank)
+            self.comm_row_size = self.comm_row.size
+
+            if self.process_rows == 1:
+                self.comm_col = MPI.COMM_SELF
+            else:
+                self.comm_col = self.comm.Split(self.comm_row_rank, self.comm_col_rank)
+            self.comm_col_size = self.comm_col.size
+
+        # If detector_sets is specified, check consistency.
+
+        if self.detector_sets is not None:
+            test = 0
+            for ds in self.detector_sets:
+                test += len(ds)
+                for d in ds:
+                    if d not in self.detectors:
+                        msg = (
+                            "Detector {} in detector_sets but not in detectors".format(
+                                d
+                            )
+                        )
+                        log.error(msg)
+                        raise RuntimeError(msg)
+            if test != len(detectors):
+                msg = "{} detectors given, but detector_sets has {}".format(
+                    len(detectors), test
+                )
+                log.error(msg)
+                raise RuntimeError(msg)
+
+        # If sample_sets is specified, it must be consistent with
+        # the total number of samples.
+
+        if self.sample_sets is not None:
+            test = 0
+            for st in self.sample_sets:
+                test += np.sum(st)
+            if samples != test:
+                msg = (
+                    "Sum of sample_sets ({}) does not equal total samples ({})".format(
+                        test, samples
+                    )
+                )
+                log.error(msg)
+                raise RuntimeError(msg)
+
+        (self.dets, self.det_sets, self.samps, self.samp_sets) = distribute_samples(
+            self.comm,
+            self.detectors,
+            self.samples,
+            detranks=self.process_rows,
+            detsets=self.detector_sets,
+            sampsets=self.sample_sets,
+        )
+
+
+class Observation(MutableMapping):
+    """Class representing the data for one observation.
+
+    An Observation stores information about data distribution across one or more MPI
+    processes and is a container for three types of objects:
+
+        * Local detector data (unique to each process).
+        * Shared data that has one common copy for every node spanned by the
+          observation.
+        * Other arbitrary small metadata.
+
+    Small metadata can be stored directly in the Observation using normal square
+    bracket "[]" access to elements (an Observation is a dictionary). Groups of
+    detector data (e.g. "signal", "flags", etc) can be accessed in the separate
+    detector data dictionary (the "detdata" attribute). Shared data can be similarly
+    stored in the "shared" attribute.
+
+    The detector data within an Observation is distributed among the processes in an
+    MPI communicator. The processes in the communicator are arranged in a rectangular
+    grid, with each process storing some number of detectors for a piece of time
+    covered by the observation. The most common configuration (and the default) is to
+    make this grid the size of the communicator in the "detector direction" and a size
+    of one in the "sample direction":
+
+        MPI       det1  sample(0), sample(1), sample(2), ...., sample(N-1)
+        rank 0    det2  sample(0), sample(1), sample(2), ...., sample(N-1)
+        --------------------------------------------------------------------------
+        MPI       det3  sample(0), sample(1), sample(2), ...., sample(N-1)
+        rank 1    det4  sample(0), sample(1), sample(2), ...., sample(N-1)
+
+    So each process has a subset of detectors for the whole span of the observation
+    time. You can override this shape by setting the process_rows to something
+    else. For example, process_rows=1 would result in:
+
+        MPI rank 0                       |        MPI rank 1
+                                         |
+        det1  sample(0), sample(1), ..., |  ...., sample(N-1)
+        det2  sample(0), sample(1), ..., |  ...., sample(N-1)
+        det3  sample(0), sample(1), ..., |  ...., sample(N-1)
+        det4  sample(0), sample(1), ..., |  ...., sample(N-1)
+
+    Args:
+        telescope (Telescope): An instance of a Telescope object.
+        samples (int): The total number of samples for this observation.
+        name (str): (Optional) The observation name.
+        UID (int): (Optional) The Unique ID for this observation. If not specified,
+            the UID will be computed from a hash of the name.
+        comm (mpi4py.MPI.Comm): (Optional) The MPI communicator to use.
+        detector_sets (list): (Optional) List of lists containing detector names.
+            These discrete detector sets are used to distribute detectors; a detector
+            set will always be within a single row of the process grid. If None,
+            every detector is a set of one.
+        sample_sets (list): (Optional) List of lists of chunk sizes (integer numbers of
+            samples). These discrete sample sets are used to distribute sample data.
+            A sample set will always be within a single column of the process grid. If
+            None, any distribution break in the sample direction will happen at an
+            arbitrary place. The sum of all chunks must equal the total number of
+            samples.
+        process_rows (int): (Optional) The size of the rectangular process grid
+            in the detector direction. This number must evenly divide into the size of
+            mpicomm. If not specified, defaults to the size of the communicator.
+
+    """
+
+    view = ViewInterface()
+
+    def __init__(
+        self,
+        telescope,
+        samples,
+        name=None,
+        UID=None,
+        comm=None,
+        detector_sets=None,
+        sample_sets=None,
+        process_rows=None,
+    ):
+        log = Logger.get()
+        self._telescope = telescope
+        self._samples = samples
+        self._name = name
+        self._UID = UID
+        self._comm = comm
+        self._detector_sets = detector_sets
+        self._sample_sets = sample_sets
+
+        if self._UID is None and self._name is not None:
+            self._UID = name_UID(self._name)
+
+        self.dist = DistDetSamp(
+            self._samples,
+            self._telescope.focalplane.detectors,
+            self._sample_sets,
+            self._detector_sets,
+            self._comm,
+            process_rows,
+        )
+
+        if self.dist.comm_rank == 0:
+            # check that all processes have some data, otherwise print warning
+            for d in range(self.dist.process_rows):
+                if len(self.dist.dets[d]) == 0:
+                    msg = "WARNING: process row rank {} has no detectors assigned in observation.".format(
+                        d
+                    )
+                    log.warning(msg)
+            for r in range(self.dist.process_cols):
+                if self.dist.samps[r][1] <= 0:
+                    msg = "WARNING: process column rank {} has no data assigned in observation.".format(
+                        r
+                    )
+                    log.warning(msg)
+
+        # The internal metadata dictionary
+        self._internal = dict()
+
+        # Set up the data managers
+        self.detdata = DetDataMgr(self._samples, self.detectors)
+
+        self.shared = SharedDataMgr(
+            self._samples,
+            self.detectors,
+            self._comm,
+            self.dist.comm_row,
+            self.dist.comm_col,
+        )
+
+        self.intervals = IntervalMgr(self._comm, self.dist.comm_row, self.dist.comm_col)
+
+    # General properties
+
+    @property
+    def telescope(self):
+        """
+        (Telescope): The Telescope instance for this observation.
+        """
+        return self._telescope
+
+    @property
+    def name(self):
+        """
+        (str): The name of the observation.
+        """
+        return self._name
+
+    @property
+    def UID(self):
+        """
+        (int): The Unique ID for this observation.
+        """
+        return self._UID
+
+    # The overall MPI communicator for this observation.
+
+    @property
+    def comm(self):
+        """
+        (mpi4py.MPI.Comm): The group communicator for this observation (or None).
+        """
+        return self.dist.comm
+
+    @property
+    def comm_size(self):
+        """
+        (int): The number of processes in the observation communicator.
+        """
+        return self.dist.comm_size
+
+    @property
+    def comm_rank(self):
+        """
+        (int): The rank of this process in the observation communicator.
+        """
+        return self.dist.comm_rank
+
+    # The MPI communicator along the current row of the process grid
+
+    @property
+    def comm_row(self):
+        """
+        (mpi4py.MPI.Comm): The communicator for processes in the same row (or None).
+        """
+        return self.dist.comm_row
+
+    @property
+    def comm_row_size(self):
+        """
+        (int): The number of processes in the row communicator.
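The rectangular grid described above reduces to plain integer arithmetic, and the (offset, n_local) slices in the sample direction follow the usual uniform chunking when no sample sets are given. A runnable sketch of both; the uniform rule here is illustrative only, since the real `DistDetSamp` / `distribute_samples` path also honors detector and sample sets:

```python
# Illustrative sketch of the process-grid coordinates and a plain uniform
# sample distribution, mirroring the arithmetic in DistDetSamp above.
def grid_coords(rank, size, process_rows):
    process_cols = size // process_rows
    col_rank = rank // process_cols  # position in the detector direction
    row_rank = rank % process_cols   # position in the sample direction
    return col_rank, row_rank


def uniform_chunks(n_samples, n_chunks):
    base, extra = divmod(n_samples, n_chunks)
    chunks = []
    offset = 0
    for c in range(n_chunks):
        n = base + (1 if c < extra else 0)
        chunks.append((offset, n))  # (first sample, number of samples)
        offset += n
    return chunks


# Four processes in a 2 x 2 grid over a 10 sample observation:
for r in range(4):
    print(r, grid_coords(r, size=4, process_rows=2))
print(uniform_chunks(10, 2))  # [(0, 5), (5, 5)]
```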
+ """ + return self.dist.comm_row_size + + @property + def comm_row_rank(self): + """ + (int): The rank of this process in the row communicator. + """ + return self.dist.comm_row_rank + + # The MPI communicator along the current column of the process grid + + @property + def comm_col(self): + """ + (mpi4py.MPI.Comm): The communicator for processes in the same column (or None). + """ + return self.dist.comm_col + + @property + def comm_col_size(self): + """ + (int): The number of processes in the column communicator. + """ + return self.dist.comm_col_size + + @property + def comm_col_rank(self): + """ + (int): The rank of this process in the column communicator. + """ + return self.dist.comm_col_rank + + # Detector distribution + + @property + def detectors(self): + """ + (list): All detectors. Convenience wrapper for telescope.focalplane.detectors + """ + return self._telescope.focalplane.detectors + + @property + def local_detectors(self): + """ + (list): The detectors assigned to this process. + """ + return self.dist.dets[self.dist.comm_col_rank] + + def select_local_detectors(self, selection=None): + """ + (list): The detectors assigned to this process, optionally pruned. + """ + if selection is None: + return self.local_detectors + else: + dets = list() + for det in self.local_detectors: + if det in selection: + dets.append(det) + return dets + + # Detector set distribution + + @property + def detector_sets(self): + """ + (list): The total list of detector sets for this observation. + """ + return self._detector_sets + + @property + def local_detector_sets(self): + """ + (list): The detector sets assigned to this process (or None). + """ + if self._detector_sets is None: + return None + else: + ds = list() + for d in range(self.dist.det_sets[self.dist.comm_col_rank][1]): + off = self.dist.det_sets[self.dist.comm_col_rank][0] + ds.append(self._detector_sets[off + d]) + return ds + + # Sample distribution + + @property + def n_sample(self): + """(int): the total number of samples in this observation.""" + return self._samples + + @property + def local_samples(self): + """ + (tuple): The first element of the tuple is the first observation sample + assigned to this process. The second element of the tuple is the number of + samples assigned to this process. + """ + return self.dist.samps[self.dist.comm_row_rank] + + @property + def offset(self): + """ + The first sample on this process, relative to the observation start. + """ + return self.local_samples[0] + + @property + def n_local(self): + """ + The number of local samples on this process. + """ + return self.local_samples[1] + + # Sample set distribution + + @property + def sample_sets(self): + """ + (list): The input full list of sample sets used in data distribution + """ + return self._sample_sets + + @property + def local_sample_sets(self): + """ + (list): The sample sets assigned to this process (or None). 
+ """ + if self._sample_sets is None: + return None + else: + ss = list() + for s in range(self.dist.samp_sets[self.dist.comm_row_rank][1]): + off = self.dist.samp_sets[self.dist.comm_row_rank][0] + ss.append(self._sample_sets[off + d]) + return ss + + # Mapping methods + + def __getitem__(self, key): + return self._internal[key] + + def __delitem__(self, key): + del self._internal[key] + + def __setitem__(self, key, value): + self._internal[key] = value + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def __del__(self): + if hasattr(self, "d"): + self.detdata.clear() + if hasattr(self, "shared"): + self.shared.clear() + + def __repr__(self): + val = " 0: + if np.max(self._local_submaps) > self._n_submap - 1: + raise RuntimeError("local submap indices out of range") + self._n_local = len(self._local_submaps) + self._glob2loc = AlignedI64.zeros(self._n_submap) + self._glob2loc[:] = -1 + for ilocal_submap, iglobal_submap in enumerate(self._local_submaps): + self._glob2loc[iglobal_submap] = ilocal_submap + + self._submap_owners = None + self._owned_submaps = None + self._alltoallv_info = None + + def clear(self): + """Delete the underlying memory. + + This will forcibly delete the C-allocated memory and invalidate all python + references to this object. DO NOT CALL THIS unless you are sure all references + are no longer being used and you are about to delete the object. + + """ + if hasattr(self, "_glob2loc"): + if self._glob2loc is not None: + self._glob2loc.clear() + del self._glob2loc + + def __del__(self): + self.clear() + + @property + def comm(self): + """(mpi4py.MPI.Comm): The MPI communicator used (or None) + """ + return self._comm + + @property + def n_pix(self): + """(int): The global number of pixels. + """ + return self._n_pix + + @property + def n_pix_submap(self): + """(int): The number of pixels in each submap. + """ + return self._n_pix_submap + + @property + def n_submap(self): + """(int): The total number of submaps. + """ + return self._n_submap + + @property + def n_local_submap(self): + """(int): The number of submaps stored on this process. + """ + return self._n_local + + @property + def local_submaps(self): + """(array): The list of local submaps or None if process has no data. + """ + return self._local_submaps + + @property + def global_submap_to_local(self): + """(array): The mapping from global submap to local. + """ + return self._glob2loc + + @function_timer + def global_pixel_to_submap(self, gl): + """Convert global pixel indices into the local submap and pixel. + + Args: + gl (array): The global pixel numbers. + + Returns: + (tuple): A tuple of arrays containing the local submap index (int) and the + pixel index local to that submap (int). + + """ + if len(gl) == 0: + return (np.zeros_like(gl), np.zeros_like(gl)) + if np.max(gl) >= self._n_pix: + log = Logger.get() + msg = "Global pixel indices exceed the maximum for the pixelization" + log.error(msg) + raise RuntimeError(msg) + return libtoast_global_to_local(gl, self._n_pix_submap, self._glob2loc) + + # global_sm = np.floor_divide(gl, self._n_pix_submap, dtype=np.int64) + # submap_pixel = np.mod(gl, self._n_pix_submap, dtype=np.int64) + # local_sm = np.array([self._glob2loc[x] for x in global_sm], dtype=np.int64) + # return (local_sm, submap_pixel) + + @function_timer + def global_pixel_to_local(self, gl): + """Convert global pixel indices into local pixel indices. + + Args: + gl (array): The global pixel numbers. 
+ + Returns: + (array): The local raw (flat packed) buffer index for each pixel. + + """ + if len(gl) == 0: + return np.zeros_like(gl) + if np.max(gl) >= self._n_pix: + log = Logger.get() + msg = "Global pixel indices exceed the maximum for the pixelization" + log.error(msg) + raise RuntimeError(msg) + local_sm, pixels = libtoast_global_to_local( + gl, self._n_pix_submap, self._glob2loc + ) + local_sm[:] *= self._n_pix_submap + pixels[:] += local_sm + return pixels + + def __repr__(self): + val = "".format( + self._n_pix, self._n_submap, self._n_pix_submap + ) + return val + + @property + def submap_owners(self): + """The owning process for every hit submap. + + This information is used in several other operations, including serializing + PixelData objects to a single process and also communication needed for + reducing data globally. + """ + if self._submap_owners is not None: + # Already computed + return self._submap_owners + + self._submap_owners = np.empty(self._n_submap, dtype=np.int32) + self._submap_owners[:] = -1 + + if self._comm is None: + # Trivial case + if self._local_submaps is not None and len(self._local_submaps) > 0: + self._submap_owners[self._local_submaps] = 0 + else: + # Need to compute it. + local_hit_submaps = np.zeros(self._n_submap, dtype=np.uint8) + local_hit_submaps[self._local_submaps] = 1 + # print( + # "rank {} local_submaps = {}".format( + # self._comm.rank, self._local_submaps[:] + # ) + # ) + # print( + # "rank {} local_hit_submaps = {}".format( + # self._comm.rank, local_hit_submaps[:] + # ) + # ) + + hit_submaps = None + if self._comm.rank == 0: + hit_submaps = np.zeros(self._n_submap, dtype=np.uint8) + + self._comm.Reduce(local_hit_submaps, hit_submaps, op=MPI.LOR, root=0) + del local_hit_submaps + + if self._comm.rank == 0: + # print( + # "rank {} hit_submaps = {}".format(self._comm.rank, hit_submaps[:]) + # ) + total_hit_submaps = np.sum(hit_submaps.astype(np.int32)) + tdist = distribute_uniform(total_hit_submaps, self._comm.size) + + # The target number of submaps per process + target = [x[1] for x in tdist] + + # Assign the submaps in rank order. This ensures better load + # distribution when serializing some operations and also reduces needed + # memory copies when using Alltoallv. + proc_offset = 0 + proc = 0 + for sm in range(self._n_submap): + if hit_submaps[sm] > 0: + self._submap_owners[sm] = proc + proc_offset += 1 + if proc_offset >= target[proc]: + proc += 1 + proc_offset = 0 + del hit_submaps + + self._comm.Bcast(self._submap_owners, root=0) + # if self._comm.rank == 0: + # print("submap owners = {}".format(self._submap_owners[:])) + return self._submap_owners + + @property + def owned_submaps(self): + """The submaps owned by this process. + """ + if self._owned_submaps is not None: + # Already computed + return self._owned_submaps + owners = self.submap_owners + self._owned_submaps = np.array( + [x for x, y in enumerate(owners) if y == self._comm.rank], dtype=np.int32 + ) + # print("rank {} owns submaps {}".format(self._comm.rank, self._owned_submaps[:])) + return self._owned_submaps + + @property + def alltoallv_info(self): + """Return the offset information for Alltoallv communication. + + This returns a tuple containing: + - The send displacements for the Alltoallv submap gather + - The send counts for the Alltoallv submap gather + - The receive displacements for the Alltoallv submap gather + - The receive counts for the Alltoallv submap gather + - The locations in the receive buffer of each submap. 
+ + """ + if self._comm is None: + return (None, None, None, None, None) + if self._alltoallv_info is not None: + # Already computed + return self._alltoallv_info + + owners = self.submap_owners + our_submaps = self.owned_submaps + + # Compute the other "contributing" processes that have submaps which we own. + # Also track the receive buffer offsets for each owned submap. + send = [list() for x in range(self._comm.size)] + for sm in self._local_submaps: + # Tell the owner of this submap that we are a contributor + send[owners[sm]].append(sm) + recv = self._comm.alltoall(send) + + recv_counts = np.zeros(self._comm.size, dtype=np.int32) + recv_displ = np.zeros(self._comm.size, dtype=np.int32) + recv_locations = dict() + + offset = 0 + for proc, sms in enumerate(recv): + recv_displ[proc] = offset + for sm in sms: + if sm not in recv_locations: + recv_locations[sm] = list() + recv_locations[sm].append(offset) + recv_counts[proc] += 1 + offset += 1 + + for sm in list(recv_locations.keys()): + recv_locations[sm] = np.array(recv_locations[sm], dtype=np.int32) + + # print("rank {} recv_displ = {}".format(self._comm.rank, recv_displ), flush=True) + # print( + # "rank {} recv_counts = {}".format(self._comm.rank, recv_counts), flush=True + # ) + # print( + # "rank {} recv_locations = {}".format(self._comm.rank, recv_locations), + # flush=True, + # ) + + # Compute the Alltoallv send offsets in terms of submaps + send_counts = np.zeros(self._comm.size, dtype=np.int32) + send_displ = np.zeros(self._comm.size, dtype=np.int32) + offset = 0 + last_offset = 0 + last_own = -1 + for sm in self._local_submaps: + if last_own != owners[sm]: + # Moving on to next owning process... + if last_own >= 0: + send_displ[last_own] = last_offset + last_offset = offset + send_counts[owners[sm]] += 1 + offset += 1 + last_own = owners[sm] + if last_own >= 0: + # Finish up last process + send_displ[last_own] = last_offset + + # print("rank {} send_displ = {}".format(self._comm.rank, send_displ), flush=True) + # print( + # "rank {} send_counts = {}".format(self._comm.rank, send_counts), flush=True + # ) + + self._alltoallv_info = ( + send_counts, + send_displ, + recv_counts, + recv_displ, + recv_locations, + ) + + return self._alltoallv_info + + +class PixelData(object): + """Distributed map-domain data. + + The distribution information is stored in a PixelDistribution instance passed to + the constructor. Each process has local data stored in one or more "submaps". + + Although multiple processes may have the same submap of data stored locally, only + one process is considered the "owner". This ownership is used when serializing the + data and when doing reductions in certain cases. Ownership can be set to either + the lowest rank process which has the submap or to a balanced distribution. + + Args: + dist (PixelDistribution): The distribution of submaps. + dtype (numpy.dtype): A numpy-compatible dtype for each element of the data. + The only supported types are 1, 2, 4, and 8 byte signed and unsigned + integers, 4 and 8 byte floating point numbers, and 4 and 8 byte complex + numbers. + n_value (int): The number of values per pixel. 
+ + """ + + def __init__(self, dist, dtype, n_value=1): + log = Logger.get() + + self._dist = dist + self._n_value = n_value + + # construct a new dtype in case the parameter given is shortcut string + ttype = np.dtype(dtype) + + self.storage_class = None + if ttype.char == "b": + self.storage_class = AlignedI8 + elif ttype.char == "B": + self.storage_class = AlignedU8 + elif ttype.char == "h": + self.storage_class = AlignedI16 + elif ttype.char == "H": + self.storage_class = AlignedU16 + elif ttype.char == "i": + self.storage_class = AlignedI32 + elif ttype.char == "I": + self.storage_class = AlignedU32 + elif (ttype.char == "q") or (ttype.char == "l"): + self.storage_class = AlignedI64 + elif (ttype.char == "Q") or (ttype.char == "L"): + self.storage_class = AlignedU64 + elif ttype.char == "f": + self.storage_class = AlignedF32 + elif ttype.char == "d": + self.storage_class = AlignedF64 + elif ttype.char == "F": + raise NotImplementedError("No support yet for complex numbers") + elif ttype.char == "D": + raise NotImplementedError("No support yet for complex numbers") + else: + msg = "Unsupported data typecode '{}'".format(ttype.char) + log.error(msg) + raise ValueError(msg) + self._dtype = ttype + + self.mpitype = None + self.mpibytesize = None + if self._dist.comm is not None: + self.mpibytesize, self.mpitype = mpi_data_type(self._dist.comm, self._dtype) + + self._shape = ( + self._dist.n_local_submap, + self._dist.n_pix_submap, + self._n_value, + ) + self._flatshape = ( + self._dist.n_local_submap * self._dist.n_pix_submap * self._n_value + ) + self._n_submap_value = self._dist.n_pix_submap * self._n_value + + self.raw = self.storage_class.zeros(self._flatshape) + self.data = self.raw.array().reshape(self._shape) + + self._send_counts = None + self._send_displ = None + self._recv_counts = None + self._recv_displ = None + self._recv_locations = None + self._receive = None + self._receive_raw = None + self._reduce_buf = None + self._reduce_buf_raw = None + + def clear(self): + """Delete the underlying memory. + + This will forcibly delete the C-allocated memory and invalidate all python + references to this object. DO NOT CALL THIS unless you are sure all references + are no longer being used and you are about to delete the object. + + """ + if hasattr(self, "data"): + del self.data + if hasattr(self, "raw"): + self.raw.clear() + del self.raw + if hasattr(self, "_receive"): + del self._receive + if self._receive_raw is not None: + self._receive_raw.clear() + del self._receive_raw + if hasattr(self, "_reduce_buf"): + del self._reduce_buf + if self._reduce_buf_raw is not None: + self._reduce_buf_raw.clear() + del self._reduce_buf_raw + + def __del__(self): + self.clear() + + @property + def distribution(self): + """(PixelDistribution): The distribution information. + """ + return self._dist + + @property + def dtype(self): + """(numpy.dtype): The data type of the values. + """ + return self._dtype + + @property + def n_value(self): + """(int): The number of non-zero values per pixel. 
+ """ + return self._n_value + + def __getitem__(self, key): + return np.array(self.data[key], dtype=self._dtype, copy=False) + + def __delitem__(self, key): + raise NotImplementedError("Cannot delete individual memory elements") + return + + def __setitem__(self, key, value): + self.data[key] = value + + def __iter__(self): + return iter(self.data) + + def __len__(self): + return len(self.data) + + def __repr__(self): + val = "".format( + self._n_value, self._dtype, self._dist + ) + return val + + def comm_nsubmap(self, bytes): + """Given a buffer size, compute the number of submaps to communicate. + + Args: + bytes (int): The number of bytes. + + Returns: + (int): The number of submaps in each buffer. + + """ + dbytes = self._dtype.itemsize + nsub = int(bytes / (dbytes * self._dist.n_pix_submap * self._n_value)) + if nsub == 0: + nsub = 1 + allsub = int(self._dist.n_pix / self._dist.n_pix_submap) + if nsub > allsub: + nsub = allsub + return nsub + + @function_timer + def sync_allreduce(self, comm_bytes=10000000): + """Perform a buffered allreduce of the data. + + Args: + comm_bytes (int): The approximate message size to use. + + Returns: + None. + + """ + if self._dist.comm is None: + return + + comm_submap = self.comm_nsubmap(comm_bytes) + + dist = self._dist + nsub = dist.n_submap + + sendbuf = np.zeros( + comm_submap * dist.n_pix_submap * self._n_value, dtype=self._dtype + ) + sendview = sendbuf.reshape((comm_submap, dist.n_pix_submap, self._n_value)) + + recvbuf = np.zeros( + comm_submap * dist.n_pix_submap * self._n_value, dtype=self._dtype + ) + recvview = recvbuf.reshape((comm_submap, dist.n_pix_submap, self._n_value)) + + owners = dist.submap_owners + + submap_off = 0 + ncomm = comm_submap + + gt = GlobalTimers.get() + + while submap_off < nsub: + if submap_off + ncomm > nsub: + ncomm = nsub - submap_off + if np.sum(owners[submap_off : submap_off + ncomm]) != -ncomm: + # At least one submap has some hits. Do the allreduce. + # Otherwise we would skip this buffer to avoid reducing a + # bunch of zeros. + for c in range(ncomm): + glob = submap_off + c + if glob in dist.local_submaps: + # copy our data in. + loc = dist.global_submap_to_local[glob] + sendview[c, :, :] = self.data[loc, :, :] + + gt.start("REAL Allreduce") + dist.comm.Allreduce(sendbuf, recvbuf, op=MPI.SUM) + gt.stop("REAL Allreduce") + + for c in range(ncomm): + glob = submap_off + c + if glob in dist.local_submaps: + # copy the reduced data + loc = dist.global_submap_to_local[glob] + self.data[loc, :, :] = recvview[c, :, :] + + sendbuf.fill(0) + recvbuf.fill(0) + + submap_off += ncomm + + return + + @function_timer + def sync_alltoallv(self): + """Perform a reduction using Alltoallv operations. + + On the first call, some initialization is done to compute send and receive + displacements and counts. A persistent receive buffer is allocated. Submap + data is sent to their owners simultaneously using alltoallv. Each process does + a local reduction of their owned submaps before sending the result back with + another alltoallv call. + + Returns: + None. + + """ + if self._dist.comm is None: + return + if self._send_counts is None: + # Get the parameters in terms of submaps. 
+ ( + send_counts, + send_displ, + recv_counts, + recv_displ, + recv_locations, + ) = self._dist.alltoallv_info + + # Pixel values per submap + scale = self._n_submap_value + + # Check that our send and receive buffers do not exceed 32bit indices + # required by MPI + max_int = 2147483647 + if scale * (recv_displ[-1] + recv_counts[-1]) > max_int: + msg = "Alltoallv receive buffer size exceeds max 32bit integer" + raise RuntimeError(msg) + if len(self.raw) > max_int: + msg = "Alltoallv send buffer size exceeds max 32bit integer" + raise RuntimeError(msg) + + # Scale these quantites by the submap size and the number of values per + # pixel. + + self._send_counts = scale * np.array(send_counts, dtype=np.int32) + self._send_displ = scale * np.array(send_displ, dtype=np.int32) + self._recv_counts = scale * np.array(recv_counts, dtype=np.int32) + self._recv_displ = scale * np.array(recv_displ, dtype=np.int32) + self._recv_locations = dict() + for sm, locs in recv_locations.items(): + self._recv_locations[sm] = scale * np.array(locs, dtype=np.int32) + + # Allocate a persistent receive buffer + self._receive_raw = self.storage_class.zeros( + self._recv_displ[-1] + self._recv_counts[-1] + ) + self._receive = self._receive_raw.array() + self._reduce_buf_raw = self.storage_class.zeros(self._n_submap_value) + self._reduce_buf = self._reduce_buf_raw.array() + + gt = GlobalTimers.get() + # Gather owned submaps locally + gt.start("REAL Alltoallv forward") + self._dist.comm.Alltoallv( + [self.raw, self._send_counts, self._send_displ, self.mpitype], + [self._receive, self._recv_counts, self._recv_displ, self.mpitype], + ) + gt.stop("REAL Alltoallv forward") + + # Locally reduce owned submaps + for sm, locs in self._recv_locations.items(): + self._reduce_buf[:] = 0 + for lc in locs: + self._reduce_buf += self._receive[lc : lc + self._n_submap_value] + for lc in locs: + self._receive[lc : lc + self._n_submap_value] = self._reduce_buf + + # Scatter result back + gt.start("REAL Alltoallv reverse") + self._dist.comm.Alltoallv( + [self._receive, self._recv_counts, self._recv_displ, self.mpitype], + [self.raw, self._send_counts, self._send_displ, self.mpitype], + ) + gt.stop("REAL Alltoallv reverse") + + return + + @function_timer + def broadcast_map(self, fdata, comm_bytes=10000000): + """Distribute a map located on a single process. + + The root process takes a map in memory and distributes it. Chunks of submaps + are broadcast to all processes, and each process copies data to its local + submaps. + + Args: + fdata (array): The input data (only significant on process 0). + comm_bytes (int): The approximate message size to use. + + Returns: + None + + """ + rank = 0 + if self._dist.comm is not None: + rank = self._dist.comm.rank + comm_submap = self._dist.comm_nsubmap(comm_bytes) + + # we make the assumption that FITS binary tables are still stored in + # blocks of 2880 bytes just like always... + dbytes = self._dtype(1).itemsize + rowbytes = self._n_value * dbytes + optrows = int(2880 / rowbytes) + + # Get a tuple of all columns in the table. We choose memmap here so + # that we can (hopefully) read through all columns in chunks such that + # we only ever have a couple FITS blocks in memory. 
+ if rank == 0: + if self._n_value == 1: + fdata = (fdata,) + + buf = np.zeros( + comm_submap * self._dist.n_pix_submap * self._n_value, dtype=self._dtype + ) + view = buf.reshape((comm_submap, self._dist.n_pix_submap, self._n_value)) + + in_off = 0 + out_off = 0 + submap_off = 0 + + rows = optrows + while in_off < self._dist.n_pix: + if in_off + rows > self._dist.n_pix: + rows = self._dist.n_pix - in_off + # is this the last block for this communication? + islast = False + copyrows = rows + if out_off + rows > (comm_submap * self._dist.npix_submap): + copyrows = (comm_submap * self._dist.npix_submap) - out_off + islast = True + + if rank == 0: + for col in range(self._n_value): + coloff = (out_off * self._n_value) + col + buf[ + coloff : coloff + (copyrows * self._n_value) : self._n_value + ] = fdata[col][in_off : in_off + copyrows] + + out_off += copyrows + in_off += copyrows + + if islast: + if self._dist.comm is not None: + self._dist.comm.Bcast(buf, root=0) + # loop over these submaps, and copy any that we are assigned + for sm in range(submap_off, submap_off + comm_submap): + if sm in self._dist.local_submaps: + loc = self._dist.global_submap_to_local[sm] + self.data[loc, :, :] = view[sm - submap_off, :, :] + out_off = 0 + submap_off += comm_submap + buf.fill(0) + islast = False + + # flush the remaining buffer + + if out_off > 0: + if self._dist.comm is not None: + self._dist.comm.Bcast(buf, root=0) + # loop over these submaps, and copy any that we are assigned + for sm in range(submap_off, submap_off + comm_submap): + if sm in self._dist.local_submaps: + loc = self._dist.global_submap_to_local[sm] + self.data[loc, :, :] = view[sm - submap_off, :, :] + return diff --git a/src/toast/pixels_io.py b/src/toast/pixels_io.py new file mode 100644 index 000000000..e27a55446 --- /dev/null +++ b/src/toast/pixels_io.py @@ -0,0 +1,282 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np + +from .timing import function_timer, Timer + +from .mpi import MPI + +import healpy as hp + + +@function_timer +def read_healpix_fits(pix, path, nest=True, comm_bytes=10000000): + """Read and broadcast a HEALPix FITS table. + + The root process opens the FITS file in memmap mode and iterates over + chunks of the map in a way to minimize cache misses in the internal + FITS buffer. Chunks of submaps are broadcast to all processes, and + each process copies data to its local submaps. + + Args: + pix (PixelData): The distributed PixelData object. + path (str): The path to the FITS file. + nest (bool): If True, convert input to NESTED ordering, else use RING. + comm_bytes (int): The approximate message size to use in bytes. + + Returns: + None + + """ + dist = pix.distribution + rank = 0 + if dist.comm is not None: + rank = dist.comm.rank + + comm_submap = pix.comm_nsubmap(comm_bytes) + + # we make the assumption that FITS binary tables are still stored in + # blocks of 2880 bytes just like always... + dbytes = pix.dtype.itemsize + rowbytes = pix.n_value * dbytes + optrows = 2880 // rowbytes + + # get a tuple of all columns in the table. We choose memmap here so + # that we can (hopefully) read through all columns in chunks such that + # we only ever have a couple FITS blocks in memory. 
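Before continuing with `read_healpix_fits`, a worked check of the two chunking rules used here and in `broadcast_map` above: `comm_nsubmap` converts a target message size into a whole number of submaps, and `optrows` keeps reads aligned to the 2880-byte FITS block size. The values below are examples, not defaults from the code:

```python
import numpy as np

# comm_nsubmap: how many whole submaps of 3-value (IQU) float64 data fit in
# a ~10 MB message.
n_pix_submap = 4096
n_value = 3
dbytes = np.dtype(np.float64).itemsize
comm_bytes = 10000000
nsub = max(1, comm_bytes // (dbytes * n_pix_submap * n_value))
print(nsub)  # 101 submaps per communication buffer

# optrows: rows of the FITS binary table per 2880-byte block.
rowbytes = n_value * dbytes
print(2880 // rowbytes)  # 120 rows per block
```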
+ fdata = None + if rank == 0: + # Check that the file is in expected format + errors = "" + h = hp.fitsfunc.pf.open(path, "readonly") + nside = hp.npix2nside(dist.n_pix) + nside_map = h[1].header["nside"] + if nside_map != nside: + errors += "Wrong NSide: {} has {}, expected {}\n" "".format( + path, nside_map, nside + ) + map_nested = False + if "order" in h[1].header and "NEST" in h[1].header["order"].upper(): + map_nested = True + if "ordering" in h[1].header and "NEST" in h[1].header["ordering"].upper(): + map_nested = True + if map_nested != nest: + errors += "Wrong ordering: {} has nest={}, expected nest={}\n" "".format( + path, map_nested, nest + ) + map_nnz = h[1].header["tfields"] + if map_nnz != pix.n_value: + errors += "Wrong number of columns: {} has {}, expected {}\n" "".format( + path, map_nnz, pix.n_value + ) + h.close() + if len(errors) != 0: + raise RuntimeError(errors) + # Now read the map + fdata = hp.read_map( + path, + field=tuple([x for x in range(pix.n_value)]), + dtype=[pix.dtype for x in range(pix.n_value)], + memmap=True, + nest=nest, + verbose=False, + ) + if pix.n_value == 1: + fdata = (fdata,) + + buf = np.zeros(comm_submap * dist.n_pix_submap * pix.n_value, dtype=pix.dtype) + view = buf.reshape(comm_submap, dist.n_pix_submap, pix.n_value) + + in_off = 0 + out_off = 0 + submap_off = 0 + + rows = optrows + while in_off < dist.n_pix: + if in_off + rows > dist.n_pix: + rows = dist.n_pix - in_off + # is this the last block for this communication? + islast = False + copyrows = rows + if out_off + rows > (comm_submap * dist.n_pix_submap): + copyrows = (comm_submap * dist.n_pix_submap) - out_off + islast = True + + if rank == 0: + for col in range(pix.n_value): + coloff = (out_off * pix.n_value) + col + buf[coloff : coloff + (copyrows * pix.n_value) : pix.n_value] = fdata[ + col + ][in_off : in_off + copyrows] + + out_off += copyrows + in_off += copyrows + + if islast: + if dist.comm is not None: + dist.comm.Bcast(buf, root=0) + # loop over these submaps, and copy any that we are assigned + for sm in range(submap_off, submap_off + comm_submap): + if sm in dist.local_submaps: + loc = dist.global_submap_to_local[sm] + pix.data[loc, :, :] = view[sm - submap_off, :, :] + out_off = 0 + submap_off += comm_submap + buf.fill(0) + islast = False + + # flush the remaining buffer + if out_off > 0: + if dist.comm is not None: + dist.comm.Bcast(buf, root=0) + # loop over these submaps, and copy any that we are assigned + for sm in range(submap_off, submap_off + comm_submap): + if sm in dist.local_submaps: + loc = dist.global_submap_to_local[sm] + pix.data[loc, :, :] = view[sm - submap_off, :, :] + return + + +@function_timer +def write_healpix_fits(pix, path, nest=True, comm_bytes=10000000): + """Write pixel data to a HEALPix format FITS table. + + The data across all processes is assumed to be synchronized (the data for a given + submap shared between processes is identical). The submap data is sent to the root + process which writes it out. For parallel writing, see write_hdf5(). + + Args: + pix (PixelData): The distributed pixel object. + path (str): The path to the output FITS file. + nest (bool): If True, data is in NESTED ordering, else data is in RING. + comm_bytes (int): The approximate message size to use. + + Returns: + None + + """ + # The distribution + dist = pix.distribution + + rank = 0 + if dist.comm is not None: + rank = dist.comm.rank + + # We will reduce some number of whole submaps at a time. 
+ # Find the number of submaps that fit into the requested + # communication size. + comm_submap = pix.comm_nsubmap(comm_bytes) + + # Determine which processes should send submap data. We do not use the + # PixelDistribution.submap_owners here, since that is intended for operations + # parallelized over submaps, and the submap owners do not necessarily have the + # owned submaps in local memory. Instead, we do a buffered allreduce. For dumping + # large maps, we should be using HDF5 anyway. + + not_owned = None + allowners = None + if dist.comm is None: + not_owned = 1 + allowners = np.zeros(dist.n_submap, dtype=np.int32) + allowners.fill(not_owned) + for m in dist.local_submaps: + allowners[m] = rank + else: + not_owned = dist.comm.size + owners = np.zeros(dist.n_submap, dtype=np.int32) + owners.fill(not_owned) + for m in dist.local_submaps: + owners[m] = dist.comm.rank + allowners = np.zeros_like(owners) + dist.comm.Allreduce(owners, allowners, op=MPI.MIN) + + # This function requires lots of RAM, since it accumulates the full map on one + # process before writing. We also have to "unpack" the pixel data since the healpy + # write function requires a list of maps. + + fdata = None + fview = None + if rank == 0: + fdata = list() + fview = list() + for col in range(pix.n_value): + fdata.append(pix.storage_class.zeros(dist.n_pix)) + fview.append(fdata[-1].array()) + + if dist.comm is None: + # Just copy our local submaps into the FITS buffers + for lc, sm in enumerate(dist.local_submaps): + global_offset = sm * dist.n_pix_submap + n_copy = dist.n_pix_submap + if global_offset + n_copy > dist.n_pix: + n_copy = dist.n_pix - global_offset + for col in range(pix.n_value): + fdata[col][global_offset : global_offset + n_copy] = pix.data[ + lc, 0:n_copy, col + ] + else: + sendbuf = np.zeros( + comm_submap * dist.n_pix_submap * pix.n_value, dtype=pix.dtype + ) + sendview = sendbuf.reshape(comm_submap, dist.n_pix_submap, pix.n_value) + + recvbuf = None + recvview = None + if rank == 0: + recvbuf = np.zeros( + comm_submap * dist.n_pix_submap * pix.n_value, dtype=pix.dtype + ) + recvview = recvbuf.reshape(comm_submap, dist.n_pix_submap, pix.n_value) + + submap_off = 0 + ncomm = comm_submap + while submap_off < dist.n_submap: + if submap_off + ncomm > dist.n_submap: + ncomm = dist.n_submap - submap_off + if np.any(allowners[submap_off : submap_off + ncomm] != not_owned): + # at least one submap has some hits. reduce. 
+ for c in range(ncomm): + if allowners[submap_off + c] == dist.comm.rank: + sendview[c, :, :] = pix.data[ + dist.global_submap_to_local[submap_off + c], :, : + ] + dist.comm.Reduce(sendbuf, recvbuf, op=MPI.SUM, root=0) + if rank == 0: + # copy into FITS buffers + for c in range(ncomm): + global_offset = (submap_off + c) * dist.n_pix_submap + n_copy = dist.n_pix_submap + if global_offset + n_copy > dist.n_pix: + n_copy = dist.n_pix - global_offset + for col in range(pix.n_value): + fdata[col][ + global_offset : global_offset + n_copy + ] = recvview[c, 0:n_copy, col] + sendbuf.fill(0) + if rank == 0: + recvbuf.fill(0) + submap_off += ncomm + + if rank == 0: + if os.path.isfile(path): + os.remove(path) + dtypes = [np.dtype(pix.dtype) for x in range(pix.n_value)] + hp.write_map(path, fview, dtype=dtypes, fits_IDL=False, nest=nest) + del fview + for col in range(pix.n_value): + fdata[col].clear() + del fdata + + return + + +def read_hdf5(pix, path): + pass + + +def write_hdf5(pix, path): + pass diff --git a/src/toast/pshmem/CMakeLists.txt b/src/toast/pshmem/CMakeLists.txt deleted file mode 100644 index 5cd4d22a6..000000000 --- a/src/toast/pshmem/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ - -# Install the python files - -install(FILES - __init__.py - locking.py - shmem.py - utils.py - test.py - DESTINATION ${PYTHON_SITE}/toast/pshmem -) diff --git a/src/toast/pshmem/README.md b/src/toast/pshmem/README.md deleted file mode 100644 index d3418db4a..000000000 --- a/src/toast/pshmem/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Bundled pshmem - -This directory contains the v0.1.0 release of the pshmem package: - -https://github.com/tskisner/pshmem - -Once that package is widely available on PyPI and conda-forge, we can stop bundling -it here and just import it. diff --git a/src/toast/pshmem/__init__.py b/src/toast/pshmem/__init__.py deleted file mode 100644 index 347005339..000000000 --- a/src/toast/pshmem/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -## -# Copyright (c) 2017-2020, all rights reserved. Use of this source code -# is governed by a BSD license that can be found in the top-level -# LICENSE file. -## -"""Parallel shared memory tools. - -This package contains tools for using synchronized shared memory across nodes -and implementing communicator-wide MUTEX locks. - -""" - -__version__ = "0.1.0" - -# Namespace imports - -from .shmem import MPIShared -from .locking import MPILock - -# from .test import run as test diff --git a/src/toast/pshmem/locking.py b/src/toast/pshmem/locking.py deleted file mode 100644 index 179538f05..000000000 --- a/src/toast/pshmem/locking.py +++ /dev/null @@ -1,352 +0,0 @@ -## -# Copyright (c) 2017-2020, all rights reserved. Use of this source code -# is governed by a BSD license that can be found in the top-level -# LICENSE file. -## - -import sys -import itertools - -import numpy as np - -from .utils import mpi_check_abort, mpi_data_type - - -class MPILock(object): - """ - Implement a MUTEX lock with MPI one-sided operations. - - The lock is created across the given communicator. This uses an array - of bytes (one per process) to track which processes have requested the - lock. When a given process releases the lock, it passes it to the next - process in order of request. - - Args: - comm (MPI.Comm): the full communicator to use. - root (int): the rank which stores the list of waiting processes. - debug (bool): if True, print debugging info about the lock status. - """ - - # This creates a new integer for each time the class is instantiated. 
- newid = next(itertools.count()) - - def __init__(self, comm, root=0, debug=False): - self._comm = comm - self._root = root - self._debug = debug - - # A unique tag for each instance of the class - self._tag = MPILock.newid - - self._rank = 0 - self._procs = 1 - if self._comm is not None: - self._rank = self._comm.rank - self._procs = self._comm.size - - if self._rank == self._root: - self._nlocal = self._procs - else: - self._nlocal = 0 - - self._dtype = np.dtype(np.int32) - - # Local memory buffer - self._waiting = np.zeros((self._procs,), dtype=self._dtype) - self._send_token = self._rank * np.ones((1,), dtype=self._dtype) - self._recv_token = np.zeros((1,), dtype=self._dtype) - - # Data type sizes - self._dsize, self._mpitype = mpi_data_type(self._comm, self._dtype) - - # Allocate the shared memory buffer. - - self._win = None - self._have_lock = False - - nbytes = self._nlocal * self._dsize - - if self._comm is not None: - from mpi4py import MPI - - # Root allocates the buffer - status = 0 - try: - self._win = MPI.Win.Allocate( - nbytes, disp_unit=self._dsize, info=MPI.INFO_NULL, comm=self._comm - ) - except: - status = 1 - mpi_check_abort(self._comm, self._root, status, "memory allocation") - - if self._rank == self._root: - # Root sets to zero - self._win.Lock(self._root, MPI.LOCK_EXCLUSIVE) - self._win.Put( - [self._waiting, self._procs, self._mpitype], - self._root, - target=[0, self._procs, self._mpitype], - ) - self._win.Flush(self._root) - self._win.Unlock(self._root) - - return - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.close() - return False - - def close(self): - # Explicitly free the shared window - if hasattr(self, "_win") and (self._win is not None): - self._win.Free() - self._win = None - return - - @property - def comm(self): - """ - The communicator. - """ - return self._comm - - def lock(self): - """ - Request the lock and wait. - - This call blocks until lock is available. Then it acquires - the lock and returns. - """ - # Do we already have the lock? - if self._have_lock: - return - - if self._comm is not None: - from mpi4py import MPI - - # lock the window - if self._debug: - print( - "lock: rank {}, instance {} locking shared window".format( - self._rank, self._tag - ), - flush=True, - ) - self._win.Lock(self._root, MPI.LOCK_EXCLUSIVE) - - # Get a local copy of the buffer - - self._win.Get( - [self._waiting, self._procs, self._mpitype], - self._root, - target=[0, self._procs, self._mpitype], - ) - if self._debug: - print( - "lock: rank {}, instance {} list = {}".format( - self._rank, self._tag, self._waiting - ), - flush=True, - ) - - # Find the highest wait number in the list. The location of this is the - # process rank that will be sending us the token. 
- - wait_max = np.max(self._waiting) - my_wait = (wait_max + 1) * np.ones((1,), dtype=self._dtype) - - sender = -1 - if wait_max > 0: - sender = np.argmax(self._waiting) - - # Update the waiting list - - self._win.Put( - [my_wait, 1, self._mpitype], - self._root, - target=[self._rank, 1, self._mpitype], - ) - if self._debug: - print( - "lock: rank {}, instance {} putting wait number {}".format( - self._rank, self._tag, my_wait[0] - ), - flush=True, - ) - - # Flush - - self._win.Flush(self._root) - - # Release the window lock - - if self._debug: - print( - "lock: rank {}, instance {} unlocking shared window".format( - self._rank, self._tag - ), - flush=True, - ) - self._win.Unlock(self._root) - - # If another rank has the token, wait for that - - if sender >= 0: - if self._debug: - print( - "lock: rank {} waiting for the lock from {}".format( - self._rank, sender - ), - flush=True, - ) - self._comm.Recv(self._recv_token, source=sender, tag=self._tag) - - if self._debug: - print("lock: rank {} got the lock".format(self._rank), flush=True) - - # We have the lock now! - self._have_lock = True - return - - def unlock(self): - """ - Unlock and return. - """ - # Do we even have the lock? - if not self._have_lock: - return - - if self._comm is not None: - from mpi4py import MPI - - # lock the window - if self._debug: - print( - "unlock: rank {}, instance {} locking shared window".format( - self._rank, self._tag - ), - flush=True, - ) - self._win.Lock(self._root, MPI.LOCK_EXCLUSIVE) - - # Get a local copy of the buffer - - self._win.Get( - [self._waiting, self._procs, self._mpitype], - self._root, - target=[0, self._procs, self._mpitype], - ) - if self._debug: - print( - "unlock: rank {}, instance {} list = {}".format( - self._rank, self._tag, self._waiting - ), - flush=True, - ) - - # Get our wait number - my_wait_val = self._waiting[self._rank] - - # Verify that no other processes have a number lower than ours. If we - # hold the lock, the wait numbers should only be increasing from here. - # The only time these numbers reset to zero is when no process holds the - # lock. - invalid_indx = np.where( - np.logical_and(self._waiting < my_wait_val, self._waiting > 0) - )[0] - if len(invalid_indx) > 0: - print( - "rank {} has lock (wait number {}) and found ranks with lower wait numbers: {}".format( - self._rank, my_wait_val, self._waiting - ), - flush=True, - ) - if self._comm is not None: - self._comm.Abort(1) - - # Find the next waiting process - next_proc = np.where(self._waiting == my_wait_val + 1)[0] - receiver = -1 - - if len(next_proc) > 1: - # This should never happen! - print( - "rank {} has lock (wait number {}) and found multiple ranks next in line for token: {}".format( - self._rank, my_wait_val, self._waiting - ), - flush=True, - ) - if self._comm is not None: - self._comm.Abort(1) - elif len(next_proc) == 0: - # There seems to be no processes waiting for the lock. This implies - # that there should also be no processes with even higher wait numbers. - # Check this. 
- invalid_indx = np.where(self._waiting > my_wait_val + 1)[0] - if len(invalid_indx) > 0: - print( - "rank {} has lock (wait number {}) and found no rank next in line but other ranks with wait numbers: {}".format( - self._rank, my_wait_val, self._waiting - ), - flush=True, - ) - if self._comm is not None: - self._comm.Abort(1) - else: - # There is one process waiting - receiver = next_proc[0] - - # Update the waiting list - my_wait = np.zeros((1,), dtype=self._dtype) - - self._win.Put( - [my_wait, 1, self._mpitype], - self._root, - target=[self._rank, 1, self._mpitype], - ) - if self._debug: - print( - "unlock: rank {}, instance {} reset wait to zero".format( - self._rank, self._tag - ), - flush=True, - ) - - # Flush - - self._win.Flush(self._root) - - # Release the window lock - - if self._debug: - print( - "unlock: rank {}, instance {} unlocking shared window".format( - self._rank, self._tag - ), - flush=True, - ) - self._win.Unlock(self._root) - - # Send the token to the next process if one is waiting - - if receiver >= 0: - if self._debug: - print( - "unlock: rank {} sending the lock to {}".format( - self._rank, receiver - ), - flush=True, - ) - self._comm.Send(self._send_token, receiver, tag=self._tag) - - if self._debug: - print("unlock: rank {} sent the lock".format(self._rank), flush=True) - - self._have_lock = False - return diff --git a/src/toast/pshmem/shmem.py b/src/toast/pshmem/shmem.py deleted file mode 100644 index d2e50286d..000000000 --- a/src/toast/pshmem/shmem.py +++ /dev/null @@ -1,395 +0,0 @@ -## -# Copyright (c) 2017-2020, all rights reserved. Use of this source code -# is governed by a BSD license that can be found in the top-level -# LICENSE file. -## - -import sys -import numpy as np - -from .utils import mpi_check_abort, mpi_data_type - - -class MPIShared(object): - """ - Create a shared memory buffer that is replicated across nodes. - - For the given array dimensions and datatype, the original communicator - is split into groups of processes that can share memory (i.e. that are - on the same node). - - The values of the memory buffer can be set by one process at a time. - When the set() method is called the data passed by the specified - process is replicated to all nodes and then copied into the desired - place in the shared memory buffer on each node. This way the shared - buffer on each node is identical. - - All processes across all nodes may do read-only access to their node- - local copy of the buffer, simply by using the standard array indexing - notation ("[]") on the object itself. - - Args: - shape (tuple): the dimensions of the array. - dtype (np.dtype): the data type of the array. - comm (MPI.Comm): the full communicator to use. This may span - multiple nodes, and each node will have a copy. - """ - - def __init__(self, shape, dtype, comm): - # Copy the datatype in order to support arguments that are aliases, - # like "numpy.float64". - self._dtype = np.dtype(dtype) - - # Verify that our shape contains only integral values - self._n = 1 - for d in shape: - if not isinstance(d, (int, np.integer)): - raise ValueError("input shape contains non-integer values") - self._n *= d - - self._shape = tuple(shape) - - # Global communicator. - - self._comm = comm - self._rank = 0 - self._procs = 1 - if self._comm is not None: - self._rank = self._comm.rank - self._procs = self._comm.size - - # Split our communicator into groups on the same node. 
Also - # create an inter-node communicator between corresponding - # processes on all nodes (for use in "setting" slices of the - # buffer. - - self._nodecomm = None - self._rankcomm = None - self._noderank = 0 - self._nodeprocs = 1 - self._nodes = 1 - self._mynode = 0 - if self._comm is not None: - from mpi4py import MPI - - self._nodecomm = self._comm.Split_type(MPI.COMM_TYPE_SHARED, 0) - self._noderank = self._nodecomm.rank - self._nodeprocs = self._nodecomm.size - self._nodes = self._procs // self._nodeprocs - if self._nodes * self._nodeprocs < self._procs: - self._nodes += 1 - self._mynode = self._rank // self._nodeprocs - self._rankcomm = self._comm.Split(self._noderank, self._mynode) - - # Consider a corner case of the previous calculation. Imagine that - # the number of processes is not evenly divisible by the number of - # processes per node. In that case, when we later use the set() - # method, the rank-wise communicator may not have a member on the - # final node. Here we compute the "highest" rank within a node which - # is present on all nodes. That sets the possible allowed processes - # which may call the set() method. - - dist = self._disthelper(self._procs, self._nodes) - self._maxsetrank = dist[-1][1] - 1 - - # Divide up the total memory size among the processes on each - # node. For reasonable NUMA settings, this should spread the - # allocated memory to locations across the node. - - # FIXME: the above statement works fine for allocating the window, - # and it is also great in C/C++ where the pointer to the start of - # the buffer is all you need. In mpi4py, querying the rank-0 buffer - # returns a buffer-interface-compatible object, not just a pointer. - # And this "buffer object" has the size of just the rank-0 allocated - # data. SO, for now, disable this and have rank 0 allocate the whole - # thing. We should change this back once we figure out how to - # take the raw pointer from rank zero and present it to numpy as the - # the full buffer. - - # dist = self._disthelper(self._n, self._nodeprocs) - # self._localoffset, self._nlocal = dist[self._noderank] - if self._noderank == 0: - self._localoffset = 0 - self._nlocal = self._n - else: - self._localoffset = 0 - self._nlocal = 0 - - # Compute the data sizes - self._dsize, self._mpitype = mpi_data_type(self._comm, self._dtype) - - # Allocate the shared memory buffer and wrap it in a - # numpy array. If the communicator is None, just make - # a normal numpy array. - - self._win = None - self._buffer = None - self._dbuf = None - self._flat = None - self.data = None - - # Number of bytes in our buffer - nbytes = self._nlocal * self._dsize - - self._win = None - self._buffer = None - if self._comm is None: - self._buffer = np.ndarray(shape=(nbytes,), dtype=np.dtype("B"), order="C") - else: - import mpi4py.MPI as MPI - - # Every process allocates a piece of the buffer. The per- - # process pieces are guaranteed to be contiguous. - status = 0 - try: - self._win = MPI.Win.Allocate_shared( - nbytes, - disp_unit=self._dsize, - info=MPI.INFO_NULL, - comm=self._nodecomm, - ) - except: - status = 1 - mpi_check_abort(self._nodecomm, 0, status, "shared memory allocation") - - # Every process looks up the memory address of rank zero's piece, - # which is the start of the contiguous shared buffer. - status = 0 - try: - self._buffer, dsize = self._win.Shared_query(0) - except: - status = 1 - mpi_check_abort(self._nodecomm, 0, status, "shared memory query") - - # Create a numpy array which acts as a "view" of the buffer. 
- self._dbuf = np.array(self._buffer, dtype=np.dtype("B"), copy=False) - self._flat = self._dbuf.view(self._dtype) - self.data = self._flat.reshape(self._shape) - - # Initialize to zero. Any of the processes could do this to the - # whole buffer, but it is safe and easy for each process to just - # initialize its local piece. - - # FIXME: change this back once every process is allocating a - # piece of the buffer. - # self._flat[self._localoffset:self._localoffset + self._nlocal] = 0 - if self._noderank == 0: - self._flat[:] = 0 - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.close() - return False - - def close(self): - # Explicitly free the shared memory window. - if hasattr(self, "_win") and (self._win is not None): - self._win.Free() - self._win = None - return - - @property - def shape(self): - """ - The tuple of dimensions of the shared array. - """ - return self._shape - - @property - def dtype(self): - """ - The numpy datatype of the shared array. - """ - return self._dtype - - @property - def comm(self): - """ - The full communicator. - """ - return self._comm - - @property - def nodecomm(self): - """ - The node-local communicator. - """ - return self._nodecomm - - def _disthelper(self, n, groups): - dist = [] - for i in range(groups): - myn = n // groups - first = 0 - leftover = n % groups - if i < leftover: - myn += 1 - first = i * myn - else: - first = ((myn + 1) * leftover) + (myn * (i - leftover)) - dist.append((first, myn)) - return dist - - def set(self, data, offset, fromrank=0): - """ - Set the values of a slice of the shared array. - - This call is collective across the full communicator, but only the - data input from process "fromrank" is meaningful. The offset - specifies the starting element along each dimension when copying - the data into the shared array. Regardless of which node the - "fromrank" process is on, the data will be replicated to the - shared memory buffer on all nodes. - - Args: - data (array): a numpy array with the same number of dimensions - as the full array. - offset (tuple): the starting offset along each dimension, which - determines where the input data should be inserted into the - shared array. - fromrank (int): the process rank of the full communicator which - is passing in the data. - - Returns: - Nothing - """ - # Explicit barrier here, to ensure that we don't try to update - # data while other processes are reading. - if self._comm is not None: - self._comm.barrier() - - # First check that the dimensions of the data and the offset tuple - # match the shape of the data. 
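-        # match the dimensions of the shared buffer.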
- - if self._rank == fromrank: - if len(data.shape) != len(self._shape): - if len(data.shape) != len(self._shape): - msg = ( - "input data dimensions {} incompatible with " - "buffer ({})".format(len(data.shape), len(self._shape)) - ) - if self._comm is not None: - print(msg, flush=True) - self._comm.Abort() - else: - raise RuntimeError(msg) - if len(offset) != len(self._shape): - msg = ( - "input offset dimensions {} incompatible with " - "buffer ({})".format(len(offset), len(self._shape)) - ) - if self._comm is not None: - print(msg, flush=True) - self._comm.Abort() - else: - raise RuntimeError(msg) - if data.dtype != self._dtype: - msg = ( - "input data type ({}, {}) incompatible with " - "buffer ({}, {})".format( - data.dtype.str, data.dtype.num, self._dtype.str, self._dtype.num - ) - ) - if self._comm is not None: - print(msg, flush=True) - self._comm.Abort() - else: - raise RuntimeError(msg) - - # The input data is coming from exactly one process on one node. - # First, we broadcast the data from this process to the same node-rank - # process on each of the nodes. - - if self._comm is not None: - import mpi4py.MPI as MPI - - target_noderank = self._comm.bcast(self._noderank, root=fromrank) - fromnode = self._comm.bcast(self._mynode, root=fromrank) - - # Verify that the node rank with the data actually has a member on - # every node (see notes in the constructor). - if target_noderank > self._maxsetrank: - if self._rank == 0: - print( - "set() called with data from a node rank which does" - " not exist on all nodes", - flush=True, - ) - self._comm.Abort() - - if self._noderank == target_noderank: - # We are the lucky process on this node that gets to write - # the data into shared memory! - - # Broadcast the offsets of the input slice - copyoffset = None - if self._mynode == fromnode: - copyoffset = offset - copyoffset = self._rankcomm.bcast(copyoffset, root=fromnode) - - # Pre-allocate buffer, so that we can use the low-level - # (and faster) Bcast method. - datashape = None - if self._mynode == fromnode: - datashape = data.shape - datashape = self._rankcomm.bcast(datashape, root=fromnode) - - nodedata = None - if self._mynode == fromnode: - nodedata = np.copy(data) - else: - nodedata = np.zeros(datashape, dtype=self._dtype) - - # Broadcast the data buffer - self._rankcomm.Bcast(nodedata, root=fromnode) - - # Now one process on every node has a copy of the data, and - # can copy it into the shared memory buffer. - - dslice = [] - ndims = len(nodedata.shape) - for d in range(ndims): - dslice.append( - slice(copyoffset[d], copyoffset[d] + nodedata.shape[d], 1) - ) - slc = tuple(dslice) - - # Get a write-lock on the shared memory - self._win.Lock(self._noderank, MPI.LOCK_EXCLUSIVE) - - # Copy data slice - self.data[slc] = nodedata - - # Release the write-lock - self._win.Unlock(self._noderank) - - else: - # We are just copying to a numpy array... - dslice = [] - ndims = len(data.shape) - for d in range(ndims): - dslice.append(slice(offset[d], offset[d] + data.shape[d], 1)) - slc = tuple(dslice) - - self.data[slc] = data - - # Explicit barrier here, to ensure that other processes do not try - # reading data before the writing processes have finished. - if self._comm is not None: - self._comm.barrier() - - return - - def __getitem__(self, key): - return self.data[key] - - def __setitem__(self, key, value): - raise NotImplementedError( - "Setting individual array elements not" - " supported. Use the set() method instead." 
- ) diff --git a/src/toast/pshmem/test.py b/src/toast/pshmem/test.py deleted file mode 100644 index bf9e45cd5..000000000 --- a/src/toast/pshmem/test.py +++ /dev/null @@ -1,214 +0,0 @@ -## -# Copyright (c) 2017-2020, all rights reserved. Use of this source code -# is governed by a BSD license that can be found in the top-level -# LICENSE file. -## - -import os -import sys -import time - -import unittest - -import numpy as np -import numpy.testing as nt - -from .shmem import MPIShared -from .locking import MPILock - -MPI = None -use_mpi = True - -if "PSHMEM_MPI_DISABLE" in os.environ: - use_mpi = False - -if use_mpi and (MPI is None): - try: - import mpi4py.MPI as MPI - except ImportError: - raise ImportError("Cannot import mpi4py, will only test serial functionality.") - - -class ShmemTest(unittest.TestCase): - def setUp(self): - self.comm = None - if MPI is not None: - self.comm = MPI.COMM_WORLD - self.rank = 0 - self.procs = 1 - if self.comm is not None: - self.rank = self.comm.rank - self.procs = self.comm.size - - def tearDown(self): - pass - - def test_allocate(self): - # Dimensions of our shared memory array - datadims = (2, 5, 10) - - # Dimensions of the incremental slab that we will - # copy during each set() call. - updatedims = (1, 1, 5) - - # How many updates are there to cover the whole - # data array? - nupdate = 1 - for d in range(len(datadims)): - nupdate *= datadims[d] // updatedims[d] - - for datatype in [np.int32, np.int64, np.float32, np.float64]: - - # For testing the "set()" method, every process is going to - # create a full-sized data buffer and fill it with its process rank. - local = np.ones(datadims, dtype=datatype) - local *= self.rank - - # A context manager is the pythonic way to make sure that the - # object has no dangling reference counts after leaving the context, - # and will ensure that the shared memory is freed properly. - - with MPIShared(local.shape, local.dtype, self.comm) as shm: - for p in range(self.procs): - # Every process takes turns writing to the buffer. - setdata = None - setoffset = (0, 0, 0) - - # Write to the whole data volume, but in small blocks - for upd in range(nupdate): - if p == self.rank: - # My turn! Write my process rank to the buffer slab. - setdata = local[ - setoffset[0] : setoffset[0] + updatedims[0], - setoffset[1] : setoffset[1] + updatedims[1], - setoffset[2] : setoffset[2] + updatedims[2], - ] - try: - # All processes call set(), but only data on rank p matters. - shm.set(setdata, setoffset, fromrank=p) - except: - print( - "proc {} threw exception during set()".format(rank), - flush=True, - ) - if self.comm is not None: - self.comm.Abort() - else: - sys.exit(1) - - # Increment the write offset within the array - - x = setoffset[0] - y = setoffset[1] - z = setoffset[2] - - z += updatedims[2] - if z >= datadims[2]: - z = 0 - y += updatedims[1] - if y >= datadims[1]: - y = 0 - x += updatedims[0] - - setoffset = (x, y, z) - - # Every process is now going to read a copy from the shared memory - # and make sure that they see the data written by the current process. - check = np.zeros_like(local) - check[:, :, :] = shm[:, :, :] - - truth = np.ones_like(local) - truth *= p - - # This should be bitwise identical, even for floats - nt.assert_equal(check[:, :, :], truth[:, :, :]) - - # Ensure that we can reference the memory buffer from numpy without - # a memory copy. The intention is that a slice of the shared memory - # buffer should appear as a C-contiguous ndarray whenever we slice - # along the last dimension. 
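-            # along the last dimension.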
- - for p in range(self.procs): - if p == self.rank: - slc = shm[1, 2] - print( - "proc {} slice has dims {}, dtype {}, C = {}".format( - p, slc.shape, slc.dtype.str, slc.flags["C_CONTIGUOUS"] - ), - flush=True, - ) - if self.comm is not None: - self.comm.barrier() - - def test_shape(self): - good_dims = [ - (2, 5, 10), - np.array([10, 2], dtype=np.int32), - np.array([5, 2], dtype=np.int64), - np.array([10, 2], dtype=np.int), - ] - bad_dims = [ - (2, 5.5, 10), - np.array([10, 2], dtype=np.float32), - np.array([5, 2], dtype=np.float64), - np.array([10, 2.5], dtype=np.float32), - ] - - dt = np.float64 - - for dims in good_dims: - try: - shm = MPIShared(dims, dt, self.comm) - if self.rank == 0: - print("successful creation with shape {}".format(dims), flush=True) - del shm - except ValueError: - if self.rank == 0: - print( - "unsuccessful creation with shape {}".format(dims), flush=True - ) - for dims in bad_dims: - try: - shm = MPIShared(dims, dt, self.comm) - if self.rank == 0: - print("unsuccessful rejection of shape {}".format(dims), flush=True) - del shm - except ValueError: - if self.rank == 0: - print("successful rejection of shape {}".format(dims), flush=True) - - -class LockTest(unittest.TestCase): - def setUp(self): - self.comm = None - if MPI is not None: - self.comm = MPI.COMM_WORLD - self.rank = 0 - self.procs = 1 - if self.comm is not None: - self.rank = self.comm.rank - self.procs = self.comm.size - self.sleepsec = 0.2 - - def tearDown(self): - pass - - def test_lock(self): - with MPILock(self.comm, root=0, debug=True) as lock: - for lk in range(5): - msg = "test_lock: process {} got lock {}".format(self.rank, lk) - lock.lock() - print(msg, flush=True) - # time.sleep(self.sleepsec) - lock.unlock() - if self.comm is not None: - self.comm.barrier() - - -def run(): - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(LockTest)) - suite.addTest(unittest.makeSuite(ShmemTest)) - runner = unittest.TextTestRunner() - runner.run(suite) - return diff --git a/src/toast/pshmem/utils.py b/src/toast/pshmem/utils.py deleted file mode 100644 index cdd12ee92..000000000 --- a/src/toast/pshmem/utils.py +++ /dev/null @@ -1,74 +0,0 @@ -## -# Copyright (c) 2017-2020, all rights reserved. Use of this source code -# is governed by a BSD license that can be found in the top-level -# LICENSE file. -## - -import numpy as np - - -def mpi_check_abort(comm, root, status, msg): - """Check MPI return status. - - If the status is non-zero, print a message on the root process and abort. - - Args: - comm (mpi4py.Comm): The communicator, or None. - root (int): The root process. - status (int): The MPI status. - msg (str): The message to print in case of error. - - Returns: - None - - """ - if comm is not None: - from mpi4py import MPI - - failed = comm.allreduce(status, op=MPI.SUM) - if failed > 0: - if comm.rank == self._root: - print( - "MPIShared: one or more processes failed: {}".format(msg), - flush=True, - ) - comm.Abort() - else: - if status != 0: - print("MPIShared: failed: {}".format(msg), flush=True) - raise RuntimeError(msg) - return - - -def mpi_data_type(comm, dt): - """Helper function to return the byte size and MPI datatype. - - Args: - comm (mpi4py.Comm): The communicator, or None. - dt (np.dtype): The datatype. - - Returns: - (tuple): The (bytesize, MPI type) of the input dtype. 
- - """ - dtyp = np.dtype(dt) - dsize = None - mpitype = None - if comm is None: - dsize = dtyp.itemsize - else: - from mpi4py import MPI - - # We are actually using MPI, so we need to ensure that - # our specified numpy dtype has a corresponding MPI datatype. - status = 0 - try: - # Technically this is an internal variable, but online - # forum posts from the developers indicate this is stable - # at least until a public interface is created. - mpitype = MPI._typedict[dtyp.char] - except: - status = 1 - mpi_check_abort(comm, 0, status, "numpy to MPI type conversion") - dsize = mpitype.Get_size() - return (dsize, mpitype) diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 817856b6d..088e31b7d 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -8,15 +8,16 @@ install(FILES mpi.py env.py dist.py - timing.py - cache.py + intervals.py rng.py qarray.py fft.py healpix.py - tod.py - intervals.py - tod_satellite.py + config.py + observation.py + timing.py + pixels.py + ops_sim_satellite.py ops_applygain.py ops_simnoise.py cov.py diff --git a/src/toast/tests/_helpers.py b/src/toast/tests/_helpers.py index 04a85a286..e5cb58ef3 100644 --- a/src/toast/tests/_helpers.py +++ b/src/toast/tests/_helpers.py @@ -10,10 +10,16 @@ from ..mpi import Comm -from ..dist import Data +from ..data import Data from .. import qarray as qa +from ..instrument import Focalplane, Telescope + +from ..instrument_sim import fake_hexagon_focalplane + +from ..observation import DetectorData, Observation + ZAXIS = np.array([0, 0, 1.0]) @@ -72,7 +78,7 @@ def create_comm(mpicomm): return toastcomm -def create_distdata(mpicomm, obs_per_group=1): +def create_distdata(mpicomm, obs_per_group=1, samples=10): """Create a toast communicator and distributed data object. Use the specified MPI communicator to attempt to create 2 process groups, @@ -81,17 +87,29 @@ def create_distdata(mpicomm, obs_per_group=1): Args: mpicomm (MPI.Comm): the MPI communicator (or None). obs_per_group (int): the number of observations assigned to each group. + samples (int): number of samples per observation. Returns: - toast.Data: the distributed data with named observations (but no TOD). + toast.Data: the distributed data with named observations. """ toastcomm = create_comm(mpicomm) data = Data(toastcomm) for obs in range(obs_per_group): - ob = {} - ob["name"] = "test-{}-{}".format(toastcomm.group, obs) - ob["id"] = obs_per_group * toastcomm.group + obs + oname = "test-{}-{}".format(toastcomm.group, obs) + oid = obs_per_group * toastcomm.group + obs + npix = 1 + ring = 1 + while 2 * npix < toastcomm.group_size: + npix += 6 * ring + ring += 1 + fp = fake_hexagon_focalplane(n_pix=npix) + tele = Telescope("test", focalplane=fp) + # FIXME: for full testing we should set detranks as approximately the sqrt + # of the grid size so that we test the row / col communicators. + ob = Observation( + tele, samples=samples, name=oname, UID=oid, comm=toastcomm.comm_group + ) data.obs.append(ob) return data @@ -120,86 +138,6 @@ def uniform_chunks(samples, nchunk=100): return chunks -def boresight_focalplane( - ndet, samplerate=1.0, epsilon=0.0, net=1.0, fmin=0.0, alpha=1.0, fknee=0.05 -): - """Create a set of detectors at the boresight. - - This creates multiple detectors at the boresight, oriented in evenly - spaced increments from zero to PI. - - Args: - ndet (int): the number of detectors. 
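-        samplerate (float): the sample rate in Hz.
-        epsilon (float): the cross-polar leakage of the detectors.
-        net (float): the detector NET (noise equivalent temperature).
-        fmin (float): the minimum frequency of the 1/f noise model.
-        alpha (float): the spectral slope of the 1/f noise model.
-        fknee (float or array): the 1/f knee frequency, either a scalar or
-            one value per detector.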
- - - Returns: - (tuple): names(list), quat(dict), fmin(dict), rate(dict), fknee(dict), - alpha(dict), netd(dict) - - """ - names = ["d{:02d}".format(x) for x in range(ndet)] - pol = {"d{:02d}".format(x): (x * np.pi / ndet) for x in range(ndet)} - - quat = { - "d{:02d}".format(x): qa.rotation(ZAXIS, pol["d{:02d}".format(x)]) - for x in range(ndet) - } - - det_eps = {"d{:02d}".format(x): epsilon for x in range(ndet)} - - det_fmin = {"d{:02d}".format(x): fmin for x in range(ndet)} - det_rate = {"d{:02d}".format(x): samplerate for x in range(ndet)} - det_alpha = {"d{:02d}".format(x): alpha for x in range(ndet)} - det_net = {"d{:02d}".format(x): net for x in range(ndet)} - - det_fknee = None - if np.isscalar(fknee): - det_fknee = {"d{:02d}".format(x): fknee for x in range(ndet)} - else: - # This must be an array or list of correct length - if len(fknee) != ndet: - raise RuntimeError("length of knee frequencies must equal ndet") - det_fknee = {"d{:02d}".format(x): y for x, y in zip(range(ndet), fknee)} - - return names, quat, det_eps, det_rate, det_net, det_fmin, det_fknee, det_alpha - - -def create_weather(outfile): - from astropy.table import Table - import astropy.io.fits as af - - nstep = 101 - TQI = [np.linspace(0, 0.5, nstep) for x in range(24)] - TQL = [np.linspace(0, 0.1, nstep) for x in range(24)] - TQV = [np.linspace(1, 12, nstep) for x in range(24)] - QV10M = [np.linspace(0.001, 0.007, nstep) for x in range(24)] - PS = [np.linspace(58600, 59000, nstep) for x in range(24)] - TS = [np.linspace(270, 280, nstep) for x in range(24)] - T10M = [np.linspace(270, 280, nstep) for x in range(24)] - U10M = [np.linspace(-2, 8, nstep) for x in range(24)] - V10M = [np.linspace(-1, 3, nstep) for x in range(24)] - - hdus = af.HDUList([af.PrimaryHDU()]) - - for mon in range(12): - tab = Table( - [TQI, TQL, TQV, QV10M, PS, TS, T10M, U10M, V10M], - names=("TQI", "TQL", "TQV", "QV10M", "PS", "TS", "T10M", "U10M", "V10M"), - meta={ - "PROBSTRT": 0.0, - "PROBSTOP": 1.0, - "PROBSTEP": 0.01, - "NSTEP": nstep, - "MONTH": mon, - }, - ) - hdus.append(af.table_to_hdu(tab)) - - hdus.writeto(outfile, overwrite=True) - - return - - # # @contextmanager # def mpi_guard(comm=MPI.COMM_WORLD): diff --git a/src/toast/tests/config.py b/src/toast/tests/config.py new file mode 100644 index 000000000..02171e106 --- /dev/null +++ b/src/toast/tests/config.py @@ -0,0 +1,140 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
+ +from .mpi import MPITestCase + +import os + +import copy + +import numpy as np +import numpy.testing as nt + +from tomlkit import comment, document, nl, table, dumps, loads + +from ..utils import Environment + +from ..config import load_config, dump_toml, build_config, create + +from ..instrument import Telescope, Focalplane + +from ..future_ops import SimSatellite # , Pipeline, SimNoise, DefaultNoiseModel + +from ..data import Data + +from ._helpers import create_outdir, create_distdata, boresight_focalplane, create_comm + + +class ConfigTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + self.data = create_distdata( + self.comm, obs_per_group=1, future_obs=True, samples=10 + ) + + env = Environment.get() + + # Make a fake focalplane for pipeline tests + + self.ndet = 4 + ( + dnames, + dquat, + depsilon, + drate, + dnet, + dfmin, + dfknee, + dalpha, + ) = boresight_focalplane(self.ndet) + + detdata = {} + for d in dnames: + detdata[d] = { + "fsample": drate[d], + "NET": dnet[d], + "quat": dquat[d], + "fmin": dfmin[d], + "fknee": dfknee[d], + "alpha": dalpha[d], + "pol_leakage": depsilon[d], + } + + self.focalplane = Focalplane( + detector_data=detdata, sample_rate=drate[dnames[0]] + ) + + # Create some example configs to load + + ops = [SimSatellite(name="sim_satellite")] + # + # ops = { + # "sim_satellite": SimSatellite, + # "noise_model": DefaultNoiseModel, + # "sim_noise": SimNoise, + # } + + conf = build_config(ops) + + self.doc1_file = os.path.join(self.outdir, "doc1.toml") + dump_toml(self.doc1_file, conf) + # + # ops = {"sim_pipe": Pipeline} + # + # conf = default_config(operators=ops) + # conf["operators"]["sim_pipe"]["operators"] = [ + # "@config:/operators/sim_satellite", + # "@config:/operators/noise_model", + # "@config:/operators/sim_noise", + # ] + # + # self.doc2_file = os.path.join(self.outdir, "doc2.toml") + # dump_config(self.doc2_file, conf) + + def test_load(self): + conf = load_config(self.doc1_file) + # conf = load_config(self.doc2_file, input=conf) + + def test_roundtrip(self): + conf = load_config(self.doc1_file) + # conf = load_config(self.doc2_file, input=conf) + check_file = os.path.join(self.outdir, "check.toml") + dump_toml(check_file, conf) + check = load_config(check_file) + self.assertTrue(conf == check) + + def test_create(self): + conf = load_config(self.doc1_file) + # conf = load_config(self.doc2_file, input=conf) + + run = create(conf) + + # Add our fake telescope + run["operators"]["sim_satellite"].telescope = Telescope( + name="fake", focalplane=self.focalplane + ) + + # print(run) + + # print(run["operators"]["sim_pipe"].config["operators"]) + + # def test_run(self): + # conf = load_config(self.doc1_file) + # conf = load_config(self.doc2_file, input=conf) + # + # # Add a fake telescope for testing + # conf["operators"]["sim_satellite"]["telescope"] = Telescope( + # name="fake", focalplane=self.focalplane + # ) + # + # run = create(conf) + # + # toastcomm = create_comm(self.comm) + # data = Data(toastcomm) + # + # run["operators"]["sim_pipe"].exec(data) + # # for obs in data.obs: + # # for d in obs.signal().detectors: + # # print(d, obs.signal()[d][:5]) diff --git a/src/toast/tests/dist.py b/src/toast/tests/dist.py index 6acfe9a29..bf515f146 100644 --- a/src/toast/tests/dist.py +++ b/src/toast/tests/dist.py @@ -9,7 +9,8 @@ import numpy as np import numpy.testing as nt -from ..dist import distribute_uniform, distribute_discrete, Data +from 
..dist import distribute_uniform, distribute_discrete +from ..data import Data from ..mpi import Comm, MPI from ._helpers import create_outdir, create_distdata diff --git a/src/toast/tests/intervals.py b/src/toast/tests/intervals.py index 272446615..7dca586f9 100644 --- a/src/toast/tests/intervals.py +++ b/src/toast/tests/intervals.py @@ -8,9 +8,11 @@ import numpy.testing as nt -from ..tod.interval import intervals_to_chunklist +from ..intervals import Interval, IntervalList -from ..tod.sim_interval import regular_intervals +# from ..tod.interval import intervals_to_chunklist +# +# from ..tod.sim_interval import regular_intervals class IntervalTest(MPITestCase): @@ -22,30 +24,153 @@ def setUp(self): self.first = 10 self.nint = 3 - def test_tochunks(self): - intrvls = regular_intervals( - self.nint, self.start, self.first, self.rate, self.duration, self.gap + def test_list(self): + stamps = np.arange(100, dtype=np.float64) + timespans = [(10.0 * x + 2.0, 10.0 * x + 5.0) for x in range(10)] + sampspans = [(10 * x + 2, 10 * x + 5) for x in range(10)] + check = [ + Interval( + start=float(10.0 * x + 2), + stop=float(10.0 * x + 5), + first=(10 * x + 2), + last=(10 * x + 5), + ) + for x in range(10) + ] + check_neg = [Interval(start=0.0, stop=1.0, first=0, last=1)] + check_neg.extend( + [ + Interval( + start=float(10.0 * x + 6), + stop=float(10.0 * x + 11), + first=(10 * x + 6), + last=(10 * x + 11), + ) + for x in range(9) + ] ) - totsamp = self.nint * (intrvls[0].last - intrvls[0].first + 1) - totsamp += self.nint * (intrvls[1].first - intrvls[0].last - 1) - sizes = intervals_to_chunklist(intrvls, totsamp, startsamp=self.first + 10) - # for it in intrvls: - # print(it.first," ",it.last," ",it.start," ",it.stop) - # print(sizes) - nt.assert_equal(np.sum(sizes), totsamp) - - def test_regular(self): - intrvls = regular_intervals( - self.nint, self.start, self.first, self.rate, self.duration, self.gap + check_neg.append(Interval(start=96.0, stop=99.0, first=96, last=99)) + # print("check = ", check) + # print("check_neg = ", check_neg) + + itime = IntervalList(stamps, timespans=timespans) + # print("itime = ", itime) + + for it, chk in zip(itime, check): + self.assertTrue(it == chk) + + isamp = IntervalList(stamps, samplespans=sampspans) + # print("isamp = ", isamp) + for it, chk in zip(isamp, check): + self.assertTrue(it == chk) + + negated = ~isamp + for it, chk in zip(negated, check_neg): + self.assertTrue(it == chk) + + def test_simplify(self): + stamps = np.arange(100, dtype=np.float64) + boundaries = [10 * x for x in range(1, 9)] + ranges = [(x, x + 9) for x in boundaries] + check = Interval(first=10, last=89, start=stamps[10], stop=stamps[89]) + ival = IntervalList(stamps, samplespans=ranges) + # print("ival = ", ival) + ival.simplify() + # print("simple ival = ", ival) + self.assertTrue(ival[0] == check) + + def test_bitwise(self): + stamps = np.arange(100, dtype=np.float64) + raw = [ + Interval( + start=float(10.0 * x + 2), + stop=float(10.0 * x + 5), + first=(10 * x + 2), + last=(10 * x + 5), + ) + for x in range(10) + ] + ival = IntervalList(stamps, intervals=raw) + neg = ~ival + + full = ival | neg + # print("full = ", full) + self.assertTrue( + full[0] == Interval(start=stamps[0], stop=stamps[-1], first=0, last=99) + ) + + empty = ival & neg + # print("empty = ", empty) + + rawshift = [ + Interval( + start=float(10.0 * x + 3), + stop=float(10.0 * x + 6), + first=(10 * x + 3), + last=(10 * x + 6), + ) + for x in range(10) + ] + shifted = IntervalList(stamps, intervals=rawshift) + + 
and_check = IntervalList( + stamps, + intervals=[ + Interval( + start=float(10.0 * x + 3), + stop=float(10.0 * x + 5), + first=(10 * x + 3), + last=(10 * x + 5), + ) + for x in range(10) + ], ) - goodsamp = self.nint * (int(self.duration * self.rate) + 1) + or_check = IntervalList( + stamps, + intervals=[ + Interval( + start=float(10.0 * x + 2), + stop=float(10.0 * x + 6), + first=(10 * x + 2), + last=(10 * x + 6), + ) + for x in range(10) + ], + ) - check = 0 + test = ival & shifted + # print("bit and = ", test) + self.assertTrue(test == and_check) - for it in intrvls: - # print(it.first," ",it.last," ",it.start," ",it.stop) - check += it.last - it.first + 1 + test = ival | shifted + # print("bit or = ", test) + self.assertTrue(test == or_check) - nt.assert_equal(check, goodsamp) - return + # def test_tochunks(self): + # intrvls = regular_intervals( + # self.nint, self.start, self.first, self.rate, self.duration, self.gap + # ) + # totsamp = self.nint * (intrvls[0].last - intrvls[0].first + 1) + # totsamp += self.nint * (intrvls[1].first - intrvls[0].last - 1) + # sizes = intervals_to_chunklist(intrvls, totsamp, startsamp=self.first + 10) + # # for it in intrvls: + # # print(it.first," ",it.last," ",it.start," ",it.stop) + # # print(sizes) + # nt.assert_equal(np.sum(sizes), totsamp) + # + # def test_regular(self): + # intrvls = regular_intervals( + # self.nint, self.start, self.first, self.rate, self.duration, self.gap + # ) + # + # goodsamp = self.nint * (int(self.duration * self.rate) + 1) + # + # check = 0 + # + # for it in intrvls: + # # print(it.first," ",it.last," ",it.start," ",it.stop) + # check += it.last - it.first + 1 + # + # nt.assert_equal(check, goodsamp) + # return diff --git a/src/toast/tests/observation.py b/src/toast/tests/observation.py new file mode 100644 index 000000000..041a85504 --- /dev/null +++ b/src/toast/tests/observation.py @@ -0,0 +1,132 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
+ +from .mpi import MPITestCase + +import os + +import numpy as np +import numpy.testing as nt + +from ..instrument import Focalplane, Telescope + +from ..observation import DetectorData, Observation + +from ..mpi import Comm, MPI + +from ._helpers import create_outdir, create_distdata + + +class ObservationTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + + self.data = create_distdata(self.comm, obs_per_group=1) + + self.dets = ["d00", "d01", "d02", "d03"] + self.shapes = [(10,), (10, 4), (10, 3, 2)] + self.types = { + "f64": np.float64, + "f32": np.float32, + "i64": np.int64, + "u64": np.uint64, + "i32": np.int32, + "u32": np.uint32, + "i16": np.int16, + "u16": np.uint16, + "i8": np.int8, + "u8": np.uint8, + } + + def test_detdata(self): + for shp in self.shapes: + for tname, tp in self.types.items(): + tdata = DetectorData(self.dets, shp, tp) + # if self.comm is None or self.comm.rank == 0: + # print(tdata) + gdets = tdata.detectors + for didx, dname in enumerate(gdets): + tdata[didx] = didx * np.ones(shp, dtype=tp) + sidata = tdata[didx] + sndata = tdata[dname] + # print(sidata) + np.testing.assert_equal(sidata, sndata) + sdata = tdata[1:-1] + sdata = tdata[[gdets[0], gdets[-1]]] + tdata.clear() + + def test_observation(self): + # Populate the observations + rms = 10.0 + for obs in self.data.obs: + n_samp = obs.n_local + dets = obs.local_detectors + fake_bore = np.ravel(np.random.random((n_samp, 4))).reshape(-1, 4) + fake_flags = np.random.uniform(low=0, high=2, size=n_samp).astype( + np.uint8, copy=True + ) + bore = None + common_flags = None + times = None + if obs.comm_col_rank == 0: + bore = fake_bore + common_flags = fake_flags + times = np.arange(n_samp) + + # Construct some default shared objects from local buffers + obs.shared.create("boresight_azel", original=bore, comm=obs.comm_col) + obs.shared.create("boresight_radec", original=bore, comm=obs.comm_col) + obs.shared.create("flags", original=common_flags, comm=obs.comm_col) + obs.shared.create("timestamps", original=times, comm=obs.comm_col) + + # Allocate the default detector data and flags + obs.detdata.create("signal", shape=(n_samp,), dtype=np.float64) + obs.detdata.create("flags", shape=(n_samp,), dtype=np.uint16) + + # Allocate some other detector data + obs.detdata.create("calibration", shape=(n_samp,), dtype=np.float32) + obs.detdata.create("sim_noise", shape=(n_samp,), dtype=np.float64) + + # Store some values for detector data + for det in dets: + obs.detdata["signal"][det, :] = np.random.normal( + loc=0.0, scale=rms, size=n_samp + ) + obs.detdata["calibration"][det, :] = np.random.normal( + loc=0.0, scale=rms, size=n_samp + ).astype(np.float32) + obs.detdata["sim_noise"][det, :] = np.random.normal( + loc=0.0, scale=rms, size=n_samp + ) + obs.detdata["flags"][det, :] = fake_flags + + # Make some shared objects, one per detector, shared across the process + # rows. 
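+            # rows. Data shared on the column communicator (the boresight and
+            # timestamps above) is common to all detectors for a given sample
+            # range, while data shared on the row communicator (these beam
+            # profiles) is per-detector and common to all sample ranges.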
+ obs.shared.create( + "beam_profile", + shape=(len(dets), 1000, 1000), + dtype=np.float32, + comm=obs.comm_row, + ) + for didx, det in enumerate(dets): + beam_data = None + if obs.comm_row_rank == 0: + beam_data = ( + np.random.uniform(low=0, high=100, size=(1000 * 1000)) + .astype(np.float32, copy=True) + .reshape(1, 1000, 1000) + ) + obs.shared["beam_profile"].set( + beam_data, offset=(didx, 0, 0), fromrank=0 + ) + + # You can access detector data by index or by name + for didx, det in enumerate(dets): + np.testing.assert_equal( + obs.detdata["signal"][det], obs.detdata["signal"][didx] + ) + + # ... Or you can access it as one big array (first dimension is detector) + print("\n", obs.detdata["signal"].data, "\n") diff --git a/src/toast/tests/ops_sim_satellite.py b/src/toast/tests/ops_sim_satellite.py new file mode 100644 index 000000000..2f0fa881f --- /dev/null +++ b/src/toast/tests/ops_sim_satellite.py @@ -0,0 +1,43 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +from .mpi import MPITestCase + +import os + +import numpy as np +import numpy.testing as nt + +from ..mpi import Comm, MPI + +from ..data import Data + +from ..instrument import Focalplane, Telescope + +from ..instrument_sim import fake_hexagon_focalplane + +from .. import future_ops as ops + +from ._helpers import create_outdir, create_comm + + +class SimSatelliteTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + + self.toastcomm = create_comm(self.comm) + + npix = 1 + ring = 1 + while 2 * npix < self.toastcomm.group_size: + npix += 6 * ring + ring += 1 + self.fp = fake_hexagon_focalplane(n_pix=npix) + self.tele = Telescope("test", focalplane=self.fp) + self.simsat = ops.SimSatellite(n_observation=2, telescope=self.tele) + + def test_exec(self): + data = Data(self.toastcomm) + self.simsat.exec(data) diff --git a/src/toast/tests/pixels.py b/src/toast/tests/pixels.py new file mode 100644 index 000000000..5b00df973 --- /dev/null +++ b/src/toast/tests/pixels.py @@ -0,0 +1,165 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +from .mpi import MPITestCase + +import os + +import numpy as np + +import numpy.testing as nt + +from ..pixels import PixelDistribution, PixelData + +from .. 
import pixels_io as io + +from ._helpers import create_outdir + + +class PixelTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + + self.nsides = [8, 32] + self.nsub = [1, 10, 100] + self.types = [ + np.float64, + np.float32, + np.int64, + np.uint64, + np.int32, + np.uint32, + np.int16, + np.uint16, + np.int8, + np.uint8, + ] + self.fitstypes = [np.float64, np.float32, np.int64, np.int32] + # self.nsides = [2] + # self.nsub = [8] + # self.types = [np.int32] + + def tearDown(self): + pass + + def _make_pixdist(self, nside, nsub, comm): + npix = 12 * nside ** 2 + valid_submaps = np.arange(0, nsub, 2, dtype=np.int32) + # Make up some local submaps for each process + local_submaps = None + if comm is None: + local_submaps = valid_submaps + else: + local_submaps = np.unique( + np.floor_divide( + np.random.randint(0, 2 * nsub, size=(nsub // 2), dtype=np.int32), 2 + ) + ) + dist = PixelDistribution( + n_pix=npix, n_submap=nsub, local_submaps=local_submaps, comm=comm + ) + return dist + + def _make_pixdata(self, dist, dtype, nnz): + pdata = PixelData(dist, dtype, n_value=nnz) + gl = list() + for sm in pdata.distribution.local_submaps: + for px in range(dist.n_pix_submap): + if sm * dist.n_pix_submap + px < dist.n_pix: + gl.append(sm * dist.n_pix_submap + px) + gl = np.array(gl, dtype=np.int64) + subm, subpx = dist.global_pixel_to_submap(gl) + ploc = dist.global_pixel_to_local(gl) + ploc[:] *= 2 + pdata.raw[ploc] = 1 + for z in range(1, nnz): + ploc[:] += 1 + pdata.raw[ploc] = 1 + return pdata + + def test_data(self): + np.random.seed(0) + if self.comm is not None: + np.random.seed(self.comm.rank) + for nside in self.nsides: + for nsb in self.nsub: + dist = self._make_pixdist(nside, nsb, self.comm) + for tp in self.types: + pdata = self._make_pixdata(dist, tp, 2) + pdata = PixelData(dist, tp, n_value=2) + + other = PixelData(dist, tp, n_value=2) + other.raw[:] = pdata.raw[:] + + # if self.comm.rank == 0: + # print("----- start orig -----") + # for p in range(self.comm.size): + # if p == self.comm.rank: + # print("proc {}:".format(p)) + # for lc, sm in enumerate(pdata.distribution.local_submaps): + # print("submap {} = ".format(sm)) + # print(pdata.data[lc]) + # print("", flush=True) + # self.comm.barrier() + # + # if self.comm.rank == 0: + # print("----- start other -----") + # for p in range(self.comm.size): + # if p == self.comm.rank: + # print("proc {}:".format(p)) + # for lc, sm in enumerate(other.distribution.local_submaps): + # print("submap {} = ".format(sm)) + # print(other.data[lc]) + # print("", flush=True) + # self.comm.barrier() + + pdata.sync_allreduce() + + # if self.comm.rank == 0: + # print("----- allreduce orig -----") + # for p in range(self.comm.size): + # if p == self.comm.rank: + # print("proc {}:".format(p)) + # for lc, sm in enumerate(pdata.distribution.local_submaps): + # print("submap {} = ".format(sm)) + # print(pdata.data[lc]) + # print("", flush=True) + # self.comm.barrier() + + other.sync_alltoallv() + + # if self.comm.rank == 0: + # print("----- alltoallv other -----") + # for p in range(self.comm.size): + # if p == self.comm.rank: + # print("proc {}:".format(p)) + # for lc, sm in enumerate(other.distribution.local_submaps): + # print("submap {} = ".format(sm)) + # print(other.data[lc]) + # print("", flush=True) + # self.comm.barrier() + + nt.assert_equal(pdata.data, other.data) + + def test_io(self): + np.random.seed(0) + if self.comm is not None: + 
np.random.seed(self.comm.rank) + for nside in self.nsides: + for nsb in self.nsub: + dist = self._make_pixdist(nside, nsb, self.comm) + for tp in self.fitstypes: + pdata = self._make_pixdata(dist, tp, 2) + pdata = PixelData(dist, tp, n_value=2) + fitsfile = os.path.join( + self.outdir, + "data_N{}_sub{}_type-{}.fits".format( + nside, nsb, np.dtype(tp).char + ), + ) + io.write_healpix_fits(pdata, fitsfile) + check = PixelData(dist, tp, n_value=2) + io.read_healpix_fits(check, fitsfile) + nt.assert_equal(pdata.data, check.data) diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index b219d183e..0d834f88d 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -15,65 +15,77 @@ from .._libtoast import libtoast_tests from . import env as testenv -from . import cache as testcache from . import timing as testtiming from . import rng as testrng from . import fft as testfft from . import healpix as testhealpix -from . import dist as testdist from . import qarray as testqarray -from . import tod as testtod - -from . import psd_math as testpsdmath from . import intervals as testintervals - -from . import cov as testcov - -from . import ops_pmat as testopspmat - -from . import ops_dipole as testopsdipole -from . import ops_simnoise as testopssimnoise -from . import ops_sim_sss as testopssimsss - -from . import ops_polyfilter as testopspolyfilter -from . import ops_groundfilter as testopsgroundfilter - -from . import ops_gainscrambler as testopsgainscrambler -from . import ops_applygain as testopsapplygain - -from . import ops_memorycounter as testopsmemorycounter - -from . import ops_madam as testopsmadam -from . import ops_mapmaker as testopsmapmaker - -from . import map_satellite as testmapsatellite - -from . import map_ground as testmapground - -from . import binned as testbinned - -from . import sim_focalplane as testsimfocalplane -from . import tod_satellite as testtodsat - -from ..todmap import pysm - -if pysm is not None: - from . import ops_sim_pysm as testopspysm - -from . import ops_sim_atm as testopsatm - -from ..tod import tidas_available - -# if tidas_available: -# from . import tidas as testtidas -testtidas = None -tidas_available = False - -# from ..tod import spt3g_available -# if spt3g_available: -# from . import spt3g as testspt3g -testspt3g = None -spt3g_available = False +from . import pixels as testpixels + +from . import observation as testobs + +from . import ops_sim_satellite as testsimsat + + +# +# from . import cache as testcache +# from . import config as testconfig +# +# +# from . import dist as testdist +# +# from . import tod as testtod +# +# from . import psd_math as testpsdmath +# +# from . import cov as testcov +# +# from . import ops_pmat as testopspmat +# +# from . import ops_dipole as testopsdipole +# from . import ops_simnoise as testopssimnoise +# from . import ops_sim_sss as testopssimsss +# +# from . import ops_polyfilter as testopspolyfilter +# from . import ops_groundfilter as testopsgroundfilter +# +# from . import ops_gainscrambler as testopsgainscrambler +# from . import ops_applygain as testopsapplygain +# +# from . import ops_memorycounter as testopsmemorycounter +# +# from . import ops_madam as testopsmadam +# from . import ops_mapmaker as testopsmapmaker +# +# from . import map_satellite as testmapsatellite +# +# from . import map_ground as testmapground +# +# from . import binned as testbinned +# +# from . import sim_focalplane as testsimfocalplane +# from . 
import tod_satellite as testtodsat +# +# from ..todmap import pysm +# +# if pysm is not None: +# from . import ops_sim_pysm as testopspysm +# +# from . import ops_sim_atm as testopsatm +# +# from ..tod import tidas_available +# +# # if tidas_available: +# # from . import tidas as testtidas +# testtidas = None +# tidas_available = False +# +# # from ..tod import spt3g_available +# # if spt3g_available: +# # from . import spt3g as testspt3g +# testspt3g = None +# spt3g_available = False def test(name=None, verbosity=2): @@ -108,7 +120,6 @@ def test(name=None, verbosity=2): if name is None: suite.addTest(loader.loadTestsFromModule(testenv)) - suite.addTest(loader.loadTestsFromModule(testcache)) if not (("CONDA_BUILD" in os.environ) or ("CIBUILDWHEEL" in os.environ)): # When doing a conda build on CI services in containers # the timing information is not accurate and these tests @@ -117,55 +128,67 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(testrng)) suite.addTest(loader.loadTestsFromModule(testfft)) suite.addTest(loader.loadTestsFromModule(testhealpix)) - suite.addTest(loader.loadTestsFromModule(testdist)) suite.addTest(loader.loadTestsFromModule(testqarray)) - suite.addTest(loader.loadTestsFromModule(testtod)) - suite.addTest(loader.loadTestsFromModule(testtodsat)) suite.addTest(loader.loadTestsFromModule(testintervals)) - suite.addTest(loader.loadTestsFromModule(testopssimnoise)) - suite.addTest(loader.loadTestsFromModule(testopssimsss)) - suite.addTest(loader.loadTestsFromModule(testopsapplygain)) - suite.addTest(loader.loadTestsFromModule(testopspmat)) - suite.addTest(loader.loadTestsFromModule(testcov)) - suite.addTest(loader.loadTestsFromModule(testopsdipole)) - suite.addTest(loader.loadTestsFromModule(testopsgroundfilter)) - suite.addTest(loader.loadTestsFromModule(testsimfocalplane)) - suite.addTest(loader.loadTestsFromModule(testopspolyfilter)) - suite.addTest(loader.loadTestsFromModule(testopsmemorycounter)) - suite.addTest(loader.loadTestsFromModule(testopsgainscrambler)) - suite.addTest(loader.loadTestsFromModule(testpsdmath)) - suite.addTest(loader.loadTestsFromModule(testopsmadam)) - suite.addTest(loader.loadTestsFromModule(testopsmapmaker)) - suite.addTest(loader.loadTestsFromModule(testmapsatellite)) - suite.addTest(loader.loadTestsFromModule(testmapground)) - suite.addTest(loader.loadTestsFromModule(testbinned)) - suite.addTest(loader.loadTestsFromModule(testopsatm)) - # These tests segfault locally. Re-enable once we are doing bandpass - # integration on on the fly. 
- # if pysm is not None: - # suite.addTest(loader.loadTestsFromModule(testopspysm)) - - if tidas_available: - suite.addTest(loader.loadTestsFromModule(testtidas)) - if spt3g_available: - suite.addTest(loader.loadTestsFromModule(testspt3g)) + suite.addTest(loader.loadTestsFromModule(testpixels)) + suite.addTest(loader.loadTestsFromModule(testobs)) + + suite.addTest(loader.loadTestsFromModule(testsimsat)) + + # suite.addTest(loader.loadTestsFromModule(testcache)) + # suite.addTest(loader.loadTestsFromModule(testconfig)) + # + # + # suite.addTest(loader.loadTestsFromModule(testdist)) + # + # suite.addTest(loader.loadTestsFromModule(testtod)) + # suite.addTest(loader.loadTestsFromModule(testtodsat)) + # + # suite.addTest(loader.loadTestsFromModule(testopssimnoise)) + # suite.addTest(loader.loadTestsFromModule(testopssimsss)) + # suite.addTest(loader.loadTestsFromModule(testopsapplygain)) + # suite.addTest(loader.loadTestsFromModule(testopspmat)) + # suite.addTest(loader.loadTestsFromModule(testcov)) + # suite.addTest(loader.loadTestsFromModule(testopsdipole)) + # suite.addTest(loader.loadTestsFromModule(testopsgroundfilter)) + # suite.addTest(loader.loadTestsFromModule(testsimfocalplane)) + # suite.addTest(loader.loadTestsFromModule(testopspolyfilter)) + # suite.addTest(loader.loadTestsFromModule(testopsmemorycounter)) + # suite.addTest(loader.loadTestsFromModule(testopsgainscrambler)) + # suite.addTest(loader.loadTestsFromModule(testpsdmath)) + # suite.addTest(loader.loadTestsFromModule(testopsmadam)) + # suite.addTest(loader.loadTestsFromModule(testopsmapmaker)) + # suite.addTest(loader.loadTestsFromModule(testmapsatellite)) + # suite.addTest(loader.loadTestsFromModule(testmapground)) + # suite.addTest(loader.loadTestsFromModule(testbinned)) + # suite.addTest(loader.loadTestsFromModule(testopsatm)) + # + # # These tests segfault locally. Re-enable once we are doing bandpass + # # integration on on the fly. + # # if pysm is not None: + # # suite.addTest(loader.loadTestsFromModule(testopspysm)) + # + # if tidas_available: + # suite.addTest(loader.loadTestsFromModule(testtidas)) + # if spt3g_available: + # suite.addTest(loader.loadTestsFromModule(testspt3g)) elif name != "libtoast": - if (name == "tidas") and (not tidas_available): - print("Cannot run TIDAS tests- package not available") - return - elif (name == "spt3g") and (not spt3g_available): - print("Cannot run SPT3G tests- package not available") - return - else: - modname = "toast.tests.{}".format(name) - if modname not in sys.modules: - result = '"{}" is not a valid test. Try'.format(name) - for name in sys.modules: - if name.startswith("toast.tests."): - result += '\n - "{}"'.format(name.replace("toast.tests.", "")) - result += "\n" - raise RuntimeError(result) - suite.addTest(loader.loadTestsFromModule(sys.modules[modname])) + # if (name == "tidas") and (not tidas_available): + # print("Cannot run TIDAS tests- package not available") + # return + # elif (name == "spt3g") and (not spt3g_available): + # print("Cannot run SPT3G tests- package not available") + # return + # else: + modname = "toast.tests.{}".format(name) + if modname not in sys.modules: + result = '"{}" is not a valid test. 
Try'.format(name) + for name in sys.modules: + if name.startswith("toast.tests."): + result += '\n - "{}"'.format(name.replace("toast.tests.", "")) + result += "\n" + raise RuntimeError(result) + suite.addTest(loader.loadTestsFromModule(sys.modules[modname])) ret = 0 _ret = mpirunner.run(suite) diff --git a/src/toast/tod/applygain.py b/src/toast/tod/applygain.py index fdaab1e8d..c4499b745 100644 --- a/src/toast/tod/applygain.py +++ b/src/toast/tod/applygain.py @@ -6,7 +6,7 @@ from astropy.io import fits -from ..op import Operator +from ..operator import Operator from ..timing import function_timer diff --git a/src/toast/tod/gainscrambler.py b/src/toast/tod/gainscrambler.py index 2b804b3aa..15556833f 100644 --- a/src/toast/tod/gainscrambler.py +++ b/src/toast/tod/gainscrambler.py @@ -4,7 +4,7 @@ import re -from ..op import Operator +from ..operator import Operator from ..rng import random diff --git a/src/toast/tod/interval.py b/src/toast/tod/interval.py index 711baffa9..510ed6545 100644 --- a/src/toast/tod/interval.py +++ b/src/toast/tod/interval.py @@ -4,7 +4,7 @@ import numpy as np -from ..op import Operator +from ..operator import Operator from ..timing import function_timer diff --git a/src/toast/tod/memorycounter.py b/src/toast/tod/memorycounter.py index e3fa15571..68ca0ca4e 100644 --- a/src/toast/tod/memorycounter.py +++ b/src/toast/tod/memorycounter.py @@ -4,7 +4,7 @@ from ..mpi import MPI -from ..op import Operator +from ..operator import Operator from ..utils import Logger diff --git a/src/toast/tod/polyfilter.py b/src/toast/tod/polyfilter.py index c8f7c1afd..c1e4f7ea4 100644 --- a/src/toast/tod/polyfilter.py +++ b/src/toast/tod/polyfilter.py @@ -10,7 +10,7 @@ from .._libtoast import filter_polynomial -from ..op import Operator +from ..operator import Operator from ..timing import function_timer diff --git a/src/toast/tod/sim_det_noise.py b/src/toast/tod/sim_det_noise.py index ae5ec78f6..7147e408b 100644 --- a/src/toast/tod/sim_det_noise.py +++ b/src/toast/tod/sim_det_noise.py @@ -10,7 +10,7 @@ from .tod_math import sim_noise_timestream -from ..op import Operator +from ..operator import Operator class OpSimNoise(Operator): diff --git a/src/toast/tod/spt3g.py b/src/toast/tod/spt3g.py index cfe92b360..8a207b535 100644 --- a/src/toast/tod/spt3g.py +++ b/src/toast/tod/spt3g.py @@ -25,7 +25,7 @@ # from .. 
import qarray as qa # # from ..dist import Data, distribute_discrete -# from ..op import Operator +# from ..operator import Operator # # from .tod import TOD # from .interval import Interval, intervals_to_chunklist diff --git a/src/toast/tod/tidas.py b/src/toast/tod/tidas.py index 65339faa8..df5899f99 100644 --- a/src/toast/tod/tidas.py +++ b/src/toast/tod/tidas.py @@ -14,8 +14,9 @@ from ..utils import Logger from ..timing import function_timer, Timer -from ..dist import Data, distribute_discrete -from ..op import Operator +from ..dist import distribute_discrete +from ..data import Data +from ..operator import Operator from .tod import TOD from .interval import Interval, intervals_to_chunklist diff --git a/src/toast/tod/tod_math.py b/src/toast/tod/tod_math.py index 695231808..39eb5c55e 100644 --- a/src/toast/tod/tod_math.py +++ b/src/toast/tod/tod_math.py @@ -7,7 +7,7 @@ import scipy.interpolate as si from scipy.signal import fftconvolve -from ..op import Operator +from ..operator import Operator from ..timing import function_timer diff --git a/src/toast/todmap/conviqt.py b/src/toast/todmap/conviqt.py index 086ba40fd..78a156c3b 100644 --- a/src/toast/todmap/conviqt.py +++ b/src/toast/todmap/conviqt.py @@ -10,7 +10,7 @@ from .. import qarray as qa -from ..op import Operator +from ..operator import Operator from ..timing import function_timer, Timer diff --git a/src/toast/todmap/groundfilter.py b/src/toast/todmap/groundfilter.py index 872b23b47..aa3c811b9 100644 --- a/src/toast/todmap/groundfilter.py +++ b/src/toast/todmap/groundfilter.py @@ -10,7 +10,7 @@ from .._libtoast import bin_templates, add_templates, chebyshev -from ..op import Operator +from ..operator import Operator from ..utils import Logger diff --git a/src/toast/todmap/madam.py b/src/toast/todmap/madam.py index 4bdcafc68..1f7d86a41 100644 --- a/src/toast/todmap/madam.py +++ b/src/toast/todmap/madam.py @@ -10,7 +10,7 @@ import numpy as np from ..cache import Cache -from ..op import Operator +from ..operator import Operator from ..timing import function_timer, Timer from ..utils import Logger, memreport diff --git a/src/toast/todmap/mapmaker.py b/src/toast/todmap/mapmaker.py index 2b8224a83..bf46327cf 100644 --- a/src/toast/todmap/mapmaker.py +++ b/src/toast/todmap/mapmaker.py @@ -6,12 +6,11 @@ import scipy.linalg import scipy.signal -from toast import Operator -from toast.mpi import MPI +from ..operator import Operator +from ..mpi import MPI -from ..timing import gather_timers, GlobalTimers -from toast.timing import function_timer, Timer -from toast.utils import Logger, Environment +from ..timing import gather_timers, GlobalTimers, function_timer, Timer +from ..utils import Logger, Environment from .sim_det_map import OpSimScan from .todmap_math import OpAccumDiag, OpScanScale, OpScanMask from ..tod import OpCacheClear, OpCacheCopy, OpCacheInit, OpFlagsApply, OpFlagGaps diff --git a/src/toast/todmap/pointing.py b/src/toast/todmap/pointing.py index b6764477c..a89c2fe06 100644 --- a/src/toast/todmap/pointing.py +++ b/src/toast/todmap/pointing.py @@ -12,7 +12,7 @@ from ..healpix import HealpixPixels -from ..op import Operator +from ..operator import Operator from ..timing import function_timer diff --git a/src/toast/todmap/sim_det_atm.py b/src/toast/todmap/sim_det_atm.py index 1bba410a3..b6963f871 100644 --- a/src/toast/todmap/sim_det_atm.py +++ b/src/toast/todmap/sim_det_atm.py @@ -10,7 +10,7 @@ from ..timing import function_timer, Timer -from ..op import Operator +from ..operator import Operator from .atm import available_utils 
diff --git a/src/toast/todmap/sim_det_dipole.py b/src/toast/todmap/sim_det_dipole.py index 7ced1ffad..8e22a3d48 100644 --- a/src/toast/todmap/sim_det_dipole.py +++ b/src/toast/todmap/sim_det_dipole.py @@ -10,7 +10,7 @@ from .todmap_math import dipole -from ..op import Operator +from ..operator import Operator from ..utils import Environment diff --git a/src/toast/todmap/sim_det_map.py b/src/toast/todmap/sim_det_map.py index d74eb55dc..cc3d6da48 100644 --- a/src/toast/todmap/sim_det_map.py +++ b/src/toast/todmap/sim_det_map.py @@ -16,7 +16,7 @@ from .._libtoast import scan_map_float64, scan_map_float32 -from ..op import Operator +from ..operator import Operator class OpSimGradient(Operator): diff --git a/src/toast/todmap/sim_det_pysm.py b/src/toast/todmap/sim_det_pysm.py index 540ddb017..7515cfc85 100644 --- a/src/toast/todmap/sim_det_pysm.py +++ b/src/toast/todmap/sim_det_pysm.py @@ -12,7 +12,7 @@ from ..utils import Logger, Timer -from ..op import Operator +from ..operator import Operator from .pysm import pysm @@ -144,11 +144,7 @@ def __init__( self.focalplanes = focalplanes self.distmap = DistPixels( - data, - comm=comm, - nnz=3, - dtype=np.float32, - pixels=self.pixels, + data, comm=comm, nnz=3, dtype=np.float32, pixels=self.pixels ) self.apply_beam = apply_beam diff --git a/src/toast/todmap/sss.py b/src/toast/todmap/sss.py index 88e133a55..1efcacc81 100644 --- a/src/toast/todmap/sss.py +++ b/src/toast/todmap/sss.py @@ -6,18 +6,18 @@ import numpy as np +import healpy as hp + from ..utils import Logger from ..timing import function_timer, Timer from ..rng import random -from ..op import Operator +from ..operator import Operator -from toast.mpi import MPI +from ..mpi import MPI -import toast.qarray as qa - -import healpy as hp +from .. import qarray as qa class OpSimScanSynchronousSignal(Operator): diff --git a/src/toast/todmap/todmap_math.py b/src/toast/todmap/todmap_math.py index d359073af..794be7445 100644 --- a/src/toast/todmap/todmap_math.py +++ b/src/toast/todmap/todmap_math.py @@ -7,7 +7,7 @@ from ..timing import function_timer, GlobalTimers -from ..op import Operator +from ..operator import Operator from .. import qarray as qa diff --git a/src/toast/traits.py b/src/toast/traits.py new file mode 100644 index 000000000..4dc06a5fa --- /dev/null +++ b/src/toast/traits.py @@ -0,0 +1,544 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
+
+import re
+ +import copy + +import importlib + +from collections import OrderedDict + +import traitlets + +from traitlets import ( + signature_has_traits, + HasTraits, + TraitError, + Undefined, + Unicode, + Bool, + List, + Set, + Dict, + Tuple, + Instance, + Int, + Float, +) + +from astropy import units as u + + +class Quantity(Float): + """A Quantity trait with units.""" + + default_value = 0.0 * u.dimensionless_unscaled + info_text = "a Quantity" + + def __init__(self, default_value=Undefined, **kwargs): + super().__init__(default_value=default_value, **kwargs) + + def validate(self, obj, value): + if not isinstance(value, u.Quantity): + # We can't read minds- force the user to specify the units + msg = "Value '{}' does not have units".format(value) + raise TraitError(msg) + # Use the Float validation on the actual value + valid_float = super().validate(obj, value.value) + return u.Quantity(valid_float, value.unit) + + def from_string(self, s): + if self.allow_none and s == "None": + return None + return u.Quantity(s) + + +def object_fullname(o): + """Return the fully qualified name of an object.""" + module = o.__module__ + if module is None or module == str.__module__: + return o.__qualname__ + return "{}.{}".format(module, o.__qualname__) + + +def trait_type_to_string(trait): + """Return a python type name corresponding to a trait. + + For the specified traitlet type, return the string name of the python type that + should be used when assigning to the trait. + + Args: + trait (traitlet.TraitType): The trait. + + Returns: + (str): The string name. + + """ + if isinstance(trait, Bool): + return "bool" + elif isinstance(trait, List): + return "list" + elif isinstance(trait, Set): + return "set" + elif isinstance(trait, Dict): + return "dict" + elif isinstance(trait, Tuple): + return "tuple" + elif isinstance(trait, Quantity): + return "Quantity" + elif isinstance(trait, Float): + return "float" + elif isinstance(trait, Int): + return "int" + elif isinstance(trait, Instance): + return trait.klass.__qualname__ + return "str" + + +def string_to_pytype(st): + """Return a python type corresponding to a type string. + + Used for parsing config properties. + + Args: + st (str): The type name. + + Returns: + (class): The python type. + + """ + if st == "bool": + return bool + elif st == "list": + return list + elif st == "set": + return set + elif st == "dict": + return dict + elif st == "tuple": + return tuple + elif st == "Quantity": + return u.Quantity + elif st == "int": + return int + elif st == "float": + return float + elif st == "str": + return str + # Must be a custom class... + return None + + +def trait_info(trait): + """Extract the trait properties. + + Returns: + (tuple): The name, python type, default value, and help string. + + """ + trtype = str + if isinstance(trait, Bool): + trtype = bool + elif isinstance(trait, List): + trtype = list + elif isinstance(trait, Set): + trtype = set + elif isinstance(trait, Dict): + trtype = dict + elif isinstance(trait, Tuple): + trtype = tuple + elif isinstance(trait, Quantity): + trtype = u.Quantity + elif isinstance(trait, Float): + trtype = float + elif isinstance(trait, Int): + trtype = int + elif isinstance(trait, Instance): + trtype = trait.klass + return (trait.name, trtype, trait.default_value, trait.help) + + +def trait_docs(cls): + """Decorator which adds trait properties to signature and docstring for a class. + + This appends a class docstring with argument help strings for every traitlet. 
It + also appends the traits to the constructor function signature. + + """ + doc = str(cls.__doc__) + for trait_name, trait in cls.class_traits().items(): + default = trait.default_value + trait_type = trait_type_to_string(trait) + if trait_type == "str": + default = "'{}'".format(default) + doc += "\t{} ({}): {} (default = {})\n".format( + trait_name, trait_type, trait.help, default + ) + doc += "\n" + cls.__doc__ = doc + return signature_has_traits(cls) + + +class TraitConfig(HasTraits): + """Base class for objects using traitlets and supporting configuration. + + This class implements some configuration functionality on top of the traitlets + HasTraits base class. The main features include: + + * Traitlet info and help string added to the docstring (cls.__doc__) for the + class constructor. + + * Dump / Load of a named INSTANCE (not just a class) to a configuration file. + This differs from the traitlets.Configuration package. + + * Creation and parsing of commandline options to set the traits on a named + instance of the class. + + """ + + name = Unicode(None, allow_none=True, help="The 'name' of this class instance") + + def __init__(self, **kwargs): + super().__init__(**kwargs) + if self.name is None: + self.name = self.__class__.__qualname__ + + def __repr__(self): + val = "<{}".format(self.__class__.__qualname__) + for trait_name, trait in self.traits().items(): + val += "\n {} = {} # {}".format(trait_name, trait.get(self), trait.help) + val += "\n>" + return val + + @staticmethod + def _check_parent(conf, section, name): + parent = conf + if section is not None: + path = section.split("/") + for p in path: + if p not in parent: + parent[p] = dict() + parent = parent[p] + if name in parent: + msg = None + if section is None: + msg = "Config object {} already exists".format(name) + else: + msg = "Config object {}/{} already exists".format(section, name) + raise TraitError(msg) + return parent + + @staticmethod + def _format_conf_trait(trt, tval): + valstr = "None" + unitstr = "None" + typestr = None + if isinstance(trt, Quantity): + if tval is not None: + valstr = "{:0.14e}".format(tval.value) + unitstr = str(tval.unit) + else: + if tval is not None: + if isinstance(trt, Float): + valstr = "{:0.14e}".format(tval) + else: + valstr = "{}".format(tval) + typestr = trait_type_to_string(trt) + return valstr, unitstr, typestr + + @classmethod + def class_config(cls, section=None, input=None): + """Return a dictionary of the default traits of a class. + + This returns a new or appended dictionary. The class default properties are + contained in a dictionary found in result[section][cls.name]. If the section + string contains forward slashes, it is interpreted as a nested dictionary + keys. For example, if section == "sect1/sect2", then the resulting instance + properties will be at result[sect1][sect2][cls.name]. + + If the specified named location in the input config already exists then an + exception is raised. + + Args: + section (str): The section to add properties to. + input (dict): The optional input dictionary to update. + + Returns: + (dict): The created or updated dictionary. 
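+
+        Example (an illustrative sketch; ``SimSatellite`` stands in for any
+        class derived from TraitConfig)::
+
+            conf = SimSatellite.class_config(section="operators")
+            # conf["operators"]["SimSatellite"] maps each trait name to its
+            # "value", "unit", "type", and "help" entries.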
+
+        """
+        if input is None:
+            input = dict()
+        name = cls.__qualname__
+        parent = cls._check_parent(input, section, name)
+        parent[name] = dict()
+        parent[name]["class"] = object_fullname(cls)
+        for trait_name, trait in cls.class_traits().items():
+            trname, trtype, trdefault, trhelp = trait_info(trait)
+            parent[name][trname] = dict()
+            valstr, unitstr, typestr = cls._format_conf_trait(trait, trdefault)
+            parent[name][trname]["value"] = valstr
+            parent[name][trname]["unit"] = unitstr
+            parent[name][trname]["type"] = typestr
+            parent[name][trname]["help"] = trhelp
+        return input
+
+    def config(self, section=None, input=None):
+        """Return a dictionary of the current traits of a class *instance*.
+
+        This returns a new or appended dictionary. The class instance properties are
+        contained in a dictionary found in result[section][self.name]. If the section
+        string contains forward slashes, it is interpreted as nested dictionary
+        keys. For example, if section == "sect1/sect2", then the resulting instance
+        properties will be at result[sect1][sect2][self.name].
+
+        If the specified named location in the input config already exists then an
+        exception is raised.
+
+        Args:
+            section (str): The section to add properties to.
+            input (dict): The optional input dictionary to update.
+
+        Returns:
+            (dict): The created or updated dictionary.
+
+        """
+        if input is None:
+            input = dict()
+        name = self.name
+        parent = self._check_parent(input, section, name)
+        parent[name] = dict()
+        parent[name]["class"] = object_fullname(self.__class__)
+        for trait_name, trait in self.traits().items():
+            trname, trtype, trdefault, trhelp = trait_info(trait)
+            trval = None
+            if trait.get(self) is not None:
+                trval = trtype(trait.get(self))
+            parent[name][trname] = dict()
+            valstr, unitstr, typestr = self._format_conf_trait(trait, trval)
+            parent[name][trname]["value"] = valstr
+            parent[name][trname]["unit"] = unitstr
+            parent[name][trname]["type"] = typestr
+            parent[name][trname]["help"] = trhelp
+        return input
+
+    @classmethod
+    def translate(cls, props):
+        """Translate config properties prior to construction.
+
+        This method can be overridden by derived classes to provide a way of
+        manipulating config properties prior to being parsed and passed to the
+        constructor. This is a way of detecting and accommodating old configuration
+        information if the class code changes.
+
+        Args:
+            props (dict): The original parameter information.
+
+        Returns:
+            (dict): Modified parameters.
+
+        """
+        if "class" in props:
+            del props["class"]
+        return props
+
+    @staticmethod
+    def from_config(name, props):
+        """Factory function to instantiate derived classes from a config.
+
+        This function uses the 'class' key in the properties dictionary to instantiate
+        the desired class and pass in the name and parameters to the constructor.
+
+        Args:
+            name (str): The name of the class instance, passed to the constructor.
+            props (dict): This is a dictionary of properties corresponding to the
+                format returned by the config() and class_config() methods.
+
+        Returns:
+            (TraitConfig): The instantiated derived class.
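+
+        Example (an illustrative sketch, assuming ``conf`` was built with the
+        class_config() or config() methods above)::
+
+            props = conf["operators"]["SimSatellite"]
+            sim = TraitConfig.from_config("sim_satellite", props)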
+
+        """
+        if "class" not in props:
+            msg = "Property dictionary does not contain 'class' key"
+            raise RuntimeError(msg)
+        cls_path = props["class"]
+        cls_parts = cls_path.split(".")
+        cls_name = cls_parts.pop()
+        cls_mod_name = ".".join(cls_parts)
+        cls = None
+        try:
+            cls_mod = importlib.import_module(cls_mod_name)
+            cls = getattr(cls_mod, cls_name)
+        except (ImportError, AttributeError):
+            msg = "Cannot import class '{}' from module '{}'".format(
+                cls_name, cls_mod_name
+            )
+            raise RuntimeError(msg)
+        # We got this far, so we have the class! Perform any translation
+        original = copy.deepcopy(props)
+        props = cls.translate(original)
+
+        # Parse all the parameter type information and create values we will pass to
+        # the constructor.
+        kw = dict()
+        kw["name"] = name
+        for k, v in props.items():
+            if v["unit"] == "None":
+                # Normal scalar, no units
+                if v["value"] == "None":
+                    kw[k] = None
+                else:
+                    pyt = string_to_pytype(v["type"])
+                    if pyt is None:
+                        # This is some kind of more complicated class. We will let the
+                        # constructor choose the default value.
+                        continue
+                    kw[k] = pyt(v["value"])
+            else:
+                # We have a Quantity.
+                kw[k] = u.Quantity(float(v["value"]) * u.Unit(v["unit"]))
+        # Instantiate class and return
+        return cls(**kw)
+
+
+def build_config(objects):
+    """Build a configuration of current values.
+
+    Args:
+        objects (list): A list of class instances to add to the config. These objects
+            must inherit from the TraitConfig base class.
+
+    Returns:
+        (dict): The configuration.
+
+    """
+    conf = OrderedDict()
+    for o in objects:
+        conf = o.config(input=conf)
+    return conf
+
+
+def add_config_args(parser, conf, section, ignore=list(), prefix="", separator=":"):
+    """Add arguments to an argparser for each parameter in a config dictionary.
+
+    Using a previously created config dictionary, add a commandline argument for each
+    object parameter in a section of the config. The type, units, and help string for
+    the commandline argument come from the config, which is in turn built from the
+    class traits of the object. Boolean parameters are converted to store_true or
+    store_false actions depending on their current value.
+
+    Args:
+        parser (ArgumentParser): The parser to append to.
+        conf (dict): The configuration dictionary.
+        section (str): Process objects in this section of the config.
+        ignore (list): List of object parameters to ignore when adding args.
+        prefix (str): Prepend this to the beginning of all options.
+        separator (str): Use this character between the class name and parameter.
+
+    Returns:
+        None
+
+    """
+    parent = conf
+    if section is not None:
+        path = section.split("/")
+        for p in path:
+            if p not in parent:
+                msg = "section {} does not exist in config".format(section)
+                raise RuntimeError(msg)
+            parent = parent[p]
+    for obj, props in parent.items():
+        for name, info in props.items():
+            if name in ignore:
+                # Skip this as requested
+                continue
+            if name == "class":
+                # This is not a user-configurable parameter.
+                continue
+            if info["type"] not in ["bool", "int", "float", "str", "Quantity"]:
+                # This is not something that we can get from parsing commandline
+                # options. Skip it.
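+                # (For example, an Instance trait holding an arbitrary
+                # object has no simple commandline representation.)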
+                continue
+            if info["type"] == "bool":
+                # special case for boolean
+                option = "--{}{}{}{}".format(prefix, obj, separator, name)
+                act = "store_true"
+                if info["value"] == "True":
+                    act = "store_false"
+                    option = "--{}{}{}no_{}".format(prefix, obj, separator, name)
+                parser.add_argument(
+                    option,
+                    required=False,
+                    default=info["value"],
+                    action=act,
+                    help=info["help"],
+                )
+            else:
+                option = "--{}{}{}{}".format(prefix, obj, separator, name)
+                parser.add_argument(
+                    option,
+                    required=False,
+                    default=info["value"],
+                    type=string_to_pytype(info["type"]),
+                    help=info["help"],
+                )
+    return
+
+
+def args_update_config(args, conf, defaults, section, prefix="", separator=":"):
+    """Override options in a config dictionary from args namespace.
+
+    Args:
+        args (namespace): The args namespace returned by ArgumentParser.parse_args()
+        conf (dict): The configuration to update.
+        defaults (dict): The starting default config, used to detect which options from
+            argparse have been changed by the user.
+        section (str): Process objects in this section of the config.
+        prefix (str): Prepend this to the beginning of all options.
+        separator (str): Use this character between the class name and parameter.
+
+    Returns:
+        (namespace): The un-parsed remaining arg vars.
+
+    """
+    remain = copy.deepcopy(args)
+    parent = conf
+    dparent = defaults
+    if section is not None:
+        path = section.split("/")
+        for p in path:
+            if p not in parent:
+                msg = "section {} does not exist in config".format(section)
+                raise RuntimeError(msg)
+            parent = parent[p]
+        for p in path:
+            if p not in dparent:
+                msg = "section {} does not exist in defaults".format(section)
+                raise RuntimeError(msg)
+            dparent = dparent[p]
+    # Build the regex match of option names
+    obj_pat = re.compile("{}(.*?){}(.*)".format(prefix, separator))
+    for arg in vars(args):
+        val = getattr(args, arg)
+        obj_mat = obj_pat.match(arg)
+        if obj_mat is not None:
+            name = obj_mat.group(1)
+            optname = obj_mat.group(2)
+            if name not in parent:
+                msg = "Parsing option '{}', config does not have object named {}".format(
+                    arg, name
+                )
+                raise RuntimeError(msg)
+            if name not in dparent:
+                msg = "Parsing option '{}', defaults does not have object named {}".format(
+                    arg, name
+                )
+                raise RuntimeError(msg)
+            # Only update config options which are different than the default.
+            # Otherwise we would be overwriting values from any config files with the
+            # defaults from argparse.
+            if val != dparent[name][optname]["value"]:
+                parent[name][optname]["value"] = val
+            # This arg was recognized, remove it from the namespace.
+            delattr(remain, arg)
+    return remain
diff --git a/src/toast/utils.py b/src/toast/utils.py
index 0d1b14041..d31bf81d0 100644
--- a/src/toast/utils.py
+++ b/src/toast/utils.py
@@ -4,6 +4,7 @@
 
 import os
 import gc
+import hashlib
 
 import numpy as np
 
@@ -38,7 +39,6 @@
 from .mpi import MPI, use_mpi
 
-
 # This function sets the numba threading layer to (hopefully) be compatible with TOAST.
 # The TOAST threading concurrency is used to attempt to set the numba threading.  We
 # try to use the OpenMP backend for numba and then TBB.  The "workqueue" backend (which
@@ -352,3 +352,57 @@ def ensure_buffer_f64(data):
 #     #     # Does not support buffer protocol
 #     #     print("ensure: converting non-buffer object ", data, flush=True)
 #     #     return np.ascontiguousarray(data, dtype=np.float64)
+
+
+def name_UID(name):
+    """Return a unique integer for a specified name string.
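+
+    The name is hashed with MD5 and the low 32 bits of the digest are used,
+    so the result is deterministic across runs and platforms.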
+    """
+    bdet = name.encode("utf-8")
+    dhash = hashlib.md5()
+    dhash.update(bdet)
+    bdet = dhash.digest()
+    uid = None
+    try:
+        ind = int.from_bytes(bdet, byteorder="little")
+        uid = int(ind & 0xFFFFFFFF)
+    except Exception:
+        raise RuntimeError(
+            "Cannot convert detector name {} to a unique integer- "
+            "maybe it is too long?".format(name)
+        )
+    return uid
+
+
+def rate_from_times(timestamps, mean=False):
+    """Compute effective sample rate in Hz from timestamps.
+
+    There are many cases when we want to apply algorithms that require a fixed
+    sample rate.  We want to compute that from timestamps while also checking for
+    any outliers that could compromise the results.
+
+    By default this function uses the median delta_t, under the assumption that
+    variations in the timing are due to small numerical / bit noise effects.  For
+    larger variations using the mean may be more appropriate (set mean=True).
+
+    This returns the sample rate and also the statistics of the time deltas between
+    samples.
+
+    Args:
+        timestamps (array):  The array of timestamps.
+        mean (bool):  If True, use the mean rather than the median time delta.
+
+    Returns:
+        (tuple):  The (rate, dt, dt_min, dt_max, dt_std) values.
+
+    """
+    tdiff = np.diff(timestamps)
+    dt_min = np.min(tdiff)
+    dt_max = np.max(tdiff)
+    dt_std = np.std(tdiff)
+    dt = None
+    if mean:
+        dt = np.mean(tdiff)
+    else:
+        dt = np.median(tdiff)
+    return (1.0 / dt, dt, dt_min, dt_max, dt_std)
diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb
index ad260e773..79342dd41 100644
--- a/tutorial/01_Introduction/intro.ipynb
+++ b/tutorial/01_Introduction/intro.ipynb
@@ -8,7 +8,7 @@
    "source": [
     "# Introduction\n",
     "\n",
-    "This lesson is a brief introduction to TOAST and its data representations. This next cell is just initializing some things for the notebook."
+    "This lesson is a brief introduction to TOAST: how data is represented in memory and how to build processing workflows. First we import some packages we will use in this notebook."
   ]
  },
 {
@@ -17,23 +17,47 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# Load common tools for all lessons\n",
+    "# Built-in modules\n",
     "import sys\n",
-    "sys.path.insert(0, \"..\")\n",
-    "from lesson_tools import (\n",
-    "    fake_focalplane\n",
-    ")\n",
+    "\n",
+    "# External modules\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import astropy.units as u\n",
+    "\n",
+    "# TOAST\n",
+    "import toast\n",
+    "\n",
     "\n",
     "# Capture C++ output in the jupyter cells\n",
-    "%load_ext wurlitzer"
+    "%load_ext wurlitzer\n",
+    "\n",
+    "# Display inline plots\n",
+    "%matplotlib inline"
   ]
  },
 {
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-    "## Runtime Environment\n",
+    "# Runtime Environment\n",
     "\n",
+    "The `toast` module can be influenced by a few environment variables, which must be set **before** importing `toast`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "help(toast)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
    "You can get the current TOAST runtime configuration from the \"Environment\" class."
] }, @@ -43,8 +67,6 @@ "metadata": {}, "outputs": [], "source": [ - "import toast\n", - "\n", "env = toast.Environment.get()\n", "print(env)" ] @@ -55,11 +77,11 @@ "toc-hr-collapsed": true }, "source": [ - "## Data Model\n", + "# Data Model\n", "\n", - "Before using TOAST for simulation or analysis, it is important to discuss how data is stored in memory and how that data can be distributed among many processes to parallelize large workflows.\n", + "The basic data model in a toast workflow consists of a set of `Observation` instances, each of which is associated with a `Focalplane` on a `Telescope`. Note that a Focalplane instance is probably just a sub-set of detectors on the actual physical focalplane. These detectors must be co-sampled and likely have other things in common (for example, they are on the same wafer or are correlated in some other way). For this notebook, we will manually create these objects, but usually these will be loaded / created by some experiment-specific function.\n", "\n", - "First, let's create a fake focalplane of detectors to use throughout this example." + "MPI is completely optional in TOAST, although it is required to achieve good parallel performance on traditional CPU systems. In this section we show how interactive use of TOAST can be done without any reference to MPI. In a later section we show how to make use of distributed data and operations.\n" ] }, { @@ -68,13 +90,27 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline\n", + "# Start by making a fake focalplane\n", "\n", - "# Generate a fake focalplane with 7 pixels, each with 2 detectors.\n", + "from toast.instrument_sim import (\n", + " fake_hexagon_focalplane,\n", + " plot_focalplane,\n", + ")\n", "\n", - "fp = fake_focalplane()" + "focalplane_pixels = 7 # (hexagonal, pixel zero at center)\n", + "field_of_view = 10.0 # degrees\n", + "sample_rate = 10.0 # Hz\n", + "\n", + "focalplane = fake_hexagon_focalplane(\n", + " focalplane_pixels,\n", + " field_of_view,\n", + " samplerate=10.0,\n", + " epsilon=0.0,\n", + " net=1.0,\n", + " fmin=1.0e-5,\n", + " alpha=1.0,\n", + " fknee=0.05,\n", + ")" ] }, { @@ -85,35 +121,70 @@ "source": [ "# Make a plot of this focalplane layout.\n", "\n", - "detnames = list(sorted(fp.keys()))\n", - "detquat = {x: fp[x][\"quat\"] for x in detnames}\n", - "detfwhm = {x: fp[x][\"fwhm_arcmin\"] for x in detnames}\n", + "detnames = focalplane.detectors\n", + "detquat = {x: focalplane[x][\"quat\"] for x in detnames}\n", + "detfwhm = {x: focalplane[x][\"fwhm_arcmin\"] for x in detnames}\n", "detlabels = {x: x for x in detnames}\n", "detpolcol = {x: \"red\" if i % 2 == 0 else \"blue\" for i, x in enumerate(detnames)}\n", "\n", - "toast.tod.plot_focalplane(\n", - " detquat, 4.0, 4.0, None, fwhm=detfwhm, polcolor=detpolcol, labels=detlabels\n", + "plot_focalplane(\n", + " detquat, \n", + " 1.3 * field_of_view, \n", + " 1.3 * field_of_view, \n", + " None, \n", + " fwhm=detfwhm, \n", + " polcolor=detpolcol, \n", + " labels=detlabels\n", ")" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "### Observations with Time Ordered Data\n", + "# Now make a fake telescope\n", "\n", - "TOAST works with data organized into *observations*. Each observation is independent of any other observation. An observation consists of co-sampled detectors for some span of time. 
The intrinsic detector noise is assumed to be stationary within an observation. Typically there are other quantities which are constant for an observation (e.g. elevation, weather conditions, satellite spin axis, etc).\n",
+    "telescope = toast.Telescope(name=\"fake\", focalplane=focalplane)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have a fake telescope created, we can create an observation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Make an empty observation\n",
     "\n",
-    "An observation is just a dictionary with at least one member (\"tod\") which is an instance of a class that derives from the `toast.TOD` base class.\n",
+    "samples = 10\n",
     "\n",
-    "The inputs to a TOD class constructor are at least:\n",
+    "ob = toast.Observation(telescope, name=\"2020-07-31_A\", samples=samples)\n",
     "\n",
-    "1. The detector names for the observation.\n",
-    "2. The number of samples in the observation.\n",
-    "3. The geometric offset of the detectors from the boresight.\n",
-    "4. Information about how detectors and samples are distributed among processes. More on this below.\n",
+    "print(ob)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here we see our observation simply has the starting information we passed to the constructor. Next we will discuss the three types of data objects that can be stored in an Observation: detector data products, shared telescope data, and arbitrary metadata."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Metadata\n",
     "\n",
-    "The TOD class can act as a storage container for different \"flavors\" of timestreams as well as a source and sink for the observation data (with the read_\\*() and write_\\*() methods):"
+    "By default, the observation is empty. You can add arbitrary metadata to the observation; it acts just like a dictionary."
   ]
  },
@@ -122,12 +193,30 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "import toast.qarray as qa\n",
+    "hk = {\n",
+    "    \"Temperature 1\": np.array([1.0, 2.0, 3.0]),\n",
+    "    \"Other Sensor\": 1.2345\n",
+    "}\n",
     "\n",
-    "nsamples = 1000\n",
+    "ob[\"housekeeping\"] = hk\n",
     "\n",
-    "obs = dict()\n",
-    "obs[\"name\"] = \"20191014_000\""
+    "print(ob)"
   ]
  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Metadata like this is not synchronized in any way between processes. A user or Operator can put any keys here to store small data objects."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Detector Data\n",
+    "\n",
+    "Detector data has some unique properties that we often want to leverage in our analyses. Each process has some detectors and some time slice of the observation. In the case of a single process like this example, all the data is local. Before using data we need to create it within the empty Observation. Here we create a default \"signal\" object for the detectors. The detector data is accessed under the \"detdata\" attribute of the observation:"
+   ]
+  },
 {
  "cell_type": "code",
  "execution_count": null,
@@ -136,14 +225,38 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# The type of TOD class is usually specific to the data processing job.\n",
-    "# For example it might be one of the simulation classes or it might be\n",
-    "# a class that loads experiment data. Here we just use a simple class\n",
-    "# that is only used for testing and which reads / writes data to internal memory\n",
-    "# buffers.\n",
+    "# Create some signal\n",
     "\n",
-    "tod = toast.tod.TODCache(None, detnames, nsamples, detquats=detquat)\n",
-    "obs[\"tod\"] = tod"
+    "ob.detdata.create(\"signal\")\n",
+    "\n",
+    "# Check the contents of the detector data\n",
+    "\n",
+    "print(ob.detdata)\n",
+    "print(ob.detdata[\"signal\"])"
   ]
  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can create other types of detector data:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ob.detdata.create(\"calibrated\")\n",
+    "print(ob.detdata)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default you will get detector data with one element per sample and float64 dtype. You can change this:"
+   ]
+  },
 {
  "cell_type": "code",
  "execution_count": null,
@@ -152,8 +265,11 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# Print the tod to get summary info:\n",
-    "print(tod)"
+    "# Example of data with different shape and dtype\n",
+    "\n",
+    "ob.detdata.create(\"pointing\", shape=(ob.n_sample, 4), dtype=np.float32)\n",
+    "print(ob.detdata)\n",
+    "print(ob.detdata[\"pointing\"])"
   ]
  },
 {
 "cell_type": "code",
 "execution_count": null,
@@ -162,10 +278,20 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# The TOD class has methods to get information about the data:\n",
+    "# Another example for flags\n",
+    "\n",
+    "ob.detdata.create(\"flags\", dtype=np.uint16)\n",
+    "print(ob.detdata)\n",
+    "print(ob.detdata[\"flags\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Details of Detector Data\n",
     "\n",
-    "print(\"TOD has detectors {}\".format(\", \".join(tod.detectors)))\n",
-    "print(\"TOD has {} total samples for each detector\".format(tod.total_samples))"
+    "In the commands above we created named data objects and each one seems to contain an array for each detector. However, this container actually allocates memory in a single block, and you can slice the object both in the detector and sample direction. For example:"
   ]
  },
 {
 "cell_type": "code",
 "execution_count": null,
@@ -174,33 +300,16 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "# Write some data. 
Not every TOD derived class supports writing (for example,\n", - "# TOD classes that represent simulations).\n", + "# Access one detector by name\n", + "ob.detdata[\"signal\"][\"D0A\"] = np.arange(samples, dtype=np.float64)\n", "\n", - "def fill_tod(tod, fp):\n", - " detnames = tod.detectors\n", - " t_delta = 1.0 / fp[detnames[0]][\"rate\"]\n", - " tod.write_times(stamps=np.arange(0.0, nsamples * t_delta, t_delta))\n", - " tod.write_boresight(\n", - " data=qa.from_angles(\n", - " (np.pi / 2) * np.ones(nsamples),\n", - " (2 * np.pi / nsamples) * np.arange(nsamples),\n", - " np.zeros(nsamples)\n", - " )\n", - " )\n", - " tod.write_position(pos=np.zeros((nsamples, 3), dtype=np.float64))\n", - " tod.write_velocity(vel=np.zeros((nsamples, 3), dtype=np.float64))\n", - " tod.write_common_flags(flags=np.zeros(nsamples, dtype=np.uint8))\n", - " for d in detnames:\n", - " tod.write(\n", - " detector=d, data=np.random.normal(\n", - " scale=fp[d][\"NET\"], \n", - " size=nsamples\n", - " )\n", - " )\n", - " tod.write_flags(\n", - " detector=d, flags=np.zeros(nsamples, dtype=np.uint8)\n", - " )" + "# Access one detector by index\n", + "ob.detdata[\"signal\"][1] = 10.0 * np.arange(samples, dtype=np.float64)\n", + "\n", + "# Slice by both detector and sample\n", + "ob.detdata[\"signal\"][[\"D2A\", \"D2B\"], 0:2] = 5.0\n", + "\n", + "print(ob.detdata[\"signal\"])" ] }, { @@ -209,7 +318,17 @@ "metadata": {}, "outputs": [], "source": [ - "fill_tod(tod, fp)" + "# Access the whole thing as a 2D array\n", + "print(ob.detdata[\"signal\"][:])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Shared Data\n", + "\n", + "Many types of data are common to multiple detectors. Some examples would be telescope pointing, timestamps, other sensor data, etc. When running in parallel we want to have just one copy of this data per node in order to save memory. The shared data is accessed under the \"shared\" attribute of the observation. For this serial notebook, you will not need to worry about the details of communicators, but when running in parallel it becomes important. For this reason we will use some helper functions to create some standard shared objects:" ] }, { @@ -218,9 +337,27 @@ "metadata": {}, "outputs": [], "source": [ - "# Read it back\n", + "# Equivalent to:\n", + "# ob.shared.create(\"times\", shape=(ob.n_sample,), dtype=np.float64, comm=ob.comm_col)\n", + "ob.shared.create_times()\n", "\n", - "print(\"TOD timestamps = {} ...\".format(tod.read_times()[:5]))" + "# Equivalent to:\n", + "# ob.shared.create(\"flags\", shape=(ob.n_sample,), dtype=np.uint8, comm=ob.comm_col)\n", + "ob.shared.create_flags()\n", + "\n", + "# Equivalent to:\n", + "# ob.shared.create(\"boresight_radec\", shape=(ob.n_sample, 4), dtype=np.float64, comm=ob.comm_col)\n", + "ob.shared.create_boresight_radec()\n", + "\n", + "print(ob.shared)\n", + "print(ob.shared[\"boresight_radec\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see that the data objects are a special \"MPIShared\" object from the `pshmem` package. Shared data objects can be read with slicing notation just like normal numpy arrays:" ] }, { @@ -229,7 +366,14 @@ "metadata": {}, "outputs": [], "source": [ - "print(\"TOD boresight = \\n{} ...\".format(tod.read_boresight()[:5,:]))" + "print(ob.shared[\"boresight_radec\"][:])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "However, they are intended to be \"write once\", \"read many\" objects. You cannot simply assign data to them. 
The reason is that the data is replicated across nodes and so setting array values must be a collective operation using the `set()` method:" ] }, { @@ -238,13 +382,13 @@ "metadata": {}, "outputs": [], "source": [ - "for d in detnames:\n", - " print(\"TOD detector {} = {} ...\".format(\n", - " d, tod.read(detector=d, n=5))\n", - " )\n", - " print(\"TOD detector {} flags = {} ...\".format(\n", - " d, tod.read_flags(detector=d, n=5))\n", - " )" + "nullquat = np.array([0.0, 0.0, 0.0, 1.0])\n", + "\n", + "ob.shared[\"boresight_radec\"].set(np.tile(nullquat, ob.n_sample).reshape((-1, 4)))\n", + "\n", + "pntg = ob.shared[\"boresight_radec\"]\n", + "\n", + "print(pntg[:])" ] }, { @@ -253,23 +397,22 @@ "metadata": {}, "outputs": [], "source": [ - "# Store some data in the cache. The \"cache\" member variable looks like a dictionary of\n", - "# numpy arrays, but the memory used is allocated in C, so that we can actually clear\n", - "# these buffers when needed.\n", + "ob.shared[\"times\"].set(np.arange(ob.n_sample, dtype=np.float64))\n", "\n", - "for d in detnames:\n", - " processed = tod.read(detector=d)\n", - " processed /= 2.0\n", - " # By convention, we usually name buffers in the cache by _\n", - " tod.cache.put(\"processed_{}\".format(d), processed)\n", - "print(\"TOD cache now contains {} bytes\".format(tod.cache.report(silent=True)))" + "print(ob.shared[\"times\"][:])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "One common use pattern is to \"read and cache\" data. This happens if we want to keep the data in memory to re-use later. The TOD class has a set of methods that start with the string \"local_\" that perform this action." + "## Intervals\n", + "\n", + "Each `Observation` may contain one or more \"interval lists\" which act as a global (within the observation) list of time / sample ranges where some feature of the data is constant. Interval lists support sample-wise inversion, intersection and union operations using the standard python bitwise operators (`^`, `&`, and `|`).\n", + "\n", + "Intervals are **not** intended to act as individual sample quality flags. Per-sample flags should be created either as a shared timestream (for flags common to all detectors) or as a detector data object (for per-detector flags). Intervals can be used to represent things changing less frequently, for example: left or right moving telescope scans, satellite repointing maneuvers, calibration measurements, etc.\n", + "\n", + "A single `Interval` consists of a time and a (local) sample range:" ] }, { @@ -278,30 +421,79 @@ "metadata": {}, "outputs": [], "source": [ - "# Get data from cache or read and cache\n", + "? toast.Interval" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# The observation starts with no lists of intervals\n", "\n", - "print(\"TOD timestamps = {} ...\".format(tod.local_times()[:2]))\n", - "for d in detnames:\n", - " print(\"TOD detector {} = {} ...\".format(\n", - " d, tod.local_signal(d)[:2])\n", - " )\n", - " print(\"TOD detector {} pointing = {} ...\".format(\n", - " d, tod.local_pointing(d)[:2,:])\n", - " )\n", - " print(\"TOD detector {} flags = {} ...\".format(\n", - " d, tod.local_flags(d)[:2])\n", - " )" + "ob.intervals" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To add a new interval list, use the `create()` method. 
Remember, in this notebook we have only one process, so do not have to worry about which process this information is coming from:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "help(ob.intervals.create)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Comm : Groups of Processes\n", + "Here we create one list of intervals. We specify the time ranges and the local array of timestamp values. Inside the code, the timestamps are used to convert these input time ranges into `Interval` objects." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ob.intervals.create(\"good\", [(1.5, 3.5), (4.5, 6.), (7., 8.5)], ob.shared[\"times\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Now there is one interval list in the observation\n", "\n", - "A toast.Comm instance takes the global number of processes available (MPI.COMM_WORLD) and divides them into groups. Each process group is assigned one or more observations. Since observations are independent, this means that different groups can be independently working on separate observations in parallel. It also means that inter-process communication needed when working on a single observation can occur with a smaller set of processes.\n", + "print(ob.intervals)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# The create method converted the time ranges into actual Interval instances:\n", "\n", - "At NERSC, this notebook is running on a login node, so we cannot use MPI. Constructing a default `toast.Comm` whenever MPI use is disabled will just produce a single group of one process. See the parallel example at the end of this notebook for a case with multiple groups." + "print(ob.intervals[\"good\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now create another list of intervals:" ] }, { @@ -310,17 +502,27 @@ "metadata": {}, "outputs": [], "source": [ - "comm = toast.Comm()\n", - "print(comm)" + "ob.intervals.create(\"stable\", [(0.5, 2.5), (3.5, 5.), (6., 7.5)], ob.shared[\"times\"])\n", + "print(ob.intervals)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Data : a Collection of Observations\n", + "As mentioned before, we can combine these in different ways:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ob.intervals[\"stable-and-not-good\"] = ob.intervals[\"stable\"] & ~ob.intervals[\"good\"]\n", "\n", - "A toast.Data instance is mainly just a list of observations. However remember that each process group will have a different set of observations. Since we have only one group of one process, this example is not so interesting. See the MPI example." + "print(ob.intervals)\n", + "print(ob.intervals[\"stable-and-not-good\"])" ] }, { @@ -329,20 +531,19 @@ "metadata": {}, "outputs": [], "source": [ - "data = toast.Data(comm)\n", - "data.obs.append(obs)" + "ob.intervals[\"not-stable-or-not-good\"] = ~ob.intervals[\"stable\"] | ~ob.intervals[\"good\"]\n", + "\n", + "print(ob.intervals)\n", + "print(ob.intervals[\"not-stable-or-not-good\"])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Data Distribution\n", - "\n", - "Recapping previous sections, we have some groups of processes, each of which has a set of observations. 
Within a single process group, the detector data is distributed across the processes within the group. That distribution is controlled by the size of the communicator passed to the TOD class, and also by the `detranks` parameter of the constructor. This detranks number sets the dimension of the process grid in the detector direction. For example, a value of \"1\" means that every process has all detectors for some span of time. A value equal to the size of the communicator results in every process having some number of detectors for the entire observation. The detranks parameter must divide evenly into the number of processes in the communicator and determines how the processes are arranged in a grid.\n", + "## Views\n", "\n", - "As a concrete example, imagine that MPI.COMM_WORLD has 4 processes. We split this into 2 groups of 2 procesess. There are 3 observations of varying lengths and every group has one or 2 observations. Here is the starting point of our data distribution:\n", - "" + "Typically when defining data intervals in the last section it is because you want to do something with only the data falling in those sample ranges. Each observation has the ability to provide a \"view\" into the detector and shared data given by a previously defined interval list. Views are created on the fly on first access and are deleted automatically if the underlying interval is deleted. First, examine a view of the \"good\" interval list we defined in the previous section:" ] }, { @@ -350,14 +551,15 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "print(ob.view[\"good\"])" + ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Next we split the processes into 2 groups\n", - "" + "The string represention of a view is just a list of sample slices. However, the real power is that we can get a view of any of the observation `detdata` or `shared` objects. For example, we could get a view of the detector `signal` data. Recall that the full data for this is:" ] }, { @@ -365,14 +567,15 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "ob.detdata[\"signal\"][:]" + ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Then we assign our observations to the two groups\n", - "" + "A view of the signal data falling in the \"good\" intervals is:" ] }, { @@ -380,14 +583,15 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "ob.view[\"good\"].detdata[\"signal\"][:]" + ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "When we create the TOD class in each observation, we specify how the the data is distributed within each observation. If the `detranks` parameter is \"1\", then the dimension of the process grid in the detector direction is one.\n", - "" + "This view is a list of arrays which have sliced the data in the time direction. These are **not** copies- they provide read/write access to underlying buffer. 
If you are doing many operations with a view it is easier to name it something else:" ] }, { @@ -395,23 +599,34 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "sng = ob.view[\"stable-and-not-good\"]\n", + "sng.detdata[\"signal\"]" + ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "If `detranks` is set to the size of the group, then we get:\n", - "" + "Again, we can use a view to assign data to a subset of the full samples:" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "## Working with Data\n", + "sng.detdata[\"signal\"] = 7.0\n", "\n", - "Once we have our distributed data set up, we usually feed this through a `pipeline`. There will be a lesson on pipelines later. Here we will create an entire fake dataset and work with it. The MPI introduction notebook will go into more details about working with distributed data." + "print(ob.detdata[\"signal\"][:])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can access shared data as well with this view, but it is read-only from the view (the `set()` method of the shared objects must be used to collectively set that data):" ] }, { @@ -420,7 +635,7 @@ "metadata": {}, "outputs": [], "source": [ - "comm = toast.Comm()" + "ob.view[\"good\"].shared[\"boresight_radec\"]" ] }, { @@ -429,7 +644,16 @@ "metadata": {}, "outputs": [], "source": [ - "data = toast.Data(comm)" + "sng.shared[\"boresight_radec\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Data Container\n", + "\n", + "The `Observation` instances discussed previously are usually stored as a list inside a top-level container class called `Data`. This class also stores the TOAST MPI communicator information. For this serial example you can just instantiate an empty `Data` class and add things to the observation list:" ] }, { @@ -438,20 +662,29 @@ "metadata": {}, "outputs": [], "source": [ - "# Create 3 observations, each containing an TODCache class. We'll\n", - "# use the same focalplane and number of samples for each observation,\n", - "# but this is not required- each observation is independent.\n", + "data = toast.Data()\n", + "\n", + "print(data)\n", + "\n", + "print(data.obs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Obviously this `Data` object has no observations yet. We'll fix that in the next section!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Processing Model\n", + "\n", + "The TOAST processing model consists of `Operator` class instances running in a sequence on a subset of data. These sequences could be nested within other sequences (see the `Pipeline` operator below).\n", "\n", - "for i in range(3):\n", - " obsname = \"observation_{:02d}\".format(i)\n", - " obs = dict()\n", - " obs[\"name\"] = obsname\n", - " obs[\"id\"] = \"{:02d}\".format(i)\n", - " obs[\"tod\"] = toast.tod.TODCache(\n", - " comm.comm_group, detnames, nsamples, detquats=detquat\n", - " )\n", - " fill_tod(obs[\"tod\"], fp)\n", - " data.obs.append(obs)" + "The Operator base class defines the interfaces for operators working on data. Operators are configured by defining class traits (attributes) which can be set during construction. An operator has an exec() method that works with Data objects. We will start by looking at the SimSatellite operator to simulate fake telescope scan strategies for a generic satellite. 
We can always see the options and default values by using the standard help function or the '?' command:\n" ] }, { @@ -460,17 +693,37 @@ "metadata": {}, "outputs": [], "source": [ - "# What does our distributed data look like now?\n", - "data.info()" + "from toast import future_ops as ops\n", + "\n", + "?ops.SimSatellite" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Here we see a dump of the distributed data, and in this case the `TODCache` class is storing stuff \"under the hood\", which is why data shows up in the dump of the cache as well as when calling the normal TOD access methods.\n", + "You can instantiate a class directly by overriding some defaults:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "simsat = ops.SimSatellite(\n", + " n_observation=2, \n", + " observation_time=(5 * u.minute),\n", + ")\n", "\n", - "Next, we can dump this data to a TIDAS volume (directories of HDF5 files in this case)." + "print(simsat)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After the operator is constructed, the parameters can be changed directly. For example:" ] }, { @@ -479,29 +732,17 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", - "import shutil\n", + "simsat.telescope = telescope\n", + "simsat.n_observation = 3\n", "\n", - "if toast.tod.tidas_available:\n", - " import toast.tod.tidas as tt\n", - " datapath = \"intro_data\"\n", - " if os.path.isdir(datapath):\n", - " shutil.rmtree(datapath)\n", - " exporter = tt.OpTidasExport(\n", - " datapath,\n", - " tt.TODTidas,\n", - " backend=\"hdf5\",\n", - " comp=\"none\",\n", - " use_todchunks=True,\n", - " )\n", - " exporter.exec(data)" + "print(simsat)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "And then we can load it back in..." + "And now we have an `Operator` that is ready to use. This particular operator creates observations from scratch with telescope properties generated and stored. We can create an empty `Data` object and then run this operator on it:" ] }, { @@ -510,20 +751,66 @@ "metadata": {}, "outputs": [], "source": [ - "if toast.tod.tidas_available:\n", - " data = tt.load_tidas(\n", - " comm,\n", - " comm.group_size,\n", - " datapath,\n", - " \"r\",\n", - " \"detectors\",\n", - " tt.TODTidas,\n", - " distintervals=\"chunks\",\n", - " group_dets=\"detectors\",\n", - " )\n", - "data.info()" + "simsat.exec(data)\n", + "simsat.finalize(data)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(data)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You may be tempted to make a wrapper function around the `exec()` and `finalize()` methods, but these are separate for a reason. The `exec()` method might be called multiple times with subsets of the data and the `finalize()` method is only called once. In the previous example, we just happen to be making one call to exec()." 
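+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For example, a workflow processing detectors in two batches would follow this pattern (sketch only- the detector names below are placeholders, not names from our focalplane):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch of the calling pattern (not run here):\n",
+    "#\n",
+    "#   simsat.exec(data, detectors=[\"d00\", \"d01\"])  # first batch\n",
+    "#   simsat.exec(data, detectors=[\"d02\", \"d03\"])  # second batch\n",
+    "#   simsat.finalize(data)                        # exactly once, at the end"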
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(data)\n", + "print(\"There are {} observations\".format(len(data.obs)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(data.obs[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "markdown", "metadata": { @@ -540,8 +827,6 @@ "\n", "- `toast.fft`: API Wrapper around different vendor FFT packages.\n", "\n", - "- `toast.cache`: Class for dictionary of C-allocated numpy arrays.\n", - "\n", "- `toast.healpix`: Subset of pixel projection routines, simd vectorized and threaded.\n", "\n", "- `toast.timing`: Simple serial timers, global named timers per process, a decorator to time calls to functions, and MPI tools to gather timing statistics from multiple processes.\n" @@ -806,127 +1091,6 @@ "print(backfft)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Cache Example\n", - "\n", - "The Cache class provides a mechanism to work around the Python memory pool. There are times when we want to allocate memory and explicitly free it without waiting for garbage collection. Every instance of a `toast.Cache` acts as a dictionary of numpy arrays. Internally, the memory of each entry is a flat-packed std::vector with a custom allocator that ensures aligned memory allocation. Aligned memory is required for SIMD operations both in TOAST and in external libraries. Buffers in a Cache instance can be used directly for such operations." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from toast.cache import Cache\n", - "\n", - "# Example array dimensions\n", - "\n", - "cnames = [\"c1\", \"c2\"]\n", - "cshapes = {\n", - " \"c1\" : (20,),\n", - " \"c2\" : (2, 3, 2)\n", - "}\n", - "ctyps = {\n", - " \"c1\" : np.float64,\n", - " \"c2\" : np.uint16\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# A cache instance\n", - "\n", - "cache = Cache()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create some empty arrays in the cache\n", - "\n", - "for cn in cnames:\n", - " cache.create(cn, ctyps[cn], cshapes[cn])\n", - "\n", - "print(\"---- Cache object ----\")\n", - "print(cache)\n", - "print(\"\\n---- Now contains ----\")\n", - "for cn in cnames:\n", - " print(\"{}: {}\".format(cn, cache.reference(cn)))\n", - "print(\"Size = \", cache.report(silent=True), \" bytes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Fill existing buffers\n", - "\n", - "# Get a reference to the buffer\n", - "cdata = cache.reference(\"c1\")\n", - "\n", - "# Assign elements.\n", - "cdata[:] = np.random.random(cshapes[\"c1\"])\n", - "\n", - "# Delete the reference\n", - "del cdata" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cdata = cache.reference(\"c2\")\n", - "idx = 0\n", - "for x in range(cshapes[\"c2\"][0]):\n", - " for y in range(cshapes[\"c2\"][1]):\n", - " for z in range(cshapes[\"c2\"][2]):\n", - " cdata[x, y, z] = idx\n", - " idx += 1\n", - "del cdata\n", - " \n", - "print(\"\\n---- Contents after filling ----\")\n", - "for cn in cnames:\n", - " print(\"{}: {}\".format(cn, cache.reference(cn)))\n", - "print(\"Size = \", cache.report(silent=True), \" bytes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# We can also \"put\" existing numpy arrays which will then be copied into\n", - "# the cache\n", - "\n", - "np1 = np.random.normal(size=10)\n", - "np2 = np.random.randint(0, high=255, dtype=np.uint16, size=12).reshape((2, 3, 2))\n", - "\n", - "cache.put(\"p1\", np1)\n", - "cache.put(\"p2\", np2)\n", - "\n", - "print(\"\\n---- Contents after putting numpy arrays ----\")\n", - "\n", - "for cn in list(cache.keys()):\n", - " print(\"{}: {}\".format(cn, cache.reference(cn)))\n", - "print(\"Size = \", cache.report(silent=True), \" bytes\")" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -982,7 +1146,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.5" + "version": "3.8.5" } }, "nbformat": 4, diff --git a/tutorial/01_Introduction/intro_parallel.ipynb b/tutorial/01_Introduction/intro_parallel.ipynb new file mode 100644 index 000000000..882534379 --- /dev/null +++ b/tutorial/01_Introduction/intro_parallel.ipynb @@ -0,0 +1,203 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "# Parallel Processing\n", + "\n", + "In the first introductory notebook we covered the basic TOAST data and processing models, including using them interactively in a serial notebook. In this notebook we will explore distributed data and processing." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Built-in modules\n",
+    "import sys\n",
+    "\n",
+    "# External modules\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import astropy.units as u\n",
+    "\n",
+    "# TOAST\n",
+    "import toast\n",
+    "\n",
+    "\n",
+    "# Capture C++ output in the jupyter cells\n",
+    "%load_ext wurlitzer\n",
+    "\n",
+    "# Display inline plots\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Parallel Processing\n",
+    "\n",
+    "TBD: Major cleanup of the sections below."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Comm : Groups of Processes\n",
+    "\n",
+    "A toast.Comm instance takes the global number of processes available (MPI.COMM_WORLD) and divides them into groups. Each process group is assigned one or more observations. Since observations are independent, this means that different groups can work independently on separate observations in parallel. It also means that the inter-process communication needed when working on a single observation can occur with a smaller set of processes.\n",
+    "\n",
+    "At NERSC, this notebook is running on a login node, so we cannot use MPI. When MPI is disabled, constructing a default `toast.Comm` will just produce a single group of one process. See the parallel example at the end of this notebook for a case with multiple groups."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "comm = toast.Comm()\n",
+    "print(comm)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Data : a Collection of Observations\n",
+    "\n",
+    "A toast.Data instance is mainly just a list of observations. However, remember that each process group will have a different set of observations. Since we have only one group of one process, this example is not so interesting. See the MPI example."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data = toast.Data(comm)\n",
+    "# Here \"obs\" is an Observation, for example one created earlier in\n",
+    "# the serial introduction notebook.\n",
+    "data.obs.append(obs)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Data Distribution\n",
+    "\n",
+    "Recapping previous sections, we have some groups of processes, each of which has a set of observations. Within a single process group, the detector data is distributed across the processes within the group. That distribution is controlled by the size of the communicator passed to the TOD class, and also by the `detranks` parameter of the constructor. This detranks number sets the dimension of the process grid in the detector direction. For example, a value of \"1\" means that every process has all detectors for some span of time. A value equal to the size of the communicator results in every process having some number of detectors for the entire observation. The detranks parameter must divide evenly into the number of processes in the communicator and determines how the processes are arranged in a grid.\n",
+    "\n",
+    "As a concrete example, imagine that MPI.COMM_WORLD has 4 processes. We split this into 2 groups of 2 processes. There are 3 observations of varying lengths and every group has one or two observations.
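 The next cell sketches how this setup would be constructed (not run here, since we have no MPI runtime; the `world` and `groupsize` constructor arguments are assumptions based on the serial example above)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only- requires an MPI runtime with 4 processes:\n",
+    "#\n",
+    "#   from toast.mpi import MPI\n",
+    "#\n",
+    "#   # Two groups, each with 2 processes\n",
+    "#   comm = toast.Comm(world=MPI.COMM_WORLD, groupsize=2)\n",
+    "#   data = toast.Data(comm)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "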
Here is the starting point of our data distribution:\n",
+    ""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we split the processes into two groups:\n",
+    ""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then we assign our observations to the two groups:\n",
+    ""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When we create the TOD class in each observation, we specify how the data is distributed within each observation. If the `detranks` parameter is \"1\", then the dimension of the process grid in the detector direction is one.\n",
+    ""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If `detranks` is set to the size of the group, then we get:\n",
+    ""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}

From 171fe01e0abe2f1c6bf1dc2d92e54858b4eeab93 Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Thu, 15 Oct 2020 13:31:55 -0700
Subject: [PATCH 002/690] More work on notebook example

---
 src/toast/future_ops/__init__.py      |  2 +-
 src/toast/future_ops/noise_model.py   |  4 +-
 src/toast/future_ops/pipeline.py      | 12 +++-
 src/toast/future_ops/sim_satellite.py |  6 +-
 tutorial/01_Introduction/intro.ipynb  | 98 +++++++++++++++++++++++++--
 5 files changed, 110 insertions(+), 12 deletions(-)

diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py
index 9f6cf4831..4e1f07c27 100644
--- a/src/toast/future_ops/__init__.py
+++ b/src/toast/future_ops/__init__.py
@@ -4,7 +4,7 @@
 
 # import functions into our public API
 
-# from .pipeline import Pipeline
+from .pipeline import Pipeline
 
 from .sim_satellite import SimSatellite
 
diff --git a/src/toast/future_ops/noise_model.py b/src/toast/future_ops/noise_model.py
index 9e8c1bfb1..9a0aac0a5 100644
--- a/src/toast/future_ops/noise_model.py
+++ b/src/toast/future_ops/noise_model.py
@@ -81,10 +81,10 @@ def _finalize(self, data, **kwargs):
         return
 
     def _requires(self):
-        return list()
+        return dict()
 
     def _provides(self):
-        prov = [self.noisekey]
+        prov = {"meta": [self.noisekey]}
         return prov
 
     def _accelerators(self):
diff --git a/src/toast/future_ops/pipeline.py b/src/toast/future_ops/pipeline.py
index 5228359a1..4fea6fb26 100644
--- a/src/toast/future_ops/pipeline.py
+++ b/src/toast/future_ops/pipeline.py
@@ -2,6 +2,8 @@
 # All rights reserved. Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
 
+import traitlets
+
 from ..utils import Logger
 
 from ..traits import trait_docs, Int, Unicode, List
 
@@ -9,12 +11,18 @@
 from ..operator import Operator
 
 
+@trait_docs
 class Pipeline(Operator):
-    """Class representing a sequence of Operators."""
+    """Class representing a sequence of Operators.
+
+    This runs a list of other operators over sets of detectors (the default is all
+    detectors in one shot).
+
+    """
 
     # Class traits
 
-    API = traitlets.Int(0, help="Internal interface version for this operator")
+    API = Int(0, help="Internal interface version for this operator")
 
     operators = List(allow_none=True, help="List of Operator instances to run.")
 
diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py
index 63f756521..ea54c5fb9 100644
--- a/src/toast/future_ops/sim_satellite.py
+++ b/src/toast/future_ops/sim_satellite.py
@@ -2,12 +2,12 @@
 # All rights reserved. Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
 
+import traitlets
+
 import numpy as np
 
 from scipy.constants import degree
 
-import traitlets
-
 import healpy as hp
 
 from astropy import units as u
@@ -267,7 +267,7 @@ class SimSatellite(Operator):
 
     # Class traits
 
-    API = traitlets.Int(0, help="Internal interface version for this operator")
+    API = Int(0, help="Internal interface version for this operator")
 
     telescope = Instance(
         klass=Telescope, allow_none=True, help="This must be an instance of a Telescope"
diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb
index 79342dd41..cf41f82e2 100644
--- a/tutorial/01_Introduction/intro.ipynb
+++ b/tutorial/01_Introduction/intro.ipynb
@@ -19,6 +19,7 @@
    "source": [
     "# Built-in modules\n",
     "import sys\n",
+    "import os\n",
     "\n",
     "# External modules\n",
     "import numpy as np\n",
@@ -771,14 +772,29 @@
     "You may be tempted to make a wrapper function around the `exec()` and `finalize()` methods, but these are separate for a reason. The `exec()` method might be called multiple times with subsets of the data and the `finalize()` method is only called once. In the previous example, we just happen to be making one call to exec()."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Pipelines\n",
+    "\n",
+    "TOAST includes a special operator (the `Pipeline` class), which is designed to run other operators (including other `Pipeline` instances). The purpose of this operator is to run sequences of other operators over sets of detectors, to reduce the memory cost of intermediate products and/or to group together operators that support the use of accelerators to avoid memory copies to the host system."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(data)\n",
-    "print(\"There are {} observations\".format(len(data.obs)))"
+    "? 
ops.Pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As an example, we can create two simple operators and put them in a pipeline:" ] }, { @@ -787,7 +803,81 @@ "metadata": {}, "outputs": [], "source": [ - "print(data.obs[0])" + "simsat = ops.SimSatellite(\n", + " n_observation=2, \n", + " observation_time=(5 * u.minute),\n", + " telescope=telescope\n", + ")\n", + "\n", + "default_noise = ops.DefaultNoiseModel()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pipe = ops.Pipeline(\n", + " operators=[simsat, default_noise]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can start with an empty Data object and run the pipeline on it:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = toast.Data()\n", + "pipe.exec(data)\n", + "pipe.finalize(data)\n", + "\n", + "print(data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see here that the same satellite simulation was run, and then a default noise model (using the focalplane properties in each observation) was created." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuration Files\n", + "\n", + "Operators are configured through class traits which can be passed as keyword arguments to the constructor. We can also dump / load these properties to a config file and construct instances from those. For this example, we'll create some temp files and load them back in." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from toast.config import dump_toml, load_toml, load_json, dump_json, load_config\n", + "import tempfile\n", + "\n", + "tmpdir = tempfile.mkdtemp()\n", + "toml_file = os.path.join(tmpdir, \"test.toml\")\n", + "json_file = os.path.join(tmpdir, \"test.json\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As an example, we can take a previous operator:" ] }, { @@ -817,7 +907,7 @@ "toc-hr-collapsed": true }, "source": [ - "## Utilities\n", + "# Utilities\n", "\n", "There are many utilities in the TOAST package that use compiled code internally. These include:\n", "\n", From 7c149d7dc9d1e5aa09373fda3cb9d6447316d88d Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 15 Oct 2020 13:58:01 -0700 Subject: [PATCH 003/690] Update RTD requirements --- docs/rtd_requirements.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/rtd_requirements.txt b/docs/rtd_requirements.txt index 3886a9943..526df57b0 100644 --- a/docs/rtd_requirements.txt +++ b/docs/rtd_requirements.txt @@ -5,3 +5,6 @@ healpy astropy h5py ephem +tomlkit +traitlets>=5.0 +pshmem From d3cd9588a9d2da4580015ed0e23dce10b81fce8d Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Fri, 16 Oct 2020 07:48:52 -0700 Subject: [PATCH 004/690] Add section on config files to intro. 
---
 docs/rtd_requirements.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/rtd_requirements.txt b/docs/rtd_requirements.txt
index 3886a9943..526df57b0 100644
--- a/docs/rtd_requirements.txt
+++ b/docs/rtd_requirements.txt
@@ -5,3 +5,6 @@ healpy
 astropy
 h5py
 ephem
+tomlkit
+traitlets>=5.0
+pshmem

From d3cd9588a9d2da4580015ed0e23dce10b81fce8d Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Fri, 16 Oct 2020 07:48:52 -0700
Subject: [PATCH 004/690] Add section on config files to intro.

---
 src/toast/traits.py                  |  23 +++--
 tutorial/01_Introduction/intro.ipynb | 128 +++++++++++++++++++++++++--
 2 files changed, 135 insertions(+), 16 deletions(-)

diff --git a/src/toast/traits.py b/src/toast/traits.py
index 4dc06a5fa..d388ab4ac 100644
--- a/src/toast/traits.py
+++ b/src/toast/traits.py
@@ -268,14 +268,14 @@ def class_config(cls, section=None, input=None):
 
         """
         if input is None:
-            input = dict()
+            input = OrderedDict()
         name = cls.__qualname__
         parent = cls._check_parent(input, section, name)
-        parent[name] = dict()
+        parent[name] = OrderedDict()
         parent[name]["class"] = object_fullname(cls)
         for trait_name, trait in cls.class_traits().items():
             trname, trtype, trdefault, trhelp = trait_info(trait)
-            parent[name][trname] = dict()
+            parent[name][trname] = OrderedDict()
             valstr, unitstr, typestr = cls._format_conf_trait(trait, trdefault)
             parent[name][trname]["value"] = valstr
             parent[name][trname]["unit"] = unitstr
@@ -304,17 +304,20 @@ def config(self, section=None, input=None):
 
         """
         if input is None:
-            input = dict()
+            input = OrderedDict()
         name = self.name
         parent = self._check_parent(input, section, name)
-        parent[name] = dict()
+        parent[name] = OrderedDict()
         parent[name]["class"] = object_fullname(self.__class__)
         for trait_name, trait in self.traits().items():
             trname, trtype, trdefault, trhelp = trait_info(trait)
             trval = None
             if trait.get(self) is not None:
-                trval = trtype(trait.get(self))
-            parent[name][trname] = dict()
+                try:
+                    trval = trtype(trait.get(self))
+                except Exception:
+                    trval = str(trait.get(self))
+            parent[name][trname] = OrderedDict()
             valstr, unitstr, typestr = self._format_conf_trait(trait, trval)
             parent[name][trname]["value"] = valstr
             parent[name][trname]["unit"] = unitstr
@@ -525,8 +528,10 @@ def args_update_config(args, conf, defaults, section, prefix="", separator=":"):
             name = obj_mat.group(1)
             optname = obj_mat.group(2)
             if name not in parent:
-                msg = "Parsing option '{}', config does not have object named {}".format(
-                    arg, name
+                msg = (
+                    "Parsing option '{}', config does not have object named {}".format(
+                        arg, name
+                    )
                 )
                 raise RuntimeError(msg)
             if name not in dparent:
diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb
index cf41f82e2..96ad3303f 100644
--- a/tutorial/01_Introduction/intro.ipynb
+++ b/tutorial/01_Introduction/intro.ipynb
@@ -720,6 +720,28 @@
     "print(simsat)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you are using multiple instances of an operator in your pipeline with different configurations, then you should also pass a unique \"name\" to the constructor. This allows keeping the operators distinct when using config files (see more below):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "other_simsat = ops.SimSatellite(\n",
+    "    name=\"other_simsat\",\n",
+    "    n_observation=2, \n",
+    "    observation_time=(5 * u.minute),\n",
+    ")\n",
+    "\n",
+    "print(other_simsat)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Configuration Files\n",
+    "## Configuration of Operators\n",
     "\n",
-    "Operators are configured through class traits which can be passed as keyword arguments to the constructor. We can also dump / load these properties to a config file and construct instances from those. For this example, we'll create some temp files and load them back in."
+ "Operators are configured through class traits which can be passed as keyword arguments to the constructor. We can also dump information about these traits (name, type, help string) to an intermediate config dictionary and then write that to files in TOML or JSON format. These config dictionaries can also be used to instantiate operators directly." ] }, { @@ -865,8 +887,11 @@ "metadata": {}, "outputs": [], "source": [ - "from toast.config import dump_toml, load_toml, load_json, dump_json, load_config\n", + "from toast.config import dump_toml, load_toml, load_json, dump_json, load_config, create\n", "import tempfile\n", + "from pprint import PrettyPrinter\n", + "\n", + "pp = PrettyPrinter(indent=1)\n", "\n", "tmpdir = tempfile.mkdtemp()\n", "toml_file = os.path.join(tmpdir, \"test.toml\")\n", @@ -877,7 +902,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As an example, we can take a previous operator:" + "As an example, we can take a previous operator and look at the \"round trip\" from class or instance, to a config dictionary, to a file, and back into creating a new operator instance from that:" ] }, { @@ -885,21 +910,110 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# This gives us the config for an existing instance\n", + "\n", + "conf = other_simsat.config()\n", + "pp.pprint(conf)" + ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# This gives us the default config values for a class\n", + "\n", + "default_conf = ops.SimSatellite.class_config()\n", + "pp.pprint(default_conf)" + ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "dump_toml(toml_file, conf)\n", + "dump_json(json_file, conf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can see what this config looks like dumped to TOML and JSON:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cat {toml_file}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!cat {json_file}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And then we can load the config back in to a dictionary:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "newconf = load_config(toml_file)\n", + "pp.pprint(newconf)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we can create new instances of operators from this config dictionary:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run = create(newconf)\n", + "print(run)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we access our new operator and use it:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_simsat = run[\"operators\"][\"other_simsat\"]\n", + "print(new_simsat)" + ] }, { "cell_type": "markdown", From af577861d7a153642bc1496d697085a1f1f41ccf Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 22 Oct 2020 08:01:35 -0700 Subject: [PATCH 005/690] Implement improved setitem for observation attributes. Add unit tests for those. More porting of operators. 
--- pipelines/toast_future.py | 2 +- src/toast/CMakeLists.txt | 2 + src/toast/future_ops/__init__.py | 2 +- src/toast/future_ops/noise_model.py | 10 +- src/toast/future_ops/pointing_healpix.py | 237 +++++----- src/toast/future_ops/sim_satellite.py | 8 +- src/toast/future_ops/sim_tod_noise.py | 336 +++++++++----- src/toast/intervals.py | 68 +++ src/toast/noise.py | 151 ++++++ src/toast/noise_sim.py | 104 +++++ src/toast/observation.py | 560 ++++++++++++++--------- src/toast/tests/observation.py | 196 +++++++- tutorial/01_Introduction/intro.ipynb | 39 +- 13 files changed, 1240 insertions(+), 475 deletions(-) create mode 100644 src/toast/noise.py create mode 100644 src/toast/noise_sim.py diff --git a/pipelines/toast_future.py b/pipelines/toast_future.py index 7906ada4f..b1f562184 100644 --- a/pipelines/toast_future.py +++ b/pipelines/toast_future.py @@ -33,7 +33,7 @@ from toast.timing import dump as dump_timing -from toast import dump_config, parse_config, create +from toast import dump_toml, parse_config, create from toast import future_ops as ops diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 04e235393..faebdd263 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -93,6 +93,8 @@ install(FILES intervals.py instrument.py instrument_sim.py + noise.py + noise_sim.py observation.py operator.py vis.py diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py index 4e1f07c27..669e9c29f 100644 --- a/src/toast/future_ops/__init__.py +++ b/src/toast/future_ops/__init__.py @@ -8,7 +8,7 @@ from .sim_satellite import SimSatellite -# from .sim_tod_noise import SimNoise +from .sim_tod_noise import SimNoise from .noise_model import DefaultNoiseModel diff --git a/src/toast/future_ops/noise_model.py b/src/toast/future_ops/noise_model.py index 9a0aac0a5..7a49d873f 100644 --- a/src/toast/future_ops/noise_model.py +++ b/src/toast/future_ops/noise_model.py @@ -10,7 +10,7 @@ from ..timing import function_timer, Timer -from ..tod import AnalyticNoise +from ..noise_sim import AnalyticNoise from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity @@ -31,8 +31,8 @@ class DefaultNoiseModel(Operator): API = traitlets.Int(0, help="Internal interface version for this operator") - noisekey = traitlets.Unicode( - "noise", help="The observation key to use when storing the noise model" + noise_model = traitlets.Unicode( + "noise_model", help="The observation key for storing the noise model" ) def __init__(self, **kwargs): @@ -73,7 +73,7 @@ def _exec(self, data, detectors=None, **kwargs): rate=rates, fmin=fmin, detectors=dets, fknee=fknee, alpha=alpha, NET=NET ) - obs[self.noisekey] = noise + obs[self.noise_model] = noise return @@ -84,7 +84,7 @@ def _requires(self): return dict() def _provides(self): - prov = {"meta": [self.noisekey]} + prov = {"meta": [self.noise_model]} return prov def _accelerators(self): diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/future_ops/pointing_healpix.py index b774702f4..04c79d470 100644 --- a/src/toast/future_ops/pointing_healpix.py +++ b/src/toast/future_ops/pointing_healpix.py @@ -2,16 +2,18 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. 
+import traitlets
+
 import numpy as np
 
 from ..utils import Environment, Logger
 
+from ..traits import trait_docs, Int, Unicode, Bool
+
 from ..healpix import HealpixPixels
 
 from ..operator import Operator
 
-from ..config import ObjectConfig
-
 from ..timing import function_timer
 
 from .. import qarray as qa
@@ -21,6 +23,7 @@
 from .._libtoast import pointing_matrix_healpix
 
 
+@trait_docs
 class PointingHealpix(Operator):
     """Operator which generates I/Q/U healpix pointing weights.
 
@@ -44,120 +47,111 @@ class PointingHealpix(Operator):
     .. math::
         d = cal \\left[\\frac{(1+eps)}{2} I + \\frac{(1-eps)}{2} \\left[Q \\cos{2a+4w} + U \\sin{2a+4w}\\right]\\right]
 
-    Args:
-        config (dict): Configuration parameters.
-
     """
 
-    def __init__(self, config):
-        super().__init__(config)
-        self._parse()
+    # Class traits
 
-        # Initialize the healpix pixels object
-        self.hpix = HealpixPixels(self.config["nside"])
+    API = Int(0, help="Internal interface version for this operator")
 
-        self._nnz = 1
-        if self.config["mode"] == "IQU":
-            self._nnz = 3
+    nside = Int(64, help="The NSIDE resolution")
 
-        self._n_pix = 12 * self.config["nside"] ** 2
-        self._n_pix_submap = 12 * self.config["nside_submap"] ** 2
-        self._n_submap = (self.config["nside"] // self.config["nside_submap"]) ** 2
+    nside_submap = Int(16, help="The NSIDE of the submap resolution")
 
-        self._local_submaps = None
-        if self.config["create_dist"] is not None:
-            self._local_submaps = np.zeros(self._n_submap, dtype=np.bool)
+    nest = Bool(False, help="If True, use NESTED ordering instead of RING")
 
-    @classmethod
-    def defaults(cls):
-        """(Class method) Return options supported by the operator and their defaults.
+    mode = Unicode("I", help="The Stokes weights to generate (I or IQU)")
 
-        This returns an ObjectConfig instance, and each entry should have a help
-        string.
+    boresight = Unicode("boresight_radec", help="Observation shared key for boresight")
 
-        Returns:
-            (ObjectConfig): The options.
+ hwp_angle = Unicode("hwp_angle", help="Observation shared key for HWP angle") - """ - opts = ObjectConfig() - - opts.add("class", "toast.future_ops.PointingHealpix", "The class name") - - opts.add("API", 0, "(Internal interface version for this operator)") - - opts.add("pixels", "pixels", "The observation name of the output pixels") - - opts.add("weights", "weights", "The observation name of the output weights") + flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) - opts.add( - "quats", - None, - "If not None, save detector quaternions to this name (for debugging)", - ) + flag_mask = Int(0, help="Bit mask value for optional flagging") - opts.add("nside", 64, "The NSIDE resolution") + pixels = Unicode("pixels", help="Observation detdata key for output pixel indices") - opts.add("nside_submap", 16, "The submap resolution") + weights = Unicode("weights", help="Observation detdata key for output weights") - opts.add("nest", False, "If True, use NESTED ordering instead of RING") + quats = Unicode( + "quats", + allow_none=True, + help="Observation detdata key for output quaternions (for debugging)", + ) - opts.add("mode", "I", "The Stokes weights to generate (I or IQU)") + create_dist = Unicode( + None, + allow_none=True, + help="Create the submap distribution for all detectors and store in the Data key specified", + ) - opts.add("flags", None, "Optional common timestream flags to apply") + single_precision = Bool(False, help="If True, use 32bit int / float in output") - opts.add("flag_mask", 0, "Bit mask value for optional flagging") + cal = Unicode( + None, + allow_none=True, + help="The observation key with a dictionary of pointing weight calibration for each det", + ) - opts.add( - "create_dist", - None, - "Create the submap distribution for all detectors and store in the Data key specified", - ) + @traitlets.validate("nside") + def _check_nside(self, proposal): + check = proposal["value"] + if ~check & (check - 1) != check - 1: + raise traitlets.TraitError("Invalid NSIDE value") + return check - opts.add("single_precision", False, "If True, use 32bit int / float in output") - - opts.add( - "cal", - None, - "The observation key with a dictionary of pointing weight calibration for each det", - ) - - return opts - - def _parse(self): - log = Logger.get() - if self.config["nside_submap"] >= self.config["nside"]: - newsub = self.config["nside"] // 4 - if newsub == 0: - newsub = 1 - log.warning("nside_submap >= nside, setting to {}".format(newsub)) - self.config["nside_submap"] = newsub - if self.config["mode"] not in ["I", "IQU"]: - msg = "Invalide mode '{}', allowed values are 'I' and 'IQU'".format( - self.config["mode"] + @traitlets.validate("nside_submap") + def _check_nside_submap(self, proposal): + check = proposal["value"] + if ~check & (check - 1) != check - 1: + raise traitlets.TraitError("Invalid NSIDE submap value") + if check > self.nside: + newval = 16 + if newval > self.nside: + newval = self.nside + log = Logger.get() + log.warning( + "NSIDE submap greater than NSIDE. Setting to {} instead".format(newval) ) - log.error(msg) - raise RuntimeError(msg) - - @function_timer - def exec(self, data, detectors=None): - """Create pixels and weights. 
+ check = newval + return check + + @traitlets.validate("mode") + def _check_mode(self, proposal): + check = proposal["value"] + if check not in ["I", "IQU"]: + raise traitlets.TraitError("Invalid mode (must be 'I' or 'IQU')") + return check + + @traitlets.validate("flag_mask") + def _check_flag_mask(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("Flag mask should be a positive integer") + return check + + def __init__(self, **kwargs): + super().__init__(**kwargs) - This iterates over all observations and specified detectors, and creates - the pixel and weight arrays representing the pointing matrix. Data is stored - in newly created DetectorData members of each observation. + # Initialize the healpix pixels object + self.hpix = HealpixPixels(self.nside) - The locally hit submaps are optionally computed. This is typically only done - when initially computing the pointing for all detectors. + self._nnz = 1 + if self.mode == "IQU": + self._nnz = 3 - Args: - data (toast.Data): The distributed data. - detectors (list): A list of detector names or indices. If None, this - indicates a list of all detectors. + self._n_pix = 12 * self.nside ** 2 + self._n_pix_submap = 12 * self.nside_submap ** 2 + self._n_submap = (self.nside // self.nside_submap) ** 2 - Returns: - None + self._local_submaps = None + if self.create_dist is not None: + self._local_submaps = np.zeros(self._n_submap, dtype=np.bool) - """ + @function_timer + def _exec(self, data, detectors=None, **kwargs): env = Environment.get() log = Logger.get() @@ -172,41 +166,28 @@ def exec(self, data, detectors=None): # Nothing to do for this observation continue - # The number of samples on this process - n_samp = obs.local_samples[1] - - # See if we have a HWP angle - hwpang = None - try: - hwpang = obs.hwp_angle - except KeyError: - if obs.mpicomm is None or obs.mpicomm.rank == 0: - msg = "Observation {} has no HWP angle- not including in response".format( - obs.name - ) - log.verbose(msg) - # Get the flags if needed flags = None - if self.config["flags"] is not None: - flags = obs.get_common_flags(keyname=self.config["flags"]) - flags &= self.config["flag_mask"] + if self.flags is not None: + flags = obs.shared[self.flags] + flags &= self.flag_mask # Boresight pointing quaternions - boresight = obs.boresight_radec + boresight = obs.shared[self.boresight] # Focalplane for this observation focalplane = obs.telescope.focalplane # Optional calibration cal = None - if self.config["cal"] is not None: - cal = obs[self.config["cal"]] + if self.cal is not None: + cal = obs[self.cal] # Create output data for the pixels, weights and optionally the # detector quaternions. - if self.config["single_precision"]: + if self.single_precision: + obs.detdata.create(self.pixels, shape=(1,), dtype=np.int32) obs.create_detector_data( self.config["pixels"], shape=(n_samp,), @@ -344,8 +325,7 @@ def finalize(self, data): return def requires(self): - """List of Observation keys directly used by this Operator. - """ + """List of Observation keys directly used by this Operator.""" req = ["BORESIGHT_RADEC", "HWP_ANGLE"] if self.config["flags"] is not None: req.append(self.config["flags"]) @@ -354,14 +334,35 @@ def requires(self): return req def provides(self): - """List of Observation keys generated by this Operator. 
-        """
+        """List of Observation keys generated by this Operator."""
         prov = [self.config["pixels"], self.config["weights"]]
         if self.config["quats"] is not None:
             prov.append(self.config["quats"])
         return prov
 
     def accelerators(self):
-        """List of accelerators supported by this Operator.
-        """
+        """List of accelerators supported by this Operator."""
+        return list()
+
+    def _finalize(self, data, **kwargs):
+        return
+
+    def _requires(self):
+        req = {
+            "shared": [
+                self.boresight,
+            ],
+        }
+        if self.flags is not None:
+            req["shared"].append(self.flags)
+        return req
+
+    def _provides(self):
+        prov = {
+            "detdata": [
+                self.pixels,
+                self.weights,
+            ]
+        }
+        if self.quats is not None:
+            prov["detdata"].append(self.quats)
+        return prov
+
+    def _accelerators(self):
         return list()
diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py
index 63f756521..f31e57f56 100644
--- a/src/toast/future_ops/sim_satellite.py
+++ b/src/toast/future_ops/sim_satellite.py
@@ -20,7 +20,9 @@
 
 from ..timing import function_timer, Timer
 
-from ..tod import Interval, TOD, regular_intervals, AnalyticNoise
+from ..intervals import Interval, regular_intervals
+
+from ..noise_sim import AnalyticNoise
 
 from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity
 
@@ -317,7 +319,7 @@ class SimSatellite(Operator):
 
     flags = Unicode("flags", help="Observation shared key for common flags")
 
-    hwp = Unicode("hwp_angle", help="Observation shared key for HWP angle")
+    hwp_angle = Unicode("hwp_angle", help="Observation shared key for HWP angle")
 
     boresight = Unicode("boresight_radec", help="Observation shared key for boresight")
 
@@ -510,7 +512,7 @@ def _exec(self, data, detectors=None, **kwargs):
             hwp_step_time_m = self.hwp_step_time.to_value(u.minute)
             simulate_hwp_angle(
                 obs,
-                self.hwp,
+                self.hwp_angle,
                 obsrange[ob].start,
                 self.hwp_rpm,
                 hwp_step_deg,
diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/future_ops/sim_tod_noise.py
index 986a25231..a31d13284 100644
--- a/src/toast/future_ops/sim_tod_noise.py
+++ b/src/toast/future_ops/sim_tod_noise.py
@@ -2,21 +2,190 @@
 # All rights reserved. Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
 
+import traitlets
+
 import numpy as np
 
+import scipy.interpolate as si
+
 from ..timing import function_timer
 
-from ..fft import FFTPlanReal1DStore
+from ..traits import trait_docs, Int, Unicode
 
-from ..tod.tod_math import sim_noise_timestream
+from ..fft import FFTPlanReal1DStore
 
 from ..operator import Operator
 
-from ..config import ObjectConfig
-
 from ..utils import rate_from_times, Logger
 
+from .. import rng
+
+from .._libtoast import AlignedF64, tod_sim_noise_timestream
+
+
+@function_timer
+def sim_noise_timestream(
+    realization,
+    telescope,
+    component,
+    obsindx,
+    detindx,
+    rate,
+    firstsamp,
+    samples,
+    oversample,
+    freq,
+    psd,
+    py=False,
+):
+    """Generate a noise timestream, given a starting RNG state.
+
+    Use the RNG parameters to generate unit-variance Gaussian samples
+    and then modify the Fourier domain amplitudes to match the desired
+    PSD.
+
+    The RNG (Threefry2x64 from Random123) takes a "key" and a "counter"
+    which each consist of two unsigned 64bit integers. These four
+    numbers together uniquely identify a single sample. We construct
+    those four numbers in the following way:
+
+    key1 = realization * 2^32 + telescope * 2^16 + component
+    key2 = obsindx * 2^32 + detindx
+    counter1 = currently unused (0)
+    counter2 = sample in stream
+
+    counter2 is incremented internally by the RNG function as it calls
+    the underlying Random123 library for each sample.
+
+    Args:
+        realization (int): the Monte Carlo realization.
+        telescope (int): a unique index assigned to a telescope.
+ component (int): a number representing the type of timestream + we are generating (detector noise, common mode noise, + atmosphere, etc). + obsindx (int): the global index of this observation. + detindx (int): the global index of this detector. + rate (float): the sample rate. + firstsamp (int): the start sample in the stream. + samples (int): the number of samples to generate. + oversample (int): the factor by which to expand the FFT length + beyond the number of samples. + freq (array): the frequency points of the PSD. + psd (array): the PSD values. + py (bool): if True, use a pure-python implementation. This is useful + for testing. If True, also return the interpolated PSD. + + Returns: + (array): the noise timestream. If py=True, returns a tuple of timestream, + interpolated frequencies, and interpolated PSD. + + """ + tdata = None + if py: + fftlen = 2 + while fftlen <= (oversample * samples): + fftlen *= 2 + npsd = fftlen // 2 + 1 + norm = rate * float(npsd - 1) + + interp_freq = np.fft.rfftfreq(fftlen, 1 / rate) + if interp_freq.size != npsd: + raise RuntimeError( + "interpolated PSD frequencies do not have expected length" + ) + + # Ensure that the input frequency range includes all the frequencies + # we need. Otherwise the extrapolation is not well defined. + + if np.amin(freq) < 0.0: + raise RuntimeError("input PSD frequencies should be >= zero") + + if np.amin(psd) < 0.0: + raise RuntimeError("input PSD values should be >= zero") + + increment = rate / fftlen + + if freq[0] > increment: + raise RuntimeError( + "input PSD does not go to low enough frequency to " + "allow for interpolation" + ) + + nyquist = rate / 2 + if np.abs((freq[-1] - nyquist) / nyquist) > 0.01: + raise RuntimeError( + "last frequency element does not match Nyquist " + "frequency for given sample rate: {} != {}".format(freq[-1], nyquist) + ) + + # Perform a logarithmic interpolation. In order to avoid zero values, we + # shift the PSD by a fixed amount in frequency and amplitude. + + psdshift = 0.01 * np.amin(psd[(psd > 0.0)]) + freqshift = increment + + loginterp_freq = np.log10(interp_freq + freqshift) + logfreq = np.log10(freq + freqshift) + logpsd = np.log10(psd + psdshift) + + interp = si.interp1d(logfreq, logpsd, kind="linear", fill_value="extrapolate") + + loginterp_psd = interp(loginterp_freq) + interp_psd = np.power(10.0, loginterp_psd) - psdshift + + # Zero out DC value + + interp_psd[0] = 0.0 + + scale = np.sqrt(interp_psd * norm) + + # gaussian Re/Im randoms, packed into a complex valued array + + key1 = realization * 4294967296 + telescope * 65536 + component + key2 = obsindx * 4294967296 + detindx + counter1 = 0 + counter2 = firstsamp * oversample + + rngdata = rng.random( + fftlen, sampler="gaussian", key=(key1, key2), counter=(counter1, counter2) + ).array() + + fdata = np.zeros(npsd, dtype=np.complex) + + # Set the DC and Nyquist frequency imaginary part to zero + fdata[0] = rngdata[0] + 0.0j + fdata[-1] = rngdata[npsd - 1] + 0.0j + + # Repack the other values. 
+ fdata[1:-1] = rngdata[1 : npsd - 1] + 1j * rngdata[-1 : npsd - 1 : -1] + + # scale by PSD + fdata *= scale + + # inverse FFT + tdata = np.fft.irfft(fdata) + + # subtract the DC level- for just the samples that we are returning + offset = (fftlen - samples) // 2 + + DC = np.mean(tdata[offset : offset + samples]) + tdata[offset : offset + samples] -= DC + return (tdata[offset : offset + samples], interp_freq, interp_psd) + else: + tdata = AlignedF64(samples) + tod_sim_noise_timestream( + realization, + telescope, + component, + obsindx, + detindx, + rate, + firstsamp, + oversample, + freq.astype(np.float64), + psd.astype(np.float64), + tdata, + ) + return tdata.array() + +@trait_docs class SimNoise(Operator): """Operator which generates noise timestreams. @@ -27,71 +196,44 @@ class SimNoise(Operator): want to enforce reproducibility of a given sample, even when using different-sized observations. - Args: - config (dict): Configuration parameters. - """ - def __init__(self, config): - super().__init__(config) - self._parse() - self._oversample = 2 - - @classmethod - def defaults(cls): - """(Class method) Return options supported by the operator and their defaults. + # Class traits - This returns an ObjectConfig instance, and each entry should have a help - string. + API = Int(0, help="Internal interface version for this operator") - Returns: - (ObjectConfig): The options. + noise_model = Unicode( + "noise_model", help="Observation key containing the noise model" + ) - """ - opts = ObjectConfig() + realization = Int(0, help="The noise realization index") - opts.add("class", "toast.future_ops.SimNoise", "The class name") + component = Int(0, help="The noise component index") - opts.add("API", 0, "(Internal interface version for this operator)") + times = Unicode("times", help="Observation shared key for timestamps") - opts.add("out", None, "The name of the output signal") + out = Unicode("noise", help="Observation detdata key for output noise timestreams") - opts.add("realization", 0, "The realization index") + @traitlets.validate("realization") + def _check_realization(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("realization index must be positive") + return check - opts.add("component", 0, "The component index") + @traitlets.validate("component") + def _check_component(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("component index must be positive") + return check - opts.add( - "noise", - "noise", - "The observation key containing the noise model to use for simulations", - ) - - return opts - - def _parse(self): - if self.config["realization"] < 0 or self.config["component"] < 0: - raise RuntimeError("realization and component indices should be positive") - if self.config["out"] is None: - self.config["out"] = "SIGNAL" + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._oversample = 2 @function_timer - def exec(self, data, detectors=None): - """Generate noise timestreams. - - This iterates over all observations and detectors and generates - the noise timestreams based on the noise object for the current - observation. - - Args: - data (toast.Data): The distributed data. - detectors (list): A list of detector names or indices. If None, this - indicates a list of all detectors. - - Raises: - KeyError: If an observation does not contain the noise or output - signal keys. 
- - """ + def _exec(self, data, detectors=None, **kwargs): log = Logger.get() for obs in data.obs: # Get the detectors we are using for this observation @@ -114,17 +256,17 @@ def exec(self, data, detectors=None): if "global_offset" in obs: global_offset = obs["global_offset"] - if self.config["noise"] not in obs: - msg = "Observation does not contain noise key '{}'".format( - self.config["noise"] + if self.noise_model not in obs: + msg = "Observation does not contain noise model key '{}'".format( + self.noise_model ) log.error(msg) raise KeyError(msg) - nse = obs[self.config["noise"]] + nse = obs[self.noise_model] # Eventually we'll redistribute, to allow long correlations... - if obs.grid_size[1] != 1: + if obs.comm_row_size != 1: msg = "Noise simulation for process grids with multiple ranks in the sample direction not implemented" log.error(msg) raise NotImplementedError(msg) @@ -133,12 +275,10 @@ def exec(self, data, detectors=None): # detectors within the observation. # Create output if it does not exist - if self.config["out"] not in obs: - obs.create_detector_data( - self.config["out"], shape=(obs.local_samples[1],), dtype=np.float64 - ) + if self.out not in obs: + obs.detdata.create(self.out, shape=(1,), dtype=np.float64) - (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(obs.times) + (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(obs.shared[self.times]) for key in nse.keys: # Check if noise matching this PSD key is needed @@ -150,14 +290,14 @@ def exec(self, data, detectors=None): # Simulate the noise matching this key nsedata = sim_noise_timestream( - self.config["realization"], + self.realization, telescope, - self.config["component"], + self.component, obsindx, nse.index(key), rate, - obs.local_samples[0] + global_offset, - obs.local_samples[1], + obs.offset + global_offset, + obs.n_local, self._oversample, nse.freq(key), nse.psd(key), @@ -168,10 +308,7 @@ def exec(self, data, detectors=None): weight = nse.weight(det, key) if weight == 0: continue - obs.get_signal(keyname=self.config["out"])[ - obs.local_samples[0] : obs.local_samples[0] - + obs.local_samples[1] - ] += (weight * nsedata) + obs.detdata[self.out][det] += weight * nsedata # Release the work space allocated in the FFT plan store. # @@ -183,40 +320,31 @@ def exec(self, data, detectors=None): # 1. Each process only has a few detectors # 2. There is a broad distribution of observation lengths. # - # If we are in this regime frequently, we should just allocate / free each plan. + # If we are in this regime frequently, we should just allocate / free + # each plan. store = FFTPlanReal1DStore.get() store.clear() - return - def finalize(self, data): - """Perform any final operations / communication. - - This calls the finalize() method on all operators in sequence. - - Args: - data (toast.Data): The distributed data. - - Returns: - None - - """ + def _finalize(self, data, **kwargs): return - def requires(self): - """List of Observation keys directly used by this Operator. - """ - req = [self.config["noise"]] - return req - - def provides(self): - """List of Observation keys generated by this Operator. - """ - prov = list() - prov.append(self.config["out"]) - return prov - - def accelerators(self): - """List of accelerators supported by this Operator. 
- """ + def _requires(self): + return { + "meta": [ + self.noise_model, + ], + "shared": [ + self.times, + ], + } + + def _provides(self): + return { + "detdata": [ + self.out, + ] + } + + def _accelerators(self): return list() diff --git a/src/toast/intervals.py b/src/toast/intervals.py index 340bc7654..095205db3 100644 --- a/src/toast/intervals.py +++ b/src/toast/intervals.py @@ -424,3 +424,71 @@ def __or__(self, other): result = IntervalList(self.timestamps, intervals=result) return result + + +@function_timer +def regular_intervals(n, start, first, rate, duration, gap): + """Function to generate regular intervals with gaps. + + This creates a python list of Interval instances (*not* an IntervalList object, + which requires full timestamp information), given a start time/sample and time + span for the interval and the gap in time between intervals. The + length of the interval and the total interval + gap are rounded down to the + nearest sample and all intervals in the list are created using those + lengths. + + If the time span is an exact multiple of the sampling, then the + final sample is excluded. The reason we always round down to the whole + number of samples that fits inside the time range is so that the requested + time span boundary (one hour, one day, etc) will fall in between the last + sample of one interval and the first sample of the next. + + Args: + n (int): the number of intervals. + start (float): the start time in seconds. + first (int): the first sample index, which occurs at "start". + rate (float): the sample rate in Hz. + duration (float): the length of the interval in seconds. + gap (float): the length of the gap in seconds. + + Returns: + (list): a list of Interval objects. + + """ + invrate = 1.0 / rate + + # Compute the whole number of samples that fit within the + # requested time span (rounded down to a whole number). Check for the + # case of the time span being an exact number of samples- in which case + # the final sample is excluded. + + lower = int((duration + gap) * rate) + totsamples = None + if np.absolute(lower * invrate - (duration + gap)) > 1.0e-12: + totsamples = lower + 1 + else: + totsamples = lower + + lower = int(duration * rate) + dursamples = None + if np.absolute(lower * invrate - duration) > 1.0e-12: + dursamples = lower + 1 + else: + dursamples = lower + + gapsamples = totsamples - dursamples + + intervals = [] + + for i in range(n): + ifirst = first + i * totsamples + ilast = ifirst + dursamples - 1 + # The time span between interval starts (the first sample of one + # interval to the first sample of the next) includes the one extra + # sample time. + istart = start + i * (totsamples * invrate) + # The stop time is the timestamp of the last valid sample (thus the -1). + istop = istart + ((dursamples - 1) * invrate) + intervals.append(Interval(start=istart, stop=istop, first=ifirst, last=ilast)) + + return intervals diff --git a/src/toast/noise.py b/src/toast/noise.py new file mode 100644 index 000000000..7bcc61969 --- /dev/null +++ b/src/toast/noise.py @@ -0,0 +1,151 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + + +class Noise(object): + """Noise objects act as containers for noise PSDs. + + Noise is a base class for an object that describes the noise + properties of all detectors for a single observation. + + Args: + detectors (list): Names of detectors. 
+        freqs (dict): Dictionary of arrays of frequencies for `psds`.
+        psds (dict): Dictionary of arrays which contain the PSD values
+            for each detector or `mixmatrix` key.
+        mixmatrix (dict): Mixing matrix describing how the PSDs should
+            be combined for detector noise. If provided, must contain
+            entries for every detector, and every key specified for a
+            detector must be defined in `freqs` and `psds`.
+        indices (dict): Integer index for every PSD, useful for
+            generating independent and repeatable noise realizations.
+            If absent, running indices will be assigned and provided.
+
+    Attributes:
+        detectors (list): List of detector names
+        keys (list): List of PSD names
+
+    Raises:
+        KeyError: If `freqs`, `psds`, `mixmatrix` or `indices` do not
+            include all relevant entries.
+        ValueError: If vector lengths in `freqs` and `psds` do not match.
+
+    """
+
+    def __init__(self, *, detectors, freqs, psds, mixmatrix=None, indices=None):
+
+        self._dets = list(sorted(detectors))
+        if mixmatrix is None:
+            # Default diagonal mixing matrix
+            self._keys = self._dets
+            self._mixmatrix = None
+        else:
+            # Assemble the list of keys needed for the specified detectors
+            keys = set()
+            self._mixmatrix = {}
+            for det in self._dets:
+                self._mixmatrix[det] = {}
+                for key, weight in mixmatrix[det].items():
+                    keys.add(key)
+                    self._mixmatrix[det][key] = weight
+            self._keys = list(sorted(keys))
+        if indices is None:
+            self._indices = {}
+            for i, key in enumerate(self._keys):
+                self._indices[key] = i
+        else:
+            self._indices = dict(indices)
+        self._freqs = {}
+        self._psds = {}
+        self._rates = {}
+
+        for key in self._keys:
+            if psds[key].shape[0] != freqs[key].shape[0]:
+                raise ValueError("PSD length must match the number of frequencies")
+            self._freqs[key] = np.copy(freqs[key])
+            self._psds[key] = np.copy(psds[key])
+            # last frequency point should be Nyquist
+            self._rates[key] = 2.0 * self._freqs[key][-1]
+
+    @property
+    def detectors(self):
+        """(list): list of strings containing the detector names."""
+        return self._dets
+
+    @property
+    def keys(self):
+        """(list): list of strings containing the PSD names."""
+        return self._keys
+
+    def multiply_ntt(self, key, data):
+        """Filter the data with noise covariance."""
+        raise NotImplementedError("multiply_ntt not yet implemented")
+
+    def multiply_invntt(self, key, data):
+        """Filter the data with inverse noise covariance."""
+        raise NotImplementedError("multiply_invntt not yet implemented")
+
+    def weight(self, det, key):
+        """Return the mixing weight for noise `key` in `det`.
+
+        Args:
+            det (str): Detector name
+            key (str): Mixing matrix key.
+        Returns:
+            weight (float): Mixing matrix weight
+
+        """
+        weight = 0
+        if self._mixmatrix is None:
+            if det == key:
+                weight = 1
+        elif key in self._mixmatrix[det]:
+            weight = self._mixmatrix[det][key]
+        return weight
+
+    def index(self, key):
+        """Return the PSD index for `key`
+
+        Args:
+            key (str): Detector name or mixing matrix key.
+        Returns:
+            index (int): PSD index.
+
+        """
+        return self._indices[key]
+
+    def freq(self, key):
+        """Get the frequencies corresponding to `key`.
+
+        Args:
+            key (str): Detector name or mixing matrix key.
+        Returns:
+            (array): Frequency bins that are used for the PSD.
+
+        """
+        return self._freqs[key]
+
+    def rate(self, key):
+        """Get the sample rate for `key`.
+
+        Args:
+            key (str): the detector name or mixing matrix key.
+        Returns:
+            (float): the sample rate in Hz.
+
+        """
+        return self._rates[key]
+
+    def psd(self, key):
+        """Get the PSD corresponding to `key`.
+ + Args: + key (str): Detector name or mixing matrix key. + Returns: + (array): PSD matching the key. + + """ + return self._psds[key] diff --git a/src/toast/noise_sim.py b/src/toast/noise_sim.py new file mode 100644 index 000000000..e704484b9 --- /dev/null +++ b/src/toast/noise_sim.py @@ -0,0 +1,104 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +from .noise import Noise + + +class AnalyticNoise(Noise): + """Class representing an analytic noise model. + + This generates an analytic PSD for a set of detectors, given + input values for the knee frequency, NET, exponent, sample rate, + minimum frequency, etc. + + Args: + detectors (list): List of detectors. + rate (dict): Dictionary of sample rates in Hertz. + fmin (dict): Dictionary of minimum frequencies for high pass + fknee (dict): Dictionary of knee frequencies. + alpha (dict): Dictionary of alpha exponents (positive, not negative!). + NET (dict): Dictionary of detector NETs. + + """ + + def __init__(self, *, detectors, rate, fmin, fknee, alpha, NET, indices=None): + + self._rate = rate + self._fmin = fmin + self._fknee = fknee + self._alpha = alpha + self._NET = NET + + for d in detectors: + if self._alpha[d] < 0.0: + raise RuntimeError( + "alpha exponents should be positive in this formalism" + ) + + freqs = {} + psds = {} + + last_nyquist = None + + for d in detectors: + if (self._fknee[d] > 0.0) and (self._fknee[d] < self._fmin[d]): + raise RuntimeError( + "If knee frequency is non-zero, it must be greater than f_min" + ) + + nyquist = self._rate[d] / 2.0 + if nyquist != last_nyquist: + tempfreq = [] + + # this starting point corresponds to a high-pass of + # 30 years, so should be low enough for any interpolation! + cur = 1.0e-9 + + # this value seems to provide a good density of points + # in log space. + while cur < nyquist: + tempfreq.append(cur) + cur *= 1.4 + + # put a final point at Nyquist + tempfreq.append(nyquist) + tempfreq = np.array(tempfreq, dtype=np.float64) + last_nyquist = nyquist + + freqs[d] = tempfreq + + if self._fknee[d] > 0.0: + ktemp = np.power(self._fknee[d], self._alpha[d]) + mtemp = np.power(self._fmin[d], self._alpha[d]) + temp = np.power(freqs[d], self._alpha[d]) + psds[d] = (temp + ktemp) / (temp + mtemp) + psds[d] *= self._NET[d] * self._NET[d] + else: + psds[d] = np.ones_like(freqs[d]) + psds[d] *= self._NET[d] * self._NET[d] + + # call the parent class constructor to store the psds + super().__init__(detectors=detectors, freqs=freqs, psds=psds, indices=indices) + + def rate(self, det): + """(float): the sample rate in Hz.""" + return self._rate[det] + + def fmin(self, det): + """(float): the minimum frequency in Hz, used as a high pass.""" + return self._fmin[det] + + def fknee(self, det): + """(float): the knee frequency in Hz.""" + return self._fknee[det] + + def alpha(self, det): + """(float): the (positive!) 
slope exponent.""" + return self._alpha[det] + + def NET(self, det): + """(float): the NET.""" + return self._NET[det] diff --git a/src/toast/observation.py b/src/toast/observation.py index 0526b2b26..4d84b49fb 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -6,10 +6,12 @@ import numbers -from collections.abc import MutableMapping, Sequence +from collections.abc import MutableMapping, Sequence, Mapping import numpy as np +from pshmem import MPIShared + from .mpi import MPI from .instrument import Telescope, Focalplane @@ -33,8 +35,6 @@ name_UID, ) -from pshmem import MPIShared - from .cuda import use_pycuda @@ -46,8 +46,8 @@ class DetectorData(object): data for a particular detector may itself be multi-dimensional, with the first dimension the number of samples. - The data in this container may be sliced by both detector indices and also by - detector name. + The data in this container may be sliced by both detector indices and names, as + well as by sample range. Example: Imagine we have 3 detectors and each has 10 samples. We want to store a @@ -63,8 +63,8 @@ class DetectorData(object): slicing by index and by a list of detectors is possible:: - view = detdata[0:-1] - view = detdata[("d01", "d03")] + view = detdata[0:-1, 2:4] + view = detdata[["d01", "d03"], 3:8] Args: detectors (list): A list of detector names in exactly the order you wish. @@ -181,13 +181,13 @@ def __del__(self): def _det_axis_view(self, key): if isinstance(key, (int, np.integer)): # Just one detector by index - view = key + view = (key,) elif isinstance(key, str): # Just one detector by name - view = self._name2idx[key] + view = (self._name2idx[key],) elif isinstance(key, slice): # We are slicing detectors by index - view = key + view = (key,) else: # Assume that our key is at least iterable try: @@ -195,6 +195,7 @@ def _det_axis_view(self, key): view = list() for k in key: view.append(self._name2idx[k]) + view = tuple(view) except TypeError: log = Logger.get() msg = "Detector indexing supports slice, int, string or iterable, not '{}'".format( @@ -207,19 +208,23 @@ def _det_axis_view(self, key): def _get_view(self, key): if isinstance(key, tuple): # We are slicing in both detector and sample dimensions - if len(key) > 2: - msg = "DetectorData has only 2 dimensions" + if len(key) > len(self._shape): + msg = "DetectorData has only {} dimensions".format(len(self._shape)) log.error(msg) raise TypeError(msg) - if len(key) == 1: - # Only detector slice - return self._det_axis_view(key[0]) - else: - detview = self._det_axis_view(key[0]) - return detview, key[1] + detview = self._det_axis_view(key[0]) + view = detview + for k in key[1:]: + view += (k,) + # for s in range(len(self._shape) - len(key)): + # view += (slice(None, None, None),) + return view else: # Only detector slice - return self._det_axis_view(key) + view = self._det_axis_view(key) + # for s in range(len(self._shape) - 1): + # view += (slice(None, None, None),) + return view def __getitem__(self, key): view = self._get_view(key) @@ -263,13 +268,40 @@ def __repr__(self): class DetDataMgr(MutableMapping): """Class used to manage DetectorData objects in an Observation. - New objects can be created with the "create()" method: + New objects can be created several ways. The "create()" method: + + ob.detdata.create(name, detshape=None, dtype=None, detectors=None) + + gives full control over creating the named object and specifying the shape of + each detector sample. 
The detectors argument can be used to restrict the object
+    to include only a subset of detectors.
 
-        ob.detdata.create(name, original=None, shape=None, dtype=None, detectors=None)
+    You can also create a new object by assignment from an existing DetectorData
+    object or a dictionary of detector arrays.  For example:
 
-    Which supports copying data from an existing DetectorData object or a dictionary
-    of individual detector timestreams.  The list of detectors to store can also be
-    reduced from the full set in the observation by giving a list of detectors.
+        ob.detdata[name] = DetectorData(ob.local_detectors, ob.n_local_samples, dtype)
+
+        ob.detdata[name] = {
+            x: np.ones((ob.n_local_samples, 2), dtype=np.int16)
+            for x in ob.local_detectors
+        }
+
+    The right hand side object must contain only detectors that are included in
+    ob.local_detectors, and the first dimension of its shape must equal the number
+    of local samples.
+
+    It is also possible to create a new object by assigning an array.  The array
+    must either have the full shape of the DetectorData object
+    (n_det x n_sample x detshape) or the shape of a single detector
+    (n_sample x detshape), in which case it is copied to every detector.  For
+    example:
+
+        ob.detdata[name] = np.ones(
+            (len(ob.local_detectors), ob.n_local_samples, 4), dtype=np.float32
+        )
+
+        ob.detdata[name] = np.ones(
+            (ob.n_local_samples,), dtype=np.float32
+        )
 
     After creation, you can access a given DetectorData object by name with standard
     dictionary syntax:
@@ -280,19 +312,14 @@ class DetDataMgr(MutableMapping):
 
         del ob.detdata[name]
 
-    There are shortcut methods to create standard data products:
-
-        ob.detdata.create_signal()
-        ob.detdata.create_flags()
-
     """
 
-    def __init__(self, samples, detectors):
+    def __init__(self, detectors, samples):
        self.samples = samples
        self.detectors = detectors
        self._internal = dict()
 
-    def create(self, name, original=None, shape=None, dtype=None, detectors=None):
+    def create(self, name, detshape=None, dtype=np.float64, detectors=None):
        """Create a local DetectorData buffer on this process.
 
        This method can be used to create arrays of detector data for storing signal,
        flags, or other timestream products on each process.
 
        Args:
            name (str): The name of the detector data (signal, flags, etc)
-            original (DetectorData, dict): Copy an existing data object.  This can
-                either be another DetectorData object or a dictionary of arrays.  This
-                must have exactly the same detectors as the local detector list,
-                although the ordering may be different.
-            shape (tuple): If not constructing from an existing array, use this shape
-                for the data of each detector.
-            dtype (np.dtype): If not constructing from an existing array, use this
-                dtype for each element.
+            detshape (tuple): Use this shape for the data of each detector sample.
+                Use None or an empty tuple if you want one element per sample.
+            dtype (np.dtype): Use this dtype for each element.
            detectors (list): Only construct a data object for this set of
                detectors.  This is useful if creating temporary data within a
                pipeline working on a subset of detectors.
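+
+        Example (a sketch added for illustration- the names, shapes and
+        dtypes here are hypothetical, and only the signature above is
+        assumed):
+
+            ob.detdata.create("signal")                 # one float64 per sample
+            ob.detdata.create("quats", detshape=(4,))   # four values per sample
+            ob.detdata.create("flags", dtype=np.uint8)  # one byte per sample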
@@ -316,71 +338,33 @@ def create(self, name, original=None, shape=None, dtype=None, detectors=None): None """ - if detectors is None: - detectors = self.detectors - log = Logger.get() if name in self._internal: msg = "Detector data with name '{}' already exists.".format(name) log.error(msg) raise RuntimeError(msg) - data_shape = shape - data_dtype = dtype - if original is not None: - # We are copying input data. Ensure that the detector lists are the same - # and that the shapes and types of every array are the same. - data_dets = original.keys() - if set(data_dets) != set(detectors): - msg = "Input data to copy has a different detector list" - log.error(msg) - raise RuntimeError(msg) - data_shape = None - data_dtype = None - for d in data_dets: - if data_shape is None: - data_shape = original[d].shape - data_dtype = original[d].dtype - if original[d].shape != data_shape: - msg = "All input detector arrays must have the same shape" - log.error(msg) - raise RuntimeError(msg) - if original[d].dtype != data_dtype: - msg = "All input detector arrays must have the same dtype" - log.error(msg) - raise RuntimeError(msg) + if detectors is None: + detectors = self.detectors else: - # If not specified, use defaults for shape and dtype. - if data_shape is None: - data_shape = (self.samples,) - if data_dtype is None: - data_dtype = np.float64 - - if data_shape[0] != self.samples: - msg = "Detector data first dimension size ({}) does not match number of local samples ({})".format( - data_shape[0], self.samples - ) - log.error(msg) - raise RuntimeError(msg) + for d in detectors: + if d not in self.detectors: + msg = "detector '{}' not in this observation".format(d) + raise ValueError(msg) + + data_shape = None + if detshape is None or len(detshape) == 0: + data_shape = (self.samples,) + elif len(detshape) == 1 and detshape[0] == 1: + data_shape = (self.samples,) + else: + data_shape = (self.samples,) + detshape # Create the data object - self._internal[name] = DetectorData(detectors, data_shape, data_dtype) - - # Copy input data if given - if original is not None: - for d in self._internal[name].keys(): - self._internal[name][d] = original[d] + self._internal[name] = DetectorData(detectors, data_shape, dtype) return - # Shortcuts for creating standard data objects - - def create_signal(self, name="signal"): - self.create(name, shape=(self.samples,), dtype=np.float64) - - def create_flags(self, name="flags"): - self.create(name, shape=(self.samples,), dtype=np.uint8) - # Mapping methods def __getitem__(self, key): @@ -391,7 +375,118 @@ def __delitem__(self, key): del self._internal[key] def __setitem__(self, key, value): - self._internal[key] = value + if isinstance(value, DetectorData): + # We have an input detector data object. 
Verify dimensions
+            for d in value.detectors:
+                if d not in self.detectors:
+                    msg = "detector '{}' not in this observation".format(d)
+                    raise ValueError(msg)
+            if value.shape[1] != self.samples:
+                msg = "Assignment DetectorData object has {} samples instead of {} in the observation".format(
+                    value.shape[1], self.samples
+                )
+                raise ValueError(msg)
+            if key not in self._internal:
+                # Create it first
+                self.create(
+                    key,
+                    detshape=value.detector_shape,
+                    dtype=value.dtype,
+                    detectors=value.detectors,
+                )
+            else:
+                if value.detector_shape != self._internal[key].detector_shape:
+                    msg = "Assignment value has wrong detector shape"
+                    raise ValueError(msg)
+            for d in value.detectors:
+                self._internal[key][d] = value[d]
+        elif isinstance(value, Mapping):
+            # This is a dictionary of detector arrays
+            detshape = None
+            dtype = None
+            for d, ddata in value.items():
+                if d not in self.detectors:
+                    msg = "detector '{}' not in this observation".format(d)
+                    raise ValueError(msg)
+                if ddata.shape[0] != self.samples:
+                    msg = "Assignment dictionary detector {} has {} samples instead of {} in the observation".format(
+                        d, ddata.shape[0], self.samples
+                    )
+                    raise ValueError(msg)
+                if detshape is None:
+                    detshape = ddata.shape[1:]
+                    dtype = ddata.dtype
+                else:
+                    if detshape != ddata.shape[1:]:
+                        msg = "All detector arrays must have the same shape"
+                        raise ValueError(msg)
+                    if dtype != ddata.dtype:
+                        msg = "All detector arrays must have the same type"
+                        raise ValueError(msg)
+            if key not in self._internal:
+                self.create(
+                    key,
+                    detshape=detshape,
+                    dtype=dtype,
+                    detectors=sorted(value.keys()),
+                )
+            else:
+                if (self.samples,) + detshape != self._internal[key].detector_shape:
+                    msg = "Assignment value has wrong detector shape"
+                    raise ValueError(msg)
+            for d, ddata in value.items():
+                self._internal[key][d] = ddata
+        else:
+            # This must be just an array- verify the dimensions
+            shp = value.shape
+            if shp[0] == self.samples:
+                # This is a single detector array, being assigned to all detectors
+                detshape = None
+                if len(shp) > 1:
+                    detshape = shp[1:]
+                if key not in self._internal:
+                    self.create(
+                        key,
+                        detshape=detshape,
+                        dtype=value.dtype,
+                        detectors=self.detectors,
+                    )
+                else:
+                    fullshape = (self.samples,)
+                    if detshape is not None:
+                        fullshape += detshape
+                    if fullshape != self._internal[key].detector_shape:
+                        msg = "Assignment value has wrong detector shape"
+                        raise ValueError(msg)
+                for d in self.detectors:
+                    self._internal[key][d] = value
+            elif shp[0] == len(self.detectors):
+                # Full sized array
+                if shp[1] != self.samples:
+                    msg = "Assignment value has wrong number of samples"
+                    raise ValueError(msg)
+                detshape = None
+                if len(shp) > 2:
+                    detshape = shp[2:]
+                if key not in self._internal:
+                    self.create(
+                        key,
+                        detshape=detshape,
+                        dtype=value.dtype,
+                        detectors=self.detectors,
+                    )
+                else:
+                    fullshape = (self.samples,)
+                    if detshape is not None:
+                        fullshape += detshape
+                    if fullshape != self._internal[key].detector_shape:
+                        msg = "Assignment value has wrong detector shape"
+                        raise ValueError(msg)
+                self._internal[key][:] = value
+            else:
+                # Incompatible
+                msg = "Assignment of detector data from an array only supports full size or single detector"
+                raise ValueError(msg)
 
     def __iter__(self):
         return iter(self._internal)
@@ -418,20 +513,41 @@ def __repr__(self):
 class SharedDataMgr(MutableMapping):
     """Class used to manage shared data objects in an Observation.
- New objects can be created with "create()" method: + New objects can be created with the "create()" method: + + obs.shared.create(name, shape=None, dtype=None, comm=None) + + The communicator defaults to sharing the data across the observation comm, but + other options would be to pass in the observation comm_row or comm_col communicators + in order to share common detector information across the process grid row or to + share telescope data across the process grid column. - obs.shared.create(name, original=None, shape=None, dtype=None, comm=None) + You can also create shared objects by assignment from an existing MPIShared object + or an array on one process. In the case of creating from an array assignment, an + extra communication step is required to determine what process is sending the data + (all processes except for one should pass 'None' as the data). For example: + + timestamps = None + if obs.comm_col_rank == 0: + # Input data only exists on one process + timestamps = np.arange(obs.n_local_samples, dtype=np.float32) - Which supports copying data from an existing MPIShared object. The communicator - defaults to sharing the data across the observation communicator, but other options - would be to pass in the observation grid_comm_row or grid_comm_col communicators - in order to share detector information across the process grid row or to share - telescope data across the process grid column. + # Explicitly create the shared data and assign: + obs.shared.create( + "times", + shape=(obs.n_local_samples,), + dtype=np.float32, + comm=obs.comm_col + ) + obs.shared["times"].set(timestamps, offset=(0,), fromrank=0) - There are shortcut methods to create standard data products: + # Create from existing MPIShared object: + sharedtime = MPIShared((obs.n_local_samples,), np.float32, obs.comm_col) + sharedtime[:] = timestamps + obs.shared["times"] = sharedtime - ob.detdata.create_signal() - ob.detdata.create_flags() + # Create from array on one process, pre-communication needed: + obs.shared["times"] = timestamps After creation, you can access a given object by name with standard dictionary syntax: @@ -442,55 +558,25 @@ class SharedDataMgr(MutableMapping): del obs.shared[name] - NOTE: These shared memory objects can be read from all processes asynchronously, - but write access must be synchronized. You must set the data collectively by - using the "set" method and passing in data on one process. Here is an example - creating timestamps that are common to all processes in a column of the grid and - setting those on the column rank zero process. - - n_time = obs.local_samples[1] - col_rank = obs.grid_ranks[0] - - timestamps = None - if col_rank == 0: - # Input data only exists on one process - timestamps = np.arange(n_time, dtype=np.float32) - - obs.shared.create( - name="times", - shape=(n_time,), - dtype=np.float32, - comm=obs.grid_comm_col - ) - obs.shared["times"].set(timestamps, offset=(0,), fromrank=0) - """ - def __init__(self, samples, detectors, comm, comm_row, comm_col): - self.samples = samples - self.detectors = detectors + def __init__(self, comm, comm_row, comm_col): self.comm = comm self.comm_row = comm_row self.comm_col = comm_col self._internal = dict() - def create(self, name, original=None, shape=None, dtype=None, comm=None): + def create(self, name, shape, dtype=None, comm=None): """Create a shared memory buffer. This buffer will be replicated across all nodes used by the processes owning the observation. 
This uses the MPIShared class, which falls back to a simple - numpy array if MPI is not being used. After creating the buffer, you should - set the elements by using the MPIShared.set() method. + numpy array if MPI is not being used. Args: name (str): Name of the shared memory object (e.g. "boresight"). - original (array): Construct the shared array copying this input local - memory. This argument is only meaningful on the rank zero process of the - specified communicator. - shape (tuple): If not constructing from an existing array, use this shape - for the new buffer. - dtype (np.dtype): If not constructing from an existing array, use this - dtype for each element. + shape (tuple): The shape of the new buffer. + dtype (np.dtype): Use this dtype for each element. comm (MPI.Comm): The communicator to use for the shared data. If None then the communicator for the observation is used. Other options would be to specify the grid_comm_row (for shared detector objects) or @@ -511,71 +597,17 @@ def create(self, name, original=None, shape=None, dtype=None, comm=None): # Use the observation communicator. shared_comm = self.comm - copy_data = 0 - shared_shape = shape shared_dtype = dtype - if original is not None: - copy_data = 1 - if shared_comm is not None: - copy_data = shared_comm.allreduce(copy_data, op=MPI.SUM) - if copy_data > 1: - msg = "If passing in an existing data buffer, it should exist on " - "exactly one process (rank 0). All other ranks should pass in None." - log.error(msg) - raise RuntimeError(msg) - if shared_comm is None or shared_comm.rank == 0: - if copy_data == 1: - shared_shape = original.shape - shared_dtype = original.dtype - shared_dtype = np.dtype(shared_dtype) - - if shared_comm is not None: - shared_shape = shared_comm.bcast(shared_shape, root=0) - shared_dtype = shared_comm.bcast(shared_dtype, root=0) - - # Use defaults for shape and dtype if not set - if shared_shape is None: - shared_shape = (self.samples,) + # Use defaults for dtype if not set if shared_dtype is None: shared_dtype = np.float64 # Create the data object - self._internal[name] = MPIShared(shared_shape, shared_dtype, shared_comm) + self._internal[name] = MPIShared(shape, shared_dtype, shared_comm) - # Copy input data if given - if copy_data == 1: - self._internal[name].set(original, np.zeros_like(shared_shape), fromrank=0) return - # Shortcuts for creating standard data objects - - def create_times(self, name="times"): - self.create(name, shape=(self.samples,), dtype=np.float64, comm=self.comm_col) - - def create_flags(self, name="flags"): - self.create(name, shape=(self.samples,), dtype=np.uint8, comm=self.comm_col) - - def create_velocity(self, name="velocity"): - self.create(name, shape=(self.samples, 3), dtype=np.float64, comm=self.comm_col) - - def create_position(self, name="position"): - self.create(name, shape=(self.samples, 3), dtype=np.float64, comm=self.comm_col) - - def create_hwp_angle(self, name="hwp_angle"): - self.create(name, shape=(self.samples,), dtype=np.float64, comm=self.comm_col) - - def create_boresight_radec(self, name="boresight_radec"): - self.create(name, shape=(self.samples, 4), dtype=np.float64, comm=self.comm_col) - - def create_boresight_azel(self, name="boresight_azel"): - self.create(name, shape=(self.samples, 4), dtype=np.float64, comm=self.comm_col) - - def create_boresight_response(self, name="boresight_response"): - self.create( - name, shape=(self.samples, 16), dtype=np.float32, comm=self.comm_col - ) - # Mapping methods def __getitem__(self, key): @@ -586,7 +618,65 
@@ def __delitem__(self, key): del self._internal[key] def __setitem__(self, key, value): - self._internal[key] = value + if isinstance(value, MPIShared): + # This is an existing shared object. + if key not in self._internal: + self.create(key, shape=value.shape, dtype=value.dtype, comm=value.comm) + else: + # Verify that communicators and dimensions match + pass + # Assign from just one process. + offset = None + dval = None + if value.comm is None or value.comm.rank == 0: + offset = tuple([0 for x in self._internal[key].shape]) + dval = value.data + self._internal[key].set(dval, offset=offset, fromrank=0) + else: + # This must be an array on one process. + if key not in self._internal: + # We need to create it. In that case we use the default communicator + # (the full observation comm). We also need to get the array + # properties to all processes in order to create the object. + if self.comm is None: + # No MPI + self.create(key, shape=value.shape, dtype=value.dtype) + offset = tuple([0 for x in self._internal[key].shape]) + self._internal[key].set(value, offset=offset, fromrank=0) + else: + shp = None + dt = None + check_rank = np.zeros((self.comm.size,), dtype=np.int32) + check_result = np.zeros((self.comm.size,), dtype=np.int32) + if value is not None: + shp = value.shape + dt = value.dtype + check_rank[self.comm.rank] = 1 + self.comm.Allreduce(check_rank, check_result, op=MPI.SUM) + tot = np.sum(check_result) + if tot > 1: + if self.comm.rank == 0: + msg = "When creating shared data with [] notation, only one process may have a non-None value for the data" + print(msg, flush=True) + self.comm.Abort() + from_rank = np.where(check_result == 1)[0][0] + shp = self.comm.bcast(shp, root=from_rank) + dt = self.comm.bcast(dt, root=from_rank) + self.create(key, shape=shp, dtype=dt) + offset = None + if self.comm.rank == from_rank: + offset = tuple([0 for x in self._internal[key].shape]) + self._internal[key].set(value, offset=offset, fromrank=from_rank) + else: + # Already exists, just do the assignment + slc = None + if value is not None: + if value.shape != self._internal[key].shape: + raise ValueError( + "When assigning directly to a shared object, the value must have the same dimensions" + ) + slc = tuple([slice(0, x) for x in self._internal[key].shape]) + self._internal[key][slc] = value def __iter__(self): return iter(self._internal) @@ -866,7 +956,7 @@ def __repr__(self): class ViewMgr(MutableMapping): - """Class to manage views into observation data objects.""" + """Internal class to manage views into observation data objects.""" def __init__(self, obj): self.obj = obj @@ -904,7 +994,21 @@ def clear(self): class ViewInterface(object): - """Descriptor class for accessing the views in an observation.""" + """Descriptor class for accessing the views in an observation. + + You can get a view of the data for a particular interval list just by accessing + it with the name of the intervals object you want: + + obs.view["name_of_intervals"] + + Then you can use this to provide a view into either detdata or shared objects within + the observation. 
For example: + + print(obs.view["name_of_intervals"].detdata["signal"]) + + obs.view["bad_pointing"].shared["boresight"][:] = np.array([0., 0., 0., 1.]) + + """ def __init__(self, **kwargs): super().__init__(**kwargs) @@ -915,7 +1019,6 @@ def __get__(self, obj, cls=None): else: if not hasattr(obj, "_viewmgr"): obj._viewmgr = ViewMgr(obj) - print("ViewInterface __get__ created ", obj._viewmgr) return obj._viewmgr def __set__(self, obj, value): @@ -931,6 +1034,22 @@ class DistDetSamp(object): This is just a simple container for various properties of the distribution. Args: + samples (int): The total number of samples. + detectors (list): The list of detector names. + detector_sets (list): (Optional) List of lists containing detector names. + These discrete detector sets are used to distribute detectors- a detector + set will always be within a single row of the process grid. If None, + every detector is a set of one. + sample_sets (list): (Optional) List of lists of chunk sizes (integer numbers of + samples). These discrete sample sets are used to distribute sample data. + A sample set will always be within a single column of the process grid. If + None, any distribution break in the sample direction will happen at an + arbitrary place. The sum of all chunks must equal the total number of + samples. + comm (mpi4py.MPI.Comm): (Optional) The MPI communicator to use. + process_rows (int): (Optional) The size of the rectangular process grid + in the detector direction. This number must evenly divide into the size of + comm. If not specified, defaults to the size of the communicator. """ @@ -1056,18 +1175,21 @@ class Observation(MutableMapping): """Class representing the data for one observation. An Observation stores information about data distribution across one or more MPI - processes and is a container for three types of objects: + processes and is a container for four types of objects: * Local detector data (unique to each process). * Shared data that has one common copy for every node spanned by the observation. + * Intervals defining spans of data with some common characteristic. * Other arbitrary small metadata. - Small metadata can be store directly in the Observation using normal square + Small metadata can be stored directly in the Observation using normal square bracket "[]" access to elements (an Observation is a dictionary). Groups of detector data (e.g. "signal", "flags", etc) can be accessed in the separate - detector data dictionary (the "d" attribute). Shared data can be similarly stored - in the "shared" attribute. + detector data dictionary (the "detdata" attribute). Shared data can be similarly + stored in the "shared" attribute. Lists of intervals are accessed in the + "intervals" attribute and data views can use any interval list to access subsets + of detector and shared data. The detector data within an Observation is distributed among the processes in an MPI communicator. The processes in the communicator are arranged in a rectangular @@ -1112,7 +1234,7 @@ class Observation(MutableMapping): samples. process_rows (int): (Optional) The size of the rectangular process grid in the detector direction. This number must evenly divide into the size of - mpicomm. If not specified, defaults to the size of the communicator. + comm. If not specified, defaults to the size of the communicator. 
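+
+    Example (a sketch added for illustration- the names and values here are
+    hypothetical, and only the data manager interfaces documented above are
+    used):
+
+        obs["scan_type"] = "nominal"   # small metadata, dictionary style
+        obs.detdata.create("signal")   # per-detector float64 timestream
+        obs.shared.create(
+            "times", shape=(obs.n_local_samples,), dtype=np.float64
+        )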
""" @@ -1167,11 +1289,9 @@ def __init__( self._internal = dict() # Set up the data managers - self.detdata = DetDataMgr(self._samples, self.detectors) + self.detdata = DetDataMgr(self.local_detectors, self.n_local_samples) self.shared = SharedDataMgr( - self._samples, - self.detectors, self._comm, self.dist.comm_row, self.dist.comm_col, @@ -1274,7 +1394,7 @@ def comm_col_rank(self): # Detector distribution @property - def detectors(self): + def all_detectors(self): """ (list): All detectors. Convenience wrapper for telescope.focalplane.detectors """ @@ -1303,7 +1423,7 @@ def select_local_detectors(self, selection=None): # Detector set distribution @property - def detector_sets(self): + def all_detector_sets(self): """ (list): The total list of detector sets for this observation. """ @@ -1326,37 +1446,28 @@ def local_detector_sets(self): # Sample distribution @property - def n_sample(self): + def n_all_samples(self): """(int): the total number of samples in this observation.""" return self._samples @property - def local_samples(self): - """ - (tuple): The first element of the tuple is the first observation sample - assigned to this process. The second element of the tuple is the number of - samples assigned to this process. - """ - return self.dist.samps[self.dist.comm_row_rank] - - @property - def offset(self): + def local_index_offset(self): """ The first sample on this process, relative to the observation start. """ - return self.local_samples[0] + return self.dist.samps[self.dist.comm_row_rank][0] @property - def n_local(self): + def n_local_samples(self): """ The number of local samples on this process. """ - return self.local_samples[1] + return self.dist.samps[self.dist.comm_row_rank][1] # Sample set distribution @property - def sample_sets(self): + def all_sample_sets(self): """ (list): The input full list of sample sets used in data distribution """ @@ -1394,7 +1505,7 @@ def __len__(self): return len(self._internal) def __del__(self): - if hasattr(self, "d"): + if hasattr(self, "detdata"): self.detdata.clear() if hasattr(self, "shared"): self.shared.clear() @@ -1413,6 +1524,7 @@ def __repr__(self): val += "\n {} samples".format(self._samples) val += "\n shared: {}".format(self.shared) val += "\n detdata: {}".format(self.detdata) + val += "\n intervals: {}".format(self.intervals) val += "\n>" return val diff --git a/src/toast/tests/observation.py b/src/toast/tests/observation.py index 041a85504..b4ad6d94c 100644 --- a/src/toast/tests/observation.py +++ b/src/toast/tests/observation.py @@ -5,10 +5,14 @@ from .mpi import MPITestCase import os +import sys +import traceback import numpy as np import numpy.testing as nt +from pshmem import MPIShared + from ..instrument import Focalplane, Telescope from ..observation import DetectorData, Observation @@ -59,10 +63,152 @@ def test_detdata(self): def test_observation(self): # Populate the observations + np.random.seed(12345) rms = 10.0 for obs in self.data.obs: - n_samp = obs.n_local + n_samp = obs.n_local_samples dets = obs.local_detectors + n_det = len(dets) + + # Test all the different ways of assigning to shared objects + + sample_common = np.ravel(np.random.random((n_samp, 3))).reshape(-1, 3) + flag_common = np.zeros(n_samp, dtype=np.uint8) + det_common = np.random.random((3, 4, 5)) + all_common = np.random.random((2, 3, 4)) + + obs.shared.create( + "samp_A", + shape=sample_common.shape, + dtype=sample_common.dtype, + comm=obs.comm_col, + ) + if obs.comm_col_rank == 0: + obs.shared["samp_A"][:, :] = sample_common + else: + 
obs.shared["samp_A"][None] = None + obs.shared.create( + "det_A", + shape=det_common.shape, + dtype=det_common.dtype, + comm=obs.comm_row, + ) + if obs.comm_row_rank == 0: + obs.shared["det_A"][:, :, :] = det_common + else: + obs.shared["det_A"][None] = None + obs.shared.create( + "all_A", + shape=all_common.shape, + dtype=all_common.dtype, + comm=obs.comm, + ) + if obs.comm_rank == 0: + obs.shared["all_A"][:, :, :] = all_common + else: + obs.shared["all_A"][None] = None + + obs.shared.create( + "flg_A", + shape=flag_common.shape, + dtype=flag_common.dtype, + comm=obs.comm_col, + ) + if obs.comm_col_rank == 0: + obs.shared["flg_A"][:] = flag_common + else: + obs.shared["flg_A"][None] = None + + sh = MPIShared(sample_common.shape, sample_common.dtype, obs.comm_col) + + if obs.comm_col_rank == 0: + sh[:, :] = sample_common + else: + sh[None] = None + + obs.shared["samp_B"] = sh + + sh = MPIShared(flag_common.shape, flag_common.dtype, obs.comm_col) + if obs.comm_col_rank == 0: + sh[:] = flag_common + else: + sh[None] = None + obs.shared["flg_B"] = sh + + sh = MPIShared(det_common.shape, det_common.dtype, obs.comm_row) + if obs.comm_row_rank == 0: + sh[:, :, :] = det_common + else: + sh[None] = None + obs.shared["det_B"] = sh + + sh = MPIShared(all_common.shape, all_common.dtype, obs.comm) + if obs.comm_rank == 0: + sh[:, :, :] = all_common + else: + sh[None] = None + obs.shared["all_B"] = sh + + # this style of assignment only works for the default obs.comm + if obs.comm_rank == 0: + obs.shared["all_C"] = all_common + else: + obs.shared["all_C"] = None + + np.testing.assert_equal(obs.shared["samp_A"][:], sample_common) + np.testing.assert_equal(obs.shared["samp_B"][:], sample_common) + np.testing.assert_equal(obs.shared["det_A"][:], det_common) + np.testing.assert_equal(obs.shared["det_B"][:], det_common) + np.testing.assert_equal(obs.shared["all_A"][:], all_common) + np.testing.assert_equal(obs.shared["all_B"][:], all_common) + np.testing.assert_equal(obs.shared["all_C"][:], all_common) + np.testing.assert_equal(obs.shared["flg_A"][:], flag_common) + np.testing.assert_equal(obs.shared["flg_B"][:], flag_common) + + # Test different assignment methods for detdata + + signal = np.random.random((n_samp,)) + pntg = np.ones((n_samp, 4), dtype=np.float32) + flg = np.zeros((n_samp,), dtype=np.uint8) + + obs.detdata.create("sig_A", detshape=(), dtype=signal.dtype, detectors=None) + obs.detdata["sig_A"][:] = np.tile(signal, n_det).reshape((n_det, -1)) + + obs.detdata.create( + "pntg_A", detshape=(4,), dtype=pntg.dtype, detectors=None + ) + obs.detdata["pntg_A"][:] = np.tile(pntg, n_det).reshape((n_det, -1, 4)) + + obs.detdata.create("flg_A", detshape=None, dtype=flg.dtype, detectors=None) + obs.detdata["flg_A"][:] = np.tile(flg, n_det).reshape((n_det, -1)) + + dsig = DetectorData(obs.local_detectors, (n_samp,), np.float64) + dsig[:] = np.tile(signal, n_det).reshape((n_det, -1)) + + dpntg = DetectorData(obs.local_detectors, (n_samp, 4), np.float32) + dpntg[:] = np.tile(pntg, n_det).reshape((n_det, -1, 4)) + + dflg = DetectorData(obs.local_detectors, (n_samp,), np.uint8) + dflg[:] = np.tile(flg, n_det).reshape((n_det, -1)) + + obs.detdata["sig_B"] = dsig + obs.detdata["pntg_B"] = dpntg + obs.detdata["flg_B"] = dflg + + obs.detdata["sig_C"] = {d: signal for d in obs.local_detectors} + obs.detdata["pntg_C"] = {d: pntg for d in obs.local_detectors} + obs.detdata["flg_C"] = {d: flg for d in obs.local_detectors} + + obs.detdata["sig_D"] = signal + obs.detdata["pntg_D"] = pntg + obs.detdata["flg_D"] = flg + + 
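+            # (A sanity check sketch, in addition to the original assignments:
+            # the A, B, C and D styles above should give identical arrays.)
+            for d in dets:
+                np.testing.assert_equal(
+                    obs.detdata["sig_B"][d], obs.detdata["sig_A"][d]
+                )
+                np.testing.assert_equal(
+                    obs.detdata["sig_C"][d], obs.detdata["sig_A"][d]
+                )
+                np.testing.assert_equal(
+                    obs.detdata["sig_D"][d], obs.detdata["sig_A"][d]
+                )
+
+            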
obs.detdata["sig_E"] = np.tile(signal, n_det).reshape((n_det, -1)) + obs.detdata["pntg_E"] = np.tile(pntg, n_det).reshape((n_det, -1, 4)) + obs.detdata["flg_E"] = np.tile(flg, n_det).reshape((n_det, -1)) + + # Now add some more normal data + fake_bore = np.ravel(np.random.random((n_samp, 4))).reshape(-1, 4) fake_flags = np.random.uniform(low=0, high=2, size=n_samp).astype( np.uint8, copy=True @@ -73,34 +219,56 @@ def test_observation(self): if obs.comm_col_rank == 0: bore = fake_bore common_flags = fake_flags - times = np.arange(n_samp) + times = np.arange(n_samp, dtype=np.float64) # Construct some default shared objects from local buffers - obs.shared.create("boresight_azel", original=bore, comm=obs.comm_col) - obs.shared.create("boresight_radec", original=bore, comm=obs.comm_col) - obs.shared.create("flags", original=common_flags, comm=obs.comm_col) - obs.shared.create("timestamps", original=times, comm=obs.comm_col) + obs.shared.create("boresight_azel", shape=(n_samp, 4), comm=obs.comm_col) + obs.shared["boresight_azel"][:, :] = bore + + obs.shared.create("boresight_radec", shape=(n_samp, 4), comm=obs.comm_col) + obs.shared["boresight_radec"][:, :] = bore + + obs.shared.create( + "flags", shape=(n_samp,), dtype=np.uint8, comm=obs.comm_col + ) + obs.shared["flags"][:] = common_flags + + obs.shared.create( + "timestamps", shape=(n_samp,), dtype=np.float64, comm=obs.comm_col + ) + obs.shared["timestamps"][:] = times + + # Create some shared objects over the whole comm + local_array = None + if obs.comm_rank == 0: + local_array = np.arange(100, dtype=np.int16) + obs.shared["everywhere"] = local_array # Allocate the default detector data and flags - obs.detdata.create("signal", shape=(n_samp,), dtype=np.float64) - obs.detdata.create("flags", shape=(n_samp,), dtype=np.uint16) + obs.detdata.create("signal", dtype=np.float64) + obs.detdata.create("flags", detshape=(), dtype=np.uint16) # Allocate some other detector data - obs.detdata.create("calibration", shape=(n_samp,), dtype=np.float32) - obs.detdata.create("sim_noise", shape=(n_samp,), dtype=np.float64) + obs.detdata["calibration"] = np.ones( + (len(obs.local_detectors), obs.n_local_samples), dtype=np.float64 + ) + + obs.detdata["sim_noise"] = np.zeros( + (obs.n_local_samples,), dtype=np.float64 + ) # Store some values for detector data for det in dets: - obs.detdata["signal"][det, :] = np.random.normal( + obs.detdata["signal"][det] = np.random.normal( loc=0.0, scale=rms, size=n_samp ) - obs.detdata["calibration"][det, :] = np.random.normal( + obs.detdata["calibration"][det] = np.random.normal( loc=0.0, scale=rms, size=n_samp ).astype(np.float32) - obs.detdata["sim_noise"][det, :] = np.random.normal( + obs.detdata["sim_noise"][det] = np.random.normal( loc=0.0, scale=rms, size=n_samp ) - obs.detdata["flags"][det, :] = fake_flags + obs.detdata["flags"][det] = fake_flags # Make some shared objects, one per detector, shared across the process # rows. diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb index 96ad3303f..256fa1fbe 100644 --- a/tutorial/01_Introduction/intro.ipynb +++ b/tutorial/01_Introduction/intro.ipynb @@ -55,6 +55,22 @@ "help(toast)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "toast?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "markdown", "metadata": {}, @@ -69,7 +85,9 @@ "outputs": [], "source": [ "env = toast.Environment.get()\n", - "print(env)" + "print(env)\n", + "\n", + "# FIXME: Document how to change these." ] }, { @@ -82,7 +100,9 @@ "\n", "The basic data model in a toast workflow consists of a set of `Observation` instances, each of which is associated with a `Focalplane` on a `Telescope`. Note that a Focalplane instance is probably just a sub-set of detectors on the actual physical focalplane. These detectors must be co-sampled and likely have other things in common (for example, they are on the same wafer or are correlated in some other way). For this notebook, we will manually create these objects, but usually these will be loaded / created by some experiment-specific function.\n", "\n", - "MPI is completely optional in TOAST, although it is required to achieve good parallel performance on traditional CPU systems. In this section we show how interactive use of TOAST can be done without any reference to MPI. In a later section we show how to make use of distributed data and operations.\n" + "MPI is completely optional in TOAST, although it is required to achieve good parallel performance on traditional CPU systems. In this section we show how interactive use of TOAST can be done without any reference to MPI. In a later section we show how to make use of distributed data and operations.\n", + "\n", + "**FIXME: reference parallel intro and document mpi4py use in jupyter**\n" ] }, { @@ -111,7 +131,9 @@ " fmin=1.0e-5,\n", " alpha=1.0,\n", " fknee=0.05,\n", - ")" + ")\n", + "\n", + "# FIXME: add units throughout the codebase\n" ] }, { @@ -217,7 +239,10 @@ "source": [ "## Detector Data\n", "\n", - "Detector data has some unique properties that we often want to leverage in our analyses. Each process has some detectors and some time slice of the observation. In the case of a single process like this example, all the data is local. Before using data we need to create it within the empty Observation. Here we create a default \"signal\" object for the detectors. The detector data is accessed under the \"d\" attribute of the observation:" + "Detector data has some unique properties that we often want to leverage in our analyses. Each process has some detectors and some time slice of the observation. In the case of a single process like this example, all the data is local. Before using data we need to create it within the empty Observation. Here we create a default \"signal\" object for the detectors. 
The detector data is accessed under the `detdata` attribute of the observation:\n", + "\n", + "\n", + "** FIXME: talk about naming conventions**" ] }, { @@ -250,7 +275,9 @@ "outputs": [], "source": [ "ob.detdata.create(\"calibrated\")\n", - "print(ob.detdata)" + "print(ob.detdata)\n", + "\n", + "# FIXME: arrays initialized to zero" ] }, { @@ -887,6 +914,8 @@ "metadata": {}, "outputs": [], "source": [ + "# FIXME: import toast.config as tc\n", + "\n", "from toast.config import dump_toml, load_toml, load_json, dump_json, load_config, create\n", "import tempfile\n", "from pprint import PrettyPrinter\n", From 208ebb48254b57c509d730aa5bc823bac26e6d8d Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 22 Oct 2020 14:01:01 -0700 Subject: [PATCH 006/690] Manually merge changes from 2216b0b2780 --- src/toast/CMakeLists.txt | 1 + src/toast/instrument.py | 35 ++++++++++++++++++----------------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index faebdd263..635f0dc40 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -104,6 +104,7 @@ install(FILES healpix.py weather.py schedule.py + spt3g.py "RELEASE" DESTINATION ${PYTHON_SITE}/toast ) diff --git a/src/toast/instrument.py b/src/toast/instrument.py index 612af7776..f9af16231 100644 --- a/src/toast/instrument.py +++ b/src/toast/instrument.py @@ -23,6 +23,8 @@ # detector-specific properties. Unfortunately, this will break the API, so should # be done as part of the 3.0 transition. +XAXIS, YAXIS, ZAXIS = np.eye(3) + class Focalplane(object): """Class representing the focalplane for one observation. @@ -74,19 +76,22 @@ def __init__( self._get_pol_efficiency() def _get_pol_angles(self): - """ Get the detector polarization angles from the quaternions - """ + """Get the detector polarization angles from the quaternions""" for detname, detdata in self.detector_data.items(): - if "pol_angle_deg" not in detdata and "pol_angle_rad" not in detdata: - quat = detdata["quat"] - psi = qarray.to_angles(quat)[2] - detdata["pol_angle_rad"] = psi + if "pol_angle_deg" in detdata or "pol_angle_rad" in detdata: + continue + quat = detdata["quat"] + theta, phi = qarray.to_position(quat) + yrot = qarray.rotation(YAXIS, -theta) + zrot = qarray.rotation(ZAXIS, -phi) + rot = qarray.norm(qarray.mult(yrot, zrot)) + pol_rot = qarray.mult(rot, quat) + pol_angle = qarray.to_angles(pol_rot)[2] + detdata["pol_angle_rad"] = pol_angle return def _get_pol_efficiency(self): - """ Get the polarization efficiency from polarization leakage - or vice versa - """ + """Get the polarization efficiency from polarization leakage or vice versa""" for detname, detdata in self.detector_data.items(): if "pol_leakage" in detdata and "pol_efficiency" not in detdata: # Derive efficiency from leakage @@ -125,8 +130,7 @@ def __setitem__(self, key, value): self.detector_data[key]["UID"] = name_UID(key) def reset_properties(self): - """ Clear automatic properties so they will be re-generated - """ + """Clear automatic properties so they will be re-generated""" self._detweights = None self._radius = None self._detquats = None @@ -145,8 +149,7 @@ def detector_index(self): @property def detector_weights(self): - """ Return the inverse noise variance weights [K_CMB^-2] - """ + """Return the inverse noise variance weights [K_CMB^-2]""" if self._detweights is None: self._detweights = {} for detname, detdata in self.detector_data.items(): @@ -161,8 +164,7 @@ def detector_weights(self): @property def radius(self): - """ 
The focal plane radius in degrees - """ + """The focal plane radius in degrees""" if self._radius is None: # Find the largest distance from the bore sight ZAXIS = np.array([0, 0, 1]) @@ -224,8 +226,7 @@ def __repr__(self): class Telescope(object): - """Class representing telescope properties for one observation. - """ + """Class representing telescope properties for one observation.""" def __init__(self, name, id=None, focalplane=None, site=None, coord=None): self.name = name From 9198ead4f92a6c0c9585e82b149df9460918737a Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 29 Oct 2020 10:11:14 -0700 Subject: [PATCH 007/690] Config file load / dump / instantiation working again. --- pipelines/toast_future.py | 51 ++-- src/toast/config.py | 333 ++++++++++++++++++++++---- src/toast/future_ops/sim_satellite.py | 27 ++- src/toast/future_ops/sim_tod_noise.py | 12 +- src/toast/observation.py | 2 +- src/toast/operator.py | 40 +++- src/toast/tests/_helpers.py | 19 +- src/toast/tests/config.py | 155 +++++------- src/toast/tests/observation.py | 2 - src/toast/tests/runner.py | 8 +- src/toast/traits.py | 223 +++++------------ 11 files changed, 509 insertions(+), 363 deletions(-) diff --git a/pipelines/toast_future.py b/pipelines/toast_future.py index b1f562184..f5149469e 100644 --- a/pipelines/toast_future.py +++ b/pipelines/toast_future.py @@ -25,7 +25,7 @@ from toast.mpi import get_world, Comm -from toast.dist import Data +from toast.data import Data from toast.utils import Logger, Environment @@ -33,7 +33,7 @@ from toast.timing import dump as dump_timing -from toast import dump_toml, parse_config, create +from toast.config import dump_toml, parse_config, create from toast import future_ops as ops @@ -49,11 +49,11 @@ def main(): mpiworld, procs, rank = get_world() # The operators used in this script: - operators = { - "sim_satellite": ops.SimSatellite, - "noise_model": ops.DefaultNoiseModel, - "sim_noise": ops.SimNoise, - } + operators = [ + ops.SimSatellite(name="sim_satellite"), + ops.DefaultNoiseModel(name="noise_model"), + ops.SimNoise(name="sim_noise"), + ] # Argument parsing parser = argparse.ArgumentParser(description="Demo of TOAST future features.") @@ -79,12 +79,14 @@ def main(): # Build a config dictionary starting from the operator defaults, overriding with any # config files specified with the '--config' commandline option, followed by any # individually specified parameter overrides. - config, argvars = parse_config(parser, operators=operators) + config, args = parse_config(parser, operators=operators) + print(config) + print(args) # The satellite simulation operator requires a Telescope object. Make a fake # focalplane and telescope focalplane = fake_hexagon_focalplane( - argvars["focalplane_pixels"], + args.focalplane_pixels, 10.0, samplerate=10.0, epsilon=0.0, @@ -95,32 +97,33 @@ def main(): ) print(focalplane) - # Set the telecope option of the satellite simulation operator. If we were using - # an experiment-specific operator, this would be done internally. - - config["operators"]["sim_satellite"]["telescope"] = Telescope( - name="fake", focalplane=focalplane - ) - # Log the config that was actually used at runtime. out = "future_config_log.toml" - dump_config(out, config) + dump_toml(out, config) # Instantiate our operators run = create(config) + # Add the telescope class to the satellite simulation operator (For a real + # experiment, this kind of thing would be done automatically based on other + # options to the operator). 
+ + run["operators"]["sim_satellite"].telescope = Telescope( + name="fake", focalplane=focalplane + ) + # Put our operators into a pipeline in a specific order, running all detectors at # once. - pipe_opts = ops.Pipeline.defaults() - pipe_opts["detector_sets"] = "ALL" - pipe_opts["operators"] = [ - run["operators"][x] for x in ["sim_satellite", "noise_model", "sim_noise"] - ] - pipe = ops.Pipeline(pipe_opts) + pipe = ops.Pipeline( + detector_sets="ALL", + operators=[ + run["operators"][x] for x in ["sim_satellite", "noise_model", "sim_noise"] + ], + ) # Set up the communicator - comm = Comm(world=mpiworld, groupsize=argvars["group_size"]) + comm = Comm(world=mpiworld, groupsize=args.group_size) # Start with an empty data object (the first operator in our pipeline will create # Observations in the data). diff --git a/src/toast/config.py b/src/toast/config.py index 61f69a22f..600076831 100644 --- a/src/toast/config.py +++ b/src/toast/config.py @@ -25,11 +25,199 @@ from .operator import Operator -from .traits import TraitConfig, build_config, add_config_args, args_update_config +from .traits import TraitConfig from . import future_ops as ops +def build_config(objects): + """Build a configuration of current values. + + Args: + objects (list): A list of class instances to add to the config. These objects + must inherit from the TraitConfig base class. + + Returns: + (dict): The configuration. + + """ + conf = OrderedDict() + for o in objects: + if not isinstance(o, Operator): + raise RuntimeError("The object list should contain Operator instances") + if o.name is None: + raise RuntimeError("Cannot buid config from Operators without a name") + conf = o.get_config(input=conf) + return conf + + +def add_config_args(parser, conf, section, ignore=list(), prefix="", separator=":"): + """Add arguments to an argparser for each parameter in a config dictionary. + + Using a previously created config dictionary, add a commandline argument for each + object parameter in a section of the config. The type, units, and help string for + the commandline argument come from the config, which is in turn built from the + class traits of the object. Boolean parameters are converted to store_true or + store_false actions depending on their current value. + + Args: + parser (ArgumentParser): The parser to append to. + conf (dict): The configuration dictionary. + section (str): Process objects in this section of the config. + ignore (list): List of object parameters to ignore when adding args. + prefix (str): Prepend this to the beginning of all options. + separator (str): Use this character between the class name and parameter. + + Returns: + None + + """ + parent = conf + if section is not None: + path = section.split("/") + for p in path: + if p not in parent: + msg = "section {} does not exist in config".format(section) + raise RuntimeError(msg) + parent = parent[p] + for obj, props in parent.items(): + for name, info in props.items(): + # print("examine options for {} = {}".format(name, info)) + if name in ignore: + # Skip this as requested + # print(" ignoring") + continue + if name == "class": + # This is not a user-configurable parameter. + # print(" skipping") + continue + if info["type"] not in ["bool", "int", "float", "str", "Quantity"]: + # This is not something that we can get from parsing commandline + # options. Skip it. 
+ # print(" no type- skipping") + continue + if info["type"] == "bool": + # special case for boolean + option = "--{}{}{}{}".format(prefix, obj, separator, name) + act = "store_true" + if info["value"] == "True": + act = "store_false" + option = "--{}{}{}no_{}".format(prefix, obj, separator, name) + # print(" add bool argument {}".format(option)) + parser.add_argument( + option, + required=False, + default=info["value"], + action=act, + help=info["help"], + ) + else: + option = "--{}{}{}{}".format(prefix, obj, separator, name) + default = None + typ = None + hlp = info["help"] + if info["type"] == "int": + typ = int + if info["value"] != "None": + default = int(info["value"]) + elif info["type"] == "float": + typ = float + if info["value"] != "None": + default = float(info["value"]) + elif info["type"] == "str": + typ = str + if info["value"] != "None": + default = info["value"] + elif info["type"] == "Quantity": + typ = u.Quantity + if info["value"] != "None": + default = u.Quantity( + "{} {}".format(info["value"], info["unit"]) + ) + # print(" add argument {}".format(option)) + parser.add_argument( + option, + required=False, + default=default, + type=typ, + help=hlp, + ) + return + + +def args_update_config(args, conf, defaults, section, prefix="", separator=":"): + """Override options in a config dictionary from args namespace. + + Args: + args (namespace): The args namespace returned by ArgumentParser.parse_args() + conf (dict): The configuration to update. + defaults (dict): The starting default config, used to detect which options from + argparse have been changed by the user. + section (str): Process objects in this section of the config. + prefix (str): Prepend this to the beginning of all options. + separator (str): Use this character between the class name and parameter. + + Returns: + (namespace): The un-parsed remaining arg vars. + + """ + remain = copy.deepcopy(args) + parent = conf + dparent = defaults + if section is not None: + path = section.split("/") + for p in path: + if p not in parent: + msg = "section {} does not exist in config".format(section) + raise RuntimeError(msg) + parent = parent[p] + for p in path: + if p not in dparent: + msg = "section {} does not exist in defaults".format(section) + raise RuntimeError(msg) + dparent = dparent[p] + # Build the regex match of option names + obj_pat = re.compile("{}(.*?){}(.*)".format(prefix, separator)) + for arg in vars(args): + val = getattr(args, arg) + obj_mat = obj_pat.match(arg) + if obj_mat is not None: + name = obj_mat.group(1) + optname = obj_mat.group(2) + if name not in parent: + msg = ( + "Parsing option '{}', config does not have object named {}".format( + arg, name + ) + ) + raise RuntimeError(msg) + if name not in dparent: + msg = "Parsing option '{}', defaults does not have object named {}".format( + arg, name + ) + raise RuntimeError(msg) + # Only update config options which are different than the default. + # Otherwise we would be overwriting values from any config files with the + # defaults from argparse. + if val is None: + val = "None" + else: + if dparent[name][optname]["unit"] != "None": + # This option is a quantity + val = "{:0.14e}".format( + val.to_value(u.Unit(dparent[name][optname]["unit"])) + ) + elif dparent[name][optname]["type"] == "float": + val = "{:0.14e}".format(val) + else: + val = str(val) + if val != dparent[name][optname]["value"]: + parent[name][optname]["value"] = val + # This arg was recognized, remove from the namespace. 
+ delattr(remain, arg) + return remain + + def parse_config(parser, operators=list()): """Load command line arguments associated with object properties. @@ -47,13 +235,13 @@ def parse_config(parser, operators=list()): Args: parser (ArgumentParser): The argparse parser. - operators (list): The operator classes to add to the commandline. Note that - if these are classes, then the commandline names will be the class names. - If you pass a list of instances with the name attribute set, then the - commandline names will use these. + operators (list): The operator instances to add to the commandline. These + instances should have their "name" attribute set to something meaningful, + since that name is used to construct the commandline options. Returns: - (dict): The config dictionary. + (tuple): The (config dictionary, args). The args namespace contains all the + remaining parameters after extracting the operator options. """ @@ -108,11 +296,28 @@ def _merge_config(loaded, original): original[section] = objs -def _load_toml_trait(tbl): +def _load_toml_traits(tbl): + # print("LOAD TraitConfig object {}".format(tbl), flush=True) result = OrderedDict() for k in tbl.keys(): if k == "class": result[k] = tbl[k] + elif isinstance(tbl[k], tomlkit.items.Table): + # This is a dictionary trait + result[k] = OrderedDict() + result[k]["value"] = OrderedDict() + result[k]["type"] = "dict" + result[k]["unit"] = "None" + for tk, tv in tbl[k].items(): + result[k]["value"][str(tk)] = str(tv) + elif isinstance(tbl[k], tomlkit.items.Array): + # This is a list + result[k] = OrderedDict() + result[k]["value"] = list() + result[k]["type"] = "list" + result[k]["unit"] = "None" + for it in tbl[k]: + result[k]["value"].append(str(it)) elif isinstance(tbl[k], str): if tbl[k] == "None": # Copy None values. There is no way to determine the type in this case. @@ -160,6 +365,7 @@ def _load_toml_trait(tbl): result[k]["value"] = "{:0.14e}".format(tbl[k]) result[k]["type"] = "float" result[k]["unit"] = "None" + # print("LOAD toml result = {}".format(result)) return result @@ -193,7 +399,8 @@ def convert_node(raw_root, conf_root): subkeys = raw_root[k].keys() # This element is table-like. 
if "class" in subkeys: - conf_root[k] = _load_toml_trait(raw_root[k]) + # print("LOAD found traitconfig {}".format(k), flush=True) + conf_root[k] = _load_toml_traits(raw_root[k]) else: # This is just a dictionary conf_root[k] = OrderedDict() @@ -291,9 +498,15 @@ def dump_toml(file, conf): def convert_node(conf_root, table_root, indent_size): """Helper function to recursively convert dictionaries to tables""" if isinstance(conf_root, (dict, OrderedDict)): + # print("{}found dict".format(" " * indent_size)) for k in list(conf_root.keys()): + # print("{} examine key {}".format(" " * indent_size, k)) if isinstance(conf_root[k], (dict, OrderedDict)): + # print("{} key is a dict".format(" " * indent_size)) if "value" in conf_root[k] and "type" in conf_root[k]: + # print( + # "{} found value and type subkeys".format(" " * indent_size) + # ) # this is a trait unit = None if "unit" in conf_root[k]: @@ -301,6 +514,14 @@ def convert_node(conf_root, table_root, indent_size): help = None if "help" in conf_root[k]: help = conf_root[k]["help"] + # print( + # "{} dumping trait {}, {}, {}".format( + # " " * indent_size, + # k, + # conf_root[k]["value"], + # conf_root[k]["type"], + # ) + # ) _dump_toml_trait( table_root, indent_size, @@ -311,10 +532,15 @@ def convert_node(conf_root, table_root, indent_size): help, ) else: + # print("{} not a trait- descending".format(" " * indent_size)) # descend tree table_root[k] = table() convert_node(conf_root[k], table_root[k], indent_size + 2) else: + # print("{} value = {}".format(" " * indent_size, conf_root[k])) + # print( + # "{} key is not a dict, add to table".format(" " * indent_size) + # ) table_root.add(k, conf_root[k]) table_root[k].indent(indent_size) else: @@ -448,40 +674,52 @@ def find_object_ref(top, name): # See if the referenced object exists path = mat.group(1) path_keys = path.split("/") + # print("OBJREF checking {}".format(path_keys)) found = get_node(top, path_keys) + if found is not None: + # It exists, but is this a TraitConfig object that has not yet been + # created? + if isinstance(found, (dict, OrderedDict)) and "class" in found: + # Yes... + found = None + # print("OBJREF found = {}".format(found)) return found - def parse_tree(in_tree, out_tree, cursor): + def parse_tree(tree, cursor): unresolved = 0 # print("PARSE ------------------------") # The node at this cursor location # print("PARSE fetching node at cursor {}".format(cursor)) - in_node = get_node(in_tree, cursor) + node = get_node(tree, cursor) - # print("PARSE at input {} got node {}".format(cursor, in_node)) + # print("PARSE at cursor {} got node {}".format(cursor, node)) # The output parent node parent_cursor = list(cursor) node_name = parent_cursor.pop() - out_parent = get_node(out_tree, parent_cursor) - # print("PARSE at output parent {} got node {}".format(parent_cursor, out_parent)) - - # The output node - node_type = type(in_node) - out_parent[node_name] = node_type() + parent = get_node(tree, parent_cursor) + # print("PARSE at parent {} got node {}".format(parent_cursor, parent)) # In terms of this function, "nodes" are always dictionary-like - for child_key, child_val in in_node.items(): - if isinstance(child_val, str): + for child_key in list(node.keys()): + # We are modifying the tree in place, so we get a new reference to our + # node each time. 
+ child_cursor = list(cursor) + child_cursor.append(child_key) + child_val = get_node(tree, child_cursor) + + if isinstance(child_val, TraitConfig): + # This is an already-created object + continue + elif isinstance(child_val, str): # print("PARSE child value {} is a string".format(child_val)) # See if this string is an object reference and try to resolve it. - check = find_object_ref(out_tree, child_val) + check = find_object_ref(tree, child_val) if check is None: unresolved += 1 - out_parent[node_name][child_key] = child_val else: - out_parent[node_name][child_key] = check + parent[node_name][child_key] = check else: is_dict = None try: @@ -491,57 +729,55 @@ def parse_tree(in_tree, out_tree, cursor): except: is_dict = False if is_dict: - child_cursor = list(cursor) - child_cursor.append(child_key) # print( # "PARSE child value {} is a dict, descend with cursor {}".format( # child_val, child_cursor # ) # ) - unresolved += parse_tree(in_tree, out_tree, child_cursor) + unresolved += parse_tree(tree, child_cursor) else: # Not a dictionary + is_list = None try: _ = len(child_val) - out_parent[node_name][child_key] = [ - None for x in range(len(child_val)) - ] - + # It is a list + is_list = True + except: + is_list = False + if is_list: for elem in range(len(child_val)): - found = find_object_ref(out_tree, child_val[elem]) + found = find_object_ref(tree, child_val[elem]) + # print( + # "find_object {} --> {}".format(child_val[elem], found) + # ) if found is None: unresolved += 1 - out_parent[node_name][child_key][elem] = child_val[elem] else: - out_parent[node_name][child_key][elem] = found + parent[node_name][child_key][elem] = found # print("PARSE child value {} is a list".format(child_val)) - except: - # Not a list / array, just leave it alone - # print("PARSE child value {} is not modified".format(child_val)) - out_parent[node_name][child_key] = child_val # If this node is an object and all refs exist, then create it. Otherwise # leave it alone. # print( - # "PARSE unresolved = {}, out_parent[{}] has class? {}".format( - # unresolved, node_name, ("class" in out_parent[node_name]) + # "PARSE unresolved = {}, parent[{}] has class? {}".format( + # unresolved, node_name, ("class" in parent[node_name]) # ) # ) - if unresolved == 0 and "class" in out_parent[node_name]: + if unresolved == 0 and "class" in parent[node_name]: # We have a TraitConfig object with all references resolved. # Instantiate it. 
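        # (parse_tree returns the number of unresolved references it saw; the
        # driver loop below re-walks the tree until that count reaches zero or
        # stops decreasing, so objects that refer to other objects are created
        # once their dependencies exist.)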
# print("PARSE creating TraitConfig {}".format(node_name)) - obj = TraitConfig.from_config(node_name, out_parent[node_name]) + obj = TraitConfig.from_config(node_name, parent[node_name]) # print("PARSE instantiated {}".format(obj)) - out_parent[node_name] = obj + parent[node_name] = obj - # print("PARSE VERIFY parent[{}] = {}".format(node_name, out_parent[node_name])) - # print("PARSE out_tree now:\n", out_tree, "\n--------------") + # print("PARSE VERIFY parent[{}] = {}".format(node_name, parent[node_name])) + # print("PARSE tree now:\n", tree, "\n--------------") return unresolved # Iteratively instantiate objects - out = OrderedDict() + out = copy.deepcopy(conf) done = False last_unresolved = None @@ -551,13 +787,12 @@ def parse_tree(in_tree, out_tree, cursor): # print("PARSE iter ", it) done = True unresolved = 0 - for sect in list(conf.keys()): - # print("PARSE examine ", sect, "-->", type(conf[sect])) - if not isinstance(conf[sect], (dict, OrderedDict)): + for sect in list(out.keys()): + # print("PARSE examine ", sect, "-->", type(out[sect])) + if not isinstance(out[sect], (dict, OrderedDict)): continue - out[sect] = OrderedDict() # print("PARSE section ", sect) - unresolved += parse_tree(conf, out, [sect]) + unresolved += parse_tree(out, [sect]) if last_unresolved is not None: if unresolved == last_unresolved: diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py index f31e57f56..b38698dff 100644 --- a/src/toast/future_ops/sim_satellite.py +++ b/src/toast/future_ops/sim_satellite.py @@ -141,7 +141,8 @@ def satellite_scanning( env = Environment.get() tod_buffer_length = env.tod_buffer_length() - first_samp, n_samp = obs.local_samples + first_samp = obs.local_index_offset + n_samp = obs.n_local_samples obs.shared.create(obs_key, shape=(n_samp, 4), dtype=np.float64, comm=obs.comm_col) # Temporary buffer @@ -411,25 +412,25 @@ def _exec(self, data, detectors=None, **kwargs): # and velocity. 
obs.shared.create( self.times, - shape=(obs.n_local,), + shape=(obs.n_local_samples,), dtype=np.float64, comm=obs.comm_col, ) obs.shared.create( self.flags, - shape=(obs.n_local,), + shape=(obs.n_local_samples,), dtype=np.uint8, comm=obs.comm_col, ) obs.shared.create( self.position, - shape=(obs.n_local, 3), + shape=(obs.n_local_samples, 3), dtype=np.float64, comm=obs.comm_col, ) obs.shared.create( self.velocity, - shape=(obs.n_local, 3), + shape=(obs.n_local_samples, 3), dtype=np.float64, comm=obs.comm_col, ) @@ -439,15 +440,17 @@ def _exec(self, data, detectors=None, **kwargs): position = None velocity = None if obs.comm_col_rank == 0: - start_abs = obs.offset + obsrange[ob].first + start_abs = obs.local_index_offset + obsrange[ob].first start_time = ( obsrange[ob].start + float(start_abs) / focalplane.sample_rate ) - stop_time = start_time + float(obs.n_local) / focalplane.sample_rate + stop_time = ( + start_time + float(obs.n_local_samples) / focalplane.sample_rate + ) stamps = np.linspace( start_time, stop_time, - num=obs.n_local, + num=obs.n_local_samples, endpoint=False, dtype=np.float64, ) @@ -459,14 +462,14 @@ def _exec(self, data, detectors=None, **kwargs): (start_time - self.start_time.to_value(u.second)) * self._radpersec, 2.0 * np.pi, ) - ang = radinc * np.arange(obs.n_local, dtype=np.float64) + rad + ang = radinc * np.arange(obs.n_local_samples, dtype=np.float64) + rad x = self._AU * np.cos(ang) y = self._AU * np.sin(ang) z = np.zeros_like(x) position = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3)) ang = ( - radinc * np.arange(obs.n_local, dtype=np.float64) + radinc * np.arange(obs.n_local_samples, dtype=np.float64) + rad + (0.5 * np.pi) ) @@ -480,14 +483,14 @@ def _exec(self, data, detectors=None, **kwargs): obs.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0) # Create boresight pointing - start_abs = obs.offset + obsrange[ob].first + start_abs = obs.local_index_offset + obsrange[ob].first degday = 360.0 / 365.25 q_prec = None if obs.comm_col_rank == 0: q_prec = slew_precession_axis( first_samp=start_abs, - n_samp=obs.n_local, + n_samp=obs.n_local_samples, sample_rate=focalplane.sample_rate, deg_day=degday, ) diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/future_ops/sim_tod_noise.py index a31d13284..b7771eb95 100644 --- a/src/toast/future_ops/sim_tod_noise.py +++ b/src/toast/future_ops/sim_tod_noise.py @@ -14,7 +14,7 @@ from ..operator import Operator -from ..utils import rate_from_times, Logger +from ..utils import rate_from_times, Logger, AlignedF64 from .._libtoast import tod_sim_noise_timestream @@ -276,9 +276,11 @@ def _exec(self, data, detectors=None, **kwargs): # Create output if it does not exist if self.out not in obs: - obs.detdata.create(self.out, shape=(1,), dtype=np.float64) + obs.detdata.create(self.out, detshape=(), dtype=np.float64) - (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(obs.shared[self.times]) + (rate, dt, dt_min, dt_max, dt_std) = rate_from_times( + obs.shared[self.times].data + ) for key in nse.keys: # Check if noise matching this PSD key is needed @@ -296,8 +298,8 @@ def _exec(self, data, detectors=None, **kwargs): obsindx, nse.index(key), rate, - obs.offset + global_offset, - obs.n_local, + obs.local_index_offset + global_offset, + obs.n_local_samples, self._oversample, nse.freq(key), nse.psd(key), diff --git a/src/toast/observation.py b/src/toast/observation.py index 4d84b49fb..70f2fd239 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -693,7 +693,7 @@ def __del__(self): 
self.clear() def __repr__(self): - val = " Date: Thu, 29 Oct 2020 15:31:13 -0700 Subject: [PATCH 008/690] Split intro tutorial further. Work on addressing feedback. --- pipelines/toast_future.py | 10 +- src/toast/observation.py | 8 +- tutorial/01_Introduction/intro.ipynb | 440 ++++-------------- tutorial/01_Introduction/intro_parallel.ipynb | 5 +- .../01_Introduction/intro_utilities.ipynb | 333 +++++++++++++ 5 files changed, 439 insertions(+), 357 deletions(-) create mode 100644 tutorial/01_Introduction/intro_utilities.ipynb diff --git a/pipelines/toast_future.py b/pipelines/toast_future.py index f5149469e..7ed893fea 100644 --- a/pipelines/toast_future.py +++ b/pipelines/toast_future.py @@ -80,8 +80,6 @@ def main(): # config files specified with the '--config' commandline option, followed by any # individually specified parameter overrides. config, args = parse_config(parser, operators=operators) - print(config) - print(args) # The satellite simulation operator requires a Telescope object. Make a fake # focalplane and telescope @@ -99,7 +97,8 @@ def main(): # Log the config that was actually used at runtime. out = "future_config_log.toml" - dump_toml(out, config) + if rank == 0: + dump_toml(out, config) # Instantiate our operators run = create(config) @@ -129,9 +128,8 @@ def main(): # Observations in the data). data = Data(comm=comm) - # Run the pipeline - pipe.exec(data) - pipe.finalize(data) + # Run the pipeline all at once + pipe.apply(data) # Print the resulting data for ob in data.obs: diff --git a/src/toast/observation.py b/src/toast/observation.py index 70f2fd239..ceba6f9a9 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -410,7 +410,7 @@ def __setitem__(self, key, value): raise ValueError(msg) if ddata.shape[0] != self.samples: msg = "Assigment dictionary detector {} has {} samples instead of {} in the observation".format( - ddata.shape[0], self.samples + d, ddata.shape[0], self.samples ) raise ValueError(msg) if detshape is None: @@ -1217,7 +1217,7 @@ class Observation(MutableMapping): Args: telescope (Telescope): An instance of a Telescope object. - samples (int): The total number of samples for this observation. + n_samples (int): The total number of samples for this observation. name (str): (Optional) The observation name. UID (int): (Optional) The Unique ID for this observation. If not specified, the UID will be computed from a hash of the name. @@ -1243,7 +1243,7 @@ class Observation(MutableMapping): def __init__( self, telescope, - samples, + n_samples, name=None, UID=None, comm=None, @@ -1253,7 +1253,7 @@ def __init__( ): log = Logger.get() self._telescope = telescope - self._samples = samples + self._samples = n_samples self._name = name self._UID = UID self._comm = comm diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb index 256fa1fbe..3b821201b 100644 --- a/tutorial/01_Introduction/intro.ipynb +++ b/tutorial/01_Introduction/intro.ipynb @@ -64,13 +64,6 @@ "toast?" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", "metadata": {}, @@ -85,9 +78,14 @@ "outputs": [], "source": [ "env = toast.Environment.get()\n", - "print(env)\n", - "\n", - "# FIXME: Document how to change these." 
+ "print(env)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The logging level can be changed by either setting the `TOAST_LOGLEVEL` environment variable to one of the supported levels (`VERBOSE`, `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`) or by using the `set_log_level()` method of the `Environment` class. The maximum number of threads is controlled by the standard `OMP_NUM_THREADS` environment variable." ] }, { @@ -100,9 +98,7 @@ "\n", "The basic data model in a toast workflow consists of a set of `Observation` instances, each of which is associated with a `Focalplane` on a `Telescope`. Note that a Focalplane instance is probably just a sub-set of detectors on the actual physical focalplane. These detectors must be co-sampled and likely have other things in common (for example, they are on the same wafer or are correlated in some other way). For this notebook, we will manually create these objects, but usually these will be loaded / created by some experiment-specific function.\n", "\n", - "MPI is completely optional in TOAST, although it is required to achieve good parallel performance on traditional CPU systems. In this section we show how interactive use of TOAST can be done without any reference to MPI. In a later section we show how to make use of distributed data and operations.\n", - "\n", - "**FIXME: reference parallel intro and document mpi4py use in jupyter**\n" + "MPI is completely optional in TOAST, although it is required to achieve good parallel performance on systems with many (e.g. 4 or more) cores. Most of the parallelism in TOAST is MPI process-based, not threaded. In this section we show how interactive use of TOAST can be done without any reference to MPI. In a separate notebook in this directory we show how to make use of distributed data and operations in parallel workflows.\n" ] }, { @@ -189,7 +185,11 @@ "\n", "samples = 10\n", "\n", - "ob = toast.Observation(telescope, name=\"2020-07-31_A\", samples=samples)\n", + "ob = toast.Observation(\n", + " telescope, \n", + " name=\"2020-07-31_A\", \n", + " n_samples=samples\n", + ")\n", "\n", "print(ob)" ] @@ -239,10 +239,35 @@ "source": [ "## Detector Data\n", "\n", - "Detector data has some unique properties that we often want to leverage in our analyses. Each process has some detectors and some time slice of the observation. In the case of a single process like this example, all the data is local. Before using data we need to create it within the empty Observation. Here we create a default \"signal\" object for the detectors. The detector data is accessed under the `detdata` attribute of the observation:\n", + "Detector data has some unique properties that we often want to leverage in our analyses. Each process has some detectors and some time slice of the observation. In the case of a single process like this example, all the data is local. Before using data we need to create it within the empty Observation. Here we create a \"signal\" object for the detectors. The detector data is accessed under the `detdata` attribute of the observation:\n", "\n", "\n", - "** FIXME: talk about naming conventions**" + "**FIXME: talk about naming conventions**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we create and initialize to zero some detector data named \"signal\". This has one value per sample per detector and each value is a 64bit float." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ob.detdata.create(\"signal\", dtype=np.float64)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(ob.detdata)" ] }, { @@ -251,13 +276,6 @@ "metadata": {}, "outputs": [], "source": [ - "# Create some signal\n", - "\n", - "ob.detdata.create(\"signal\")\n", - "\n", - "# Check the contents of the detector data\n", - "\n", - "print(ob.detdata)\n", "print(ob.detdata[\"signal\"])" ] }, @@ -265,7 +283,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can create other types of detector data:" + "You can create other types of detector data, and there is some shortcut notation that can be used to create detector data objects from existing arrays. For example:" ] }, { @@ -274,17 +292,31 @@ "metadata": {}, "outputs": [], "source": [ - "ob.detdata.create(\"calibrated\")\n", - "print(ob.detdata)\n", + "# This takes an existing N_detector x N_sample array and creates from that\n", + "\n", + "some_data = 3.0 * np.ones(\n", + " (\n", + " len(ob.local_detectors), \n", + " ob.n_local_samples\n", + " ),\n", + " dtype=np.float32\n", + ")\n", "\n", - "# FIXME: arrays initialized to zero" + "ob.detdata[\"existing_signal\"] = some_data\n", + "print(ob.detdata[\"existing_signal\"])" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "By default you will get detector data with one element per sample and float64 dtype. You can changes this:" + "# This takes one detectors-worth of data and replicates it to all detectors\n", + "# while creating a new data object.\n", + "\n", + "ob.detdata[\"replicated\"] = 5 * np.ones(ob.n_local_samples, dtype=np.int32)\n", + "print(ob.detdata[\"replicated\"])" ] }, { @@ -293,11 +325,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Example of data with different shape and dtype\n", + "# You can also create detector data objects from a dictionary\n", + "# of single-detector arrays\n", + "other = dict()\n", + "for i, d in enumerate(ob.local_detectors):\n", + " other[d] = i * np.ones(ob.n_local_samples, dtype=np.int32)\n", "\n", - "ob.detdata.create(\"pointing\", shape=(ob.n_sample, 4), dtype=np.float32)\n", - "print(ob.detdata)\n", - "print(ob.detdata[\"pointing\"])" + "ob.detdata[\"other_signal\"] = other\n", + "print(ob.detdata[\"other_signal\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By default you will get detector data with one element per sample and float64 dtype. However, you can specify the shape of each detector sample:" ] }, { @@ -306,11 +348,10 @@ "metadata": {}, "outputs": [], "source": [ - "# Another example for flags\n", + "# Example of data with different shape\n", "\n", - "ob.detdata.create(\"flags\", dtype=np.uint16)\n", - "print(ob.detdata)\n", - "print(ob.detdata[\"flags\"])" + "ob.detdata.create(\"pointing\", detshape=(4,), dtype=np.float32)\n", + "print(ob.detdata[\"pointing\"])" ] }, { @@ -356,7 +397,8 @@ "source": [ "## Shared Data\n", "\n", - "Many types of data are common to multiple detectors. Some examples would be telescope pointing, timestamps, other sensor data, etc. When running in parallel we want to have just one copy of this data per node in order to save memory. The shared data is accessed under the \"shared\" attribute of the observation. 
For this serial notebook, you will not need to worry about the details of communicators, but when running in parallel it becomes important. For this reason we will use some helper functions to create some standard shared objects:" + "Many types of data are common to multiple detectors. Some examples would be telescope pointing, timestamps, other sensor data, etc. When running in parallel we want to have just one copy of this data per node in order to save memory. The shared data is accessed under the \"shared\" attribute of the observation. For this serial notebook, you will not need to worry about the details of communicators, but when running in parallel it becomes important. \n", + "For this serial notebook, the `shared` attribute will look very much like a dictionary of numpy arrays. See the \"parallel\" intro notebook for more examples of using shared data when each observation is distributed across a grid of processes." ] }, { @@ -365,19 +407,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Equivalent to:\n", - "# ob.shared.create(\"times\", shape=(ob.n_sample,), dtype=np.float64, comm=ob.comm_col)\n", - "ob.shared.create_times()\n", + "# Create some time stamps by assigning from an existing array on one process.\n", + "# When running with multiple processes, this syntax has extra communication.\n", + "ob.shared[\"times\"] = np.arange(ob.n_local_samples, dtype=np.float64)\n", + "print(ob.shared[\"times\"])\n", "\n", - "# Equivalent to:\n", - "# ob.shared.create(\"flags\", shape=(ob.n_sample,), dtype=np.uint8, comm=ob.comm_col)\n", - "ob.shared.create_flags()\n", - "\n", - "# Equivalent to:\n", - "# ob.shared.create(\"boresight_radec\", shape=(ob.n_sample, 4), dtype=np.float64, comm=ob.comm_col)\n", - "ob.shared.create_boresight_radec()\n", - "\n", - "print(ob.shared)\n", + "# Create and initialize to zero some boresight quaternions\n", + "ob.shared.create(\"boresight_radec\", shape=(ob.n_local_samples, 4), dtype=np.float64)\n", "print(ob.shared[\"boresight_radec\"])" ] }, @@ -385,7 +421,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can see that the data objects are a special \"MPIShared\" object from the `pshmem` package. Shared data objects can be read with slicing notation just like normal numpy arrays:" + "You can see that the data objects are a special \"MPIShared\" object from the [`pshmem`](https://pypi.org/project/pshmem/) package. Shared data objects can be read with slicing notation just like normal numpy arrays:" ] }, { @@ -401,7 +437,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "However, they are intended to be \"write once\", \"read many\" objects. You cannot simply assign data to them. The reason is that the data is replicated across nodes and so setting array values must be a collective operation using the `set()` method:" + "However, they are intended to be \"write once\", \"read many\" objects. You cannot simply assign data to them. The reason is that the data is replicated across nodes and so setting array values must be a collective operation." 
] }, { @@ -411,23 +447,15 @@ "outputs": [], "source": [ "nullquat = np.array([0.0, 0.0, 0.0, 1.0])\n", + "full_data = np.tile(nullquat, ob.n_local_samples).reshape((-1, 4))\n", "\n", - "ob.shared[\"boresight_radec\"].set(np.tile(nullquat, ob.n_sample).reshape((-1, 4)))\n", + "# In the serial case, simple assignment works just like array assignment\n", + "ob.shared[\"boresight_radec\"][:] = full_data\n", "\n", - "pntg = ob.shared[\"boresight_radec\"]\n", - "\n", - "print(pntg[:])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ob.shared[\"times\"].set(np.arange(ob.n_sample, dtype=np.float64))\n", + "# When running with MPI, the set() method avoids some communication\n", + "ob.shared[\"boresight_radec\"].set(full_data, fromrank=0)\n", "\n", - "print(ob.shared[\"times\"][:])" + "print(ob.shared[\"boresight_radec\"])" ] }, { @@ -1044,286 +1072,6 @@ "print(new_simsat)" ] }, - { - "cell_type": "markdown", - "metadata": { - "toc-hr-collapsed": true - }, - "source": [ - "# Utilities\n", - "\n", - "There are many utilities in the TOAST package that use compiled code internally. These include:\n", - "\n", - "- `toast.rng`: Streamed random number generation, with support for generating random samples from any location within a stream.\n", - "\n", - "- `toast.qarray`: Vectorized quaternion operations.\n", - "\n", - "- `toast.fft`: API Wrapper around different vendor FFT packages.\n", - "\n", - "- `toast.healpix`: Subset of pixel projection routines, simd vectorized and threaded.\n", - "\n", - "- `toast.timing`: Simple serial timers, global named timers per process, a decorator to time calls to functions, and MPI tools to gather timing statistics from multiple processes.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Random Number Example\n", - "\n", - "Here is a quick example of a threaded generation of random numbers drawn from a unit-variance gaussian distribution. Note the \"key\" pair of uint64 values and the first value of the \"counter\" pair determine the stream, and the second value of the counter pair is effectively the sample in that stream. We can drawn randoms from anywhere in the stream in a reproducible fashion (i.e. this random generator is stateless). Under the hood, this uses the Random123 package on each thread." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import toast.rng as rng\n", - "\n", - "# Number of random samples\n", - "nrng = 10" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Draw randoms from the beginning of a stream\n", - "rng1 = rng.random(\n", - " nrng, key=[12, 34], counter=[56, 0], sampler=\"gaussian\", threads=True\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Draw randoms from some later starting point in the stream\n", - "rng2 = rng.random(\n", - " nrng, key=[12, 34], counter=[56, 4], sampler=\"gaussian\", threads=True\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# The returned objects are buffer providers, so can be used like a numpy array.\n", - "print(\"Returned RNG buffers:\")\n", - "print(rng1)\n", - "print(rng2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compare the elements. 
Note how the overlapping sample indices match. The\n", - "# randoms drawn for any given sample agree regardless of the starting sample.\n", - "print(\"------ rng1 ------\")\n", - "for i in range(nrng):\n", - " print(\"rng1 {}: {}\".format(i, rng1[i]))\n", - "print(\"------ rng2 ------\")\n", - "for i in range(nrng):\n", - " print(\"rng2 {}: {}\".format(i + 4, rng2[i]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Quaternion Array Example\n", - "\n", - "The quaternion manipulation functions internally attempt to improve performance using OpenMP SIMD directives and threading in cases where it makes sense. The Python API is modelled after the quaternionarray package (https://github.com/zonca/quaternionarray/). There are functions for common operations like multiplying quaternion arrays, rotating arrays of vectors, converting to and from angle representations, SLERP, etc." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import toast.qarray as qa\n", - "\n", - "# Number points for this example\n", - "\n", - "nqa = 5" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Make some fake rotation data by sweeping through theta / phi / pa angles\n", - "\n", - "theta = np.linspace(0.0, np.pi, num=nqa)\n", - "phi = np.linspace(0.0, 2 * np.pi, num=nqa)\n", - "pa = np.zeros(nqa)\n", - "print(\"----- input angles -----\")\n", - "print(\"theta = \", theta)\n", - "print(\"phi = \", phi)\n", - "print(\"pa = \", pa)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Convert to quaternions\n", - "\n", - "quat = qa.from_angles(theta, phi, pa)\n", - "\n", - "print(\"\\n----- output quaternions -----\")\n", - "print(quat)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Use these to rotate a vector\n", - "\n", - "zaxis = np.array([0.0, 0.0, 1.0])\n", - "zrot = qa.rotate(quat, zaxis)\n", - "\n", - "print(\"\\n---- Z-axis rotated by quaternions ----\")\n", - "print(zrot)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Rotate different vector by each quaternion\n", - "\n", - "zout = qa.rotate(quat, zrot)\n", - "\n", - "print(\"\\n---- Arbitrary vectors rotated by quaternions ----\")\n", - "print(zout)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Multiply two quaternion arrays\n", - "\n", - "qcopy = np.array(quat)\n", - "qout = qa.mult(quat, qcopy)\n", - "\n", - "print(\"\\n---- Product of two quaternion arrays ----\")\n", - "print(qout)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# SLERP quaternions\n", - "\n", - "qtime = 3.0 * np.arange(nqa)\n", - "qtargettime = np.arange(3.0 * (nqa - 1) + 1)\n", - "qslerped = qa.slerp(qtargettime, qtime, quat)\n", - "\n", - "print(\"\\n---- SLERP input ----\")\n", - "for t, q in zip(qtime, quat):\n", - " print(\"t = {} : {}\".format(t, q))\n", - " \n", - "print(\"\\n---- SLERP output ----\")\n", - "for t, q in zip(qtargettime, qslerped):\n", - " print(\"t = {} : {}\".format(t, q))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### FFT Example\n", - "\n", - "The internal FFT functions in TOAST are very limited and focus only on batched 1D Real 
FFTs. These are used for simulated noise generation and timestream filtering. Internally the compiled code can use either FFTW or MKL for the backend calculation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Number of batched FFTs\n", - "\n", - "nbatch = 5\n", - "\n", - "# FFT length\n", - "\n", - "nfft = 65536" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create some fake data\n", - "\n", - "infft = np.zeros((nbatch, nfft), dtype=np.float64)\n", - "for b in range(nbatch):\n", - " infft[b, :] = rng.random(nfft, key=[0, 0], counter=[b, 0], sampler=\"gaussian\")\n", - "\n", - "print(\"----- FFT input -----\")\n", - "print(infft)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Forward FFT\n", - "\n", - "outfft = toast.fft.r1d_forward(infft)\n", - "\n", - "print(\"\\n----- FFT output -----\")\n", - "print(outfft)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Reverse FFT\n", - "\n", - "backfft = toast.fft.r1d_backward(outfft)\n", - "\n", - "print(\"\\n----- FFT inverse output -----\")\n", - "print(backfft)" - ] - }, { "cell_type": "markdown", "metadata": {}, diff --git a/tutorial/01_Introduction/intro_parallel.ipynb b/tutorial/01_Introduction/intro_parallel.ipynb index 882534379..6d8a00133 100644 --- a/tutorial/01_Introduction/intro_parallel.ipynb +++ b/tutorial/01_Introduction/intro_parallel.ipynb @@ -8,7 +8,10 @@ "source": [ "# Parallel Processing\n", "\n", - "In the first introductory notebook we covered the basic TOAST data and processing models, including using them interactively in a serial notebook. In this notebook we will explore distributed data and processing." + "In the first introductory notebook we covered the basic TOAST data and processing models, including using them interactively in a serial notebook. In this notebook we will explore distributed data and processing.\n", + "\n", + "\n", + "**FIXME: reference parallel intro and document mpi4py use in jupyter**" ] }, { diff --git a/tutorial/01_Introduction/intro_utilities.ipynb b/tutorial/01_Introduction/intro_utilities.ipynb new file mode 100644 index 000000000..8789bf7d5 --- /dev/null +++ b/tutorial/01_Introduction/intro_utilities.ipynb @@ -0,0 +1,333 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "# Utilities\n", + "\n", + "In the general introduction we covered the basics of the TOAST data and processing model. In this notebook we cover several sets of utilities that are included within TOAST that can be used when constructing new Operators or working with the data interactively. Often these utilities make use of compiled code \"under the hood\" for performance. 
For example:\n",
+    "\n",
+    "- `toast.rng`: Streamed random number generation, with support for generating random samples from any location within a stream.\n",
+    "\n",
+    "- `toast.qarray`: Vectorized quaternion operations.\n",
+    "\n",
+    "- `toast.fft`: API wrapper around different vendor FFT packages.\n",
+    "\n",
+    "- `toast.healpix`: Subset of pixel projection routines, SIMD vectorized and threaded.\n",
+    "\n",
+    "- `toast.timing`: Simple serial timers, global named timers per process, a decorator to time calls to functions, and MPI tools to gather timing statistics from multiple processes.\n",
+    "\n",
+    "First we import some packages we will use in this notebook."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Built-in modules\n",
+    "import sys\n",
+    "import os\n",
+    "\n",
+    "# External modules\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import astropy.units as u\n",
+    "\n",
+    "# TOAST\n",
+    "import toast\n",
+    "\n",
+    "\n",
+    "# Capture C++ output in the jupyter cells\n",
+    "%load_ext wurlitzer\n",
+    "\n",
+    "# Display inline plots\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Random Number Example\n",
+    "\n",
+    "Here is a quick example of threaded generation of random numbers drawn from a unit-variance Gaussian distribution. Note that the \"key\" pair of uint64 values and the first value of the \"counter\" pair determine the stream, and the second value of the counter pair is effectively the sample in that stream. We can draw randoms from anywhere in the stream in a reproducible fashion (i.e. this random generator is stateless). Under the hood, this uses the Random123 package on each thread."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import toast.rng as rng\n",
+    "\n",
+    "# Number of random samples\n",
+    "nrng = 10"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Draw randoms from the beginning of a stream\n",
+    "rng1 = rng.random(\n",
+    "    nrng, key=[12, 34], counter=[56, 0], sampler=\"gaussian\", threads=True\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Draw randoms from some later starting point in the stream\n",
+    "rng2 = rng.random(\n",
+    "    nrng, key=[12, 34], counter=[56, 4], sampler=\"gaussian\", threads=True\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The returned objects are buffer providers, so can be used like a numpy array.\n",
+    "print(\"Returned RNG buffers:\")\n",
+    "print(rng1)\n",
+    "print(rng2)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Compare the elements.  Note how the overlapping sample indices match. 
The\n", + "# randoms drawn for any given sample agree regardless of the starting sample.\n", + "print(\"------ rng1 ------\")\n", + "for i in range(nrng):\n", + " print(\"rng1 {}: {}\".format(i, rng1[i]))\n", + "print(\"------ rng2 ------\")\n", + "for i in range(nrng):\n", + " print(\"rng2 {}: {}\".format(i + 4, rng2[i]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Quaternion Array Example\n", + "\n", + "The quaternion manipulation functions internally attempt to improve performance using OpenMP SIMD directives and threading in cases where it makes sense. The Python API is modelled after the quaternionarray package (https://github.com/zonca/quaternionarray/). There are functions for common operations like multiplying quaternion arrays, rotating arrays of vectors, converting to and from angle representations, SLERP, etc." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import toast.qarray as qa\n", + "\n", + "# Number points for this example\n", + "\n", + "nqa = 5" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make some fake rotation data by sweeping through theta / phi / pa angles\n", + "\n", + "theta = np.linspace(0.0, np.pi, num=nqa)\n", + "phi = np.linspace(0.0, 2 * np.pi, num=nqa)\n", + "pa = np.zeros(nqa)\n", + "print(\"----- input angles -----\")\n", + "print(\"theta = \", theta)\n", + "print(\"phi = \", phi)\n", + "print(\"pa = \", pa)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Convert to quaternions\n", + "\n", + "quat = qa.from_angles(theta, phi, pa)\n", + "\n", + "print(\"\\n----- output quaternions -----\")\n", + "print(quat)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use these to rotate a vector\n", + "\n", + "zaxis = np.array([0.0, 0.0, 1.0])\n", + "zrot = qa.rotate(quat, zaxis)\n", + "\n", + "print(\"\\n---- Z-axis rotated by quaternions ----\")\n", + "print(zrot)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Rotate different vector by each quaternion\n", + "\n", + "zout = qa.rotate(quat, zrot)\n", + "\n", + "print(\"\\n---- Arbitrary vectors rotated by quaternions ----\")\n", + "print(zout)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Multiply two quaternion arrays\n", + "\n", + "qcopy = np.array(quat)\n", + "qout = qa.mult(quat, qcopy)\n", + "\n", + "print(\"\\n---- Product of two quaternion arrays ----\")\n", + "print(qout)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# SLERP quaternions\n", + "\n", + "qtime = 3.0 * np.arange(nqa)\n", + "qtargettime = np.arange(3.0 * (nqa - 1) + 1)\n", + "qslerped = qa.slerp(qtargettime, qtime, quat)\n", + "\n", + "print(\"\\n---- SLERP input ----\")\n", + "for t, q in zip(qtime, quat):\n", + " print(\"t = {} : {}\".format(t, q))\n", + " \n", + "print(\"\\n---- SLERP output ----\")\n", + "for t, q in zip(qtargettime, qslerped):\n", + " print(\"t = {} : {}\".format(t, q))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### FFT Example\n", + "\n", + "The internal FFT functions in TOAST are very limited and focus only on batched 1D Real FFTs. 
These are used for simulated noise generation and timestream filtering. Internally the compiled code can use either FFTW or MKL for the backend calculation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Number of batched FFTs\n", + "\n", + "nbatch = 5\n", + "\n", + "# FFT length\n", + "\n", + "nfft = 65536" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create some fake data\n", + "\n", + "infft = np.zeros((nbatch, nfft), dtype=np.float64)\n", + "for b in range(nbatch):\n", + " infft[b, :] = rng.random(nfft, key=[0, 0], counter=[b, 0], sampler=\"gaussian\")\n", + "\n", + "print(\"----- FFT input -----\")\n", + "print(infft)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Forward FFT\n", + "\n", + "outfft = toast.fft.r1d_forward(infft)\n", + "\n", + "print(\"\\n----- FFT output -----\")\n", + "print(outfft)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Reverse FFT\n", + "\n", + "backfft = toast.fft.r1d_backward(outfft)\n", + "\n", + "print(\"\\n----- FFT inverse output -----\")\n", + "print(backfft)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From befe1c94aed65fd699d7602a76c476e8061ac304 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 29 Oct 2020 16:11:43 -0700 Subject: [PATCH 009/690] More work on API feedback. 
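In user code, the renamed traits and accessors below look roughly like this
(an illustrative sketch; it assumes the "ops" alias maps to toast.future_ops
as in the tutorial notebooks, and the output file name is arbitrary):

    import astropy.units as u
    import toast.config as tc
    import toast.future_ops as ops

    simsat = ops.SimSatellite(
        name="sim_satellite",
        num_observations=2,            # renamed from "n_observation"
        observation_time=5 * u.minute,
    )

    conf = simsat.get_config()         # instance config (was "config()")
    defaults = ops.SimSatellite.get_class_config()  # class defaults
    tc.dump_toml("sim_satellite.toml", conf)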
--- src/toast/future_ops/sim_satellite.py | 6 +++--- src/toast/observation.py | 15 ++++++------- tutorial/01_Introduction/intro.ipynb | 31 +++++++++++++-------------- 3 files changed, 25 insertions(+), 27 deletions(-) diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py index b38698dff..dbe8ea795 100644 --- a/src/toast/future_ops/sim_satellite.py +++ b/src/toast/future_ops/sim_satellite.py @@ -282,7 +282,7 @@ class SimSatellite(Operator): gap_time = Quantity(0.0 * u.hour, help="The gap between each observation") - n_observation = Int(1, help="The number of observations to simulate") + num_observations = Int(1, help="The number of observations to simulate") spin_period = Quantity( 10.0 * u.minute, help="The period of the rotation about the spin axis" @@ -373,12 +373,12 @@ def _exec(self, data, detectors=None, **kwargs): # Distribute the observations uniformly among groups - groupdist = distribute_uniform(self.n_observation, comm.ngroups) + groupdist = distribute_uniform(self.num_observations, comm.ngroups) # Compute global time and sample ranges of all observations obsrange = regular_intervals( - self.n_observation, + self.num_observations, self.start_time.to_value(u.second), 0, focalplane.sample_rate, diff --git a/src/toast/observation.py b/src/toast/observation.py index ceba6f9a9..9dbb48c07 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -181,13 +181,13 @@ def __del__(self): def _det_axis_view(self, key): if isinstance(key, (int, np.integer)): # Just one detector by index - view = (key,) + view = key elif isinstance(key, str): # Just one detector by name - view = (self._name2idx[key],) + view = self._name2idx[key] elif isinstance(key, slice): # We are slicing detectors by index - view = (key,) + view = key else: # Assume that our key is at least iterable try: @@ -206,19 +206,18 @@ def _det_axis_view(self, key): return view def _get_view(self, key): - if isinstance(key, tuple): + if isinstance(key, (tuple, Mapping)): # We are slicing in both detector and sample dimensions if len(key) > len(self._shape): msg = "DetectorData has only {} dimensions".format(len(self._shape)) log.error(msg) raise TypeError(msg) - detview = self._det_axis_view(key[0]) - view = detview + view = [self._det_axis_view(key[0])] for k in key[1:]: - view += (k,) + view.append(k) # for s in range(len(self._shape) - len(key)): # view += (slice(None, None, None),) - return view + return tuple(view) else: # Only detector slice view = self._det_axis_view(key) diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb index 3b821201b..29eca691e 100644 --- a/tutorial/01_Introduction/intro.ipynb +++ b/tutorial/01_Introduction/intro.ipynb @@ -682,7 +682,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can access shared data as well with this view, but it is read-only from the view (the `set()` method of the shared objects must be used to collectively set that data):" + "We can access shared data as well with this view, but it is read-only from the view (the `set()` method of the shared objects or a collective assignment must be used to modify shared data):" ] }, { @@ -768,8 +768,8 @@ "outputs": [], "source": [ "simsat = ops.SimSatellite(\n", - " n_observation=2, \n", - " observation_time=(5 * u.minute),\n", + " num_observations=2, \n", + " observation_time=5 * u.minute,\n", ")\n", "\n", "print(simsat)" @@ -790,8 +790,8 @@ "source": [ "other_simsat = ops.SimSatellite(\n", " name=\"other_simsat\",\n", - " n_observation=2, 
\n", - " observation_time=(5 * u.minute),\n", + " num_observations=2, \n", + " observation_time=5 * u.minute,\n", ")\n", "\n", "print(other_simsat)" @@ -811,7 +811,7 @@ "outputs": [], "source": [ "simsat.telescope = telescope\n", - "simsat.n_observation = 3\n", + "simsat.num_observations = 3\n", "\n", "print(simsat)" ] @@ -881,8 +881,8 @@ "outputs": [], "source": [ "simsat = ops.SimSatellite(\n", - " n_observation=2, \n", - " observation_time=(5 * u.minute),\n", + " num_observations=2, \n", + " observation_time=5 * u.minute,\n", " telescope=telescope\n", ")\n", "\n", @@ -942,9 +942,8 @@ "metadata": {}, "outputs": [], "source": [ - "# FIXME: import toast.config as tc\n", + "import toast.config as tc\n", "\n", - "from toast.config import dump_toml, load_toml, load_json, dump_json, load_config, create\n", "import tempfile\n", "from pprint import PrettyPrinter\n", "\n", @@ -970,7 +969,7 @@ "source": [ "# This gives us the config for an existing instance\n", "\n", - "conf = other_simsat.config()\n", + "conf = other_simsat.get_config()\n", "pp.pprint(conf)" ] }, @@ -982,7 +981,7 @@ "source": [ "# This gives us the default config values for a class\n", "\n", - "default_conf = ops.SimSatellite.class_config()\n", + "default_conf = ops.SimSatellite.get_class_config()\n", "pp.pprint(default_conf)" ] }, @@ -992,8 +991,8 @@ "metadata": {}, "outputs": [], "source": [ - "dump_toml(toml_file, conf)\n", - "dump_json(json_file, conf)" + "tc.dump_toml(toml_file, conf)\n", + "tc.dump_json(json_file, conf)" ] }, { @@ -1034,7 +1033,7 @@ "metadata": {}, "outputs": [], "source": [ - "newconf = load_config(toml_file)\n", + "newconf = tc.load_config(toml_file)\n", "pp.pprint(newconf)" ] }, @@ -1051,7 +1050,7 @@ "metadata": {}, "outputs": [], "source": [ - "run = create(newconf)\n", + "run = tc.create(newconf)\n", "print(run)" ] }, From 67abba04b07bb9db316b6ee3b57010162ab1a39f Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 29 Oct 2020 16:22:14 -0700 Subject: [PATCH 010/690] Use the new apply() method and document operators a bit more. --- tutorial/01_Introduction/intro.ipynb | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb index 29eca691e..ff92f6ba5 100644 --- a/tutorial/01_Introduction/intro.ipynb +++ b/tutorial/01_Introduction/intro.ipynb @@ -740,7 +740,7 @@ "\n", "The TOAST processing model consists of `Operator` class instances running in a sequence on a subset of data. These sequences could be nested within other sequences (see the `Pipeline` operator below).\n", "\n", - "The Operator base class defines the interfaces for operators working on data. Operators are configured by defining class traits (attributes) which can be set during construction. An operator has an exec() method that works with Data objects. We will start by looking at the SimSatellite operator to simulate fake telescope scan strategies for a generic satellite. We can always see the options and default values by using the standard help function or the '?' command:\n" + "The Operator base class defines the interfaces for operators working on data. Operators are configured by defining class traits (attributes) which can be set during construction. An operator has an `exec()` method that works with Data objects (potentially just a subset of . We will start by looking at the SimSatellite operator to simulate fake telescope scan strategies for a generic satellite. 
We can always see the options and default values by using the standard help function or the '?' command:\n" ] }, { @@ -829,8 +829,10 @@ "metadata": {}, "outputs": [], "source": [ - "simsat.exec(data)\n", - "simsat.finalize(data)" + "# This is equivalent to single call to \"exec()\" with all processes,\n", + "# and then a call to \"finalize()\".\n", + "\n", + "simsat.apply(data)" ] }, { @@ -846,7 +848,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You may be tempted to make a wrapper function around the `exec()` and `finalize()` methods, but these are separate for a reason. The `exec()` method might be called multiple times with subsets of the data and the `finalize()` method is only called once. In the previous example, we just happen to be making one call to exec()." + "For this trivial case, we use the `apply()` method of the operator, which simply calls `exec()` once and then `finalize()`. When running a more complicated pipeline, the `exec()` method might be called multiple times on different detector sets (for example) before calling `finalize()`." ] }, { @@ -914,8 +916,8 @@ "outputs": [], "source": [ "data = toast.Data()\n", - "pipe.exec(data)\n", - "pipe.finalize(data)\n", + "\n", + "pipe.apply(data)\n", "\n", "print(data)" ] From 5835e2962283237209b58c43bea2f874537af1b3 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 29 Oct 2020 23:41:48 -0700 Subject: [PATCH 011/690] Convert focaplane simulation and visualization to using units. --- src/toast/instrument_sim.py | 130 +++++++++++++++++---------- src/toast/tests/_helpers.py | 2 +- src/toast/vis.py | 15 +++- tutorial/01_Introduction/intro.ipynb | 47 ++++------ 4 files changed, 110 insertions(+), 84 deletions(-) diff --git a/src/toast/instrument_sim.py b/src/toast/instrument_sim.py index bab75ee6a..367bf40ed 100644 --- a/src/toast/instrument_sim.py +++ b/src/toast/instrument_sim.py @@ -4,10 +4,14 @@ import numpy as np +from astropy import units as u + from . import qarray as qa from .instrument import Focalplane +from .vis import set_matplotlib_backend + def cartesian_to_quat(offsets): """Convert cartesian angle offsets and rotation into quaternions. @@ -478,14 +482,36 @@ def rhombus_layout( def fake_hexagon_focalplane( n_pix=7, - width_deg=5.0, - samplerate=1.0, + width=5.0 * u.degree, + sample_rate=1.0 * u.Hz, epsilon=0.0, net=1.0, - fmin=0.0, + f_min=0.0 * u.Hz, alpha=1.0, - fknee=0.05, + f_knee=0.05 * u.Hz, ): + """Create a simple focalplane model for testing. + + This function creates a basic focalplane with hexagon-packed pixels, each with + two orthogonal detectors. It is intended for unit tests, benchmarking, etc where + a Focalplane is needed but the details are less important. + + Args: + n_pix (int): The number of pixels with hexagonal packing + (e.g. 1, 7, 19, 37, 61, etc). + width (Quantity): The angular width of the focalplane field of view on the sky. + sample_rate (Quantity): The sample rate for all detectors. + epsilon (float): The cross-polar response for all detectors. + net (float): The Noise Equivalent Temperature of each detector. + f_min (Quantity): The frequency below which to roll off the 1/f spectrum. + alpha (float): The spectral slope. + f_knee (Quantity): The 1/f knee frequency. + + Returns: + (Focalplane): The fake focalplane. 
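    Example (an illustrative sketch; any of the arguments documented above
    can be adjusted in the same way)::

        fp = fake_hexagon_focalplane(
            n_pix=7,
            width=5.0 * u.degree,
            sample_rate=10.0 * u.Hz,
        )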
+ + """ + width_deg = width.to_value(u.degree) pol_A = hex_pol_angles_qu(n_pix, offset=0.0) pol_B = hex_pol_angles_qu(n_pix, offset=90.0) quat_A = hex_layout(n_pix, width_deg, "D", "A", pol_A) @@ -499,63 +525,67 @@ def fake_hexagon_focalplane( for det in det_data.keys(): det_data[det]["pol_leakage"] = epsilon - det_data[det]["fmin"] = fmin - det_data[det]["fknee"] = fknee + det_data[det]["fmin"] = f_min.to_value(u.Hz) + det_data[det]["fknee"] = f_knee.to_value(u.Hz) det_data[det]["alpha"] = alpha det_data[det]["NET"] = net det_data[det]["fwhm_arcmin"] = detfwhm - det_data[det]["fsample"] = samplerate + det_data[det]["fsample"] = sample_rate.to_value(u.Hz) - return Focalplane(detector_data=det_data, sample_rate=samplerate) + return Focalplane(detector_data=det_data, sample_rate=sample_rate.to_value(u.Hz)) def plot_focalplane( - dets, width, height, outfile, fwhm=None, facecolor=None, polcolor=None, labels=None + focalplane=None, + width=None, + height=None, + outfile=None, + show_labels=False, + face_color=None, + pol_color=None, ): - """Visualize a dictionary of detectors. - - This makes a simple plot of the detector positions on the projected - focalplane. + """Visualize a projected Focalplane. - To avoid python overhead in large MPI jobs, we place the matplotlib - import inside this function, so that it is only imported when the - function is actually called. + This makes a simple plot of the detector positions on the projected focalplane. - If the detector dictionary contains a key "fwhm", that will be assumed - to be in arcminutes. Otherwise a nominal value is used. - - If the detector dictionary contains a key "viscolor", then that color - will be used. + To avoid python overhead in large MPI jobs, we place the matplotlib import inside + this function, so that it is only imported when the function is actually called. Args: - dets (dict): dictionary of detector quaternions. - width (float): width of plot in degrees. - height (float): height of plot in degrees. - outfile (str): output PNG path. If None, then matplotlib will be + focalplane (Focalplane): The focalplane to plot + width (Quantity): Width of plot. + height (Quantity): Height of plot. + outfile (str): Output PDF path. If None, then matplotlib will be used for inline plotting. - fwhm (dict): dictionary of detector beam FWHM in arcminutes, used - to draw the circles to scale. - facecolor (dict): dictionary of color values for the face of each + show_labels (bool): If True, plot detector names. + face_color (dict): dictionary of color values for the face of each detector circle. - polcolor (dict): dictionary of color values for the polarization + pol_color (dict): dictionary of color values for the polarization arrows. - labels (dict): plot this text in the center of each pixel. Returns: None """ + if focalplane is None: + raise RuntimeError("You must specify a Focalplane instance") + if outfile is not None: - import matplotlib - import warnings + set_matplotlib_backend(backend="pdf") - # Try to force matplotlib to not use any Xwindows backend. 
- warnings.filterwarnings("ignore") - matplotlib.use("Agg") import matplotlib.pyplot as plt - xfigsize = int(width) - yfigsize = int(height) + if width is None: + width = 10.0 * u.degree + + if height is None: + height = 10.0 * u.degree + + width_deg = width.to_value(u.degree) + height_deg = height.to_value(u.degree) + + xfigsize = int(width_deg) + yfigsize = int(height_deg) figdpi = 100 # Compute the font size to use for detector labels @@ -565,8 +595,8 @@ def plot_focalplane( fig = plt.figure(figsize=(xfigsize, yfigsize), dpi=figdpi) ax = fig.add_subplot(1, 1, 1) - half_width = 0.5 * width - half_height = 0.5 * height + half_width = 0.5 * width_deg + half_height = 0.5 * height_deg ax.set_xlabel("Degrees", fontsize="large") ax.set_ylabel("Degrees", fontsize="large") ax.set_xlim([-half_width, half_width]) @@ -576,12 +606,14 @@ def plot_focalplane( yaxis = np.array([0.0, 1.0, 0.0], dtype=np.float64) zaxis = np.array([0.0, 0.0, 1.0], dtype=np.float64) - for d, quat in dets.items(): + for d in focalplane.detectors: + quat = focalplane[d]["quat"] + fwhm = focalplane[d]["fwhm_arcmin"] # radius in degrees detradius = 0.5 * 5.0 / 60.0 if fwhm is not None: - detradius = 0.5 * fwhm[d] / 60.0 + detradius = 0.5 * fwhm / 60.0 # rotation from boresight rdir = qa.rotate(quat, zaxis).flatten() @@ -595,8 +627,8 @@ def plot_focalplane( ypos = mag * np.sin(ang) detface = "none" - if facecolor is not None: - detface = facecolor[d] + if face_color is not None: + detface = face_color[d] circ = plt.Circle((xpos, ypos), radius=detradius, fc=detface, ec="k") ax.add_artist(circ) @@ -609,8 +641,8 @@ def plot_focalplane( dy = ascale * 2.0 * detradius * np.sin(polang) detcolor = "black" - if polcolor is not None: - detcolor = polcolor[d] + if pol_color is not None: + detcolor = pol_color[d] ax.arrow( xtail, @@ -625,15 +657,15 @@ def plot_focalplane( length_includes_head=True, ) - if labels is not None: + if show_labels: xsgn = 1.0 if dx < 0.0: xsgn = -1.0 - labeloff = 0.05 * xsgn * fontpix * len(labels[d]) / figdpi + labeloff = 0.05 * xsgn * fontpix * len(d) / figdpi ax.text( (xtail + 1.1 * dx + labeloff), (ytail + 1.1 * dy), - labels[d], + d, color="k", fontsize=fontpt, horizontalalignment="center", @@ -644,6 +676,6 @@ def plot_focalplane( if outfile is None: plt.show() else: - plt.savefig(outfile) + plt.savefig(outfile, dpi=300, format="pdf") plt.close() return fig diff --git a/src/toast/tests/_helpers.py b/src/toast/tests/_helpers.py index dd41b049d..73dfcc580 100644 --- a/src/toast/tests/_helpers.py +++ b/src/toast/tests/_helpers.py @@ -113,7 +113,7 @@ def create_distdata(mpicomm, obs_per_group=1, samples=10): # FIXME: for full testing we should set detranks as approximately the sqrt # of the grid size so that we test the row / col communicators. ob = Observation( - tele, samples=samples, name=oname, UID=oid, comm=toastcomm.comm_group + tele, n_samples=samples, name=oname, UID=oid, comm=toastcomm.comm_group ) data.obs.append(ob) return data diff --git a/src/toast/vis.py b/src/toast/vis.py index ef553bb2f..356d42361 100644 --- a/src/toast/vis.py +++ b/src/toast/vis.py @@ -2,14 +2,21 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. 
+import warnings + _matplotlib_backend = None -def set_backend(backend="agg"): +def set_matplotlib_backend(backend="pdf"): + """Set the matplotlib backend.""" global _matplotlib_backend - if _matplotlib_backend is None: + if _matplotlib_backend is not None: + return + try: _matplotlib_backend = backend import matplotlib - matplotlib.use(_matplotlib_backend) - return + matplotlib.use(_matplotlib_backend, warn=False) + except: + msg = "Could not set the matplotlib backend to '{}'".format(_matplotlib_backend) + warnings.warn(msg) diff --git a/tutorial/01_Introduction/intro.ipynb b/tutorial/01_Introduction/intro.ipynb index ff92f6ba5..7aabe795a 100644 --- a/tutorial/01_Introduction/intro.ipynb +++ b/tutorial/01_Introduction/intro.ipynb @@ -115,21 +115,19 @@ ")\n", "\n", "focalplane_pixels = 7 # (hexagonal, pixel zero at center)\n", - "field_of_view = 10.0 # degrees\n", - "sample_rate = 10.0 # Hz\n", + "field_of_view = 10.0 * u.degree\n", + "sample_rate = 10.0 * u.Hz\n", "\n", "focalplane = fake_hexagon_focalplane(\n", - " focalplane_pixels,\n", - " field_of_view,\n", - " samplerate=10.0,\n", + " n_pix=focalplane_pixels,\n", + " width=field_of_view,\n", + " sample_rate=sample_rate,\n", " epsilon=0.0,\n", " net=1.0,\n", - " fmin=1.0e-5,\n", + " f_min=1.0e-5 * u.Hz,\n", " alpha=1.0,\n", - " fknee=0.05,\n", - ")\n", - "\n", - "# FIXME: add units throughout the codebase\n" + " f_knee=0.05 * u.Hz,\n", + ")\n" ] }, { @@ -140,20 +138,16 @@ "source": [ "# Make a plot of this focalplane layout.\n", "\n", - "detnames = focalplane.detectors\n", - "detquat = {x: focalplane[x][\"quat\"] for x in detnames}\n", - "detfwhm = {x: focalplane[x][\"fwhm_arcmin\"] for x in detnames}\n", - "detlabels = {x: x for x in detnames}\n", - "detpolcol = {x: \"red\" if i % 2 == 0 else \"blue\" for i, x in enumerate(detnames)}\n", + "detpolcol = {\n", + " x: \"red\" if x.endswith(\"A\") else \"blue\" for x in focalplane.detectors\n", + "}\n", "\n", "plot_focalplane(\n", - " detquat, \n", - " 1.3 * field_of_view, \n", - " 1.3 * field_of_view, \n", - " None, \n", - " fwhm=detfwhm, \n", - " polcolor=detpolcol, \n", - " labels=detlabels\n", + " focalplane=focalplane,\n", + " width=1.3 * field_of_view,\n", + " height=1.3 * field_of_view,\n", + " show_labels=True,\n", + " pol_color=detpolcol\n", ")" ] }, @@ -740,7 +734,7 @@ "\n", "The TOAST processing model consists of `Operator` class instances running in a sequence on a subset of data. These sequences could be nested within other sequences (see the `Pipeline` operator below).\n", "\n", - "The Operator base class defines the interfaces for operators working on data. Operators are configured by defining class traits (attributes) which can be set during construction. An operator has an `exec()` method that works with Data objects (potentially just a subset of . We will start by looking at the SimSatellite operator to simulate fake telescope scan strategies for a generic satellite. We can always see the options and default values by using the standard help function or the '?' command:\n" + "The Operator base class defines the interfaces for operators working on data. Operators are configured by defining class traits (attributes) which can be set during construction. An operator has an `exec()` method that works with Data objects (potentially just a subset of the data). Operators also have a `finalize()` method which is designed to do any final calculations after all passes through the timestream data are done. 
We will start by looking at the SimSatellite operator to simulate fake telescope scan strategies for a generic satellite. We can always see the options and default values by using the standard help function or the '?' command:\n" ] }, { @@ -1103,13 +1097,6 @@ "# Now run **ALL** the (serial) tests\n", "# toast.tests.run()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { From b920c1c0025ae5e55212a65558791cb31086c5ed Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Fri, 30 Oct 2020 00:34:22 -0700 Subject: [PATCH 012/690] Restore and fix data distribution unit tests. --- src/toast/data.py | 154 ++------------------------------------ src/toast/tests/dist.py | 21 ++++-- src/toast/tests/runner.py | 17 ++--- src/toast/vis.py | 2 +- 4 files changed, 26 insertions(+), 168 deletions(-) diff --git a/src/toast/data.py b/src/toast/data.py index 071175cb3..4b8c0790d 100644 --- a/src/toast/data.py +++ b/src/toast/data.py @@ -53,34 +53,25 @@ def __repr__(self): @property def comm(self): - """The toast.Comm over which the data is distributed. - """ + """The toast.Comm over which the data is distributed.""" return self._comm def clear(self): - """Clear the list of observations. - """ + """Clear the list of observations.""" for ob in self.obs: ob.clear() self.obs.clear() return - def info(self, handle=None, flag_mask=255, common_flag_mask=255, intervals=None): + def info(self, handle=None): """Print information about the distributed data. Information is written to the specified file handle. Only the rank 0 - process writes. Optional flag masks are used when computing the - number of good samples. + process writes. Args: handle (descriptor): file descriptor supporting the write() method. If None, use print(). - flag_mask (int): bit mask to use when computing the number of - good detector samples. - common_flag_mask (int): bit mask to use when computing the - number of good telescope pointings. - intervals (str): optional name of an intervals object to print - from each observation. Returns: None @@ -118,143 +109,10 @@ def _get_optional(k, dt): return None for ob in self.obs: - id = None - name = None - try: - id = ob.UID - name = ob.name - except: - id = ob["id"] - name = ob["name"] - tod = _get_optional("tod", ob) - intrvl = None - if intervals is not None: - _get_optional(intervals, ob) - if self._comm.group_rank == 0: - groupstr = "observation {} (UID = {}):\n".format(name, id) - for ko in sorted(ob.keys()): - groupstr = "{} key {}\n".format(groupstr, ko) - if tod is not None: - groupstr = "{} {} total samples, {} detectors\n".format( - groupstr, tod.total_samples, len(tod.detectors) - ) - if intrvl is not None: - groupstr = "{} {} intervals:\n".format(groupstr, len(intrvl)) - for it in intrvl: - groupstr = "{} {} --> {} ({} --> {})\n".format( - groupstr, it.first, it.last, it.start, it.stop - ) - - # rank zero of the group will print general information, - # and each process will get its statistics. 
- - procstr = " proc {}\n".format(self._comm.group_rank) - if tod is not None: - offset, nsamp = tod.local_samples - dets = tod.local_dets - - my_chunks = 1 - if tod.local_chunks is not None: - my_chunks = tod.local_chunks[1] - procstr = "{} sample range {} --> {} in {} chunks:\n".format( - procstr, offset, (offset + nsamp - 1), my_chunks - ) - - if tod.local_chunks is not None: - chkoff = tod.local_samples[0] - for chk in range(tod.local_chunks[1]): - abschk = tod.local_chunks[0] + chk - chkstart = chkoff - chkstop = chkstart + tod.total_chunks[abschk] - 1 - procstr = "{} {} --> {}\n".format( - procstr, chkstart, chkstop - ) - chkoff += tod.total_chunks[abschk] - - if nsamp > 0: - stamps = tod.local_times() - - procstr = "{} timestamps {} --> {}\n".format( - procstr, stamps[0], stamps[-1] - ) - - common = tod.local_common_flags() - for dt in dets: - procstr = "{} det {}:\n".format(procstr, dt) - - pdata = tod.local_pointing(dt) - - procstr = ( - "{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] " - "--> [{:.3e} {:.3e} {:.3e} {:.3e}]\n".format( - procstr, - pdata[0, 0], - pdata[0, 1], - pdata[0, 2], - pdata[0, 3], - pdata[-1, 0], - pdata[-1, 1], - pdata[-1, 2], - pdata[-1, 3], - ) - ) - - data = tod.local_signal(dt) - flags = tod.local_flags(dt) - procstr = "{} {:.3e} ({}) --> {:.3e} ({})\n".format( - procstr, data[0], flags[0], data[-1], flags[-1] - ) - good = np.where( - ((flags & flag_mask) | (common & common_flag_mask)) == 0 - )[0] - procstr = "{} {} good samples\n".format( - procstr, len(good) - ) - try: - min = np.min(data[good]) - max = np.max(data[good]) - mean = np.mean(data[good]) - rms = np.std(data[good]) - procstr = ( - "{} min = {:.4e}, max = {:.4e}," - " mean = {:.4e}, rms = {:.4e}\n".format( - procstr, min, max, mean, rms - ) - ) - except FloatingPointError: - procstr = ( - "{} min = N/A, max = N/A, " - "mean = N/A, rms = N/A\n".format(procstr) - ) - - for cname in tod.cache.keys(): - procstr = "{} cache {}:\n".format(procstr, cname) - ref = tod.cache.reference(cname) - min = np.min(ref) - max = np.max(ref) - mean = np.mean(ref) - rms = np.std(ref) - procstr = ( - "{} min = {:.4e}, max = {:.4e}, " - "mean = {:.4e}, rms = {:.4e}\n".format( - procstr, min, max, mean, rms - ) - ) - - recvstr = "" - if self._comm.group_rank == 0: - groupstr = "{}{}".format(groupstr, procstr) - if gcomm is not None: - for p in range(1, self._comm.group_size): - if gcomm.rank == 0: - recvstr = gcomm.recv(source=p, tag=p) - groupstr = "{}{}".format(groupstr, recvstr) - elif p == gcomm.rank: - gcomm.send(procstr, dest=0, tag=p) - gcomm.barrier() + groupstr = "{}{}\n".format(groupstr, str(ob)) - # the world rank 0 process collects output from all groups and + # The world rank 0 process collects output from all groups and # writes to the handle recvgrp = "" diff --git a/src/toast/tests/dist.py b/src/toast/tests/dist.py index bf515f146..a7b778f34 100644 --- a/src/toast/tests/dist.py +++ b/src/toast/tests/dist.py @@ -11,9 +11,10 @@ from ..dist import distribute_uniform, distribute_discrete from ..data import Data +from ..observation import Observation from ..mpi import Comm, MPI -from ._helpers import create_outdir, create_distdata +from ._helpers import create_outdir, create_distdata, create_comm, create_telescope class DataTest(MPITestCase): @@ -152,13 +153,17 @@ def test_construction(self): return def test_split(self): - data = Data(self.data.comm) - data.obs.append({"site": "Atacama", "season": 1}) - data.obs.append({"site": "Atacama", "season": 2}) - data.obs.append({"site": "Atacama", "season": 3}) - 
data.obs.append({"site": "Pole", "season": 1}) - data.obs.append({"site": "Pole", "season": 2}) - data.obs.append({"site": "Pole", "season": 3}) + toastcomm = create_comm(self.comm) + tele = create_telescope(toastcomm.group_size) + data = Data(toastcomm) + for season in range(3): + data.obs.append(Observation(tele, 10, comm=toastcomm.comm_group)) + data.obs[-1]["site"] = "Atacama" + data.obs[-1]["season"] = season + for season in range(3): + data.obs.append(Observation(tele, 10, comm=toastcomm.comm_group)) + data.obs[-1]["site"] = "Pole" + data.obs[-1]["season"] = season datasplit_site = data.split("site") datasplit_season = data.split("season") diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 88f80e905..2b40d8f07 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -10,7 +10,7 @@ from .mpi import MPITestRunner -from ..vis import set_backend +from ..vis import set_matplotlib_backend from .._libtoast import libtoast_tests @@ -25,6 +25,8 @@ from . import observation as testobs +from . import dist as testdist + from . import config as testconfig from . import ops_sim_satellite as testsimsat @@ -33,10 +35,6 @@ # # from . import cache as testcache # -# -# -# from . import dist as testdist -# # from . import tod as testtod # # from . import psd_math as testpsdmath @@ -98,7 +96,7 @@ def test(name=None, verbosity=2): comm = MPI.COMM_WORLD rank = comm.rank - set_backend() + set_matplotlib_backend(backend="agg") outdir = "toast_test_output" @@ -133,18 +131,15 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(testqarray)) suite.addTest(loader.loadTestsFromModule(testintervals)) suite.addTest(loader.loadTestsFromModule(testpixels)) - suite.addTest(loader.loadTestsFromModule(testobs)) + suite.addTest(loader.loadTestsFromModule(testobs)) + suite.addTest(loader.loadTestsFromModule(testdist)) suite.addTest(loader.loadTestsFromModule(testconfig)) suite.addTest(loader.loadTestsFromModule(testsimsat)) # suite.addTest(loader.loadTestsFromModule(testcache)) # - # - # - # suite.addTest(loader.loadTestsFromModule(testdist)) - # # suite.addTest(loader.loadTestsFromModule(testtod)) # suite.addTest(loader.loadTestsFromModule(testtodsat)) # diff --git a/src/toast/vis.py b/src/toast/vis.py index 356d42361..15f9e89ed 100644 --- a/src/toast/vis.py +++ b/src/toast/vis.py @@ -16,7 +16,7 @@ def set_matplotlib_backend(backend="pdf"): _matplotlib_backend = backend import matplotlib - matplotlib.use(_matplotlib_backend, warn=False) + matplotlib.use(_matplotlib_backend, force=False) except: msg = "Could not set the matplotlib backend to '{}'".format(_matplotlib_backend) warnings.warn(msg) From db1a468ffc163db5505c8805f9758fdd858d87c5 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Fri, 30 Oct 2020 00:57:25 -0700 Subject: [PATCH 013/690] Port map domain covariance functions. 
--- src/toast/CMakeLists.txt | 1 + src/toast/covariance.py | 179 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+) create mode 100644 src/toast/covariance.py diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 635f0dc40..37e9b915e 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -87,6 +87,7 @@ install(FILES config.py pixels.py pixels_io.py + covariance.py cuda.py dist.py data.py diff --git a/src/toast/covariance.py b/src/toast/covariance.py new file mode 100644 index 000000000..11236855c --- /dev/null +++ b/src/toast/covariance.py @@ -0,0 +1,179 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +from .timing import function_timer + +from .operator import Operator + +from ._libtoast import ( + AlignedF64, + cov_mult_diag, + cov_apply_diag, + cov_eigendecompose_diag, +) + +from .pixels import PixelData + + +@function_timer +def covariance_invert(npp, threshold, rcond=None): + """Invert a diagonal noise covariance. + + This does an inversion of the covariance. The threshold is + applied to the condition number of each block of the matrix. Pixels + failing the cut are set to zero. + + Args: + npp (PixelData): The distributed covariance, with the lower triangle of the + symmetric matrix at each pixel. + threshold (float): The condition number threshold to apply. + rcond (PixelData): (Optional) The distributed inverse condition number map + to fill. + + Returns: + None + + """ + mapnnz = int(((np.sqrt(8 * npp.n_value) - 1) / 2) + 0.5) + nppdata = npp.raw + if nppdata is None: + nppdata = np.empty(shape=0, dtype=np.float64) + if rcond is not None: + if rcond.distribution.n_pix != npp.distribution.n_pix: + raise RuntimeError( + "covariance matrix and condition number map must have same number " + "of pixels" + ) + if rcond.distribution.n_pix_submap != npp.distribution.n_pix_submap: + raise RuntimeError( + "covariance matrix and condition number map must have same submap size" + ) + if rcond.n_value != 1: + raise RuntimeError("condition number map should have n_value = 1") + + rdata = rcond.raw + if rdata is None: + rdata = np.empty(shape=0, dtype=np.float64) + cov_eigendecompose_diag( + npp.n_local_submap, + npp.n_pix_submap, + mapnnz, + nppdata, + rdata, + threshold, + True, + ) + else: + temp = AlignedF64(npp.n_local_submap * npp.n_pix_submap) + cov_eigendecompose_diag( + npp.n_local_submap, npp.n_pix_submap, mapnnz, nppdata, temp, threshold, True + ) + temp.clear() + del temp + return + + +@function_timer +def covariance_multiply(npp1, npp2): + """Multiply two diagonal noise covariances. + + This does an in-place multiplication of the covariance. + The data values of the first covariance (npp1) are replaced with + the result. + + Args: + npp1 (PixelData): The first distributed covariance. + npp2 (PixelData): The second distributed covariance. 
+ + Returns: + None + + """ + mapnnz = int(((np.sqrt(8 * npp1.n_value) - 1) / 2) + 0.5) + + if npp1.n_pix != npp2.n_pix: + raise RuntimeError("covariance matrices must have same number of pixels") + if npp1.n_pix_submap != npp2.n_pix_submap: + raise RuntimeError("covariance matrices must have same submap size") + if npp1.n_value != npp2.n_value: + raise RuntimeError("covariance matrices must have same n_values") + + npp1data = npp1.raw + if npp1data is None: + npp1data = np.empty(shape=0, dtype=np.float64) + npp2data = npp2.raw + if npp2data is None: + npp2data = np.empty(shape=0, dtype=np.float64) + cov_mult_diag(npp1.n_submap, npp1.n_pix_submap, mapnnz, npp1data, npp2data) + return + + +@function_timer +def covariance_apply(npp, m): + """Multiply a map by a diagonal noise covariance. + + This does an in-place multiplication of the covariance and a + map. The results are returned in place of the input map. + + Args: + npp (PixelData): The distributed covariance. + m (PixelData): The distributed map. + + Returns: + None + + """ + mapnnz = int(((np.sqrt(8 * npp.n_value) - 1) / 2) + 0.5) + + if m.n_pix != npp.n_pix: + raise RuntimeError("covariance matrix and map must have same number of pixels") + if m.n_pix_submap != npp.n_pix_submap: + raise RuntimeError("covariance matrix and map must have same submap size") + if m.n_value != mapnnz: + raise RuntimeError("covariance matrix and map have incompatible NNZ values") + + nppdata = npp.raw + if nppdata is None: + nppdata = np.empty(shape=0, dtype=np.float64) + mdata = m.raw + if mdata is None: + mdata = np.empty(shape=0, dtype=np.float64) + cov_apply_diag(npp.n_submap, npp.n_pix_submap, mapnnz, nppdata, mdata) + return + + +@function_timer +def covariance_rcond(npp): + """Compute the inverse condition number map. + + This computes the inverse condition number map of the supplied + covariance matrix. + + Args: + npp (PixelData): The distributed covariance. + + Returns: + rcond (PixelData): The distributed inverse condition number map. + """ + mapnnz = int(((np.sqrt(8 * npp.n_value) - 1) / 2) + 0.5) + + rcond = PixelData(npp.distribution, np.float64, n_value=1) + + threshold = np.finfo(np.float64).eps + + nppdata = npp.raw + if nppdata is None: + nppdata = np.empty(shape=0, dtype=np.float64) + + rdata = rcond.raw + if rdata is None: + rdata = np.empty(shape=0, dtype=np.float64) + + cov_eigendecompose_diag( + npp.n_submap, npp.n_pix_submap, mapnnz, nppdata, rdata, threshold, False + ) + + return rcond From c7380d237e2757a3a9d846d033d594227ee78322 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Fri, 30 Oct 2020 13:50:09 -0700 Subject: [PATCH 014/690] Split up alltoallv communication into several steps. Support running user-defined local functions on the gathered submap buffers. Use this optionally in covariance operations to trade communication for calculation. --- src/toast/covariance.py | 209 +++++++++++++++++++++------ src/toast/pixels.py | 305 +++++++++++++++++++++++----------------- 2 files changed, 342 insertions(+), 172 deletions(-) diff --git a/src/toast/covariance.py b/src/toast/covariance.py index 11236855c..9762f0cd3 100644 --- a/src/toast/covariance.py +++ b/src/toast/covariance.py @@ -18,8 +18,52 @@ from .pixels import PixelData +def create_local_invert(n_pix_submap, mapnnz, threshold, rcond, invert=False): + """Generate a function for inverting locally owned submaps of a covariance. + + Args: + n_pix_submap (int): The number of pixels in a submap. + mapnnz (int): The number of map elements per pixel. 
+        threshold (float):  The condition number threshold to apply.
+        rcond (PixelData):  If not None, the inverse condition number PixelData object.
+        invert (bool):  If True, invert each submap block.  If False, only compute
+            the condition numbers.
+
+    Returns:
+        (function):  A function suitable for the sync_alltoallv() method.
+
+    """
+
+    def local_invert(n_submap_value, receive_locations, receive, reduce_buf):
+        # Locally invert owned submaps
+        for sm, locs in receive_locations.items():
+            # We have multiple copies of submap data - we will invert just the first
+            # one and copy the result into the other buffer locations to be sent
+            # back to the processes with this submap.
+            reduce_buf[:] = receive[locs[0] : locs[0] + n_submap_value]
+            rdata = None
+            if rcond is None:
+                rdata = np.empty(shape=0, dtype=np.float64)
+            else:
+                rcond.reduce_buf[:] = 0.0
+                rdata = rcond.reduce_buf
+            cov_eigendecompose_diag(
+                1,
+                n_pix_submap,
+                mapnnz,
+                reduce_buf,
+                rdata,
+                threshold,
+                invert,
+            )
+            for lc in locs:
+                receive[lc : lc + n_submap_value] = reduce_buf
+            if rcond is not None:
+                rcond.receive[lc : lc + (n_pix_submap * mapnnz)] = rcond.reduce_buf
+
+    return local_invert
+
+
 @function_timer
-def covariance_invert(npp, threshold, rcond=None):
+def covariance_invert(npp, threshold, rcond=None, use_alltoallv=False):
     """Invert a diagonal noise covariance.
 
     This does an inversion of the covariance.  The threshold is
@@ -32,6 +76,9 @@ def covariance_invert(npp, threshold, rcond=None):
         threshold (float): The condition number threshold to apply.
         rcond (PixelData): (Optional) The distributed inverse condition number map
             to fill.
+        use_alltoallv (bool): If True, communicate submaps and have every process work
+            on a portion of them.  This may be faster than processing all submaps
+            locally.
 
     Returns:
         None
@@ -54,30 +101,63 @@ def covariance_invert(npp, threshold, rcond=None):
         if rcond.n_value != 1:
             raise RuntimeError("condition number map should have n_value = 1")
 
-        rdata = rcond.raw
-        if rdata is None:
+    if use_alltoallv:
+        if rcond is not None:
+            # Stage data to receive buffer
+            rcond.forward_alltoallv()
+        linvert = create_local_invert(
+            npp.distribution.n_pix_submap, mapnnz, threshold, rcond, invert=True
+        )
+        npp.sync_alltoallv(local_func=linvert)
+    else:
+        rdata = None
+        if rcond is None:
             rdata = np.empty(shape=0, dtype=np.float64)
+        else:
+            rdata = rcond.raw
         cov_eigendecompose_diag(
             npp.n_local_submap,
             npp.n_pix_submap,
             mapnnz,
             nppdata,
             rdata,
             threshold,
             True,
         )
-    else:
-        temp = AlignedF64(npp.n_local_submap * npp.n_pix_submap)
-        cov_eigendecompose_diag(
-            npp.n_local_submap, npp.n_pix_submap, mapnnz, nppdata, temp, threshold, True
-        )
-        temp.clear()
-        del temp
-
     return
 
 
+def create_local_multiply(n_pix_submap, mapnnz, other):
+    """Generate a function for multiplying locally owned submaps of covariances.
+
+    Args:
+        n_pix_submap (int):  The number of pixels in a submap.
+        mapnnz (int):  The number of map elements per pixel.
+        other (PixelData):  The other PixelData covariance object.
+
+    Returns:
+        (function):  A function suitable for the sync_alltoallv() method.
+
+    """
+
+    def local_multiply(n_submap_value, receive_locations, receive, reduce_buf):
+        for sm, locs in receive_locations.items():
+            # We have multiple copies of submap data - we will multiply just the first
+            # one and copy the result into the other buffer locations to be sent
+            # back to the processes with this submap.
+            reduce_buf[:] = receive[locs[0] : locs[0] + n_submap_value]
+            other_buf = other.reduce_buf
+            other_buf[:] = other.receive[locs[0] : locs[0] + n_submap_value]
+            cov_mult_diag(1, n_pix_submap, mapnnz, reduce_buf, other_buf)
+            for lc in locs:
+                receive[lc : lc + n_submap_value] = reduce_buf
+
+    return local_multiply
+
+
 @function_timer
-def covariance_multiply(npp1, npp2):
+def covariance_multiply(npp1, npp2, use_alltoallv=False):
     """Multiply two diagonal noise covariances.
 
     This does an in-place multiplication of the covariance.
@@ -87,6 +167,9 @@ def covariance_multiply(npp1, npp2):
     Args:
         npp1 (PixelData): The first distributed covariance.
         npp2 (PixelData): The second distributed covariance.
+        use_alltoallv (bool): If True, communicate submaps and have every process work
+            on a portion of them.  This may be faster than processing all submaps
+            locally.
 
     Returns:
         None
@@ -101,18 +184,48 @@ def covariance_multiply(npp1, npp2):
     if npp1.n_value != npp2.n_value:
         raise RuntimeError("covariance matrices must have same n_values")
 
-    npp1data = npp1.raw
-    if npp1data is None:
-        npp1data = np.empty(shape=0, dtype=np.float64)
-    npp2data = npp2.raw
-    if npp2data is None:
-        npp2data = np.empty(shape=0, dtype=np.float64)
-    cov_mult_diag(npp1.n_submap, npp1.n_pix_submap, mapnnz, npp1data, npp2data)
+    if use_alltoallv:
+        npp2.forward_alltoallv()
+        lmultiply = create_local_multiply(npp1.distribution.n_pix_submap, mapnnz, npp2)
+        npp1.sync_alltoallv(local_func=lmultiply)
+    else:
+        npp1data = npp1.raw
+        npp2data = npp2.raw
+        cov_mult_diag(npp1.n_submap, npp1.n_pix_submap, mapnnz, npp1data, npp2data)
+
     return
 
 
+def create_local_apply(n_pix_submap, mapnnz, m):
+    """Generate a function for applying locally owned submaps of covariances.
+
+    Args:
+        n_pix_submap (int):  The number of pixels in a submap.
+        mapnnz (int):  The number of map elements per pixel.
+        m (PixelData):  The PixelData map object.
+
+    Returns:
+        (function):  A function suitable for the sync_alltoallv() method.
+
+    """
+
+    def local_apply(n_submap_value, receive_locations, receive, reduce_buf):
+        for sm, locs in receive_locations.items():
+            # We have multiple copies of submap data - we will multiply just the first
+            # one and copy the result into the other buffer locations to be sent
+            # back to the processes with this submap.
+            reduce_buf[:] = receive[locs[0] : locs[0] + n_submap_value]
+            m_buf = m.reduce_buf
+            m_buf[:] = m.receive[locs[0] : locs[0] + (n_pix_submap * mapnnz)]
+
+            cov_apply_diag(1, n_pix_submap, mapnnz, reduce_buf, m_buf)
+
+            for lc in locs:
+                m.receive[lc : lc + (n_pix_submap * mapnnz)] = m.reduce_buf
+
+    return local_apply
+
+
 @function_timer
-def covariance_apply(npp, m):
+def covariance_apply(npp, m, use_alltoallv=False):
     """Multiply a map by a diagonal noise covariance.
 
     This does an in-place multiplication of the covariance and a
@@ -121,6 +234,9 @@ def covariance_apply(npp, m):
     Args:
         npp (PixelData): The distributed covariance.
         m (PixelData): The distributed map.
+        use_alltoallv (bool): If True, communicate submaps and have every process work
+            on a portion of them.  This may be faster than processing all submaps
+            locally.
 
     Returns:
         None
@@ -135,18 +251,19 @@ def covariance_apply(npp, m):
     if m.n_value != mapnnz:
         raise RuntimeError("covariance matrix and map have incompatible NNZ values")
 
-    nppdata = npp.raw
-    if nppdata is None:
-        nppdata = np.empty(shape=0, dtype=np.float64)
-    mdata = m.raw
-    if mdata is None:
-        mdata = np.empty(shape=0, dtype=np.float64)
-    cov_apply_diag(npp.n_submap, npp.n_pix_submap, mapnnz, nppdata, mdata)
+    if use_alltoallv:
+        m.forward_alltoallv()
+        lapply = create_local_apply(npp.n_pix_submap, mapnnz, m)
+        npp.sync_alltoallv(local_func=lapply)
+    else:
+        nppdata = npp.raw
+        mdata = m.raw
+        cov_apply_diag(npp.n_submap, npp.n_pix_submap, mapnnz, nppdata, mdata)
     return
 
 
 @function_timer
-def covariance_rcond(npp):
+def covariance_rcond(npp, use_alltoallv=False):
     """Compute the inverse condition number map.
 
     This computes the inverse condition number map of the supplied
     covariance matrix.
 
     Args:
         npp (PixelData): The distributed covariance.
+        use_alltoallv (bool): If True, communicate submaps and have every process work
+            on a portion of them.  This may be faster than processing all submaps
+            locally.
 
     Returns:
         rcond (PixelData): The distributed inverse condition number map.
     """
     mapnnz = int(((np.sqrt(8 * npp.n_value) - 1) / 2) + 0.5)
 
     rcond = PixelData(npp.distribution, np.float64, n_value=1)
 
     threshold = np.finfo(np.float64).eps
 
-    nppdata = npp.raw
-    if nppdata is None:
-        nppdata = np.empty(shape=0, dtype=np.float64)
-
-    rdata = rcond.raw
-    if rdata is None:
-        rdata = np.empty(shape=0, dtype=np.float64)
-
-    cov_eigendecompose_diag(
-        npp.n_submap, npp.n_pix_submap, mapnnz, nppdata, rdata, threshold, False
-    )
+    if use_alltoallv:
+        rcond.setup_alltoallv()
+        linvert = create_local_invert(
+            npp.distribution.n_pix_submap, mapnnz, threshold, rcond, invert=False
+        )
+        npp.sync_alltoallv(local_func=linvert)
+    else:
+        nppdata = npp.raw
+        rdata = rcond.raw
+        cov_eigendecompose_diag(
+            npp.n_submap,
+            npp.n_pix_submap,
+            mapnnz,
+            nppdata,
+            rdata,
+            threshold,
+            False,
+        )
 
     return rcond
diff --git a/src/toast/pixels.py b/src/toast/pixels.py
index e24220db4..bd5974408 100644
--- a/src/toast/pixels.py
+++ b/src/toast/pixels.py
@@ -91,44 +91,37 @@ def __del__(self):
 
     @property
     def comm(self):
-        """(mpi4py.MPI.Comm): The MPI communicator used (or None)
-        """
+        """(mpi4py.MPI.Comm): The MPI communicator used (or None)"""
         return self._comm
 
     @property
     def n_pix(self):
-        """(int): The global number of pixels.
-        """
+        """(int): The global number of pixels."""
         return self._n_pix
 
     @property
    def n_pix_submap(self):
-        """(int): The number of pixels in each submap.
-        """
+        """(int): The number of pixels in each submap."""
         return self._n_pix_submap
 
     @property
     def n_submap(self):
-        """(int): The total number of submaps.
-        """
+        """(int): The total number of submaps."""
         return self._n_submap
 
     @property
     def n_local_submap(self):
-        """(int): The number of submaps stored on this process.
-        """
+        """(int): The number of submaps stored on this process."""
         return self._n_local
 
     @property
     def local_submaps(self):
-        """(array): The list of local submaps or None if process has no data.
-        """
+        """(array): The list of local submaps or None if process has no data."""
         return self._local_submaps
 
     @property
     def global_submap_to_local(self):
-        """(array): The mapping from global submap to local.
-        """
+        """(array): The mapping from global submap to local."""
         return self._glob2loc
 
     @function_timer
@@ -260,8 +253,7 @@ def submap_owners(self):
 
     @property
     def owned_submaps(self):
-        """The submaps owned by this process.
- """ + """The submaps owned by this process.""" if self._owned_submaps is not None: # Already computed return self._owned_submaps @@ -284,8 +276,6 @@ def alltoallv_info(self): - The locations in the receive buffer of each submap. """ - if self._comm is None: - return (None, None, None, None, None) if self._alltoallv_info is not None: # Already computed return self._alltoallv_info @@ -293,63 +283,64 @@ def alltoallv_info(self): owners = self.submap_owners our_submaps = self.owned_submaps - # Compute the other "contributing" processes that have submaps which we own. - # Also track the receive buffer offsets for each owned submap. - send = [list() for x in range(self._comm.size)] - for sm in self._local_submaps: - # Tell the owner of this submap that we are a contributor - send[owners[sm]].append(sm) - recv = self._comm.alltoall(send) - - recv_counts = np.zeros(self._comm.size, dtype=np.int32) - recv_displ = np.zeros(self._comm.size, dtype=np.int32) - recv_locations = dict() - - offset = 0 - for proc, sms in enumerate(recv): - recv_displ[proc] = offset - for sm in sms: - if sm not in recv_locations: - recv_locations[sm] = list() - recv_locations[sm].append(offset) - recv_counts[proc] += 1 - offset += 1 + send_counts = None + send_displ = None + recv_counts = None + recv_displ = None + recv_locations = None - for sm in list(recv_locations.keys()): - recv_locations[sm] = np.array(recv_locations[sm], dtype=np.int32) - - # print("rank {} recv_displ = {}".format(self._comm.rank, recv_displ), flush=True) - # print( - # "rank {} recv_counts = {}".format(self._comm.rank, recv_counts), flush=True - # ) - # print( - # "rank {} recv_locations = {}".format(self._comm.rank, recv_locations), - # flush=True, - # ) - - # Compute the Alltoallv send offsets in terms of submaps - send_counts = np.zeros(self._comm.size, dtype=np.int32) - send_displ = np.zeros(self._comm.size, dtype=np.int32) - offset = 0 - last_offset = 0 - last_own = -1 - for sm in self._local_submaps: - if last_own != owners[sm]: - # Moving on to next owning process... - if last_own >= 0: - send_displ[last_own] = last_offset - last_offset = offset - send_counts[owners[sm]] += 1 - offset += 1 - last_own = owners[sm] - if last_own >= 0: - # Finish up last process - send_displ[last_own] = last_offset - - # print("rank {} send_displ = {}".format(self._comm.rank, send_displ), flush=True) - # print( - # "rank {} send_counts = {}".format(self._comm.rank, send_counts), flush=True - # ) + if self._comm is None: + recv_counts = len(self._local_submaps) * np.ones(1, dtype=np.int32) + recv_displ = np.zeros(1, dtype=np.int32) + recv_locations = dict() + for offset, sm in enumerate(self._local_submaps): + recv_locations[sm] = np.array([offset], dtype=np.int32) + send_counts = len(self._local_submaps) * np.ones(1, dtype=np.int32) + send_displ = np.zeros(1, dtype=np.int32) + else: + # Compute the other "contributing" processes that have submaps which we own. + # Also track the receive buffer offsets for each owned submap. 
+ send = [list() for x in range(self._comm.size)] + for sm in self._local_submaps: + # Tell the owner of this submap that we are a contributor + send[owners[sm]].append(sm) + recv = self._comm.alltoall(send) + + recv_counts = np.zeros(self._comm.size, dtype=np.int32) + recv_displ = np.zeros(self._comm.size, dtype=np.int32) + recv_locations = dict() + + offset = 0 + for proc, sms in enumerate(recv): + recv_displ[proc] = offset + for sm in sms: + if sm not in recv_locations: + recv_locations[sm] = list() + recv_locations[sm].append(offset) + recv_counts[proc] += 1 + offset += 1 + + for sm in list(recv_locations.keys()): + recv_locations[sm] = np.array(recv_locations[sm], dtype=np.int32) + + # Compute the Alltoallv send offsets in terms of submaps + send_counts = np.zeros(self._comm.size, dtype=np.int32) + send_displ = np.zeros(self._comm.size, dtype=np.int32) + offset = 0 + last_offset = 0 + last_own = -1 + for sm in self._local_submaps: + if last_own != owners[sm]: + # Moving on to next owning process... + if last_own >= 0: + send_displ[last_own] = last_offset + last_offset = offset + send_counts[owners[sm]] += 1 + offset += 1 + last_own = owners[sm] + if last_own >= 0: + # Finish up last process + send_displ[last_own] = last_offset self._alltoallv_info = ( send_counts, @@ -446,9 +437,9 @@ def __init__(self, dist, dtype, n_value=1): self._recv_counts = None self._recv_displ = None self._recv_locations = None - self._receive = None + self.receive = None self._receive_raw = None - self._reduce_buf = None + self.reduce_buf = None self._reduce_buf_raw = None def clear(self): @@ -464,13 +455,13 @@ def clear(self): if hasattr(self, "raw"): self.raw.clear() del self.raw - if hasattr(self, "_receive"): - del self._receive + if hasattr(self, "receive"): + del self.receive if self._receive_raw is not None: self._receive_raw.clear() del self._receive_raw - if hasattr(self, "_reduce_buf"): - del self._reduce_buf + if hasattr(self, "reduce_buf"): + del self.reduce_buf if self._reduce_buf_raw is not None: self._reduce_buf_raw.clear() del self._reduce_buf_raw @@ -480,20 +471,17 @@ def __del__(self): @property def distribution(self): - """(PixelDistribution): The distribution information. - """ + """(PixelDistribution): The distribution information.""" return self._dist @property def dtype(self): - """(numpy.dtype): The data type of the values. - """ + """(numpy.dtype): The data type of the values.""" return self._dtype @property def n_value(self): - """(int): The number of non-zero values per pixel. - """ + """(int): The number of non-zero values per pixel.""" return self._n_value def __getitem__(self, key): @@ -605,22 +593,19 @@ def sync_allreduce(self, comm_bytes=10000000): return - @function_timer - def sync_alltoallv(self): - """Perform a reduction using Alltoallv operations. - - On the first call, some initialization is done to compute send and receive - displacements and counts. A persistent receive buffer is allocated. Submap - data is sent to their owners simultaneously using alltoallv. Each process does - a local reduction of their owned submaps before sending the result back with - another alltoallv call. - - Returns: - None. 
+ @staticmethod + def local_reduction(n_submap_value, receive_locations, receive, reduce_buf): + # Locally reduce owned submaps + for sm, locs in receive_locations.items(): + reduce_buf[:] = 0 + for lc in locs: + reduce_buf += receive[lc : lc + n_submap_value] + for lc in locs: + receive[lc : lc + n_submap_value] = reduce_buf - """ - if self._dist.comm is None: - return + @function_timer + def setup_alltoallv(self): + """Check that alltoallv buffers exist and create them if needed.""" if self._send_counts is None: # Get the parameters in terms of submaps. ( @@ -634,16 +619,6 @@ def sync_alltoallv(self): # Pixel values per submap scale = self._n_submap_value - # Check that our send and receive buffers do not exceed 32bit indices - # required by MPI - max_int = 2147483647 - if scale * (recv_displ[-1] + recv_counts[-1]) > max_int: - msg = "Alltoallv receive buffer size exceeds max 32bit integer" - raise RuntimeError(msg) - if len(self.raw) > max_int: - msg = "Alltoallv send buffer size exceeds max 32bit integer" - raise RuntimeError(msg) - # Scale these quantites by the submap size and the number of values per # pixel. @@ -655,39 +630,107 @@ def sync_alltoallv(self): for sm, locs in recv_locations.items(): self._recv_locations[sm] = scale * np.array(locs, dtype=np.int32) - # Allocate a persistent receive buffer - self._receive_raw = self.storage_class.zeros( - self._recv_displ[-1] + self._recv_counts[-1] - ) - self._receive = self._receive_raw.array() + # Allocate a persistent single-submap buffer self._reduce_buf_raw = self.storage_class.zeros(self._n_submap_value) - self._reduce_buf = self._reduce_buf_raw.array() + self.reduce_buf = self._reduce_buf_raw.array() + + if self._dist.comm is None: + # For this case, point the receive member to the original data. This + # will allow codes processing locally owned submaps to work + # transparently in the serial case. + self.receive = self.data + else: + # Check that our send and receive buffers do not exceed 32bit indices + # required by MPI + max_int = 2147483647 + if scale * (self._recv_displ[-1] + self._recv_counts[-1]) > max_int: + msg = "Alltoallv receive buffer size exceeds max 32bit integer" + raise RuntimeError(msg) + if len(self.raw) > max_int: + msg = "Alltoallv send buffer size exceeds max 32bit integer" + raise RuntimeError(msg) + + # Allocate a persistent receive buffer + self._receive_raw = self.storage_class.zeros( + self._recv_displ[-1] + self._recv_counts[-1] + ) + self.receive = self._receive_raw.array() + + @function_timer + def forward_alltoallv(self): + """Communicate submaps into buffers on the owning process. + + On the first call, some initialization is done to compute send and receive + displacements and counts. A persistent receive buffer is allocated. Submap + data is sent to their owners simultaneously using alltoallv. + + Returns: + None. 
+ + """ + self.setup_alltoallv() + if self._dist.comm is None: + # No communication needed + return - gt = GlobalTimers.get() # Gather owned submaps locally - gt.start("REAL Alltoallv forward") self._dist.comm.Alltoallv( [self.raw, self._send_counts, self._send_displ, self.mpitype], - [self._receive, self._recv_counts, self._recv_displ, self.mpitype], + [self.receive, self._recv_counts, self._recv_displ, self.mpitype], ) - gt.stop("REAL Alltoallv forward") + return - # Locally reduce owned submaps - for sm, locs in self._recv_locations.items(): - self._reduce_buf[:] = 0 - for lc in locs: - self._reduce_buf += self._receive[lc : lc + self._n_submap_value] - for lc in locs: - self._receive[lc : lc + self._n_submap_value] = self._reduce_buf + @function_timer + def reverse_alltoallv(self): + """Communicate submaps from the owning process back to all processes. + + Returns: + None. + + """ + if self._dist.comm is None: + # No communication needed + return + if self._send_counts is None: + raise RuntimeError( + "Cannot do reverse alltoallv before buffers have been setup" + ) # Scatter result back - gt.start("REAL Alltoallv reverse") self._dist.comm.Alltoallv( - [self._receive, self._recv_counts, self._recv_displ, self.mpitype], + [self.receive, self._recv_counts, self._recv_displ, self.mpitype], [self.raw, self._send_counts, self._send_displ, self.mpitype], ) - gt.stop("REAL Alltoallv reverse") + return + + @function_timer + def sync_alltoallv(self, local_func=None): + """Perform operations on locally owned submaps using Alltoallv communication. + + On the first call, some initialization is done to compute send and receive + displacements and counts. A persistent receive buffer is allocated. Submap + data is sent to their owners simultaneously using alltoallv. Each process does + a local operation on their owned submaps before sending the result back with + another alltoallv call. + + Args: + local_func (function): A function for processing the local submap data. + + Returns: + None. + + """ + self.forward_alltoallv() + + if local_func is None: + local_func = self.local_reduction + + # Run operation on locally owned submaps + local_func( + self._n_submap_value, self._recv_locations, self.receive, self.reduce_buf + ) + self.reverse_alltoallv() return @function_timer From 12eebdeb2279d17b8cfd7295feecfd8ef158a207 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Fri, 30 Oct 2020 15:21:16 -0700 Subject: [PATCH 015/690] Split up observation source into several files. Finish porting healpix operator. Update two testing pipelines to new API. 
--- pipelines/toast_future.py | 8 +- pipelines/toast_pixel_comm.py | 78 +- src/toast/CMakeLists.txt | 2 + src/toast/config.py | 2 + src/toast/future_ops/__init__.py | 2 +- src/toast/future_ops/pointing_healpix.py | 154 ++-- src/toast/observation.py | 1005 +--------------------- src/toast/observation_data.py | 813 +++++++++++++++++ src/toast/observation_view.py | 216 +++++ 9 files changed, 1149 insertions(+), 1131 deletions(-) create mode 100644 src/toast/observation_data.py create mode 100644 src/toast/observation_view.py diff --git a/pipelines/toast_future.py b/pipelines/toast_future.py index 7ed893fea..0703efd1f 100644 --- a/pipelines/toast_future.py +++ b/pipelines/toast_future.py @@ -85,13 +85,13 @@ def main(): # focalplane and telescope focalplane = fake_hexagon_focalplane( args.focalplane_pixels, - 10.0, - samplerate=10.0, + width=10.0 * u.degree, + sample_rate=10.0 * u.Hz, epsilon=0.0, net=1.0, - fmin=1.0e-5, + f_min=1.0e-5 * u.Hz, alpha=1.0, - fknee=0.05, + f_knee=0.05 * u.Hz, ) print(focalplane) diff --git a/pipelines/toast_pixel_comm.py b/pipelines/toast_pixel_comm.py index 7294769ed..d81c46028 100644 --- a/pipelines/toast_pixel_comm.py +++ b/pipelines/toast_pixel_comm.py @@ -17,13 +17,15 @@ import numpy as np +from astropy import units as u + import healpy as hp import toast from toast.mpi import get_world, Comm -from toast.dist import Data +from toast.data import Data from toast.utils import Logger, Environment @@ -31,7 +33,7 @@ from toast.timing import dump as dump_timing -from toast import dump_config, parse_config, create +from toast.config import dump_toml, parse_config, create from toast.pixels import PixelDistribution, PixelData @@ -39,7 +41,7 @@ from toast import future_ops as ops -from toast.future_ops.sim_focalplane import fake_hexagon_focalplane +from toast.instrument_sim import fake_hexagon_focalplane from toast.instrument import Telescope @@ -54,7 +56,10 @@ def main(): mpiworld, procs, rank = get_world() # The operators used in this script: - operators = {"sim_satellite": ops.SimSatellite, "pointing": ops.PointingHealpix} + operators = [ + ops.SimSatellite(name="sim_satellite"), + ops.PointingHealpix(name="pointing"), + ] # Argument parsing parser = argparse.ArgumentParser( @@ -85,52 +90,49 @@ def main(): help="Size in MB of allreduce buffer", ) - config, argvars = parse_config(parser, operators=operators) - - # Communicator - comm = Comm(world=mpiworld, groupsize=argvars["group_size"]) + config, args = parse_config(parser, operators=operators) # Make a fake focalplane and telescope focalplane = fake_hexagon_focalplane( - argvars["focalplane_pixels"], - 10.0, - samplerate=10.0, + args.focalplane_pixels, + width=10.0 * u.degree, + sample_rate=10.0 * u.Hz, epsilon=0.0, net=1.0, - fmin=1.0e-5, + f_min=1.0e-5 * u.Hz, alpha=1.0, - fknee=0.05, + f_knee=0.05 * u.Hz, ) - config["operators"]["sim_satellite"]["telescope"] = Telescope( - name="fake", focalplane=focalplane - ) - - # Specify where to store the pixel distribution - config["operators"]["pointing"]["create_dist"] = "pixel_dist" - # Log the config that was actually used at runtime. out = "pixel_comm_config_log.toml" - dump_config(out, config) + if rank == 0: + dump_toml(out, config) # Instantiate our operators run = create(config) + run["operators"]["sim_satellite"].telescope = Telescope( + name="fake", focalplane=focalplane + ) + + # Specify where to store the pixel distribution + run["operators"]["pointing"].create_dist = "pixel_dist" + # Put our operators into a pipeline running all detectors at once. 
- pipe_opts = ops.Pipeline.defaults() - pipe_opts["detector_sets"] = "ALL" - pipe_opts["operators"] = [ - run["operators"][x] for x in ["sim_satellite", "pointing"] - ] + pipe = ops.Pipeline( + detector_sets=["ALL"], + operators=[run["operators"][x] for x in ["sim_satellite", "pointing"]], + ) - pipe = ops.Pipeline(pipe_opts) + # Communicator + comm = Comm(world=mpiworld, groupsize=args.group_size) # Start with empty data data = toast.Data(comm=comm) # Run the pipeline - pipe.exec(data) - pipe.finalize(data) + pipe.apply(data) # print(data) @@ -142,11 +144,11 @@ def main(): # Output file root outroot = "pixcomm_nproc-{:04d}_gsize-{:04d}_nobs-{:03d}_ndet-{:03d}_nside-{:04d}_nsub-{:03d}".format( procs, - argvars["group_size"], - config["operators"]["sim_satellite"]["n_observation"], - 2 * argvars["focalplane_pixels"], - config["operators"]["pointing"]["nside"], - config["operators"]["pointing"]["nside_submap"], + args.group_size, + run["operators"]["sim_satellite"].num_observations, + 2 * args.focalplane_pixels, + run["operators"]["pointing"].nside, + run["operators"]["pointing"].nside_submap, ) # Print out the total hit map and also the hitmap on rank zero. @@ -154,7 +156,7 @@ def main(): hview = hits.raw.array() for obs in data.obs: for det in obs.local_detectors: - global_pixels = obs["pixels"][det] + global_pixels = obs.detdata["pixels"][det] # We can do this since n_value == 1 local_pixels = pixdist.global_pixel_to_local(global_pixels) hview[local_pixels] += 1 @@ -176,7 +178,7 @@ def main(): fview, dtype=np.int32, fits_IDL=False, - nest=config["operators"]["pointing"]["nest"], + nest=run["operators"]["pointing"].nest, ) del fview fhits.clear() @@ -185,7 +187,7 @@ def main(): hits.sync_allreduce() outfile = "{}_hits.fits".format(outroot) - write_healpix_fits(hits, outfile, nest=config["operators"]["pointing"]["nest"]) + write_healpix_fits(hits, outfile, nest=run["operators"]["pointing"].nest) # Create some IQU maps with fake local data pixdata = PixelData(pixdist, dtype=np.float64, n_value=3) @@ -208,7 +210,7 @@ def main(): tm.start() gt.start("SYNC_ALLREDUCE") - cbytes = argvars["comm_mb"]*1000000 + cbytes = args.comm_mb * 1000000 for i in range(niter): pixdata.sync_allreduce(comm_bytes=cbytes) diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 37e9b915e..efa2b10da 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -97,6 +97,8 @@ install(FILES noise.py noise_sim.py observation.py + observation_data.py + observation_view.py operator.py vis.py rng.py diff --git a/src/toast/config.py b/src/toast/config.py index 600076831..cccf3c441 100644 --- a/src/toast/config.py +++ b/src/toast/config.py @@ -755,6 +755,8 @@ def parse_tree(tree, cursor): else: parent[node_name][child_key][elem] = found # print("PARSE child value {} is a list".format(child_val)) + else: + print("PARSE not modifying {}".format(child_val)) # If this node is an object and all refs exist, then create it. Otherwise # leave it alone. 
diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py index 669e9c29f..db940fd43 100644 --- a/src/toast/future_ops/__init__.py +++ b/src/toast/future_ops/__init__.py @@ -12,4 +12,4 @@ from .noise_model import DefaultNoiseModel -# from .pointing_healpix import PointingHealpix +from .pointing_healpix import PointingHealpix diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/future_ops/pointing_healpix.py index 04c79d470..0ecb28d6f 100644 --- a/src/toast/future_ops/pointing_healpix.py +++ b/src/toast/future_ops/pointing_healpix.py @@ -63,7 +63,9 @@ class PointingHealpix(Operator): boresight = Unicode("boresight_radec", help="Observation shared key for boresight") - hwp_angle = Unicode("hwp_angle", help="Observation shared key for HWP angle") + hwp_angle = Unicode( + None, allow_none=True, help="Observation shared key for HWP angle" + ) flags = Unicode( None, allow_none=True, help="Observation shared key for telescope flags to use" @@ -76,7 +78,7 @@ class PointingHealpix(Operator): weights = Unicode("weights", help="Observation detdata key for output weights") quats = Unicode( - "quats", + None, allow_none=True, help="Observation detdata key for output quaternions (for debugging)", ) @@ -147,14 +149,15 @@ def __init__(self, **kwargs): self._n_submap = (self.nside // self.nside_submap) ** 2 self._local_submaps = None - if self.create_dist is not None: - self._local_submaps = np.zeros(self._n_submap, dtype=np.bool) @function_timer def _exec(self, data, detectors=None, **kwargs): env = Environment.get() log = Logger.get() + if self._local_submaps is None and self.create_dist is not None: + self._local_submaps = np.zeros(self._n_submap, dtype=np.bool) + # We do the calculation over buffers of timestream samples to reduce memory # overhead from temporary arrays. tod_buffer_length = env.tod_buffer_length() @@ -169,9 +172,14 @@ def _exec(self, data, detectors=None, **kwargs): # Get the flags if needed flags = None if self.flags is not None: - flags = obs.shared[self.flags] + flags = np.array(obs.shared[self.flags]) flags &= self.flag_mask + # HWP angle if needed + hwp_angle = None + if self.hwp_angle is not None: + hwp_angle = obs.shared[self.hwp_angle] + # Boresight pointing quaternions boresight = obs.shared[self.boresight] @@ -187,37 +195,30 @@ def _exec(self, data, detectors=None, **kwargs): # detector quaternions. 
if self.single_precision: - obs.detdata.create(self.pixels, shape=(1,), dtype=np.int32) - obs.create_detector_data( - self.config["pixels"], - shape=(n_samp,), - dtype=np.int32, - detectors=dets, + obs.detdata.create( + self.pixels, detshape=(), dtype=np.int32, detectors=dets ) - obs.create_detector_data( - self.config["weights"], - shape=(n_samp, self._nnz), + obs.detdata.create( + self.weights, + detshape=(self._nnz,), dtype=np.float32, detectors=dets, ) else: - obs.create_detector_data( - self.config["pixels"], - shape=(n_samp,), - dtype=np.int64, - detectors=dets, + obs.detdata.create( + self.pixels, detshape=(), dtype=np.int64, detectors=dets ) - obs.create_detector_data( - self.config["weights"], - shape=(n_samp, self._nnz), + obs.detdata.create( + self.weights, + detshape=(self._nnz,), dtype=np.float64, detectors=dets, ) - if self.config["quats"] is not None: - obs.create_detector_data( - self.config["quats"], - shape=(n_samp, 4), + if self.quats is not None: + obs.detdata.create( + self.quats, + detshape=(4,), dtype=np.float64, detectors=dets, ) @@ -235,8 +236,8 @@ def _exec(self, data, detectors=None, **kwargs): # Timestream of detector quaternions quats = qa.mult(boresight, detquat) - if self.config["quats"] is not None: - obs[self.config["quats"]][det][:] = quats + if self.quats is not None: + obs.detdata[self.quats][det, :] = quats # Cal for this detector dcal = 1.0 @@ -246,9 +247,9 @@ def _exec(self, data, detectors=None, **kwargs): # Buffered pointing calculation buf_off = 0 buf_n = tod_buffer_length - while buf_off < n_samp: - if buf_off + buf_n > n_samp: - buf_n = n_samp - buf_off + while buf_off < obs.n_local_samples: + if buf_off + buf_n > obs.n_local_samples: + buf_n = obs.n_local_samples - buf_off bslice = slice(buf_off, buf_off + buf_n) # This buffer of detector quaternions @@ -256,8 +257,8 @@ def _exec(self, data, detectors=None, **kwargs): # Buffer of HWP angle hslice = None - if hwpang is not None: - hslice = hwpang[bslice].reshape(-1) + if hwp_angle is not None: + hslice = hwp_angle[bslice].reshape(-1) # Buffer of flags fslice = None @@ -265,21 +266,21 @@ def _exec(self, data, detectors=None, **kwargs): fslice = flags[bslice].reshape(-1) # Pixel and weight buffers - pxslice = obs[self.config["pixels"]][det][bslice].reshape(-1) - wtslice = obs[self.config["weights"]][det][bslice].reshape(-1) + pxslice = obs.detdata[self.pixels][det, bslice].reshape(-1) + wtslice = obs.detdata[self.weights][det, bslice].reshape(-1) pbuf = pxslice wbuf = wtslice - if self.config["single_precision"]: + if self.single_precision: pbuf = np.zeros(len(pxslice), dtype=np.int64) wbuf = np.zeros(len(wtslice), dtype=np.float64) pointing_matrix_healpix( self.hpix, - self.config["nest"], + self.nest, epsilon, dcal, - self.config["mode"], + self.mode, detp, hslice, fslice, @@ -287,36 +288,26 @@ def _exec(self, data, detectors=None, **kwargs): wtslice, ) - if self.config["single_precision"]: + if self.single_precision: pxslice[:] = pbuf.astype(np.int32) wtslice[:] = wbuf.astype(np.float32) buf_off += buf_n - if self.config["create_dist"] is not None: + if self.create_dist is not None: self._local_submaps[ - obs[self.config["pixels"]][det] // self._n_pix_submap + obs.detdata["pixels"][det] // self._n_pix_submap ] = True return - def finalize(self, data): - """Perform any final operations / communication. - - Args: - data (toast.Data): The distributed data. - - Returns: - (PixelDistribution): Return the final submap distribution or None. 
- - """ - # Optionally return the submap distribution - if self.config["create_dist"] is not None: + def _finalize(self, data, **kwargs): + if self.create_dist is not None: submaps = None - if self.config["single_precision"]: + if self.single_precision: submaps = np.arange(self._n_submap, dtype=np.int32)[self._local_submaps] else: submaps = np.arange(self._n_submap, dtype=np.int64)[self._local_submaps] - data[self.config["create_dist"]] = PixelDistribution( + data[self.create_dist] = PixelDistribution( n_pix=self._n_pix, n_submap=self._n_submap, local_submaps=submaps, @@ -324,45 +315,36 @@ def finalize(self, data): ) return - def requires(self): - """List of Observation keys directly used by this Operator.""" - req = ["BORESIGHT_RADEC", "HWP_ANGLE"] - if self.config["flags"] is not None: - req.append(self.config["flags"]) - if self.config["cal"] is not None: - req.append(self.config["cal"]) - return req - - def provides(self): - """List of Observation keys generated by this Operator.""" - prov = [self.config["pixels"], self.config["weights"]] - if self.config["quats"] is not None: - prov.append(self.config["quats"]) - return prov - - def accelerators(self): - """List of accelerators supported by this Operator.""" - return list() - - def _finalize(self, data, **kwargs): - return - def _requires(self): - return { - "meta": [ - self.noise_model, - ], + req = { + "meta": list(), "shared": [ - self.times, + self.boresight, ], + "detdata": list(), } + if self.cal is not None: + req["meta"].append(self.cal) + if self.flags is not None: + req["shared"].append(self.flags) + if self.hwp_angle is not None: + req["shared"].append(self.hwp_angle) + return req def _provides(self): - return { + prov = { + "meta": list(), + "shared": list(), "detdata": [ - self.out, - ] + self.pixels, + self.weights, + ], } + if self.create_dist is not None: + prov["meta"].append(self.create_dist) + if self.quats is not None: + prov["detdata"].append(self.quats) + return prov def _accelerators(self): return list() diff --git a/src/toast/observation.py b/src/toast/observation.py index 9dbb48c07..69ddedb39 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -10,1021 +10,22 @@ import numpy as np -from pshmem import MPIShared - from .mpi import MPI -from .instrument import Telescope, Focalplane - -from .intervals import IntervalList +from .instrument import Telescope from .dist import distribute_samples from .utils import ( Logger, - AlignedI8, - AlignedU8, - AlignedI16, - AlignedU16, - AlignedI32, - AlignedU32, - AlignedI64, - AlignedU64, - AlignedF32, - AlignedF64, name_UID, ) from .cuda import use_pycuda +from .observation_data import DetectorData, DetDataMgr, SharedDataMgr, IntervalMgr -class DetectorData(object): - """Class representing a logical collection of co-sampled detector data. - - This class works like an array of detector data where the first dimension is the - number of detectors and the second dimension is the data for that detector. The - data for a particular detector may itself be multi-dimensional, with the first - dimension the number of samples. - - The data in this container may be sliced by both detector indices and names, as - well as by sample range. - - Example: - Imagine we have 3 detectors and each has 10 samples. We want to store a - 4-element value at each sample using 4-byte floats. 
We would do:: - - detdata = DetectorData(["d01", "d02", "d03"], (10, 4), np.float32) - - and then we can access the data for an individual detector either by index - or by name with:: - - detdata["d01"] = np.ones((10, 4), dtype=np.float32) - firstdet = detdata[0] - - slicing by index and by a list of detectors is possible:: - - view = detdata[0:-1, 2:4] - view = detdata[["d01", "d03"], 3:8] - - Args: - detectors (list): A list of detector names in exactly the order you wish. - This order is fixed for the life of the object. - shape (tuple): The shape of the data *for each detector*. The first element - of this shape should be the number of samples. - dtype (numpy.dtype): A numpy-compatible dtype for each element of the detector - data. The only supported types are 1, 2, 4, and 8 byte signed and unsigned - integers, 4 and 8 byte floating point numbers, and 4 and 8 byte complex - numbers. - - """ - - def __init__(self, detectors, shape, dtype): - log = Logger.get() - - self._detectors = detectors - if len(self._detectors) == 0: - msg = "You must specify a list of at least one detector name" - log.error(msg) - raise ValueError(msg) - - self._name2idx = {y: x for x, y in enumerate(self._detectors)} - - # construct a new dtype in case the parameter given is shortcut string - ttype = np.dtype(dtype) - - self._storage_class = None - if ttype.char == "b": - self._storage_class = AlignedI8 - elif ttype.char == "B": - self._storage_class = AlignedU8 - elif ttype.char == "h": - self._storage_class = AlignedI16 - elif ttype.char == "H": - self._storage_class = AlignedU16 - elif ttype.char == "i": - self._storage_class = AlignedI32 - elif ttype.char == "I": - self._storage_class = AlignedU32 - elif (ttype.char == "q") or (ttype.char == "l"): - self._storage_class = AlignedI64 - elif (ttype.char == "Q") or (ttype.char == "L"): - self._storage_class = AlignedU64 - elif ttype.char == "f": - self._storage_class = AlignedF32 - elif ttype.char == "d": - self._storage_class = AlignedF64 - elif ttype.char == "F": - raise NotImplementedError("No support yet for complex numbers") - elif ttype.char == "D": - raise NotImplementedError("No support yet for complex numbers") - else: - msg = "Unsupported data typecode '{}'".format(ttype.char) - log.error(msg) - raise ValueError(msg) - self._dtype = ttype - - # Verify that our shape contains only integral values - self._flatshape = len(self._detectors) - for d in shape: - if not isinstance(d, (int, np.integer)): - msg = "input shape contains non-integer values" - log.error(msg) - raise ValueError(msg) - self._flatshape *= d - - shp = [len(self._detectors)] - shp.extend(shape) - self._shape = tuple(shp) - self._raw = self._storage_class.zeros(self._flatshape) - self._data = self._raw.array().reshape(self._shape) - - @property - def detectors(self): - return list(self._detectors) - - def keys(self): - return list(self._detectors) - - @property - def dtype(self): - return self._dtype - - @property - def shape(self): - return self._shape - - @property - def detector_shape(self): - return tuple(self._shape[1:]) - - @property - def data(self): - return self._data - - def clear(self): - """Delete the underlying memory. - - This will forcibly delete the C-allocated memory and invalidate all python - references to this object. DO NOT CALL THIS unless you are sure all references - are no longer being used and you are about to delete the object. 
- - """ - if hasattr(self, "_data"): - del self._data - if hasattr(self, "_raw"): - self._raw.clear() - del self._raw - - def __del__(self): - self.clear() - - def _det_axis_view(self, key): - if isinstance(key, (int, np.integer)): - # Just one detector by index - view = key - elif isinstance(key, str): - # Just one detector by name - view = self._name2idx[key] - elif isinstance(key, slice): - # We are slicing detectors by index - view = key - else: - # Assume that our key is at least iterable - try: - test = iter(key) - view = list() - for k in key: - view.append(self._name2idx[k]) - view = tuple(view) - except TypeError: - log = Logger.get() - msg = "Detector indexing supports slice, int, string or iterable, not '{}'".format( - key - ) - log.error(msg) - raise TypeError(msg) - return view - - def _get_view(self, key): - if isinstance(key, (tuple, Mapping)): - # We are slicing in both detector and sample dimensions - if len(key) > len(self._shape): - msg = "DetectorData has only {} dimensions".format(len(self._shape)) - log.error(msg) - raise TypeError(msg) - view = [self._det_axis_view(key[0])] - for k in key[1:]: - view.append(k) - # for s in range(len(self._shape) - len(key)): - # view += (slice(None, None, None),) - return tuple(view) - else: - # Only detector slice - view = self._det_axis_view(key) - # for s in range(len(self._shape) - 1): - # view += (slice(None, None, None),) - return view - - def __getitem__(self, key): - view = self._get_view(key) - return np.array(self._data[view], dtype=self._dtype, copy=False) - - def __delitem__(self, key): - raise NotImplementedError("Cannot delete individual elements") - return - - def __setitem__(self, key, value): - view = self._get_view(key) - self._data[view] = value - - def __iter__(self): - return iter(self._data) - - def __len__(self): - return len(self._detectors) - - def __repr__(self): - val = " 1: - detshape = shp[1:] - if key not in self._internal: - self.create( - key, - detshape=detshape, - dtype=value.dtype, - detectors=self.detectors, - ) - else: - fullshape = (self.samples,) - if detshape is not None: - fullshape += detshape - if fullshape != self._internal[key].detector_shape: - msg = "Assignment value has wrong detector shape" - raise ValueError(msg) - for d in self.detectors: - self._internal[key][d] = value - elif shp[0] == len(self.detectors): - # Full sized array - if shp[1] != self.samples: - msg = "Assignment value has wrong number of samples" - raise ValueError(msg) - detshape = None - if len(shp) > 2: - detshape = shp[2:] - if key not in self._internal: - self.create( - key, - detshape=detshape, - dtype=value.dtype, - detectors=self.detectors, - ) - else: - fullshape = (self.samples,) - if detshape is not None: - fullshape += detshape - if fullshape != self._internal[key].detector_shape: - msg = "Assignment value has wrong detector shape" - raise ValueError(msg) - self._internal[key][:] = value - else: - # Incompatible - msg = "Assignment of detector data from an array only supports full size or single detector" - raise ValueError(msg) - - def __iter__(self): - return iter(self._internal) - - def __len__(self): - return len(self._internal) - - def clear(self): - for k in self._internal.keys(): - self._internal[k].clear() - - def __repr__(self): - val = " 1: - if self.comm.rank == 0: - msg = "When creating shared data with [] notation, only one process may have a non-None value for the data" - print(msg, flush=True) - self.comm.Abort() - from_rank = np.where(check_result == 1)[0][0] - shp = self.comm.bcast(shp, 
root=from_rank) - dt = self.comm.bcast(dt, root=from_rank) - self.create(key, shape=shp, dtype=dt) - offset = None - if self.comm.rank == from_rank: - offset = tuple([0 for x in self._internal[key].shape]) - self._internal[key].set(value, offset=offset, fromrank=from_rank) - else: - # Already exists, just do the assignment - slc = None - if value is not None: - if value.shape != self._internal[key].shape: - raise ValueError( - "When assigning directly to a shared object, the value must have the same dimensions" - ) - slc = tuple([slice(0, x) for x in self._internal[key].shape]) - self._internal[key][slc] = value - - def __iter__(self): - return iter(self._internal) - - def __len__(self): - return len(self._internal) - - def clear(self): - for k in self._internal.keys(): - self._internal[k].close() - - def __del__(self): - if hasattr(self, "_internal"): - self.clear() - - def __repr__(self): - val = " 1: - for it in self.slices[0:-1]: - s += str(it) - s += ", " - if len(self.slices) > 0: - s += str(self.slices[-1]) - s += "]" - return s - - -class ViewMgr(MutableMapping): - """Internal class to manage views into observation data objects.""" - - def __init__(self, obj): - self.obj = obj - if not hasattr(obj, "_views"): - self.obj._views = dict() - - # Mapping methods - - def __getitem__(self, key): - if key not in self.obj._views: - # View does not yet exist, create it. - if key not in self.obj.intervals: - raise KeyError( - "Observation does not have interval list named '{}'".format(key) - ) - self.obj._views[key] = View(self.obj, key) - # Register deleter callback - self.obj.intervals.register_delete_callback(key, self.__delitem__) - return self.obj._views[key] - - def __delitem__(self, key): - del self.obj._views[key] - - def __setitem__(self, key, value): - raise RuntimeError("Cannot set views directly- simply access them.") - - def __iter__(self): - return iter(self._internal) - - def __len__(self): - return len(self._internal) - - def clear(self): - self.obj._views.clear() - - -class ViewInterface(object): - """Descriptor class for accessing the views in an observation. - - You can get a view of the data for a particular interval list just by accessing - it with the name of the intervals object you want: - - obs.view["name_of_intervals"] - - Then you can use this to provide a view into either detdata or shared objects within - the observation. For example: - - print(obs.view["name_of_intervals"].detdata["signal"]) - - obs.view["bad_pointing"].shared["boresight"][:] = np.array([0., 0., 0., 1.]) - - """ - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def __get__(self, obj, cls=None): - if obj is None: - return self - else: - if not hasattr(obj, "_viewmgr"): - obj._viewmgr = ViewMgr(obj) - return obj._viewmgr - - def __set__(self, obj, value): - raise AttributeError("Cannot reset the view interface") - - def __delete__(self, obj): - raise AttributeError("Cannot delete the view interface") +from .observation_view import DetDataView, SharedView, View, ViewMgr, ViewInterface class DistDetSamp(object): diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py new file mode 100644 index 000000000..32af526bf --- /dev/null +++ b/src/toast/observation_data.py @@ -0,0 +1,813 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
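The containers collected in this new module are easiest to see in a small, self-contained sketch. The detector names and sizes here are made up, the import path simply assumes this new module location, and the slicing behavior follows the DetectorData docstring below::

    import numpy as np

    from toast.observation_data import DetectorData

    # Two detectors, 10 samples each, 3 values per sample: one aligned
    # flat buffer of 2 * 10 * 3 float64 elements, viewed as (2, 10, 3).
    detdata = DetectorData(["d00", "d01"], (10, 3), np.float64)
    print(detdata.shape)           # (2, 10, 3)
    print(detdata.detector_shape)  # (10, 3)

    # Index by detector name or position; slice detectors and samples together.
    detdata["d00"] = np.ones((10, 3))
    sub = detdata[["d00", "d01"], 0:5]  # shape (2, 5, 3)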
+ +import sys + +from collections.abc import MutableMapping, Mapping + +import numpy as np + +from pshmem import MPIShared + +from .mpi import MPI + +from .utils import ( + Logger, + AlignedI8, + AlignedU8, + AlignedI16, + AlignedU16, + AlignedI32, + AlignedU32, + AlignedI64, + AlignedU64, + AlignedF32, + AlignedF64, +) + +from .intervals import IntervalList + + +class DetectorData(object): + """Class representing a logical collection of co-sampled detector data. + + This class works like an array of detector data where the first dimension is the + number of detectors and the second dimension is the data for that detector. The + data for a particular detector may itself be multi-dimensional, with the first + dimension the number of samples. + + The data in this container may be sliced by both detector indices and names, as + well as by sample range. + + Example: + Imagine we have 3 detectors and each has 10 samples. We want to store a + 4-element value at each sample using 4-byte floats. We would do:: + + detdata = DetectorData(["d01", "d02", "d03"], (10, 4), np.float32) + + and then we can access the data for an individual detector either by index + or by name with:: + + detdata["d01"] = np.ones((10, 4), dtype=np.float32) + firstdet = detdata[0] + + slicing by index and by a list of detectors is possible:: + + view = detdata[0:-1, 2:4] + view = detdata[["d01", "d03"], 3:8] + + Args: + detectors (list): A list of detector names in exactly the order you wish. + This order is fixed for the life of the object. + shape (tuple): The shape of the data *for each detector*. The first element + of this shape should be the number of samples. + dtype (numpy.dtype): A numpy-compatible dtype for each element of the detector + data. The only supported types are 1, 2, 4, and 8 byte signed and unsigned + integers, 4 and 8 byte floating point numbers, and 4 and 8 byte complex + numbers. 
+ + """ + + def __init__(self, detectors, shape, dtype): + log = Logger.get() + + self._detectors = detectors + if len(self._detectors) == 0: + msg = "You must specify a list of at least one detector name" + log.error(msg) + raise ValueError(msg) + + self._name2idx = {y: x for x, y in enumerate(self._detectors)} + + # construct a new dtype in case the parameter given is shortcut string + ttype = np.dtype(dtype) + + self._storage_class = None + if ttype.char == "b": + self._storage_class = AlignedI8 + elif ttype.char == "B": + self._storage_class = AlignedU8 + elif ttype.char == "h": + self._storage_class = AlignedI16 + elif ttype.char == "H": + self._storage_class = AlignedU16 + elif ttype.char == "i": + self._storage_class = AlignedI32 + elif ttype.char == "I": + self._storage_class = AlignedU32 + elif (ttype.char == "q") or (ttype.char == "l"): + self._storage_class = AlignedI64 + elif (ttype.char == "Q") or (ttype.char == "L"): + self._storage_class = AlignedU64 + elif ttype.char == "f": + self._storage_class = AlignedF32 + elif ttype.char == "d": + self._storage_class = AlignedF64 + elif ttype.char == "F": + raise NotImplementedError("No support yet for complex numbers") + elif ttype.char == "D": + raise NotImplementedError("No support yet for complex numbers") + else: + msg = "Unsupported data typecode '{}'".format(ttype.char) + log.error(msg) + raise ValueError(msg) + self._dtype = ttype + + # Verify that our shape contains only integral values + self._flatshape = len(self._detectors) + for d in shape: + if not isinstance(d, (int, np.integer)): + msg = "input shape contains non-integer values" + log.error(msg) + raise ValueError(msg) + self._flatshape *= d + + shp = [len(self._detectors)] + shp.extend(shape) + self._shape = tuple(shp) + self._raw = self._storage_class.zeros(self._flatshape) + self._data = self._raw.array().reshape(self._shape) + + @property + def detectors(self): + return list(self._detectors) + + def keys(self): + return list(self._detectors) + + @property + def dtype(self): + return self._dtype + + @property + def shape(self): + return self._shape + + @property + def detector_shape(self): + return tuple(self._shape[1:]) + + @property + def data(self): + return self._data + + def clear(self): + """Delete the underlying memory. + + This will forcibly delete the C-allocated memory and invalidate all python + references to this object. DO NOT CALL THIS unless you are sure all references + are no longer being used and you are about to delete the object. 
+ + """ + if hasattr(self, "_data"): + del self._data + if hasattr(self, "_raw"): + self._raw.clear() + del self._raw + + def __del__(self): + self.clear() + + def _det_axis_view(self, key): + if isinstance(key, (int, np.integer)): + # Just one detector by index + view = key + elif isinstance(key, str): + # Just one detector by name + view = self._name2idx[key] + elif isinstance(key, slice): + # We are slicing detectors by index + view = key + else: + # Assume that our key is at least iterable + try: + test = iter(key) + view = list() + for k in key: + view.append(self._name2idx[k]) + view = tuple(view) + except TypeError: + log = Logger.get() + msg = "Detector indexing supports slice, int, string or iterable, not '{}'".format( + key + ) + log.error(msg) + raise TypeError(msg) + return view + + def _get_view(self, key): + if isinstance(key, (tuple, Mapping)): + # We are slicing in both detector and sample dimensions + if len(key) > len(self._shape): + msg = "DetectorData has only {} dimensions".format(len(self._shape)) + log.error(msg) + raise TypeError(msg) + view = [self._det_axis_view(key[0])] + for k in key[1:]: + view.append(k) + # for s in range(len(self._shape) - len(key)): + # view += (slice(None, None, None),) + return tuple(view) + else: + # Only detector slice + view = self._det_axis_view(key) + # for s in range(len(self._shape) - 1): + # view += (slice(None, None, None),) + return view + + def __getitem__(self, key): + view = self._get_view(key) + return np.array(self._data[view], dtype=self._dtype, copy=False) + + def __delitem__(self, key): + raise NotImplementedError("Cannot delete individual elements") + return + + def __setitem__(self, key, value): + view = self._get_view(key) + self._data[view] = value + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._detectors) + + def __repr__(self): + val = " 1: + detshape = shp[1:] + if key not in self._internal: + self.create( + key, + detshape=detshape, + dtype=value.dtype, + detectors=self.detectors, + ) + else: + fullshape = (self.samples,) + if detshape is not None: + fullshape += detshape + if fullshape != self._internal[key].detector_shape: + msg = "Assignment value has wrong detector shape" + raise ValueError(msg) + for d in self.detectors: + self._internal[key][d] = value + elif shp[0] == len(self.detectors): + # Full sized array + if shp[1] != self.samples: + msg = "Assignment value has wrong number of samples" + raise ValueError(msg) + detshape = None + if len(shp) > 2: + detshape = shp[2:] + if key not in self._internal: + self.create( + key, + detshape=detshape, + dtype=value.dtype, + detectors=self.detectors, + ) + else: + fullshape = (self.samples,) + if detshape is not None: + fullshape += detshape + if fullshape != self._internal[key].detector_shape: + msg = "Assignment value has wrong detector shape" + raise ValueError(msg) + self._internal[key][:] = value + else: + # Incompatible + msg = "Assignment of detector data from an array only supports full size or single detector" + raise ValueError(msg) + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def clear(self): + for k in self._internal.keys(): + self._internal[k].clear() + + def __repr__(self): + val = " 1: + if self.comm.rank == 0: + msg = "When creating shared data with [] notation, only one process may have a non-None value for the data" + print(msg, flush=True) + self.comm.Abort() + from_rank = np.where(check_result == 1)[0][0] + shp = self.comm.bcast(shp, 
root=from_rank) + dt = self.comm.bcast(dt, root=from_rank) + self.create(key, shape=shp, dtype=dt) + offset = None + if self.comm.rank == from_rank: + offset = tuple([0 for x in self._internal[key].shape]) + self._internal[key].set(value, offset=offset, fromrank=from_rank) + else: + # Already exists, just do the assignment + slc = None + if value is not None: + if value.shape != self._internal[key].shape: + raise ValueError( + "When assigning directly to a shared object, the value must have the same dimensions" + ) + slc = tuple([slice(0, x) for x in self._internal[key].shape]) + self._internal[key][slc] = value + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def clear(self): + for k in self._internal.keys(): + self._internal[k].close() + + def __del__(self): + if hasattr(self, "_internal"): + self.clear() + + def __repr__(self): + val = " 1: + for it in self.slices[0:-1]: + s += str(it) + s += ", " + if len(self.slices) > 0: + s += str(self.slices[-1]) + s += "]" + return s + + +class ViewMgr(MutableMapping): + """Internal class to manage views into observation data objects.""" + + def __init__(self, obj): + self.obj = obj + if not hasattr(obj, "_views"): + self.obj._views = dict() + + # Mapping methods + + def __getitem__(self, key): + if key not in self.obj._views: + # View does not yet exist, create it. + if key not in self.obj.intervals: + raise KeyError( + "Observation does not have interval list named '{}'".format(key) + ) + self.obj._views[key] = View(self.obj, key) + # Register deleter callback + self.obj.intervals.register_delete_callback(key, self.__delitem__) + return self.obj._views[key] + + def __delitem__(self, key): + del self.obj._views[key] + + def __setitem__(self, key, value): + raise RuntimeError("Cannot set views directly- simply access them.") + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def clear(self): + self.obj._views.clear() + + +class ViewInterface(object): + """Descriptor class for accessing the views in an observation. + + You can get a view of the data for a particular interval list just by accessing + it with the name of the intervals object you want: + + obs.view["name_of_intervals"] + + Then you can use this to provide a view into either detdata or shared objects within + the observation. For example: + + print(obs.view["name_of_intervals"].detdata["signal"]) + + obs.view["bad_pointing"].shared["boresight"][:] = np.array([0., 0., 0., 1.]) + + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def __get__(self, obj, cls=None): + if obj is None: + return self + else: + if not hasattr(obj, "_viewmgr"): + obj._viewmgr = ViewMgr(obj) + return obj._viewmgr + + def __set__(self, obj, value): + raise AttributeError("Cannot reset the view interface") + + def __delete__(self, obj): + raise AttributeError("Cannot delete the view interface") From 078542a7b1b4f18ef4083b1a95f6c90ab35df6bf Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Sat, 31 Oct 2020 10:20:52 -0700 Subject: [PATCH 016/690] Port memory counter operator and supporting observation methods. 
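A rough usage sketch of the operator this patch adds, assuming the Operator base class exposes public exec() and finalize() wrappers around the _exec() / _finalize() methods shown in the new file below, and that a Data object named data is already constructed::

    from toast.future_ops import MemoryCounter

    mc = MemoryCounter()       # silent=False: log the total on world rank 0
    mc.exec(data)              # accumulates ob.memory_use() over local observations
    total = mc.finalize(data)  # the byte count returned by _finalize()
    print("{:0.2f} GB".format(total / 1024 ** 3))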
--- src/toast/future_ops/mapmaker.py | 442 ++++++++++++++++++------- src/toast/future_ops/memory_counter.py | 65 ++++ src/toast/observation.py | 23 ++ src/toast/observation_data.py | 42 +++ src/toast/operator.py | 8 +- src/toast/tests/runner.py | 52 +-- 6 files changed, 489 insertions(+), 143 deletions(-) create mode 100644 src/toast/future_ops/memory_counter.py diff --git a/src/toast/future_ops/mapmaker.py b/src/toast/future_ops/mapmaker.py index cf584c1bc..bf46327cf 100644 --- a/src/toast/future_ops/mapmaker.py +++ b/src/toast/future_ops/mapmaker.py @@ -1,7 +1,3 @@ -# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. -# All rights reserved. Use of this source code is governed by -# a BSD-style license that can be found in the LICENSE file. - from collections import OrderedDict import os import sys @@ -19,9 +15,13 @@ from .todmap_math import OpAccumDiag, OpScanScale, OpScanMask from ..tod import OpCacheClear, OpCacheCopy, OpCacheInit, OpFlagsApply, OpFlagGaps from ..map import covariance_apply, covariance_invert, DistPixels, covariance_rcond +from .. import qarray as qa from .._libtoast import add_offsets_to_signal, project_signal_offsets + +XAXIS, YAXIS, ZAXIS = np.eye(3) + temporary_names = set() @@ -42,13 +42,13 @@ def free_temporary_name(name): class TOASTMatrix: def apply(self, vector, inplace=False): - """ Every TOASTMatrix can apply itself to a distributed vectors + """Every TOASTMatrix can apply itself to a distributed vectors of signal, map or template offsets as is appropriate. """ raise NotImplementedError("Virtual apply not implemented in derived class") def apply_transpose(self, vector, inplace=False): - """ Every TOASTMatrix can apply itself to a distributed vectors + """Every TOASTMatrix can apply itself to a distributed vectors of signal, map or template offsets as is appropriate. """ raise NotImplementedError( @@ -71,7 +71,7 @@ def apply(self, vector, inplace=False): class TODTemplate: - """ Parent class for all templates that can be registered with + """Parent class for all templates that can be registered with TemplateMatrix """ @@ -83,29 +83,25 @@ def __init___(self, *args, **kwargs): raise NotImplementedError("Derived class must implement __init__()") def add_to_signal(self, signal, amplitudes): - """ signal += F.a - """ + """signal += F.a""" raise NotImplementedError("Derived class must implement add_to_signal()") def project_signal(self, signal, amplitudes): - """ a += F^T.signal - """ + """a += F^T.signal""" raise NotImplementedError("Derived class must implement project_signal()") def add_prior(self, amplitudes_in, amplitudes_out): - """ a' += C_a^{-1}.a - """ + """a' += C_a^{-1}.a""" # Not all TODTemplates implement the prior return def apply_precond(self, amplitudes_in, amplitudes_out): - """ a' = M^{-1}.a - """ + """a' = M^{-1}.a""" raise NotImplementedError("Derived class must implement apply_precond()") class SubharmonicTemplate(TODTemplate): - """ This class represents sub-harmonic noise fluctuations. + """This class represents sub-harmonic noise fluctuations. 
Sub-harmonic means that the characteristic frequency of the noise modes is lower than 1/T where T is the length of the interval @@ -138,8 +134,7 @@ def __init__( self.get_steps_and_preconditioner() def get_steps_and_preconditioner(self): - """ Assign each template an amplitude - """ + """Assign each template an amplitude""" self.templates = [] self.slices = [] self.preconditioners = [] @@ -173,8 +168,7 @@ def get_steps_and_preconditioner(self): return def _get_preconditioner(self, det, tod, todslice, common_flags, detweight): - """ Calculate the preconditioner for the given interval and detector - """ + """Calculate the preconditioner for the given interval and detector""" flags = tod.local_flags(det, self.flags)[todslice] good = (flags & self.flag_mask) == 0 good[common_flags[todslice]] = False @@ -202,7 +196,7 @@ def add_to_signal(self, signal, amplitudes): return def _get_templates(self, todslice): - """ Develop hierarchy of subharmonic modes matching the given length + """Develop hierarchy of subharmonic modes matching the given length The basis functions are (orthogonal) Legendre polynomials """ @@ -234,7 +228,7 @@ def project_signal(self, signal, amplitudes): pass def apply_precond(self, amplitudes_in, amplitudes_out): - """ Standard diagonal preconditioner accounting for the fact that + """Standard diagonal preconditioner accounting for the fact that the templates are not orthogonal in the presence of flagging and masking """ subharmonic_amplitudes_in = amplitudes_in[self.name] @@ -251,9 +245,238 @@ def apply_precond(self, amplitudes_in, amplitudes_out): return +class Fourier2DTemplate(TODTemplate): + """This class represents atmospheric fluctuations in front of the + focalplane as 2D Fourier modes.""" + + name = "Fourier2D" + + def __init__( + self, + data, + detweights, + focalplane_radius=None, # degrees + order=1, + fit_subharmonics=True, + intervals=None, + common_flags=None, + common_flag_mask=1, + flags=None, + flag_mask=1, + correlation_length=10, + correlation_amplitude=10, + ): + self.data = data + self.comm = data.comm.comm_group + self.detweights = detweights + self.focalplane_radius = focalplane_radius + self.order = order + self.fit_subharmonics = fit_subharmonics + self.norder = order + 1 + self.nmode = (2 * order) ** 2 + 1 + if self.fit_subharmonics: + self.nmode += 2 + self.intervals = intervals + self.common_flags = common_flags + self.common_flag_mask = common_flag_mask + self.flags = flags + self.flag_mask = flag_mask + self._get_templates() + self.correlation_length = correlation_length + self.correlation_amplitude = correlation_amplitude + if correlation_length: + self._get_prior() + return + + @function_timer + def _get_prior(self): + """Evaluate C_a^{-1} for the 2D polynomial coefficients based + on the correlation length. + """ + if self.correlation_length: + # Correlation length is given in seconds and we cannot assume + # that each observation has the same sampling rate. 
Therefore, + # we will build the filter for each observation + self.filters = [] # all observations + self.preconditioners = [] # all observations + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + times = tod.local_times() + corr = ( + np.exp((times[0] - times) / self.correlation_length) + * self.correlation_amplitude + ) + ihalf = times.size // 2 + corr[ihalf + 1 :] = corr[ihalf - 1 : 0 : -1] + fcorr = np.fft.rfft(corr) + invcorr = np.fft.irfft(1 / fcorr) + self.filters.append(invcorr) + # Scale the filter by the prescribed correlation strength + # and the number of modes at each angular scale + self.filter_scale = np.zeros(self.nmode) + self.filter_scale[0] = 1 + offset = 1 + if self.fit_subharmonics: + self.filter_scale[1:3] = 2 + offset += 2 + self.filter_scale[offset:] = 4 + self.filter_scale *= self.correlation_amplitude + return + + @function_timer + def _get_templates(self): + """Evaluate and normalize the polynomial templates. + + Each template corresponds to a fixed value for each detector + and depends on the position of the detector. + """ + self.templates = [] + + def evaluate_template(theta, phi, radius): + values = np.zeros(self.nmode) + values[0] = 1 + offset = 1 + if self.fit_subharmonics: + values[1:3] = theta / radius, phi / radius + offset += 2 + if self.order > 0: + rinv = np.pi / radius + orders = np.arange(self.order) + 1 + thetavec = np.zeros(self.order * 2) + phivec = np.zeros(self.order * 2) + thetavec[::2] = np.cos(orders * theta * rinv) + thetavec[1::2] = np.sin(orders * theta * rinv) + phivec[::2] = np.cos(orders * phi * rinv) + phivec[1::2] = np.sin(orders * phi * rinv) + values[offset:] = np.outer(thetavec, phivec).ravel() + return values + + self.norms = [] + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + common_flags = tod.local_common_flags(self.common_flags) + common_flags = (common_flags & self.common_flag_mask) != 0 + nsample = tod.total_samples + obs_templates = {} + focalplane = obs["focalplane"] + if self.focalplane_radius: + radius = np.radians(self.focalplane_radius) + else: + try: + radius = np.radians(focalplane.radius) + except AttributeError: + # Focalplane is just a dictionary + radius = np.radians(obs["fpradius"]) + norms = np.zeros([nsample, self.nmode]) + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + for det in tod.local_dets: + flags = tod.local_flags(det, self.flags) + good = ((flags & self.flag_mask) | common_flags) == 0 + detweight = self.detweights[iobs][det] + det_quat = focalplane[det]["quat"] + x, y, z = qa.rotate(det_quat, ZAXIS) + theta, phi = np.arcsin([x, y]) + obs_templates[det] = evaluate_template(theta, phi, radius) + norms[todslice] += np.outer(good, obs_templates[det] ** 2 * detweight) + self.comm.allreduce(norms) + good = norms != 0 + norms[good] = 1 / norms[good] + self.norms.append(norms.ravel()) + self.templates.append(obs_templates) + self.namplitude += nsample * self.nmode + + self.norms = np.hstack(self.norms) + + return + + @function_timer + def add_to_signal(self, signal, amplitudes): + """signal += F.a""" + poly_amplitudes = amplitudes[self.name] + amplitude_offset = 0 + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + nsample = tod.total_samples + # For each observation, sample indices start from 0 + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + obs_amplitudes = poly_amplitudes[ + amplitude_offset : amplitude_offset + nsample * 
self.nmode + ].reshape([nsample, self.nmode])[todslice] + for det in tod.local_dets: + templates = self.templates[iobs][det] + signal[iobs, det, todslice] += np.sum(obs_amplitudes * templates, 1) + amplitude_offset += nsample * self.nmode + return + + @function_timer + def project_signal(self, signal, amplitudes): + """a += F^T.signal""" + poly_amplitudes = amplitudes[self.name] + amplitude_offset = 0 + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + nsample = tod.total_samples + # For each observation, sample indices start from 0 + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + obs_amplitudes = poly_amplitudes[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode]) + if self.comm is not None: + my_amplitudes = np.zeros_like(obs_amplitudes) + else: + my_amplitudes = obs_amplitudes + for det in tod.local_dets: + templates = self.templates[iobs][det] + my_amplitudes[todslice] += np.outer( + signal[iobs, det, todslice], templates + ) + if self.comm is not None: + self.comm.allreduce(my_amplitudes) + obs_amplitudes += my_amplitudes + amplitude_offset += nsample * self.nmode + return + + def add_prior(self, amplitudes_in, amplitudes_out): + """a' += C_a^{-1}.a""" + if self.correlation_length: + poly_amplitudes_in = amplitudes_in[self.name] + poly_amplitudes_out = amplitudes_out[self.name] + amplitude_offset = 0 + for obs, noisefilter in zip(self.data.obs, self.filters): + tod = obs["tod"] + nsample = tod.total_samples + obs_amplitudes_in = poly_amplitudes_in[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode]) + obs_amplitudes_out = poly_amplitudes_out[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode]) + # import pdb + # import matplotlib.pyplot as plt + # pdb.set_trace() + for mode in range(self.nmode): + scale = self.filter_scale[mode] + obs_amplitudes_out[:, mode] += scipy.signal.convolve( + obs_amplitudes_in[:, mode], + noisefilter * scale, + mode="same", + ) + amplitude_offset += nsample * self.nmode + return + + def apply_precond(self, amplitudes_in, amplitudes_out): + """a' = M^{-1}.a""" + poly_amplitudes_in = amplitudes_in[self.name] + poly_amplitudes_out = amplitudes_out[self.name] + poly_amplitudes_out[:] = poly_amplitudes_in * self.norms + return + + class OffsetTemplate(TODTemplate): - """ This class represents noise fluctuations as a step function - """ + """This class represents noise fluctuations as a step function""" name = "offset" @@ -287,7 +510,7 @@ def __init__( @function_timer def get_filters_and_preconditioners(self): - """ Compute and store the filter and associated preconditioner + """Compute and store the filter and associated preconditioner for every detector and every observation """ log = Logger.get() @@ -427,8 +650,7 @@ def truncate(noisefilter, lim=1e-4): @function_timer def get_steps(self): - """ Divide each interval into offset steps - """ + """Divide each interval into offset steps""" self.offset_templates = [] self.offset_slices = [] # slices in all observations for iobs, obs in enumerate(self.data.obs): @@ -479,7 +701,7 @@ def get_steps(self): @function_timer def _get_sigmasq(self, tod, det, todslice, common_flags, detweight): - """ calculate a rough estimate of the baseline variance + """calculate a rough estimate of the baseline variance for diagonal preconditioner """ flags = tod.local_flags(det, self.flags)[todslice] @@ -607,8 +829,7 @@ def 
apply_precond(self, amplitudes_in, amplitudes_out): class TemplateMatrix(TOASTMatrix): def __init__(self, data, comm, templates=None): - """ Initialize the template matrix with a given baseline length - """ + """Initialize the template matrix with a given baseline length""" self.data = data self.comm = comm self.templates = [] @@ -618,14 +839,12 @@ def __init__(self, data, comm, templates=None): @function_timer def register_template(self, template): - """ Add template to the list of templates to fit - """ + """Add template to the list of templates to fit""" self.templates.append(template) @function_timer def apply(self, amplitudes): - """ Compute and return y = F.a - """ + """Compute and return y = F.a""" new_signal = self.zero_signal() for template in self.templates: template.add_to_signal(new_signal, amplitudes) @@ -633,8 +852,7 @@ def apply(self, amplitudes): @function_timer def apply_transpose(self, signal): - """ Compute and return a = F^T.y - """ + """Compute and return a = F^T.y""" new_amplitudes = self.zero_amplitudes() for template in self.templates: template.project_signal(signal, new_amplitudes) @@ -642,16 +860,14 @@ def apply_transpose(self, signal): @function_timer def add_prior(self, amplitudes, new_amplitudes): - """ Compute a' += C_a^{-1}.a - """ + """Compute a' += C_a^{-1}.a""" for template in self.templates: template.add_prior(amplitudes, new_amplitudes) return @function_timer def apply_precond(self, amplitudes): - """ Compute a' = M^{-1}.a - """ + """Compute a' = M^{-1}.a""" new_amplitudes = self.zero_amplitudes() for template in self.templates: template.apply_precond(amplitudes, new_amplitudes) @@ -659,14 +875,13 @@ def apply_precond(self, amplitudes): @function_timer def zero_amplitudes(self): - """ Return a null amplitudes object - """ + """Return a null amplitudes object""" new_amplitudes = TemplateAmplitudes(self.templates, self.comm) return new_amplitudes @function_timer def zero_signal(self): - """ Return a distributed vector of signal set to zero. + """Return a distributed vector of signal set to zero. The zero signal object will use the same TOD objects but different cache prefix """ @@ -675,7 +890,7 @@ def zero_signal(self): @function_timer def clean_signal(self, signal, amplitudes, in_place=True): - """ Clean the given distributed signal vector by subtracting + """Clean the given distributed signal vector by subtracting the templates multiplied by the given amplitudes. 
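In matrix notation this computes y' = y - F.a, where F is the template matrix whose action is given by apply() above and a holds the fitted template amplitudes.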
""" # DEBUG begin @@ -713,8 +928,7 @@ def clean_signal(self, signal, amplitudes, in_place=True): class TemplateAmplitudes(TOASTVector): - """ TemplateAmplitudes objects hold local and shared template amplitudes - """ + """TemplateAmplitudes objects hold local and shared template amplitudes""" def __init__(self, templates, comm): self.comm = comm @@ -734,17 +948,12 @@ def __str__(self): @function_timer def dot(self, other): - """ Compute the dot product between the two amplitude vectors - """ + """Compute the dot product between the two amplitude vectors""" total = 0 for name, values in self.amplitudes.items(): - dp = np.dot(values, other.amplitudes[name]) comm = self.comms[name] - if comm is not None: - dp = comm.reduce(dp, op=MPI.SUM) - if comm.rank != 0: - dp = 0 - total += dp + if comm is None or comm.rank == 0: + total += np.dot(values, other.amplitudes[name]) if self.comm is not None: total = self.comm.allreduce(total, op=MPI.SUM) return total @@ -768,8 +977,7 @@ def copy(self): @function_timer def __iadd__(self, other): - """ Add the provided amplitudes to this one - """ + """Add the provided amplitudes to this one""" if isinstance(other, TemplateAmplitudes): for name, values in self.amplitudes.items(): values += other.amplitudes[name] @@ -780,8 +988,7 @@ def __iadd__(self, other): @function_timer def __isub__(self, other): - """ Subtract the provided amplitudes from this one - """ + """Subtract the provided amplitudes from this one""" if isinstance(other, TemplateAmplitudes): for name, values in self.amplitudes.items(): values -= other.amplitudes[name] @@ -792,16 +999,14 @@ def __isub__(self, other): @function_timer def __imul__(self, other): - """ Scale the amplitudes - """ + """Scale the amplitudes""" for name, values in self.amplitudes.items(): values *= other return self @function_timer def __itruediv__(self, other): - """ Divide the amplitudes - """ + """Divide the amplitudes""" for name, values in self.amplitudes.items(): values /= other return self @@ -813,13 +1018,13 @@ def __init__(self): class ProjectionMatrix(TOASTMatrix): - """ Projection matrix: - Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} - = I - P B, - where - `P` is the pointing matrix - `N` is the noise matrix and - `B` is the binning operator + """Projection matrix: + Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} + = I - P B, + where + `P` is the pointing matrix + `N` is the noise matrix and + `B` is the binning operator """ def __init__( @@ -842,8 +1047,7 @@ def __init__( @function_timer def apply(self, signal): - """ Return Z.y - """ + """Return Z.y""" self.bin_map(signal.name) new_signal = signal.copy() scanned_signal = Signal(self.data, temporary=True, init_val=0) @@ -870,7 +1074,7 @@ def bin_map(self, name): @function_timer def scan_map(self, name): - scansim = OpSimScan(distmap=self.dist_map, out=name) + scansim = OpSimScan(input_map=self.dist_map, out=name) scansim.exec(self.data) return @@ -887,7 +1091,7 @@ def __init__( @function_timer def apply(self, signal, in_place=False): - """ Multiplies the signal with N^{-1}. + """Multiplies the signal with N^{-1}. Note that the quality flags cause the corresponding diagonal elements of N^{-1} to be zero. @@ -916,7 +1120,7 @@ def __init__(self): class Signal(TOASTVector): - """ Signal class wraps the TOAST data object but represents only + """Signal class wraps the TOAST data object but represents only one cached signal flavor. 
""" @@ -941,8 +1145,7 @@ def __del__(self): @function_timer def apply_flags(self, common_flag_mask, flag_mask): - """ Set the signal at flagged samples to zero - """ + """Set the signal at flagged samples to zero""" flags_apply = OpFlagsApply( name=self.name, common_flag_mask=common_flag_mask, flag_mask=flag_mask ) @@ -951,8 +1154,7 @@ def apply_flags(self, common_flag_mask, flag_mask): @function_timer def apply_weightmap(self, weightmap): - """ Scale the signal with the provided weight map - """ + """Scale the signal with the provided weight map""" if weightmap is None: return scanscale = OpScanScale(distmap=weightmap, name=self.name) @@ -961,7 +1163,7 @@ def apply_weightmap(self, weightmap): @function_timer def copy(self): - """ Return a new Signal object with independent copies of the + """Return a new Signal object with independent copies of the signal vectors. """ new_signal = Signal(self.data, temporary=True) @@ -971,16 +1173,14 @@ def copy(self): @function_timer def __getitem__(self, key): - """ Return a reference to a slice of TOD cache - """ + """Return a reference to a slice of TOD cache""" iobs, det, todslice = key tod = self.data.obs[iobs]["tod"] return tod.local_signal(det, self.name)[todslice] @function_timer def __setitem__(self, key, value): - """ Set slice of TOD cache - """ + """Set slice of TOD cache""" iobs, det, todslice = key tod = self.data.obs[iobs]["tod"] tod.local_signal(det, self.name)[todslice] = value @@ -988,8 +1188,7 @@ def __setitem__(self, key, value): @function_timer def __iadd__(self, other): - """ Add the provided Signal object to this one - """ + """Add the provided Signal object to this one""" for iobs, obs in enumerate(self.data.obs): tod = obs["tod"] for det in tod.local_dets: @@ -1001,8 +1200,7 @@ def __iadd__(self, other): @function_timer def __isub__(self, other): - """ Subtract the provided Signal object from this one - """ + """Subtract the provided Signal object from this one""" for iobs, obs in enumerate(self.data.obs): tod = obs["tod"] for det in tod.local_dets: @@ -1014,8 +1212,7 @@ def __isub__(self, other): @function_timer def __imul__(self, other): - """ Scale the signal - """ + """Scale the signal""" for iobs, obs in enumerate(self.data.obs): tod = obs["tod"] for det in tod.local_dets: @@ -1024,8 +1221,7 @@ def __imul__(self, other): @function_timer def __itruediv__(self, other): - """ Divide the signal - """ + """Divide the signal""" for iobs, obs in enumerate(self.data.obs): tod = obs["tod"] for det in tod.local_dets: @@ -1034,8 +1230,7 @@ def __itruediv__(self, other): class PCGSolver: - """ Solves `x` in A.x = b - """ + """Solves `x` in A.x = b""" def __init__( self, @@ -1069,8 +1264,7 @@ def __init__( @function_timer def apply_lhs(self, amplitudes): - """ Return A.x - """ + """Return A.x""" new_amplitudes = self.templates.apply_transpose( self.noise.apply(self.projection.apply(self.templates.apply(amplitudes))) ) @@ -1079,7 +1273,7 @@ def apply_lhs(self, amplitudes): @function_timer def solve(self): - """ Standard issue PCG solution of A.x = b + """Standard issue PCG solution of A.x = b Returns: x : the least squares solution @@ -1144,7 +1338,7 @@ def solve(self): beta *= sqsum proposal *= beta proposal += precond_residual - log.info("{} : Solution: {}".format(self.rank, guess)) # DEBUG + # log.info("{} : Solution: {}".format(self.rank, guess)) # DEBUG return guess @@ -1177,6 +1371,8 @@ def __init__( flag_mask=1, intervals="intervals", subharmonic_order=None, + fourier2D_order=None, + fourier2D_subharmonics=False, iter_min=3, 
iter_max=100, use_noise_prior=True, @@ -1206,6 +1402,8 @@ def __init__( self.flag_mask = flag_mask self.intervals = intervals self.subharmonic_order = subharmonic_order + self.fourier2D_order = fourier2D_order + self.fourier2D_subharmonics = fourier2D_subharmonics self.iter_min = iter_min self.iter_max = iter_max self.use_noise_prior = use_noise_prior @@ -1251,6 +1449,7 @@ def report_timing(self): [ ("OffsetTemplate.project_signal", None), ("SubharmonicTemplate.project_signal", None), + ("fourier2DTemplate.project_signal", None), ] ), ), @@ -1311,6 +1510,7 @@ def report_timing(self): [ ("OffsetTemplate.add_to_signal", None), ("SubharmonicTemplate.add_to_signal", None), + ("fourier2DTemplate.add_to_signal", None), ] ), ), @@ -1385,11 +1585,12 @@ def get_templatematrix(self, data): log = Logger.get() templatelist = [] if self.baseline_length is not None: - log.info( - "Initializing offset template, step_length = {}".format( - self.baseline_length + if self.rank == 0: + log.info( + "Initializing offset template, step_length = {}".format( + self.baseline_length + ) ) - ) templatelist.append( OffsetTemplate( data, @@ -1403,11 +1604,12 @@ def get_templatematrix(self, data): ) ) if self.subharmonic_order is not None: - log.info( - "Initializing subharmonic template, order = {}".format( - self.subharmonic_order + if self.rank == 0: + log.info( + "Initializing subharmonic template, order = {}".format( + self.subharmonic_order + ) ) - ) templatelist.append( SubharmonicTemplate( data, @@ -1418,6 +1620,24 @@ def get_templatematrix(self, data): flag_mask=(self.flag_mask | self.mask_bit), ) ) + if self.fourier2D_order is not None: + log.info( + "Initializing fourier2D template, order = {}, subharmonics = {}".format( + self.fourier2D_order, + self.fourier2D_subharmonics, + ) + ) + templatelist.append( + Fourier2DTemplate( + data, + self.detweights, + order=self.fourier2D_order, + fit_subharmonics=self.fourier2D_subharmonics, + intervals=self.intervals, + common_flag_mask=(self.common_flag_mask | self.gap_bit), + flag_mask=(self.flag_mask | self.mask_bit), + ) + ) if len(templatelist) == 0: if self.rank == 0: log.info("No templates to fit, no destriping done.") @@ -1447,8 +1667,7 @@ def get_solver(self, data, templates, noise, projection, signal): @function_timer def load_mask(self, data): - """ Load processing mask and generate appropriate flag bits - """ + """Load processing mask and generate appropriate flag bits""" if self.maskfile is None: return log = Logger.get() @@ -1473,8 +1692,7 @@ def load_mask(self, data): @function_timer def load_weightmap(self, data): - """ Load weight map - """ + """Load weight map""" if self.weightmapfile is None: return log = Logger.get() @@ -1538,8 +1756,7 @@ def exec(self, data, comm=None): @function_timer def flag_gaps(self, data): - """ Add flag bits between the intervals - """ + """Add flag bits between the intervals""" timer = Timer() timer.start() flag_gaps = OpFlagGaps(common_flag_value=self.gap_bit, intervals=self.intervals) @@ -1584,8 +1801,7 @@ def bin_map(self, data, suffix): @function_timer def get_detweights(self, data): - """ Each observation will have its own detweight dictionary - """ + """Each observation will have its own detweight dictionary""" timer = Timer() timer.start() self.detweights = [] diff --git a/src/toast/future_ops/memory_counter.py b/src/toast/future_ops/memory_counter.py new file mode 100644 index 000000000..10458782a --- /dev/null +++ b/src/toast/future_ops/memory_counter.py @@ -0,0 +1,65 @@ +# Copyright (c) 2015-2020 by the parties 
listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +import traitlets + +from ..utils import Environment, Logger + +from ..timing import function_timer, Timer + +from ..noise_sim import AnalyticNoise + +from ..traits import trait_docs, Int, Bool + +from ..operator import Operator + + +@trait_docs +class MemoryCounter(Operator): + """Compute total memory used by Observations in a Data object. + + Every process group iterates over their observations and sums the total memory used + by detector and shared data. Metadata and interval lists are assumed to be + negligible and are not counted. + + """ + + # Class traits + + API = traitlets.Int(0, help="Internal interface version for this operator") + + silent = Bool( + False, + help="If True, return the memory used but do not log the result", + ) + + def __init__(self, **kwargs): + self.total_bytes = 0 + super().__init__(**kwargs) + + def _exec(self, data, detectors=None, **kwargs): + for ob in data.obs: + self.total_bytes += ob.memory_use() + return + + def _finalize(self, data, **kwargs): + log = Logger.get() + if not self.silent: + if data.comm.world_rank == 0: + msg = "Total timestream memory use = {:0.2f} GB".format( + self.total_bytes / 1024 ** 3 + ) + log.info(msg) + return self.total_bytes + + def _requires(self): + return dict() + + def _provides(self): + return dict() + + def _accelerators(self): + return list() diff --git a/src/toast/observation.py b/src/toast/observation.py index 69ddedb39..1090b99e2 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -528,6 +528,29 @@ def __repr__(self): val += "\n>" return val + def memory_use(self): + """Estimate the memory used by shared and detector data. + + This sums the memory used by the shared and detdata attributes and returns the + total on all processes. This function is blocking on the observation + communicator. + + Returns: + (int): The number of bytes of memory used by timestream data. + + """ + local_mem = self.detdata.memory_use() + # Sum the aggregate local memory + total = None + if self.comm is None: + total = local_mem + else: + total = self.comm.allreduce(local_mem, op=MPI.SUM) + # The total shared memory use is already returned on every process by this + # next function. 
+ total += self.shared.memory_use() + return total + # Redistribution def redistribute(self, process_rows): diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py index 32af526bf..d6e08adb3 100644 --- a/src/toast/observation_data.py +++ b/src/toast/observation_data.py @@ -83,27 +83,39 @@ def __init__(self, detectors, shape, dtype): # construct a new dtype in case the parameter given is shortcut string ttype = np.dtype(dtype) + self.itemsize = 0 + self._storage_class = None if ttype.char == "b": self._storage_class = AlignedI8 + self.itemsize = 1 elif ttype.char == "B": self._storage_class = AlignedU8 + self.itemsize = 1 elif ttype.char == "h": self._storage_class = AlignedI16 + self.itemsize = 2 elif ttype.char == "H": self._storage_class = AlignedU16 + self.itemsize = 2 elif ttype.char == "i": self._storage_class = AlignedI32 + self.itemsize = 4 elif ttype.char == "I": self._storage_class = AlignedU32 + self.itemsize = 4 elif (ttype.char == "q") or (ttype.char == "l"): self._storage_class = AlignedI64 + self.itemsize = 8 elif (ttype.char == "Q") or (ttype.char == "L"): self._storage_class = AlignedU64 + self.itemsize = 8 elif ttype.char == "f": self._storage_class = AlignedF32 + self.itemsize = 4 elif ttype.char == "d": self._storage_class = AlignedF64 + self.itemsize = 8 elif ttype.char == "F": raise NotImplementedError("No support yet for complex numbers") elif ttype.char == "D": @@ -128,6 +140,7 @@ def __init__(self, detectors, shape, dtype): self._shape = tuple(shp) self._raw = self._storage_class.zeros(self._flatshape) self._data = self._raw.array().reshape(self._shape) + self._memsize = self.itemsize * self._flatshape @property def detectors(self): @@ -148,6 +161,9 @@ def shape(self): def detector_shape(self): return tuple(self._shape[1:]) + def memory_use(self): + return self._memsize + @property def data(self): return self._data @@ -478,6 +494,12 @@ def __setitem__(self, key, value): msg = "Assignment of detector data from an array only supports full size or single detector" raise ValueError(msg) + def memory_use(self): + bytes = 0 + for k in self._internal.keys(): + bytes += self._internal[k].memory_use() + return bytes + def __iter__(self): return iter(self._internal) @@ -682,6 +704,26 @@ def __del__(self): if hasattr(self, "_internal"): self.clear() + def memory_use(self): + bytes = 0 + for k in self._internal.keys(): + shared_bytes = 0 + node_bytes = 0 + node_rank = 0 + if self._internal[k].nodecomm is not None: + node_rank = self._internal[k].nodecomm.rank + if node_rank == 0: + node_elems = 1 + for d in self._internal[k].shape: + node_elems *= d + node_bytes += node_elems * self._internal[k].data.itemsize + if self._internal[k].comm is None: + shared_bytes = node_bytes + else: + shared_bytes = self._internal[k].comm.allreduce(node_bytes, op=MPI.SUM) + bytes += shared_bytes + return bytes + def __repr__(self): val = " Date: Sat, 31 Oct 2020 10:25:19 -0700 Subject: [PATCH 017/690] Rename unit test files to match the name of the corresponding package source file. 
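Before the renames, a quick hand check of the memory accounting added in the previous patch; the field layout here is hypothetical::

    import numpy as np

    # One float64 detdata field with 4 detectors, 100000 samples and 3
    # values per sample is a single flat aligned buffer, so
    # DetDataMgr.memory_use() reports itemsize * flatshape bytes:
    n_det, n_samp, nnz = 4, 100000, 3
    nbytes = n_det * n_samp * nnz * np.dtype(np.float64).itemsize
    print(nbytes)  # 9600000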
--- src/toast/tests/{cov.py => covariance.py} | 0 src/toast/tests/{ops_memorycounter.py => ops_memory_counter.py} | 0 src/toast/tests/{ops_simnoise.py => ops_sim_tod_noise.py} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename src/toast/tests/{cov.py => covariance.py} (100%) rename src/toast/tests/{ops_memorycounter.py => ops_memory_counter.py} (100%) rename src/toast/tests/{ops_simnoise.py => ops_sim_tod_noise.py} (100%) diff --git a/src/toast/tests/cov.py b/src/toast/tests/covariance.py similarity index 100% rename from src/toast/tests/cov.py rename to src/toast/tests/covariance.py diff --git a/src/toast/tests/ops_memorycounter.py b/src/toast/tests/ops_memory_counter.py similarity index 100% rename from src/toast/tests/ops_memorycounter.py rename to src/toast/tests/ops_memory_counter.py diff --git a/src/toast/tests/ops_simnoise.py b/src/toast/tests/ops_sim_tod_noise.py similarity index 100% rename from src/toast/tests/ops_simnoise.py rename to src/toast/tests/ops_sim_tod_noise.py From 9f8034c6f445e31f79f4a73c86c5befeb79f3d74 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Sat, 31 Oct 2020 16:06:31 -0700 Subject: [PATCH 018/690] Consistent naming of obs / ob. Port memory counter operator and unit tests. Add clear() method to Observation class. --- src/toast/future_ops/CMakeLists.txt | 1 + src/toast/future_ops/__init__.py | 2 + src/toast/future_ops/noise_model.py | 8 +- src/toast/future_ops/pointing_healpix.py | 38 ++++----- src/toast/future_ops/sim_hwp.py | 92 ++++++++++++-------- src/toast/future_ops/sim_satellite.py | 104 +++++++++++------------ src/toast/future_ops/sim_tod_noise.py | 30 +++---- src/toast/observation.py | 9 ++ src/toast/tests/CMakeLists.txt | 6 +- src/toast/tests/ops_memory_counter.py | 102 ++++++---------------- src/toast/tests/runner.py | 5 +- 11 files changed, 191 insertions(+), 206 deletions(-) diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/future_ops/CMakeLists.txt index 67f35ac76..6f0a0e113 100644 --- a/src/toast/future_ops/CMakeLists.txt +++ b/src/toast/future_ops/CMakeLists.txt @@ -4,6 +4,7 @@ install(FILES __init__.py pipeline.py + memory_counter.py sim_hwp.py sim_tod_noise.py sim_satellite.py diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py index db940fd43..0b9b9e014 100644 --- a/src/toast/future_ops/__init__.py +++ b/src/toast/future_ops/__init__.py @@ -4,6 +4,8 @@ # import functions into our public API +from .memory_counter import MemoryCounter + from .pipeline import Pipeline from .sim_satellite import SimSatellite diff --git a/src/toast/future_ops/noise_model.py b/src/toast/future_ops/noise_model.py index 7a49d873f..da1c83853 100644 --- a/src/toast/future_ops/noise_model.py +++ b/src/toast/future_ops/noise_model.py @@ -43,15 +43,15 @@ def _exec(self, data, detectors=None, **kwargs): comm = data.comm - for obs in data.obs: + for ob in data.obs: # Get the detectors we are using for this observation - dets = obs.select_local_detectors(detectors) + dets = ob.select_local_detectors(detectors) if len(dets) == 0: # Nothing to do for this observation continue # The focalplane for this observation - focalplane = obs.telescope.focalplane + focalplane = ob.telescope.focalplane # Every process has a copy of the focalplane, and every process may want # the noise model for all detectors (not just our local detectors). 
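The next hunk passes per-detector rate, fmin, fknee, alpha and NET values to AnalyticNoise. As a hedged illustration, these parameters conventionally describe a 1/f power spectral density of the following form; the exact expression inside AnalyticNoise may differ::

    import numpy as np

    def one_over_f_psd(f, net, fknee, fmin, alpha):
        # Flat NET**2 level well above fknee, rising as f**-alpha below
        # it, with fmin regularizing the divergence as f -> 0.
        return net ** 2 * (f ** alpha + fknee ** alpha) / (f ** alpha + fmin ** alpha)

    freq = np.logspace(-5, 2, 200)
    psd = one_over_f_psd(freq, net=1.0e-4, fknee=0.1, fmin=1.0e-5, alpha=1.0)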
@@ -73,7 +73,7 @@ def _exec(self, data, detectors=None, **kwargs): rate=rates, fmin=fmin, detectors=dets, fknee=fknee, alpha=alpha, NET=NET ) - obs[self.noise_model] = noise + ob[self.noise_model] = noise return diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/future_ops/pointing_healpix.py index 0ecb28d6f..0ecf4ce9b 100644 --- a/src/toast/future_ops/pointing_healpix.py +++ b/src/toast/future_ops/pointing_healpix.py @@ -162,9 +162,9 @@ def _exec(self, data, detectors=None, **kwargs): # overhead from temporary arrays. tod_buffer_length = env.tod_buffer_length() - for obs in data.obs: + for ob in data.obs: # Get the detectors we are using for this observation - dets = obs.select_local_detectors(detectors) + dets = ob.select_local_detectors(detectors) if len(dets) == 0: # Nothing to do for this observation continue @@ -172,43 +172,43 @@ def _exec(self, data, detectors=None, **kwargs): # Get the flags if needed flags = None if self.flags is not None: - flags = np.array(obs.shared[self.flags]) + flags = np.array(ob.shared[self.flags]) flags &= self.flag_mask # HWP angle if needed hwp_angle = None if self.hwp_angle is not None: - hwp_angle = obs.shared[self.hwp_angle] + hwp_angle = ob.shared[self.hwp_angle] # Boresight pointing quaternions - boresight = obs.shared[self.boresight] + boresight = ob.shared[self.boresight] # Focalplane for this observation - focalplane = obs.telescope.focalplane + focalplane = ob.telescope.focalplane # Optional calibration cal = None if self.cal is not None: - cal = obs[self.cal] + cal = ob[self.cal] # Create output data for the pixels, weights and optionally the # detector quaternions. if self.single_precision: - obs.detdata.create( + ob.detdata.create( self.pixels, detshape=(), dtype=np.int32, detectors=dets ) - obs.detdata.create( + ob.detdata.create( self.weights, detshape=(self._nnz,), dtype=np.float32, detectors=dets, ) else: - obs.detdata.create( + ob.detdata.create( self.pixels, detshape=(), dtype=np.int64, detectors=dets ) - obs.detdata.create( + ob.detdata.create( self.weights, detshape=(self._nnz,), dtype=np.float64, @@ -216,7 +216,7 @@ def _exec(self, data, detectors=None, **kwargs): ) if self.quats is not None: - obs.detdata.create( + ob.detdata.create( self.quats, detshape=(4,), dtype=np.float64, @@ -237,7 +237,7 @@ def _exec(self, data, detectors=None, **kwargs): # Timestream of detector quaternions quats = qa.mult(boresight, detquat) if self.quats is not None: - obs.detdata[self.quats][det, :] = quats + ob.detdata[self.quats][det, :] = quats # Cal for this detector dcal = 1.0 @@ -247,9 +247,9 @@ def _exec(self, data, detectors=None, **kwargs): # Buffered pointing calculation buf_off = 0 buf_n = tod_buffer_length - while buf_off < obs.n_local_samples: - if buf_off + buf_n > obs.n_local_samples: - buf_n = obs.n_local_samples - buf_off + while buf_off < ob.n_local_samples: + if buf_off + buf_n > ob.n_local_samples: + buf_n = ob.n_local_samples - buf_off bslice = slice(buf_off, buf_off + buf_n) # This buffer of detector quaternions @@ -266,8 +266,8 @@ def _exec(self, data, detectors=None, **kwargs): fslice = flags[bslice].reshape(-1) # Pixel and weight buffers - pxslice = obs.detdata[self.pixels][det, bslice].reshape(-1) - wtslice = obs.detdata[self.weights][det, bslice].reshape(-1) + pxslice = ob.detdata[self.pixels][det, bslice].reshape(-1) + wtslice = ob.detdata[self.weights][det, bslice].reshape(-1) pbuf = pxslice wbuf = wtslice @@ -296,7 +296,7 @@ def _exec(self, data, detectors=None, **kwargs): if self.create_dist is not None: 
self._local_submaps[ - obs.detdata["pixels"][det] // self._n_pix_submap + ob.detdata["pixels"][det] // self._n_pix_submap ] = True return diff --git a/src/toast/future_ops/sim_hwp.py b/src/toast/future_ops/sim_hwp.py index 3086c84a8..88f187b06 100644 --- a/src/toast/future_ops/sim_hwp.py +++ b/src/toast/future_ops/sim_hwp.py @@ -4,45 +4,61 @@ import numpy as np +from astropy import units as u + from ..timing import function_timer, Timer @function_timer -def simulate_hwp_angle( - obs, obs_key, hwp_start_s, hwp_rpm, hwp_step_deg, hwp_step_time_m +def simulate_hwp_response( + ob, + ob_angle_key=None, + ob_mueller_key=None, + hwp_start=None, + hwp_rpm=None, + hwp_step=None, + hwp_step_time=None, ): """Simulate and store the HWP angle for one observation. Args: - obs (Observation): The observation to populate. - obs_key (str): The observation key for the HWP angle. - hwp_start_s (float): The mission starting time in seconds of the HWP rotation. + ob (Observation): The observation to populate. + ob_time_key (str): The observation shared key for timestamps. + ob_angle_key (str): (optional) The output observation key for the HWP angle. + ob_mueller_key (str): (optional) The output observation key for the full + Mueller matrix. + hwp_start (Quantity): The mission starting time of the HWP rotation. hwp_rpm (float): The HWP rotation rate in Revolutions Per Minute. - hwp_step_deg (float): The HWP step size in degrees. - hwp_step_time_m (float): The time in minutes between steps. + hwp_step (Quantity): The HWP step size. + hwp_step_time (Quantity): The time between steps. Returns: None """ - if hwp_rpm is None and hwp_step_deg is None: + if ob_mueller_key is not None: + raise NotImplementedError("Mueller matrix not yet implemented") + + if hwp_rpm is None and hwp_step is None: # Nothing to do! 
return - if (hwp_rpm is not None) and (hwp_step_deg is not None): + if (hwp_rpm is not None) and (hwp_step is not None): raise RuntimeError("choose either continuously rotating or stepped HWP") - if hwp_step_deg is not None and hwp_step_time_m is None: + if hwp_step is not None and hwp_step_time is None: raise RuntimeError("for a stepped HWP, you must specify the time between steps") + hwp_start_s = hwp_start.to_value(u.second) + # compute effective sample rate - times = obs.times + times = ob.shared[ob_time_key] dt = np.mean(times[1:-1] - times[0:-2]) rate = 1.0 / dt hwp_rate = None - hwp_step = None - hwp_step_time = None + hwp_step_rad = None + hwp_step_time_s = None if hwp_rpm is not None: # convert to radians / second @@ -50,49 +66,53 @@ def simulate_hwp_angle( if hwp_step_deg is not None: # convert to radians and seconds - hwp_step = hwp_step_deg * np.pi / 180.0 - hwp_step_time = hwp_step_time_m * 60.0 - - first_samp, n_samp = obs.local_samples - - obs.shared.create( - obs_key, shape=(n_samp,), dtype=np.float64, comm=obs.grid_comm_col - ) + hwp_step_rad = hwp_set.to_value(u.radian) + hwp_step_time_s = hwp_step_time.to_value(u.second) # Only the first process in each grid column simulates the common HWP angle start_sample = int(hwp_start_s * rate) + first_sample = ob.local_index_offset + n_sample = ob.n_local_samples + hwp_angle = None + hwp_mueller = None - if obs.grid_comm_col is None or obs.grid_comm_col.rank == 0: + if ob.grid_comm_col is None or ob.grid_comm_col.rank == 0: if hwp_rate is not None: # continuous HWP # HWP increment per sample is: # (hwprate / samplerate) hwpincr = hwp_rate / rate - startang = np.fmod((start_sample + first_samp) * hwpincr, 2 * np.pi) - hwp_angle = hwpincr * np.arange(n_samp, dtype=np.float64) + startang = np.fmod((start_sample + first_sample) * hwpincr, 2 * np.pi) + hwp_angle = hwpincr * np.arange(n_sample, dtype=np.float64) hwp_angle += startang elif hwp_step is not None: # stepped HWP - hwp_angle = np.ones(n_samp, dtype=np.float64) - stepsamples = int(hwp_step_time * rate) - wholesteps = int((start_sample + first_samp) / stepsamples) - remsamples = (start_sample + first_samp) - wholesteps * stepsamples - curang = np.fmod(wholesteps * hwp_step, 2 * np.pi) + hwp_angle = np.ones(n_sample, dtype=np.float64) + stepsamples = int(hwp_step_time_s * rate) + wholesteps = int((start_sample + first_sample) / stepsamples) + remsamples = (start_sample + first_sample) - wholesteps * stepsamples + curang = np.fmod(wholesteps * hwp_step_rad, 2 * np.pi) curoff = 0 fill = remsamples - while curoff < n_samp: - if curoff + fill > n_samp: - fill = n_samp - curoff + while curoff < n_sample: + if curoff + fill > n_sample: + fill = n_sample - curoff hwp_angle[curoff:fill] *= curang curang += hwp_step curoff += fill fill = stepsamples - if hwp_angle is not None: - # Choose the HWP angle between [0, 2*pi) - hwp_angle %= 2 * np.pi + # Choose the HWP angle between [0, 2*pi) + hwp_angle %= 2 * np.pi + + # Create a Mueller matrix if we will be writing that... 
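# Note (added for reference, not in the original patch): this comment is a
# stub in this commit -- requesting a Mueller matrix via ob_mueller_key
# raises NotImplementedError at the top of the function, so only the scalar
# HWP angle is ever produced here.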
- obs.shared[obs_key].set(hwp_angle, offset=(0,), fromrank=0) + # Store the angle and / or the Mueller matrix + if ob_angle_key is not None: + ob.shared.create( + ob_angle_key, shape=(n_sample,), dtype=np.float64, comm=ob.grid_comm_col + ) + ob.shared[ob_angle_key].set(hwp_angle, offset=(0,), fromrank=0) return diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py index dbe8ea795..ba638f881 100644 --- a/src/toast/future_ops/sim_satellite.py +++ b/src/toast/future_ops/sim_satellite.py @@ -34,7 +34,7 @@ from ..healpix import ang2vec -from .sim_hwp import simulate_hwp_angle +from .sim_hwp import simulate_hwp_response @function_timer @@ -99,8 +99,8 @@ def slew_precession_axis(first_samp=0, n_samp=None, sample_rate=None, deg_day=No @function_timer def satellite_scanning( - obs, - obs_key, + ob, + ob_key, sample_offset=0, q_prec=None, spin_period_m=1.0, @@ -116,8 +116,8 @@ def satellite_scanning( boresight. Args: - obs (Observation): The observation to populate. - obs_key (str): The observation shared key to create. + ob (Observation): The observation to populate. + ob_key (str): The observation shared key to create. sample_offset (int): The global offset in samples from the start of the mission. q_prec (ndarray): If None (the default), then the @@ -141,20 +141,20 @@ def satellite_scanning( env = Environment.get() tod_buffer_length = env.tod_buffer_length() - first_samp = obs.local_index_offset - n_samp = obs.n_local_samples - obs.shared.create(obs_key, shape=(n_samp, 4), dtype=np.float64, comm=obs.comm_col) + first_samp = ob.local_index_offset + n_samp = ob.n_local_samples + ob.shared.create(ob_key, shape=(n_samp, 4), dtype=np.float64, comm=ob.comm_col) # Temporary buffer boresight = None # Only the first process in each grid column simulates the shared boresight data - if obs.comm_col_rank == 0: + if ob.comm_col_rank == 0: boresight = np.zeros((n_samp, 4), dtype=np.float64) # Compute effective sample rate - (sample_rate, dt, _, _, _) = rate_from_times(obs.shared["times"]) + (sample_rate, dt, _, _, _) = rate_from_times(ob.shared["times"]) spin_rate = None if spin_period_m > 0.0: @@ -253,7 +253,7 @@ def satellite_scanning( ) buf_off += buf_n - obs.shared[obs_key].set(boresight, offset=(0, 0), fromrank=0) + ob.shared[ob_key].set(boresight, offset=(0, 0), fromrank=0) return @@ -397,11 +397,11 @@ def _exec(self, data, detectors=None, **kwargs): group_firstobs = groupdist[comm.group][0] group_numobs = groupdist[comm.group][1] - for ob in range(group_firstobs, group_firstobs + group_numobs): - obname = "science_{:05d}".format(ob) - obs = Observation( + for obindx in range(group_firstobs, group_firstobs + group_numobs): + obname = "science_{:05d}".format(obindx) + ob = Observation( self.telescope, - obsrange[ob].samples, + obsrange[obindx].samples, name=obname, UID=name_UID(obname), comm=comm.comm_group, @@ -410,47 +410,47 @@ def _exec(self, data, detectors=None, **kwargs): # Create shared objects for timestamps, common flags, position, # and velocity. 
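# Note (added for reference, not in the original patch): the pattern used
# below is that every process participates in the collective
# shared.create() call, while only rank 0 of each process-grid column
# generates values and publishes them with
# ob.shared[key].set(data, fromrank=0) further down, so each sample range
# is simulated exactly once.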
- obs.shared.create( + ob.shared.create( self.times, - shape=(obs.n_local_samples,), + shape=(ob.n_local_samples,), dtype=np.float64, - comm=obs.comm_col, + comm=ob.comm_col, ) - obs.shared.create( + ob.shared.create( self.flags, - shape=(obs.n_local_samples,), + shape=(ob.n_local_samples,), dtype=np.uint8, - comm=obs.comm_col, + comm=ob.comm_col, ) - obs.shared.create( + ob.shared.create( self.position, - shape=(obs.n_local_samples, 3), + shape=(ob.n_local_samples, 3), dtype=np.float64, - comm=obs.comm_col, + comm=ob.comm_col, ) - obs.shared.create( + ob.shared.create( self.velocity, - shape=(obs.n_local_samples, 3), + shape=(ob.n_local_samples, 3), dtype=np.float64, - comm=obs.comm_col, + comm=ob.comm_col, ) # Rank zero of each grid column creates the data stamps = None position = None velocity = None - if obs.comm_col_rank == 0: - start_abs = obs.local_index_offset + obsrange[ob].first + if ob.comm_col_rank == 0: + start_abs = ob.local_index_offset + obsrange[obindx].first start_time = ( - obsrange[ob].start + float(start_abs) / focalplane.sample_rate + obsrange[obindx].start + float(start_abs) / focalplane.sample_rate ) stop_time = ( - start_time + float(obs.n_local_samples) / focalplane.sample_rate + start_time + float(ob.n_local_samples) / focalplane.sample_rate ) stamps = np.linspace( start_time, stop_time, - num=obs.n_local_samples, + num=ob.n_local_samples, endpoint=False, dtype=np.float64, ) @@ -462,14 +462,14 @@ def _exec(self, data, detectors=None, **kwargs): (start_time - self.start_time.to_value(u.second)) * self._radpersec, 2.0 * np.pi, ) - ang = radinc * np.arange(obs.n_local_samples, dtype=np.float64) + rad + ang = radinc * np.arange(ob.n_local_samples, dtype=np.float64) + rad x = self._AU * np.cos(ang) y = self._AU * np.sin(ang) z = np.zeros_like(x) position = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3)) ang = ( - radinc * np.arange(obs.n_local_samples, dtype=np.float64) + radinc * np.arange(ob.n_local_samples, dtype=np.float64) + rad + (0.5 * np.pi) ) @@ -478,25 +478,25 @@ def _exec(self, data, detectors=None, **kwargs): z = np.zeros_like(x) velocity = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3)) - obs.shared[self.times].set(stamps, offset=(0,), fromrank=0) - obs.shared[self.position].set(position, offset=(0, 0), fromrank=0) - obs.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0) + ob.shared[self.times].set(stamps, offset=(0,), fromrank=0) + ob.shared[self.position].set(position, offset=(0, 0), fromrank=0) + ob.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0) # Create boresight pointing - start_abs = obs.local_index_offset + obsrange[ob].first + start_abs = ob.local_index_offset + obsrange[obindx].first degday = 360.0 / 365.25 q_prec = None - if obs.comm_col_rank == 0: + if ob.comm_col_rank == 0: q_prec = slew_precession_axis( first_samp=start_abs, - n_samp=obs.n_local_samples, + n_samp=ob.n_local_samples, sample_rate=focalplane.sample_rate, deg_day=degday, ) satellite_scanning( - obs, + ob, self.boresight, sample_offset=start_abs, q_prec=q_prec, @@ -508,21 +508,17 @@ def _exec(self, data, detectors=None, **kwargs): # Set HWP angle - hwp_step_deg = None - hwp_step_time_m = None - if self.hwp_step is not None: - hwp_step_deg = self.hwp_step.to_value(u.degree) - hwp_step_time_m = self.hwp_step_time.to_value(u.minute) - simulate_hwp_angle( - obs, - self.hwp_angle, - obsrange[ob].start, - self.hwp_rpm, - hwp_step_deg, - hwp_step_time_m, + simulate_hwp_response( + ob, + ob_angle_key=self.hwp_angle, + ob_mueller_key=None, + 
hwp_start=obsrange[obindx].start * u.second, + hwp_rpm=self.hwp_rpm, + hwp_step=self.hwp_step, + hwp_step_time=self.hwp_step_time, ) - data.obs.append(obs) + data.obs.append(ob) return diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/future_ops/sim_tod_noise.py index b7771eb95..b213f1742 100644 --- a/src/toast/future_ops/sim_tod_noise.py +++ b/src/toast/future_ops/sim_tod_noise.py @@ -235,38 +235,38 @@ def __init__(self, **kwargs): @function_timer def _exec(self, data, detectors=None, **kwargs): log = Logger.get() - for obs in data.obs: + for ob in data.obs: # Get the detectors we are using for this observation - dets = obs.select_local_detectors(detectors) + dets = ob.select_local_detectors(detectors) if len(dets) == 0: # Nothing to do for this observation continue # Unique observation ID - obsindx = obs.UID + obsindx = ob.UID # FIXME: we should unify naming of UID / id. - telescope = obs.telescope.id + telescope = ob.telescope.id # FIXME: Every observation has a set of timestamps. This global # offset is specified separately so that opens the possibility for # inconsistency. Perhaps the global_offset should be made a property # of the Observation class? global_offset = 0 - if "global_offset" in obs: - global_offset = obs["global_offset"] + if "global_offset" in ob: + global_offset = ob["global_offset"] - if self.noise_model not in obs: + if self.noise_model not in ob: msg = "Observation does not contain noise model key '{}'".format( self.noise_model ) log.error(msg) raise KeyError(msg) - nse = obs[self.noise_model] + nse = ob[self.noise_model] # Eventually we'll redistribute, to allow long correlations... - if obs.comm_row_size != 1: + if ob.comm_row_size != 1: msg = "Noise simulation for process grids with multiple ranks in the sample direction not implemented" log.error(msg) raise NotImplementedError(msg) @@ -275,11 +275,11 @@ def _exec(self, data, detectors=None, **kwargs): # detectors within the observation. # Create output if it does not exist - if self.out not in obs: - obs.detdata.create(self.out, detshape=(), dtype=np.float64) + if self.out not in ob: + ob.detdata.create(self.out, detshape=(), dtype=np.float64) (rate, dt, dt_min, dt_max, dt_std) = rate_from_times( - obs.shared[self.times].data + ob.shared[self.times].data ) for key in nse.keys: @@ -298,8 +298,8 @@ def _exec(self, data, detectors=None, **kwargs): obsindx, nse.index(key), rate, - obs.local_index_offset + global_offset, - obs.n_local_samples, + ob.local_index_offset + global_offset, + ob.n_local_samples, self._oversample, nse.freq(key), nse.psd(key), @@ -310,7 +310,7 @@ def _exec(self, data, detectors=None, **kwargs): weight = nse.weight(det, key) if weight == 0: continue - obs.detdata[self.out][det] += weight * nsedata + ob.detdata[self.out][det] += weight * nsedata # Release the work space allocated in the FFT plan store. 
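# Note (added for reference, not in the original patch): the accumulation
# above applies the noise mixing matrix.  For detector d the simulated
# stream is
#
#     tod[d] += sum over keys k of  nse.weight(d, k) * stream[k]
#
# where stream[k] is the realization drawn from PSD k; keys with zero
# weight are skipped.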
# diff --git a/src/toast/observation.py b/src/toast/observation.py index 1090b99e2..9770538be 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -299,6 +299,15 @@ def __init__( self.intervals = IntervalMgr(self._comm, self.dist.comm_row, self.dist.comm_col) + # Fully clear the observation + + def clear(self): + self.view.clear() + self.intervals.clear() + self.detdata.clear() + self.shared.clear() + self._internal.clear() + # General properties @property diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 088e31b7d..62208be43 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -19,14 +19,14 @@ install(FILES pixels.py ops_sim_satellite.py ops_applygain.py - ops_simnoise.py - cov.py + ops_sim_tod_noise.py + covariance.py ops_pmat.py ops_dipole.py ops_groundfilter.py sim_focalplane.py ops_polyfilter.py - ops_memorycounter.py + ops_memory_counter.py ops_gainscrambler.py psd_math.py ops_madam.py diff --git a/src/toast/tests/ops_memory_counter.py b/src/toast/tests/ops_memory_counter.py index ef776ec25..260d41785 100644 --- a/src/toast/tests/ops_memory_counter.py +++ b/src/toast/tests/ops_memory_counter.py @@ -5,21 +5,18 @@ from .mpi import MPITestCase import os +import copy import numpy as np -from ..tod import OpMemoryCounter, AnalyticNoise, OpSimNoise -from ..todmap import TODHpixSpiral +from .. import future_ops as ops -from ._helpers import ( - create_outdir, - create_distdata, - boresight_focalplane, - uniform_chunks, -) +from .. import config as tc +from ._helpers import create_outdir, create_distdata, create_telescope -class OpMemoryCounterTest(MPITestCase): + +class MemoryCounterTest(MPITestCase): def setUp(self): fixture_name = os.path.splitext(os.path.basename(__file__))[0] self.outdir = create_outdir(self.comm, fixture_name) @@ -27,77 +24,34 @@ def setUp(self): # One observation per group self.data = create_distdata(self.comm, obs_per_group=1) - # Detector properties. We place one detector per process at the - # boresight with evenly spaced polarization orientations. - - self.ndet = 4 - self.NET = 5.0 - self.rate = 20.0 - - # Create detectors with default properties. 
- ( - dnames, - dquat, - depsilon, - drate, - dnet, - dfmin, - dfknee, - dalpha, - ) = boresight_focalplane(self.ndet, samplerate=self.rate, net=self.NET) - - # Total samples per observation - self.totsamp = 100000 - - # Chunks - chunks = uniform_chunks(self.totsamp, nchunk=self.data.comm.group_size) - - # Populate the observations - - tod = TODHpixSpiral( - self.data.comm.comm_group, - dquat, - self.totsamp, - detranks=1, - firsttime=0.0, - rate=self.rate, - nside=512, - sampsizes=chunks, - ) - - # construct an analytic noise model - - nse = AnalyticNoise( - rate=drate, - fmin=dfmin, - detectors=dnames, - fknee=dfknee, - alpha=dalpha, - NET=dnet, - ) - - self.data.obs[0]["tod"] = tod - self.data.obs[0]["noise"] = nse + # Make a fake telescope for every observation - def test_counter(self): - ob = self.data.obs[0] - tod = ob["tod"] - # Ensure timestamps are cached before simulating noise - blah = tod.local_times() - del blah + tele = create_telescope(self.data.comm.group_size) - counter = OpMemoryCounter(silent=True) + # Set up a pipeline that generates some data - tot_old = counter.exec(self.data) + pipe_ops = [ + ops.SimSatellite( + name="sim_satellite", + telescope=tele, + n_observation=self.data.comm.ngroups, + ), + ops.DefaultNoiseModel(name="noise_model"), + ops.SimNoise(name="sim_noise"), + ] - # generate timestreams - op = OpSimNoise() - op.exec(self.data) + self.pipe = ops.Pipeline(name="sim_pipe") + self.pipe.operators = pipe_ops - tot_new = counter.exec(self.data) + def test_counter(self): + # Start with empty data + self.data.clear() - expected = self.data.comm.ngroups * self.totsamp * self.ndet * 8 + # Run a standard pipeline to simulate some data + self.pipe.apply(self.data) - np.testing.assert_equal(tot_new - tot_old, expected) + # Get the memory used + mcount = ops.MemoryCounter() + bytes = mcount.apply(self.data) return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index e37329f2c..2d1bb7505 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -31,6 +31,8 @@ from . import ops_sim_satellite as test_ops_sim_satellite +from . import ops_memory_counter as test_ops_memory_counter + # # from . import cache as testcache @@ -53,7 +55,7 @@ # from . import ops_gainscrambler as testopsgainscrambler # from . import ops_applygain as testopsapplygain # -# from . import ops_memorycounter as testopsmemorycounter +# # # from . import ops_madam as testopsmadam # from . import ops_mapmaker as testopsmapmaker @@ -137,6 +139,7 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_config)) suite.addTest(loader.loadTestsFromModule(test_ops_sim_satellite)) + suite.addTest(loader.loadTestsFromModule(test_ops_memory_counter)) # suite.addTest(loader.loadTestsFromModule(testcache)) # From d2b9f4d30099ce6220358090467078484aa4a198 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Sun, 1 Nov 2020 08:50:20 -0800 Subject: [PATCH 019/690] HWP simulation function now uses units. Add extra helper function to run a simple satellite sim for use in unit tests. Restore unit tests for healpix pointing. 
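For reference, a minimal usage sketch of the units-aware interface introduced in this patch (not part of the patch itself; the import path and the "times" / "hwp_angle" key names are inferred from the diffs below, and `ob` is assumed to be an existing Observation with shared timestamps):

    from astropy import units as u
    from toast.future_ops.sim_hwp import simulate_hwp_response

    # Continuously rotating HWP.  For a stepped HWP, pass hwp_step and
    # hwp_step_time instead and leave hwp_rpm as None -- the function
    # raises RuntimeError if both modes are requested at once.
    simulate_hwp_response(
        ob,
        ob_time_key="times",
        ob_angle_key="hwp_angle",
        ob_mueller_key=None,  # Mueller output is not yet implemented
        hwp_start=0.0 * u.second,
        hwp_rpm=10.0,
        hwp_step=None,
        hwp_step_time=None,
    )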
--- src/toast/future_ops/sim_hwp.py | 9 +- src/toast/future_ops/sim_satellite.py | 1 + src/toast/mpi.py | 32 ++++++++ src/toast/tests/CMakeLists.txt | 2 +- src/toast/tests/_helpers.py | 82 ++++++++++--------- src/toast/tests/ops_memory_counter.py | 37 +++------ .../{ops_pmat.py => ops_pointing_healpix.py} | 71 +++++----------- src/toast/tests/runner.py | 6 +- 8 files changed, 117 insertions(+), 123 deletions(-) rename src/toast/tests/{ops_pmat.py => ops_pointing_healpix.py} (84%) diff --git a/src/toast/future_ops/sim_hwp.py b/src/toast/future_ops/sim_hwp.py index 88f187b06..9f972cdad 100644 --- a/src/toast/future_ops/sim_hwp.py +++ b/src/toast/future_ops/sim_hwp.py @@ -12,6 +12,7 @@ @function_timer def simulate_hwp_response( ob, + ob_time_key=None, ob_angle_key=None, ob_mueller_key=None, hwp_start=None, @@ -64,9 +65,9 @@ def simulate_hwp_response( # convert to radians / second hwp_rate = hwp_rpm * 2.0 * np.pi / 60.0 - if hwp_step_deg is not None: + if hwp_step is not None: # convert to radians and seconds - hwp_step_rad = hwp_set.to_value(u.radian) + hwp_step_rad = hwp_step.to_value(u.radian) hwp_step_time_s = hwp_step_time.to_value(u.second) # Only the first process in each grid column simulates the common HWP angle @@ -78,7 +79,7 @@ def simulate_hwp_response( hwp_angle = None hwp_mueller = None - if ob.grid_comm_col is None or ob.grid_comm_col.rank == 0: + if ob.comm_col is None or ob.comm_col.rank == 0: if hwp_rate is not None: # continuous HWP # HWP increment per sample is: @@ -111,7 +112,7 @@ def simulate_hwp_response( # Store the angle and / or the Mueller matrix if ob_angle_key is not None: ob.shared.create( - ob_angle_key, shape=(n_sample,), dtype=np.float64, comm=ob.grid_comm_col + ob_angle_key, shape=(n_sample,), dtype=np.float64, comm=ob.comm_col ) ob.shared[ob_angle_key].set(hwp_angle, offset=(0,), fromrank=0) diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py index ba638f881..589ed2e06 100644 --- a/src/toast/future_ops/sim_satellite.py +++ b/src/toast/future_ops/sim_satellite.py @@ -510,6 +510,7 @@ def _exec(self, data, detectors=None, **kwargs): simulate_hwp_response( ob, + ob_time_key=self.times, ob_angle_key=self.hwp_angle, ob_mueller_key=None, hwp_start=obsrange[obindx].start * u.second, diff --git a/src/toast/mpi.py b/src/toast/mpi.py index 1968f0d1b..d52518db5 100644 --- a/src/toast/mpi.py +++ b/src/toast/mpi.py @@ -40,6 +40,7 @@ import sys import itertools +from contextlib import contextmanager import numpy as np @@ -252,3 +253,34 @@ def __repr__(self): else: lines.append(" Using CUDA device {}".format(self._cuda.device_index)) return "".format("\n".join(lines)) + + +@contextmanager +def exception_guard(comm=None): + """Ensure that if one MPI process raises an un-caught exception, all of them do. + + Args: + comm (mpi4py.MPI.Comm): The MPI communicator or None. 
+ + """ + log = Logger.get() + failed = 0 + try: + yield + except: + msg = "Exception on process {}:\n".format(comm.rank) + exc_type, exc_value, exc_traceback = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_traceback) + msg += "\n".join(lines) + log.error(msg) + failed = 1 + + failcount = None + if comm is None: + failcount = failed + else: + failcount = comm.allreduce(failed, op=MPI.SUM) + if failcount > 0: + raise RuntimeError("One or more MPI processes raised an exception") + + return diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 62208be43..232a46d72 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -21,7 +21,7 @@ install(FILES ops_applygain.py ops_sim_tod_noise.py covariance.py - ops_pmat.py + ops_pointing_healpix.py ops_dipole.py ops_groundfilter.py sim_focalplane.py diff --git a/src/toast/tests/_helpers.py b/src/toast/tests/_helpers.py index 73dfcc580..6afe2f61a 100644 --- a/src/toast/tests/_helpers.py +++ b/src/toast/tests/_helpers.py @@ -6,7 +6,7 @@ import numpy as np -# from contextlib import contextmanager +from astropy import units as u from ..mpi import Comm @@ -20,8 +20,10 @@ from ..observation import DetectorData, Observation +from .. import future_ops as ops -ZAXIS = np.array([0, 0, 1.0]) + +ZAXIS = np.array([0.0, 0.0, 1.0]) # These are helper routines for common operations used in the unit tests. @@ -78,7 +80,7 @@ def create_comm(mpicomm): return toastcomm -def create_telescope(group_size): +def create_telescope(group_size, sample_rate=10.0 * u.Hz): """Create a fake telescope with at least one detector per process.""" npix = 1 ring = 1 @@ -90,10 +92,10 @@ def create_telescope(group_size): def create_distdata(mpicomm, obs_per_group=1, samples=10): - """Create a toast communicator and distributed data object. + """Create a toast communicator and (empty) distributed data object. Use the specified MPI communicator to attempt to create 2 process groups, - each with some observations. + each with some empty observations. Args: mpicomm (MPI.Comm): the MPI communicator (or None). @@ -119,6 +121,42 @@ def create_distdata(mpicomm, obs_per_group=1, samples=10): return data +def create_satellite_data( + mpicomm, obs_per_group=1, sample_rate=10.0 * u.Hz, obs_time=5.0 * u.minute +): + """Create a data object with a simple satellite sim. + + Use the specified MPI communicator to attempt to create 2 process groups. Create + a fake telescope and run the satellite sim to make some observations for each + group. This is useful for testing many operators that need some pre-existing + observations with boresight pointing. + + Args: + mpicomm (MPI.Comm): the MPI communicator (or None). + obs_per_group (int): the number of observations assigned to each group. + samples (int): number of samples per observation. + + Returns: + toast.Data: the distributed data with named observations. + + """ + toastcomm = create_comm(mpicomm) + data = Data(toastcomm) + + tele = create_telescope(toastcomm.group_size, sample_rate=sample_rate) + + sim_sat = ops.SimSatellite( + name="sim_sat", + n_observation=(toastcomm.ngroups * obs_per_group), + telescope=tele, + hwp_rpm=10.0, + observation_time=obs_time, + ) + sim_sat.apply(data) + + return data + + def uniform_chunks(samples, nchunk=100): """Divide some number of samples into chunks. 
@@ -141,37 +179,3 @@ def uniform_chunks(samples, nchunk=100): for r in range(remain): chunks[r] += 1 return chunks - - -# -# @contextmanager -# def mpi_guard(comm=MPI.COMM_WORLD): -# """Ensure that if one MPI process raises an exception, all of them do. -# -# Args: -# comm (mpi4py.MPI.Comm): The MPI communicator. -# -# """ -# failed = 0 -# print(comm.rank, ": guard: enter", flush=True) -# try: -# print(comm.rank, ": guard: yield", flush=True) -# yield -# except: -# print(comm.rank, ": guard: except", flush=True) -# msg = "Exception on process {}:\n".format(comm.rank) -# exc_type, exc_value, exc_traceback = sys.exc_info() -# lines = traceback.format_exception(exc_type, exc_value, -# exc_traceback) -# msg += "\n".join(lines) -# print(msg, flush=True) -# failed = 1 -# print(comm.rank, ": guard: except done", flush=True) -# -# print(comm.rank, ": guard: failcount reduce", flush=True) -# failcount = comm.allreduce(failed, op=MPI.SUM) -# if failcount > 0: -# raise RuntimeError("One or more MPI processes raised an exception") -# print(comm.rank, ": guard: done", flush=True) -# -# return diff --git a/src/toast/tests/ops_memory_counter.py b/src/toast/tests/ops_memory_counter.py index 260d41785..97395ef4d 100644 --- a/src/toast/tests/ops_memory_counter.py +++ b/src/toast/tests/ops_memory_counter.py @@ -11,9 +11,7 @@ from .. import future_ops as ops -from .. import config as tc - -from ._helpers import create_outdir, create_distdata, create_telescope +from ._helpers import create_outdir, create_satellite_data class MemoryCounterTest(MPITestCase): @@ -21,37 +19,22 @@ def setUp(self): fixture_name = os.path.splitext(os.path.basename(__file__))[0] self.outdir = create_outdir(self.comm, fixture_name) - # One observation per group - self.data = create_distdata(self.comm, obs_per_group=1) - - # Make a fake telescope for every observation - - tele = create_telescope(self.data.comm.group_size) + def test_counter(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) # Set up a pipeline that generates some data - pipe_ops = [ - ops.SimSatellite( - name="sim_satellite", - telescope=tele, - n_observation=self.data.comm.ngroups, - ), - ops.DefaultNoiseModel(name="noise_model"), - ops.SimNoise(name="sim_noise"), + ops.DefaultNoiseModel(), + ops.SimNoise(), ] - self.pipe = ops.Pipeline(name="sim_pipe") - self.pipe.operators = pipe_ops - - def test_counter(self): - # Start with empty data - self.data.clear() - - # Run a standard pipeline to simulate some data - self.pipe.apply(self.data) + pipe = ops.Pipeline() + pipe.operators = pipe_ops + pipe.apply(data) # Get the memory used mcount = ops.MemoryCounter() - bytes = mcount.apply(self.data) + bytes = mcount.apply(data) return diff --git a/src/toast/tests/ops_pmat.py b/src/toast/tests/ops_pointing_healpix.py similarity index 84% rename from src/toast/tests/ops_pmat.py rename to src/toast/tests/ops_pointing_healpix.py index 00b80bb90..a3e629ae0 100644 --- a/src/toast/tests/ops_pmat.py +++ b/src/toast/tests/ops_pointing_healpix.py @@ -10,54 +10,21 @@ import numpy as np from .._libtoast import pointing_matrix_healpix -from ..healpix import HealpixPixels -from ..todmap import TODHpixSpiral, OpPointingHpix + from .. import qarray as qa -from ._helpers import create_outdir, create_distdata, boresight_focalplane +from ..healpix import HealpixPixels + +from .. 
import future_ops as ops + +from ._helpers import create_outdir, create_satellite_data -class OpPointingHpixTest(MPITestCase): +class PointingHealpixTest(MPITestCase): def setUp(self): fixture_name = os.path.splitext(os.path.basename(__file__))[0] self.outdir = create_outdir(self.comm, fixture_name) - # Create one observation per group, and each observation will have - # one detector per process and a single chunk. Data within an - # observation is distributed by detector. - - self.data = create_distdata(self.comm, obs_per_group=1) - self.ndet = self.data.comm.group_size - - # Create detectors with default properties - ( - dnames, - dquat, - depsilon, - drate, - dnet, - dfmin, - dfknee, - dalpha, - ) = boresight_focalplane(self.ndet) - - # A small number of samples - self.totsamp = 10 - - # Populate the observations (one per group) - - tod = TODHpixSpiral( - self.data.comm.comm_group, - dquat, - self.totsamp, - detranks=self.data.comm.group_size, - ) - - self.data.obs[0]["tod"] = tod - - def tearDown(self): - del self.data - def test_pointing_matrix_healpix2(self): nside = 64 npix = 12 * nside ** 2 @@ -255,31 +222,37 @@ def test_pointing_matrix_healpix_hwp(self): return def test_hpix_simple(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + pointing = ops.PointingHealpix(nside=64, mode="IQU", hwp_angle="hwp_angle") + pointing.apply(data) + rank = 0 if self.comm is not None: rank = self.comm.rank - op = OpPointingHpix() - op.exec(self.data) handle = None if rank == 0: handle = open(os.path.join(self.outdir, "out_test_hpix_simple_info"), "w") - self.data.info(handle=handle) + data.info(handle=handle) if rank == 0: handle.close() - return def test_hpix_hwpnull(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + pointing = ops.PointingHealpix(nside=64, mode="IQU") + pointing.apply(data) + rank = 0 if self.comm is not None: rank = self.comm.rank - op = OpPointingHpix(mode="IQU") - op.exec(self.data) handle = None if rank == 0: - handle = open(os.path.join(self.outdir, "out_test_hpix_hwpnull_info"), "w") - self.data.info(handle=handle) + handle = open(os.path.join(self.outdir, "out_test_hpix_hwpnull"), "w") + data.info(handle=handle) if rank == 0: handle.close() - return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 2d1bb7505..f8056748a 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -33,6 +33,8 @@ from . import ops_memory_counter as test_ops_memory_counter +from . import ops_pointing_healpix as test_ops_pointing_healpix + # # from . import cache as testcache @@ -43,8 +45,6 @@ # # from . import cov as testcov # -# from . import ops_pmat as testopspmat -# # from . import ops_dipole as testopsdipole # from . import ops_simnoise as testopssimnoise # from . 
import ops_sim_sss as testopssimsss @@ -140,6 +140,7 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_sim_satellite)) suite.addTest(loader.loadTestsFromModule(test_ops_memory_counter)) + suite.addTest(loader.loadTestsFromModule(test_ops_pointing_healpix)) # suite.addTest(loader.loadTestsFromModule(testcache)) # @@ -149,7 +150,6 @@ def test(name=None, verbosity=2): # suite.addTest(loader.loadTestsFromModule(testopssimnoise)) # suite.addTest(loader.loadTestsFromModule(testopssimsss)) # suite.addTest(loader.loadTestsFromModule(testopsapplygain)) - # suite.addTest(loader.loadTestsFromModule(testopspmat)) # suite.addTest(loader.loadTestsFromModule(testcov)) # suite.addTest(loader.loadTestsFromModule(testopsdipole)) # suite.addTest(loader.loadTestsFromModule(testopsgroundfilter)) From 5874e5660b5d671ee155e1bc51bf08c0d24e5761 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Wed, 4 Nov 2020 21:48:10 -0800 Subject: [PATCH 020/690] Restore noise simulation unit tests. --- src/toast/data.py | 4 + src/toast/future_ops/sim_tod_noise.py | 53 +- src/toast/observation_data.py | 10 +- src/toast/tests/_helpers.py | 9 +- src/toast/tests/mpi.py | 58 +-- src/toast/tests/ops_sim_tod_noise.py | 694 ++++++++++++-------------- src/toast/tests/runner.py | 13 +- 7 files changed, 396 insertions(+), 445 deletions(-) diff --git a/src/toast/data.py b/src/toast/data.py index 4b8c0790d..9df94ce56 100644 --- a/src/toast/data.py +++ b/src/toast/data.py @@ -51,6 +51,10 @@ def __repr__(self): val += "\n>" return val + def __del__(self): + if hasattr(self, "obs"): + self.clear() + @property def comm(self): """The toast.Comm over which the data is distributed.""" diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/future_ops/sim_tod_noise.py index b213f1742..40e7c516b 100644 --- a/src/toast/future_ops/sim_tod_noise.py +++ b/src/toast/future_ops/sim_tod_noise.py @@ -6,6 +6,10 @@ import numpy as np +from scipy import interpolate + +from .. import rng + from ..timing import function_timer from ..traits import trait_docs, Int, Unicode @@ -21,17 +25,17 @@ @function_timer def sim_noise_timestream( - realization, - telescope, - component, - obsindx, - detindx, - rate, - firstsamp, - samples, - oversample, - freq, - psd, + realization=0, + telescope=0, + component=0, + obsindx=0, + detindx=0, + rate=1.0, + firstsamp=0, + samples=0, + oversample=2, + freq=None, + psd=None, py=False, ): """Generate a noise timestream, given a starting RNG state. 
@@ -124,7 +128,9 @@ def sim_noise_timestream( logfreq = np.log10(freq + freqshift) logpsd = np.log10(psd + psdshift) - interp = si.interp1d(logfreq, logpsd, kind="linear", fill_value="extrapolate") + interp = interpolate.interp1d( + logfreq, logpsd, kind="linear", fill_value="extrapolate" + ) loginterp_psd = interp(loginterp_freq) interp_psd = np.power(10.0, loginterp_psd) - psdshift @@ -292,17 +298,18 @@ def _exec(self, data, detectors=None, **kwargs): # Simulate the noise matching this key nsedata = sim_noise_timestream( - self.realization, - telescope, - self.component, - obsindx, - nse.index(key), - rate, - ob.local_index_offset + global_offset, - ob.n_local_samples, - self._oversample, - nse.freq(key), - nse.psd(key), + realization=self.realization, + telescope=telescope, + component=self.component, + obsindx=obsindx, + detindx=nse.index(key), + rate=rate, + firstsamp=ob.local_index_offset + global_offset, + samples=ob.n_local_samples, + oversample=self._oversample, + freq=nse.freq(key), + psd=nse.psd(key), + py=False, ) # Add the noise to all detectors that have nonzero weights diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py index d6e08adb3..decd7355a 100644 --- a/src/toast/observation_data.py +++ b/src/toast/observation_data.py @@ -377,8 +377,9 @@ def __getitem__(self, key): return self._internal[key] def __delitem__(self, key): - self._internal[key].clear() - del self._internal[key] + if key in self._internal: + self._internal[key].clear() + del self._internal[key] def __setitem__(self, key, value): if isinstance(value, DetectorData): @@ -626,8 +627,9 @@ def __getitem__(self, key): return self._internal[key] def __delitem__(self, key): - self._internal[key].close() - del self._internal[key] + if key in self._internal: + self._internal[key].close() + del self._internal[key] def __setitem__(self, key, value): if isinstance(value, MPIShared): diff --git a/src/toast/tests/_helpers.py b/src/toast/tests/_helpers.py index 6afe2f61a..23d20c038 100644 --- a/src/toast/tests/_helpers.py +++ b/src/toast/tests/_helpers.py @@ -87,7 +87,12 @@ def create_telescope(group_size, sample_rate=10.0 * u.Hz): while 2 * npix < group_size: npix += 6 * ring ring += 1 - fp = fake_hexagon_focalplane(n_pix=npix) + fp = fake_hexagon_focalplane( + n_pix=npix, + sample_rate=sample_rate, + f_min=1.0e-5 * u.Hz, + f_knee=(sample_rate / 2000.0), + ) return Telescope("test", focalplane=fp) @@ -147,7 +152,7 @@ def create_satellite_data( sim_sat = ops.SimSatellite( name="sim_sat", - n_observation=(toastcomm.ngroups * obs_per_group), + num_observations=(toastcomm.ngroups * obs_per_group), telescope=tele, hwp_rpm=10.0, observation_time=obs_time, diff --git a/src/toast/tests/mpi.py b/src/toast/tests/mpi.py index a77bbfba4..6e9ba89c1 100644 --- a/src/toast/tests/mpi.py +++ b/src/toast/tests/mpi.py @@ -2,26 +2,27 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +from ..mpi import MPI, use_mpi + import sys import time import warnings from unittest.signals import registerResult + from unittest import TestCase from unittest import TestResult class MPITestCase(TestCase): - """A simple wrapper around the standard TestCase which provides - one extra method to set the communicator. 
- """ + """A simple wrapper around the standard TestCase which stores the communicator.""" def __init__(self, *args, **kwargs): - super(MPITestCase, self).__init__(*args, **kwargs) - - def setComm(self, comm): - self.comm = comm + super().__init__(*args, **kwargs) + self.comm = None + if use_mpi: + self.comm = MPI.COMM_WORLD class MPITestResult(TestResult): @@ -29,17 +30,18 @@ class MPITestResult(TestResult): The actions needed are coordinated across all processes. - Used by MPITestRunner. """ separator1 = "=" * 70 separator2 = "-" * 70 - def __init__(self, comm, stream=None, descriptions=None, verbosity=None, **kwargs): - super(MPITestResult, self).__init__( + def __init__(self, stream=None, descriptions=None, verbosity=None, **kwargs): + super().__init__( stream=stream, descriptions=descriptions, verbosity=verbosity, **kwargs ) - self.comm = comm + self.comm = None + if use_mpi: + self.comm = MPI.COMM_WORLD self.stream = stream self.descriptions = descriptions self.buffer = False @@ -53,8 +55,7 @@ def getDescription(self, test): return str(test) def startTest(self, test): - if isinstance(test, MPITestCase): - test.setComm(self.comm) + super().startTest(test) self.stream.flush() if self.comm is not None: self.comm.barrier() @@ -65,11 +66,10 @@ def startTest(self, test): self.stream.flush() if self.comm is not None: self.comm.barrier() - super(MPITestResult, self).startTest(test) return def addSuccess(self, test): - super(MPITestResult, self).addSuccess(test) + super().addSuccess(test) if self.comm is None: self.stream.write("ok ") else: @@ -78,7 +78,7 @@ def addSuccess(self, test): return def addError(self, test, err): - super(MPITestResult, self).addError(test, err) + super().addError(test, err) if self.comm is None: self.stream.write("error ") else: @@ -87,7 +87,7 @@ def addError(self, test, err): return def addFailure(self, test, err): - super(MPITestResult, self).addFailure(test, err) + super().addFailure(test, err) if self.comm is None: self.stream.write("fail ") else: @@ -96,7 +96,7 @@ def addFailure(self, test, err): return def addSkip(self, test, reason): - super(MPITestResult, self).addSkip(test, reason) + super().addSkip(test, reason) if self.comm is None: self.stream.write("skipped({}) ".format(reason)) else: @@ -105,7 +105,7 @@ def addSkip(self, test, reason): return def addExpectedFailure(self, test, err): - super(MPITestResult, self).addExpectedFailure(test, err) + super().addExpectedFailure(test, err) if self.comm is None: self.stream.write("expected-fail ") else: @@ -114,11 +114,11 @@ def addExpectedFailure(self, test, err): return def addUnexpectedSuccess(self, test): - super(MPITestResult, self).addUnexpectedSuccess(test) + super().addUnexpectedSuccess(test) if self.comm is None: - self.stream.writeln("unexpected-success ") + self.stream.write("unexpected-success ") else: - self.stream.writeln("[{}]unexpected-success ".format(self.comm.rank)) + self.stream.write("[{}]unexpected-success ".format(self.comm.rank)) return def printErrorList(self, flavour, errors): @@ -142,7 +142,6 @@ def printErrorList(self, flavour, errors): def printErrors(self): if self.comm is None: self.stream.writeln() - self.stream.flush() self.printErrorList("ERROR", self.errors) self.printErrorList("FAIL", self.failures) self.stream.flush() @@ -150,7 +149,6 @@ def printErrors(self): self.comm.barrier() if self.comm.rank == 0: self.stream.writeln() - self.stream.flush() for p in range(self.comm.size): if p == self.comm.rank: self.printErrorList("ERROR", self.errors) @@ -203,15 +201,15 @@ class 
MPITestRunner(object): resultclass = MPITestResult - def __init__( - self, comm, stream=None, descriptions=True, verbosity=2, warnings=None - ): + def __init__(self, stream=None, descriptions=True, verbosity=2, warnings=None): """Construct a MPITestRunner. Subclasses should accept **kwargs to ensure compatibility as the interface changes. """ - self.comm = comm + self.comm = None + if use_mpi: + self.comm = MPI.COMM_WORLD if stream is None: stream = sys.stderr self.stream = _WritelnDecorator(stream) @@ -221,9 +219,7 @@ def __init__( def run(self, test): "Run the given test case or test suite." - result = MPITestResult( - self.comm, self.stream, self.descriptions, self.verbosity - ) + result = MPITestResult(self.stream, self.descriptions, self.verbosity) registerResult(result) with warnings.catch_warnings(): if self.warnings: diff --git a/src/toast/tests/ops_sim_tod_noise.py b/src/toast/tests/ops_sim_tod_noise.py index c74ebf4f2..6d3ce3064 100644 --- a/src/toast/tests/ops_sim_tod_noise.py +++ b/src/toast/tests/ops_sim_tod_noise.py @@ -6,130 +6,35 @@ import numpy as np +from astropy import units as u + from .mpi import MPITestCase -from ..tod import Noise, sim_noise_timestream, AnalyticNoise, OpSimNoise -from ..todmap import TODHpixSpiral +from ..vis import set_matplotlib_backend from .. import rng as rng -from ._helpers import ( - create_outdir, - create_distdata, - boresight_focalplane, - uniform_chunks, -) +from ..noise import Noise +from .. import future_ops as ops -class OpSimNoiseTest(MPITestCase): - def setUp(self): - fixture_name = os.path.splitext(os.path.basename(__file__))[0] - self.outdir = create_outdir(self.comm, fixture_name) +from ..future_ops.sim_tod_noise import sim_noise_timestream - # Create one observation per group, and each observation will have - # a fixed number of detectors and one chunk per process. - - # We create two data sets- one for testing uncorrelated noise and - # one for testing correlated noise. - - self.data = create_distdata(self.comm, obs_per_group=1) - self.data_corr = create_distdata(self.comm, obs_per_group=1) - - self.ndet = 4 - self.rate = 20.0 - - # Create detectors with a range of knee frequencies. 
- ( - dnames, - dquat, - depsilon, - drate, - dnet, - dfmin, - dfknee, - dalpha, - ) = boresight_focalplane( - self.ndet, - samplerate=self.rate, - net=10.0, - fmin=1.0e-5, - fknee=np.linspace(0.0, 0.1, num=self.ndet), - ) - - # Total samples per observation - self.totsamp = 200000 +from ._helpers import create_outdir, create_satellite_data - # Chunks - chunks = uniform_chunks(self.totsamp, nchunk=self.data.comm.group_size) - # Noise sim oversampling +class SimNoiseTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) self.oversample = 2 - - # MCs for testing statistics of simulated noise self.nmc = 100 - # Populate the observations (one per group) - - tod = TODHpixSpiral( - self.data.comm.comm_group, - dquat, - self.totsamp, - detranks=1, - firsttime=0.0, - rate=self.rate, - nside=512, - sampsizes=chunks, - ) - - # Construct an uncorrelated analytic noise model for the detectors - - nse = AnalyticNoise( - rate=drate, - fmin=dfmin, - detectors=dnames, - fknee=dfknee, - alpha=dalpha, - NET=dnet, - ) - - self.data.obs[0]["tod"] = tod - self.data.obs[0]["noise"] = nse - - # Construct a correlated analytic noise model for the detectors - - corr_freqs = { - "noise_{}".format(x): nse.freq(dnames[x]) for x in range(self.ndet) - } - - corr_psds = {"noise_{}".format(x): nse.psd(dnames[x]) for x in range(self.ndet)} - - corr_indices = {"noise_{}".format(x): 100 + x for x in range(self.ndet)} - - corr_mix = dict() - for x in range(self.ndet): - dmix = np.random.uniform(low=-1.0, high=1.0, size=self.ndet) - corr_mix[dnames[x]] = { - "noise_{}".format(y): dmix[y] for y in range(self.ndet) - } - - nse_corr = Noise( - detectors=dnames, - freqs=corr_freqs, - psds=corr_psds, - mixmatrix=corr_mix, - indices=corr_indices, - ) - - self.data_corr.obs[0]["tod"] = tod - self.data_corr.obs[0]["noise"] = nse_corr - - return - def test_gauss(self): # Test that the same samples from different calls are reproducible. # All processes run this identical test. - detindx = self.ndet - 1 + detindx = 99 telescope = 5 realization = 1000 component = 3 @@ -170,42 +75,50 @@ def test_gauss(self): ) return - def test_sim(self): + def test_sim_once(self): # Test the uncorrelated noise generation. - # Verify that the white noise part of the spectrum is normalized # correctly. - # We have purposely distributed the TOD data so that every process has - # a single stationary interval for all detectors. + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # This is a simulation with the same focalplane for every obs... + sample_rate = data.obs[0].telescope.focalplane.sample_rate - rank = 0 - if self.comm is not None: - rank = self.comm.rank + # Create a noise model from focalplane detector properties + noise_model = ops.DefaultNoiseModel() + noise_model.apply(data) - for ob in self.data.obs: - tod = ob["tod"] - nse = ob["noise"] + # Simulate noise using this model + sim_noise = ops.SimNoise() + sim_noise.apply(data) - for det in tod.local_dets: + wrank = data.comm.world_rank + grank = data.comm.group_rank + + for ob in data.obs: + nse = ob[noise_model.noise_model] + for det in ob.local_detectors: + # Verify that the white noise level of the PSD is correctly normalized. + # Only check the high frequency part of the spectrum to avoid 1/f. 
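# Note (added for reference, not in the original patch): for a
# white-noise-dominated band the PSD level is NET**2, so the assertion
# below amounts to
#
#     mean( psd[freq > 0.95 * (fsamp / 2)] ) ~= NET**2    (within 2%)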
fsamp = nse.rate(det) cutoff = 0.95 * (fsamp / 2.0) indx = np.where(nse.freq(det) > cutoff) - net = nse.NET(det) avg = np.mean(nse.psd(det)[indx]) netsq = net * net - # print("det {} NETsq = {}, average white noise level = {}" - # "".format(det, netsq, avg)) self.assertTrue((np.absolute(avg - netsq) / netsq) < 0.02) - if rank == 0: - # One process dumps debugging info + if wrank == 0: + set_matplotlib_backend() import matplotlib.pyplot as plt - for det in tod.local_dets: + # Just one process dumps out local noise model for debugging + + for det in ob.local_detectors: savefile = os.path.join( - self.outdir, "out_test_simnoise_rawpsd_{}.txt".format(det) + self.outdir, "out_{}_rawpsd_{}.txt".format(ob.name, det) ) np.savetxt( savefile, @@ -236,105 +149,50 @@ def test_sim(self): plt.title("Simulated PSD from toast.AnalyticNoise") savefile = os.path.join( - self.outdir, "out_test_simnoise_rawpsd_{}.png".format(det) + self.outdir, "out_{}_rawpsd_{}.pdf".format(ob.name, det) ) + plt.savefig(savefile) plt.close() - ntod = tod.local_samples[1] + # Now generate noise timestreams in python and compare to the results of + # running the operator. - # this replicates the calculation in sim_noise_timestream() + freqs = dict() + psds = dict() fftlen = 2 - while fftlen <= (self.oversample * ntod): + while fftlen <= (self.oversample * ob.n_local_samples): fftlen *= 2 - freqs = {} - psds = {} - psdnorm = {} - todvar = {} - - cfftlen = 2 - while cfftlen <= ntod: - cfftlen *= 2 - - # print("fftlen = ", fftlen) - # print("cfftlen = ", cfftlen) - - checkpsd = {} - binsamps = cfftlen // 4096 - nbins = binsamps - 1 - bstart = (self.rate / 2) / nbins - bins = np.linspace(bstart, self.rate / 2, num=(nbins - 1), endpoint=True) - # print("nbins = ",nbins) - # print(bins) - - checkfreq = np.fft.rfftfreq(cfftlen, d=1 / self.rate) - # print("checkfreq len = ",len(checkfreq)) - # print(checkfreq[:10]) - # print(checkfreq[-10:]) - checkbinmap = np.searchsorted(bins, checkfreq, side="left") - # print("checkbinmap len = ",len(checkbinmap)) - # print(checkbinmap[:10]) - # print(checkbinmap[-10:]) - bcount = np.bincount(checkbinmap) - # print("bcount len = ",len(bcount)) - # print(bcount) - - bintruth = {} - - idet = 0 - for det in tod.local_dets: - + for idet, det in enumerate(ob.local_detectors): dfreq = nse.rate(det) / float(fftlen) - (pytod, freqs[det], psds[det]) = sim_noise_timestream( - 0, - 0, - 0, - 0, - idet, - nse.rate(det), - 0, - ntod, - self.oversample, - nse.freq(det), - nse.psd(det), + realization=0, + telescope=ob.telescope.id, + component=0, + obsindx=ob.UID, + detindx=idet, + rate=nse.rate(det), + firstsamp=ob.local_index_offset, + samples=ob.n_local_samples, + oversample=self.oversample, + freq=nse.freq(det), + psd=nse.psd(det), py=True, ) - libtod = sim_noise_timestream( - 0, - 0, - 0, - 0, - idet, - nse.rate(det), - 0, - ntod, - self.oversample, - nse.freq(det), - nse.psd(det), - py=False, + np.testing.assert_array_almost_equal( + pytod, ob.detdata[sim_noise.out][det], decimal=2 ) - np.testing.assert_array_almost_equal(pytod, libtod, decimal=2) - - # Factor of 2 comes from the negative frequency values. 
- psdnorm[det] = 2.0 * np.sum(psds[det] * dfreq) - # print("psd[{}] integral = {}".format(det, psdnorm[det])) - - todvar[det] = np.zeros(self.nmc, dtype=np.float64) - checkpsd[det] = np.zeros((nbins - 1, self.nmc), dtype=np.float64) - - idet += 1 - - if rank == 0: + if wrank == 0: + # One process dumps out interpolated PSD for debugging import matplotlib.pyplot as plt - for det in tod.local_dets: + for det in ob.local_detectors: savefile = os.path.join( - self.outdir, "out_test_simnoise_psd_{}.txt".format(det) + self.outdir, "out_{}_interppsd_{}.txt".format(ob.name, det) ) np.savetxt( savefile, np.transpose([freqs[det], psds[det]]), delimiter=" " @@ -362,69 +220,129 @@ def test_sim(self): ax.legend(loc=1) plt.title( "Interpolated PSD with High-pass from {:0.1f} " - "second Simulation Interval".format((float(ntod) / self.rate)) + "second Simulation Interval".format( + (float(ob.n_local_samples) / sample_rate) + ) ) savefile = os.path.join( - self.outdir, "out_test_simnoise_psd_{}.png".format(det) + self.outdir, "out_{}_interppsd_{}.pdf".format(ob.name, det) ) plt.savefig(savefile) plt.close() - tmap = np.searchsorted(bins, freqs[det], side="left") - tcount = np.bincount(tmap) - tpsd = np.bincount(tmap, weights=psds[det]) - good = tcount > 0 - tpsd[good] /= tcount[good] - bintruth[det] = tpsd - - hpy = None - if rank == 0: - if "TOAST_TEST_BIGTOD" in os.environ.keys(): - try: - import h5py as hpy - except ImportError: - # just write the first realization as usual - hpy = None - - # if we have the h5py module and a special environment variable is set, - # then process zero will dump out its full timestream data for more - # extensive sharing / tests. Just dump a few detectors to keep the - # file size reasonable. - - hfile = None - dset = {} - if hpy is not None: - hfile = hpy.File( - os.path.join(self.outdir, "out_test_simnoise_tod.hdf5"), "w" - ) - for det in tod.detectors: - dset[det] = hfile.create_dataset( - det, (self.nmc, ntod), dtype="float64" - ) + if ob.comm is not None: + ob.comm.barrier() + + # For some reason not deleting here (and relying on garbage collection) causes + # a hang in the case of multiple groups. Removed after this is understood. + del data + + def test_sim_mc(self): + # Create a fake satellite data set for testing. We explicitly generate + # only one observation per group. + data = create_satellite_data( + self.comm, + obs_per_group=1, + sample_rate=100.0 * u.Hz, + obs_time=10.0 * u.minute, + ) - # Run both the numpy FFT case and the toast FFT case. + # This is a simulation with the same focalplane for every obs... + sample_rate = data.obs[0].telescope.focalplane.sample_rate + + # Create a noise model from focalplane detector properties + noise_model = ops.DefaultNoiseModel() + noise_model.apply(data) + + wrank = data.comm.world_rank + + # First we make one pass through the data and examine the noise model. + # We interpolate the PSD using pure python code and compute some normalization + # factors and also bin the true PSD to the final binning we will use for the + # PSDs made from the timestreams. 
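# Note (added for reference, not in the original patch): the
# "normalization factor" computed below is the discrete Parseval integral
# of the PSD,
#
#     psd_norm = 2 * sum_k psd[k] * dfreq  ~=  var(tod),
#
# with the factor of 2 folding in the negative frequencies.  This is the
# value the per-realization TOD variances are later tested against.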
+ + todvar = dict() + ntod_var = None + psd_norm = dict() + checkpsd = dict() + freqs = dict() + psds = dict() + + cfftlen = 2 + while cfftlen <= data.obs[0].n_local_samples: + cfftlen *= 2 + binsamps = cfftlen // 2048 + nbins = binsamps - 1 + bstart = (sample_rate / 2) / nbins + bins = np.linspace(bstart, sample_rate / 2, num=(nbins - 1), endpoint=True) + checkfreq = np.fft.rfftfreq(cfftlen, d=(1 / sample_rate)) + checkbinmap = np.searchsorted(bins, checkfreq, side="left") + bcount = np.bincount(checkbinmap) + bintruth = dict() + tpsd = None + good = None + + for ob in data.obs[:1]: + ntod_var = ob.n_local_samples + nse = ob[noise_model.noise_model] + fftlen = 2 + while fftlen <= (self.oversample * ob.n_local_samples): + fftlen *= 2 + for idet, det in enumerate(ob.local_detectors): + dfreq = nse.rate(det) / float(fftlen) + (pytod, freqs[det], psds[det]) = sim_noise_timestream( + realization=0, + telescope=ob.telescope.id, + component=0, + obsindx=ob.UID, + detindx=idet, + rate=nse.rate(det), + firstsamp=ob.local_index_offset, + samples=ob.n_local_samples, + oversample=self.oversample, + freq=nse.freq(det), + psd=nse.psd(det), + py=True, + ) + # Factor of 2 comes from the negative frequency values. + psd_norm[det] = 2.0 * np.sum(psds[det] * dfreq) - for realization in range(self.nmc): + # Allocate buffers for MC loop + todvar[det] = np.zeros(self.nmc, dtype=np.float64) + checkpsd[det] = np.zeros((nbins - 1, self.nmc), dtype=np.float64) - # generate timestreams + # Bin the true high-resolution PSD. + tmap = np.searchsorted(bins, freqs[det], side="left") + tcount = np.bincount(tmap) + tpsd = np.bincount(tmap, weights=psds[det]) + good = tcount > 0 + tpsd[good] /= tcount[good] + bintruth[det] = tpsd - opnoise = OpSimNoise(realization=realization) - opnoise.exec(self.data) + # Perform noise realizations and accumulation statistics. 
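# Note (added for reference, not in the original patch): within the loop
# each realization is reduced to (1) its TOD variance and (2) a binned
# periodogram estimated as
#
#     P(f_k) = |rfft(padded tod)_k|**2 / (sample_rate * n_samples),
#
# which matches the "norm" factor applied to rawpsd below.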
- if realization == 0: - # write timestreams to disk for debugging + for realization in range(self.nmc): + # Clear any previously generated data + for ob in data.obs: + del ob.detdata["noise"] - if rank == 0: - import matplotlib.pyplot as plt + # Simulate noise using the model, with a different realization each time + sim_noise = ops.SimNoise(realization=realization) + sim_noise.apply(data) - for det in tod.local_dets: + if realization == 0: + # write timestreams to disk for debugging + if wrank == 0: + import matplotlib.pyplot as plt - check = tod.cache.reference("noise_{}".format(det)) + for ob in data.obs: + for det in ob.local_detectors: + check = ob.detdata["noise"][det] savefile = os.path.join( self.outdir, - "out_test_simnoise_tod_mc0_{}.txt" "".format(det), + "out_{}_tod-mc0_{}.txt" "".format(ob.name, det), ) np.savetxt(savefile, np.transpose([check]), delimiter=" ") @@ -438,165 +356,187 @@ def test_sim(self): ) ax.legend(loc=1) plt.title( - "First Realization of Simulated TOD " - "from toast.sim_noise_timestream()" + "Observation {}, First Realization of {}".format( + ob.name, det + ) ) savefile = os.path.join( self.outdir, - "out_test_simnoise_tod_mc0_{}.png" "".format(det), + "out_{}_tod-mc0_{}.pdf" "".format(ob.name, det), ) plt.savefig(savefile) plt.close() - for det in tod.local_dets: + for ob in data.obs[:1]: + for det in ob.local_detectors: # compute the TOD variance - ref = tod.cache.reference("noise_{}".format(det)) - dclevel = np.mean(ref) - variance = np.vdot(ref - dclevel, ref - dclevel) / ntod + tod = ob.detdata[sim_noise.out][det] + dclevel = np.mean(tod) + variance = np.vdot(tod - dclevel, tod - dclevel) / len(tod) todvar[det][realization] = variance - if hfile is not None: - if det in dset: - dset[det][realization, :] = ref[:] - # compute the PSD buffer = np.zeros(cfftlen, dtype=np.float64) - offset = (cfftlen - len(ref)) // 2 - buffer[offset : offset + len(ref)] = ref + offset = (cfftlen - len(tod)) // 2 + buffer[offset : offset + len(tod)] = tod rawpsd = np.fft.rfft(buffer) - norm = 1.0 / (self.rate * ntod) + norm = 1.0 / (sample_rate * ob.n_local_samples) rawpsd = norm * np.abs(rawpsd ** 2) bpsd = np.bincount(checkbinmap, weights=rawpsd) good = bcount > 0 bpsd[good] /= bcount[good] checkpsd[det][:, realization] = bpsd[:] - tod.cache.clear() - - if hfile is not None: - hfile.close() - - if rank == 0: - np.savetxt( - os.path.join(self.outdir, "out_test_simnoise_tod_var.txt"), - np.transpose([todvar[x] for x in tod.local_dets]), - delimiter=" ", + lds = sorted(todvar.keys()) + + if wrank == 0: + import matplotlib.pyplot as plt + + np.savetxt( + os.path.join(self.outdir, "out_tod_variance.txt"), + np.transpose([todvar[x] for x in lds]), + delimiter=" ", + ) + + for det in lds: + sig = np.mean(todvar[det]) * np.sqrt(2.0 / (ntod_var - 1)) + histrange = 5.0 * sig + histmin = psd_norm[det] - histrange + histmax = psd_norm[det] + histrange + + fig = plt.figure(figsize=(12, 8), dpi=72) + + ax = fig.add_subplot(1, 1, 1, aspect="auto") + plt.hist( + todvar[det], + 10, + range=(histmin, histmax), + facecolor="magenta", + alpha=0.75, + label="{}: PSD integral = {:0.1f} expected sigma = " + "{:0.1f}".format(det, psd_norm[det], sig), + ) + ax.legend(loc=1) + plt.title( + "Detector {} Distribution of TOD Variance for {} " + "Realizations".format(det, self.nmc) ) - if rank == 0: - import matplotlib.pyplot as plt - - for det in tod.local_dets: - savefile = os.path.join( - self.outdir, "out_test_simnoise_tod_var_{}.txt".format(det) - ) - np.savetxt(savefile, 
np.transpose([todvar[det]]), delimiter=" ") - - sig = np.mean(todvar[det]) * np.sqrt(2.0 / (ntod - 1)) - histrange = 5.0 * sig - histmin = psdnorm[det] - histrange - histmax = psdnorm[det] + histrange - - fig = plt.figure(figsize=(12, 8), dpi=72) - - ax = fig.add_subplot(1, 1, 1, aspect="auto") - plt.hist( - todvar[det], - 10, - range=(histmin, histmax), - facecolor="magenta", - alpha=0.75, - label="{}: PSD integral = {:0.1f} expected sigma = " - "{:0.1f}".format(det, psdnorm[det], sig), - ) - ax.legend(loc=1) - plt.title( - "Distribution of TOD Variance for {} " - "Realizations".format(self.nmc) - ) - - savefile = os.path.join( - self.outdir, "out_test_simnoise_tod_var_{}.png".format(det) - ) - plt.savefig(savefile) - plt.close() - - meanpsd = np.asarray( - [np.mean(checkpsd[det][x, :]) for x in range(nbins - 1)] - ) - - fig = plt.figure(figsize=(12, 8), dpi=72) + savefile = os.path.join( + self.outdir, "out_tod-variance_{}.pdf".format(det) + ) + plt.savefig(savefile) + plt.close() - ax = fig.add_subplot(1, 1, 1, aspect="auto") - ax.plot(bins, bintruth[det], c="k", label="Input Truth") - ax.plot(bins, meanpsd, c="b", marker="o", label="Mean Binned PSD") - ax.scatter( - np.repeat(bins, self.nmc), - checkpsd[det].flatten(), - marker="x", - color="r", - label="Binned PSD", - ) - # ax.set_xscale("log") - # ax.set_yscale("log") - ax.legend(loc=1) - plt.title( - "Detector {} Binned PSDs for {} Realizations" - "".format(det, self.nmc) - ) + meanpsd = np.asarray( + [np.mean(checkpsd[det][x, :]) for x in range(nbins - 1)] + ) - savefile = os.path.join( - self.outdir, "out_test_simnoise_binpsd_dist_{}.png".format(det) - ) - plt.savefig(savefile) - plt.close() + fig = plt.figure(figsize=(12, 8), dpi=72) + + ax = fig.add_subplot(1, 1, 1, aspect="auto") + ax.plot(bins, bintruth[det], c="k", label="Input Truth") + ax.plot(bins, meanpsd, c="b", marker="o", label="Mean Binned PSD") + ax.scatter( + np.repeat(bins, self.nmc), + checkpsd[det].flatten(), + marker="x", + color="r", + label="Binned PSD", + ) + # ax.set_xscale("log") + # ax.set_yscale("log") + ax.legend(loc=1) + plt.title( + "Detector {} Binned PSDs for {} Realizations" + "".format(det, self.nmc) + ) - # The data will likely not be gaussian distributed. - # Just check that the mean is "close enough" to the truth. - errest = np.absolute(np.mean((meanpsd - tpsd) / tpsd)) - # print("Det {} avg rel error = {}".format(det, errest), flush=True) - if nse.fknee(det) < 0.1: - self.assertTrue(errest < 0.1) - - # Verify that Parseval's theorem holds- that the variance of - # the TOD equals the integral of the PSD. We do this for an - # ensemble of realizations - # - # and compare the TOD variance to the integral of the PSD - # accounting for the error on the variance due to finite - # numbers of samples. - # - - for det in tod.local_dets: - sig = np.mean(todvar[det]) * np.sqrt(2.0 / (ntod - 1)) - over3sig = np.where( - np.absolute(todvar[det] - psdnorm[det]) > 3.0 * sig - )[0] - overfrac = float(len(over3sig)) / self.nmc - # print(det, " : ", overfrac, flush=True) - if nse.fknee(det) < 0.01: - self.assertTrue(overfrac < 0.1) - return + savefile = os.path.join( + self.outdir, "out_psd-histogram_{}.pdf".format(det) + ) + plt.savefig(savefile) + plt.close() + + # The data will likely not be gaussian distributed. + # Just check that the mean is "close enough" to the truth. 
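# Note (added for reference, not in the original patch): individual
# periodogram bins follow a scaled chi-squared distribution rather than a
# Gaussian, so only the relative error of the ensemble-mean PSD is tested,
# and only for detectors whose 1/f knee lies well below the white-noise
# band.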
+ errest = np.absolute(np.mean((meanpsd - tpsd) / tpsd)) + # print("Det {} avg rel error = {}".format(det, errest), flush=True) + if nse.fknee(det) < 0.1: + self.assertTrue(errest < 0.1) + + # Verify that Parseval's theorem holds- that the variance of the TOD equals the + # integral of the PSD. We do this for an ensemble of realizations and compare + # the TOD variance to the integral of the PSD accounting for the error on the + # variance due to finite numbers of samples. + + ntod = data.obs[0].n_local_samples + for det in lds: + sig = np.mean(todvar[det]) * np.sqrt(2.0 / (ntod - 1)) + over3sig = np.where(np.absolute(todvar[det] - psd_norm[det]) > 3.0 * sig)[0] + overfrac = float(len(over3sig)) / self.nmc + # print(det, " : ", overfrac, flush=True) + if nse.fknee(det) < 0.1: + self.assertTrue(overfrac < 0.1) + + del data def test_sim_correlated(self): - # Test the correlated noise generation. - opnoise = OpSimNoise(realization=0) - opnoise.exec(self.data_corr) + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create an uncorrelated noise model from focalplane detector properties + noise_model = ops.DefaultNoiseModel() + noise_model.apply(data) + + # Construct a correlated analytic noise model for the detectors for each + # observation. + for ob in data.obs: + nse = ob[noise_model.noise_model] + corr_freqs = { + "noise_{}".format(i): nse.freq(x) + for i, x in enumerate(ob.local_detectors) + } + corr_psds = { + "noise_{}".format(i): nse.psd(x) + for i, x in enumerate(ob.local_detectors) + } + corr_indices = { + "noise_{}".format(i): 100 + i for i, x in enumerate(ob.local_detectors) + } + corr_mix = dict() + for i, x in enumerate(ob.local_detectors): + dmix = np.random.uniform( + low=-1.0, high=1.0, size=len(ob.local_detectors) + ) + corr_mix[x] = { + "noise_{}".format(y): dmix[y] + for y in range(len(ob.local_detectors)) + } + ob["noise_model_corr"] = Noise( + detectors=ob.local_detectors, + freqs=corr_freqs, + psds=corr_psds, + mixmatrix=corr_mix, + indices=corr_indices, + ) + + # Simulate noise using this model + sim_noise = ops.SimNoise(noise_model="noise_model_corr") + sim_noise.apply(data) total = None - for ob in self.data.obs: - tod = ob["tod"] - for det in tod.local_dets: + for ob in data.obs: + for det in ob.local_detectors: # compute the TOD variance - ref = tod.cache.reference("noise_{}".format(det)) - self.assertTrue(np.std(ref) > 0) + tod = ob.detdata[sim_noise.out][det] + self.assertTrue(np.std(tod) > 0) if total is None: - total = ref.copy() - else: - total[:] += ref - del ref + total = np.zeros(ob.n_local_samples, dtype=np.float64) + total[:] += tod # np.testing.assert_almost_equal(np.std(total), 0) + del data return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index f8056748a..83fed9128 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -6,6 +6,7 @@ import os import sys + import unittest from .mpi import MPITestRunner @@ -30,10 +31,9 @@ from . import config as test_config from . import ops_sim_satellite as test_ops_sim_satellite - from . import ops_memory_counter as test_ops_memory_counter - from . import ops_pointing_healpix as test_ops_pointing_healpix +from . 
import ops_sim_tod_noise as test_ops_sim_tod_noise # @@ -98,7 +98,7 @@ def test(name=None, verbosity=2): comm = MPI.COMM_WORLD rank = comm.rank - set_matplotlib_backend(backend="agg") + # set_matplotlib_backend(backend="agg") outdir = "toast_test_output" @@ -117,7 +117,7 @@ def test(name=None, verbosity=2): # Run python tests. loader = unittest.TestLoader() - mpirunner = MPITestRunner(comm, verbosity=verbosity, warnings="ignore") + mpirunner = MPITestRunner(verbosity=verbosity, warnings="ignore") suite = unittest.TestSuite() if name is None: @@ -141,13 +141,11 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_sim_satellite)) suite.addTest(loader.loadTestsFromModule(test_ops_memory_counter)) suite.addTest(loader.loadTestsFromModule(test_ops_pointing_healpix)) + suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise)) - # suite.addTest(loader.loadTestsFromModule(testcache)) # # suite.addTest(loader.loadTestsFromModule(testtod)) - # suite.addTest(loader.loadTestsFromModule(testtodsat)) # - # suite.addTest(loader.loadTestsFromModule(testopssimnoise)) # suite.addTest(loader.loadTestsFromModule(testopssimsss)) # suite.addTest(loader.loadTestsFromModule(testopsapplygain)) # suite.addTest(loader.loadTestsFromModule(testcov)) @@ -155,7 +153,6 @@ def test(name=None, verbosity=2): # suite.addTest(loader.loadTestsFromModule(testopsgroundfilter)) # suite.addTest(loader.loadTestsFromModule(testsimfocalplane)) # suite.addTest(loader.loadTestsFromModule(testopspolyfilter)) - # suite.addTest(loader.loadTestsFromModule(testopsmemorycounter)) # suite.addTest(loader.loadTestsFromModule(testopsgainscrambler)) # suite.addTest(loader.loadTestsFromModule(testpsdmath)) # suite.addTest(loader.loadTestsFromModule(testopsmadam)) From 377ce41d445a26d02a7544902134df31f103a63b Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 19 Nov 2020 11:17:51 -0800 Subject: [PATCH 021/690] Noise model now includes a method to return the relative noise weights for detectors. --- src/toast/noise.py | 73 ++++++++++++++++++++++++++++++------------ src/toast/noise_sim.py | 3 ++ 2 files changed, 55 insertions(+), 21 deletions(-) diff --git a/src/toast/noise.py b/src/toast/noise.py index 7bcc61969..890560dd2 100644 --- a/src/toast/noise.py +++ b/src/toast/noise.py @@ -8,40 +8,41 @@ class Noise(object): """Noise objects act as containers for noise PSDs. - Noise is a base class for an object that describes the noise - properties of all detectors for a single observation. + Noise is a base class for an object that describes the noise properties of all + detectors for a single observation. Args: detectors (list): Names of detectors. freqs (dict): Dictionary of arrays of frequencies for `psds`. - psds (dict): Dictionary of arrays which contain the PSD values - for each detector or `mixmatrix` key. - mixmatrix (dict): Mixing matrix describing how the PSDs should - be combined for detector noise. If provided, must contain - entries for every detector, and every key specified for a - detector must be defined in `freqs` and `psds`. - indices (dict): Integer index for every PSD, useful for - generating indepedendent and repeateable noise realizations. - If absent, running indices will be assigned and provided. + psds (dict): Dictionary of arrays which contain the PSD values for each + detector or `mixmatrix` key. + mixmatrix (dict): Mixing matrix describing how the PSDs should be combined for + each detector noise model. 
If provided, must contain entries for every
+            detector, and every key specified for a detector must be defined in `freqs`
+            and `psds`.
+        indices (dict): Integer index for every PSD, useful for generating
+            independent and repeatable noise realizations.  If absent, running
+            indices will be assigned and provided.
 
     Attributes:
         detectors (list): List of detector names
         keys (list): List of PSD names
 
     Raises:
-        KeyError: If `freqs`, `psds`, `mixmatrix` or `indices` do not
-            include all relevant entries.
+        KeyError: If `freqs`, `psds`, `mixmatrix` or `indices` do not include all
+            relevant entries.
         ValueError: If vector lengths in `freqs` and `psds` do not match.
 
     """
 
-    def __init__(self, *, detectors, freqs, psds, mixmatrix=None, indices=None):
-
+    def __init__(self, detectors, freqs, psds, mixmatrix=None, indices=None):
         self._dets = list(sorted(detectors))
 
         if mixmatrix is None:
             # Default diagonal mixing matrix
             self._keys = self._dets
-            self._mixmatrix = None
+            self._mixmatrix = dict()
+            for d in self._dets:
+                self._mixmatrix[d] = {d: 1.0}
         else:
             # Assemble the list of keys needed for the specified detectors
             keys = set()
@@ -70,6 +71,8 @@ def __init__(self, *, detectors, freqs, psds, mixmatrix=None, indices=None):
             # last frequency point should be Nyquist
             self._rates[key] = 2.0 * self._freqs[key][-1]
 
+        self._detweights = None
+
     @property
     def detectors(self):
         """(list): list of strings containing the detector names."""
@@ -98,11 +101,8 @@ def weight(self, det, key):
             weight (float): Mixing matrix weight
 
         """
-        weight = 0
-        if self._mixmatrix is None:
-            if det == key:
-                weight = 1
-        elif key in self._mixmatrix[det]:
+        weight = 0.0
+        if key in self._mixmatrix[det]:
             weight = self._mixmatrix[det][key]
         return weight
 
@@ -149,3 +149,34 @@ def psd(self, key):
 
         """
         return self._psds[key]
+
+    def _detector_weight(self, det):
+        """Internal function which can be overridden by derived classes."""
+        if self._detweights is None:
+            # Compute an effective scalar "noise weight" for each detector based on the
+            # white noise level, accounting for the fact that the PSD may have a
+            # transfer function roll-off near Nyquist
+            self._detweights = {d: 0.0 for d in self.detectors}
+            for k in self.keys:
+                freq = self.freq(k)
+                psd = self.psd(k)
+                rate = self.rate(k)
+                ind = np.logical_and(freq > rate * 0.2, freq < rate * 0.4)
+                noisevar = np.median(psd[ind])
+                for det in self.detectors:
+                    wt = self.weight(det, k)
+                    if wt > 0.0:
+                        self._detweights[det] += wt * (1.0 / noisevar)
+        return self._detweights[det]
+
+    def detector_weight(self, det):
+        """Return the relative noise weight for a detector.
+
+        Args:
+            det (str): The detector name.
+
+        Returns:
+            (float): The noise weight for this detector.
+ + """ + return self._detector_weight(det) diff --git a/src/toast/noise_sim.py b/src/toast/noise_sim.py index e704484b9..67c4f08a6 100644 --- a/src/toast/noise_sim.py +++ b/src/toast/noise_sim.py @@ -102,3 +102,6 @@ def alpha(self, det): def NET(self, det): """(float): the NET.""" return self._NET[det] + + def _detector_weight(self, det): + return 1.0 / (self._NET[det] ** 2) From dd590bb19314ae3764b194e1e69fcf2da7a1229e Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 19 Nov 2020 11:19:18 -0800 Subject: [PATCH 022/690] Add a compiled function that just returns the inverse covariance without the hits --- src/libtoast/include/toast/map_cov.hpp | 9 ++++- src/libtoast/src/toast_map_cov.cpp | 46 +++++++++++++++++++++++++- src/toast/_libtoast_map_cov.cpp | 14 +++----- 3 files changed, 57 insertions(+), 12 deletions(-) diff --git a/src/libtoast/include/toast/map_cov.hpp b/src/libtoast/include/toast/map_cov.hpp index 3eecdf701..08ae1fc00 100644 --- a/src/libtoast/include/toast/map_cov.hpp +++ b/src/libtoast/include/toast/map_cov.hpp @@ -25,7 +25,14 @@ void cov_accum_diag_invnpp(int64_t nsub, int64_t subsize, int64_t nnz, int64_t const * indx_submap, int64_t const * indx_pix, double const * weights, - double scale, int64_t * hits, double * invnpp); + double scale, double * invnpp); + +void cov_accum_diag_invnpp_hits(int64_t nsub, int64_t subsize, int64_t nnz, + int64_t nsamp, + int64_t const * indx_submap, + int64_t const * indx_pix, + double const * weights, + double scale, int64_t * hits, double * invnpp); void cov_accum_zmap(int64_t nsub, int64_t subsize, int64_t nnz, int64_t nsamp, int64_t const * indx_submap, int64_t const * indx_pix, diff --git a/src/libtoast/src/toast_map_cov.cpp b/src/libtoast/src/toast_map_cov.cpp index 314932e64..eb38dcd73 100644 --- a/src/libtoast/src/toast_map_cov.cpp +++ b/src/libtoast/src/toast_map_cov.cpp @@ -94,7 +94,7 @@ void toast::cov_accum_diag_invnpp(int64_t nsub, int64_t subsize, int64_t nnz, int64_t const * indx_submap, int64_t const * indx_pix, double const * weights, - double scale, int64_t * hits, + double scale, double * invnpp) { const int64_t block = (int64_t)(nnz * (nnz + 1) / 2); #pragma omp parallel @@ -107,6 +107,50 @@ void toast::cov_accum_diag_invnpp(int64_t nsub, int64_t subsize, int64_t nnz, int64_t last_pix = first_pix + npix_thread - 1; #endif // ifdef _OPENMP + for (size_t i = 0; i < nsamp; ++i) { + const int64_t isubmap = indx_submap[i] * subsize; + const int64_t ipix = indx_pix[i]; + if ((isubmap < 0) || (ipix < 0)) continue; + + const int64_t hpx = isubmap + ipix; + #ifdef _OPENMP + if ((hpx < first_pix) || (hpx > last_pix)) continue; + #endif // ifdef _OPENMP + const int64_t ipx = hpx * block; + + const double * wpointer = weights + i * nnz; + double * covpointer = invnpp + ipx; + for (size_t j = 0; j < nnz; ++j, ++wpointer) { + const double scaled_weight = *wpointer * scale; + const double * wpointer2 = wpointer; + for (size_t k = j; k < nnz; ++k, ++wpointer2, ++covpointer) { + *covpointer += *wpointer2 * scaled_weight; + } + } + } + } + + return; +} + +void toast::cov_accum_diag_invnpp_hits(int64_t nsub, int64_t subsize, int64_t nnz, + int64_t nsamp, + int64_t const * indx_submap, + int64_t const * indx_pix, + double const * weights, + double scale, int64_t * hits, + double * invnpp) { + const int64_t block = (int64_t)(nnz * (nnz + 1) / 2); + #pragma omp parallel + { + #ifdef _OPENMP + int nthread = omp_get_num_threads(); + int trank = omp_get_thread_num(); + int64_t npix_thread = nsub * subsize / nthread + 1; + int64_t 
first_pix = trank * npix_thread; + int64_t last_pix = first_pix + npix_thread - 1; + #endif // ifdef _OPENMP + for (size_t i = 0; i < nsamp; ++i) { const int64_t isubmap = indx_submap[i] * subsize; const int64_t ipix = indx_pix[i]; diff --git a/src/toast/_libtoast_map_cov.cpp b/src/toast/_libtoast_map_cov.cpp index e212cfb51..7e828470c 100644 --- a/src/toast/_libtoast_map_cov.cpp +++ b/src/toast/_libtoast_map_cov.cpp @@ -133,19 +133,16 @@ void init_map_cov(py::module & m) { m.def("cov_accum_diag_invnpp", [](int64_t nsub, int64_t nsubpix, int64_t nnz, py::buffer submap, - py::buffer subpix, py::buffer weights, double scale, py::buffer invnpp, - py::buffer hits) { + py::buffer subpix, py::buffer weights, double scale, py::buffer invnpp) { auto & gt = toast::GlobalTimers::get(); gt.start("cov_accum_diag_invnpp"); pybuffer_check_1D (submap); pybuffer_check_1D (subpix); pybuffer_check_1D (invnpp); - pybuffer_check_1D (hits); pybuffer_check_1D (weights); py::buffer_info info_submap = submap.request(); py::buffer_info info_subpix = subpix.request(); py::buffer_info info_invnpp = invnpp.request(); - py::buffer_info info_hits = hits.request(); py::buffer_info info_weights = weights.request(); size_t nsamp = info_submap.size; size_t nw = (size_t)(info_weights.size / nnz); @@ -159,18 +156,16 @@ void init_map_cov(py::module & m) { } int64_t * rawsubmap = reinterpret_cast (info_submap.ptr); int64_t * rawsubpix = reinterpret_cast (info_subpix.ptr); - int64_t * rawhits = reinterpret_cast (info_hits.ptr); double * rawinvnpp = reinterpret_cast (info_invnpp.ptr); double * rawweights = reinterpret_cast (info_weights.ptr); toast::cov_accum_diag_invnpp( nsub, nsubpix, nnz, nsamp, rawsubmap, rawsubpix, rawweights, scale, - rawhits, rawinvnpp); + rawinvnpp); gt.stop("cov_accum_diag_invnpp"); return; }, py::arg("nsub"), py::arg("nsubpix"), py::arg("nnz"), py::arg("submap"), - py::arg("subpix"), py::arg("weights"), py::arg("scale"), py::arg("invnpp"), - py::arg( - "hits"), R"( + py::arg("subpix"), py::arg("weights"), py::arg("scale"), py::arg( + "invnpp"), R"( Accumulate block diagonal noise covariance and hits. This uses a pointing matrix to accumulate the local pieces @@ -189,7 +184,6 @@ void init_map_cov(py::module & m) { scale (float): Optional scaling factor. invnpp (array, float64): The local buffer of diagonal inverse pixel covariances, stored as the lower triangle for each pixel. - hits (array, int64): The local hitmap buffer to accumulate. Returns: None. 
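For reference, a minimal sketch of calling the two accumulation entry points from
Python after this split, assuming the new cov_accum_diag_invnpp_hits wrapper mirrors
the C++ argument order (its pybind11 binding is not shown in this hunk, so that
order is a guess); the buffer dtypes follow the pybuffer checks in the wrapper above:

    import numpy as np
    from toast._libtoast import cov_accum_diag_invnpp, cov_accum_diag_invnpp_hits

    nsub, nsubpix, nnz = 1, 12, 3      # one local submap, IQU pointing weights
    block = nnz * (nnz + 1) // 2       # lower-triangle elements stored per pixel
    nsamp = 4                          # a few fake samples, all in submap zero

    submap = np.zeros(nsamp, dtype=np.int64)    # local submap index per sample
    subpix = np.arange(nsamp, dtype=np.int64)   # pixel index within the submap
    weights = np.ones(nsamp * nnz, dtype=np.float64)  # flat-packed Stokes weights
    invnpp = np.zeros(nsub * nsubpix * block, dtype=np.float64)
    hits = np.zeros(nsub * nsubpix, dtype=np.int64)

    # New behavior: accumulate only the inverse covariance, no hits buffer needed.
    cov_accum_diag_invnpp(nsub, nsubpix, nnz, submap, subpix, weights, 1.0, invnpp)

    # Or accumulate the hitmap at the same time with the "_hits" variant
    # (assumed signature, matching the C++ declaration).
    cov_accum_diag_invnpp_hits(
        nsub, nsubpix, nnz, submap, subpix, weights, 1.0, hits, invnpp
    )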
From 21247e212dd5b19966714ba276bc9d4319c44181 Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Thu, 19 Nov 2020 11:20:25 -0800
Subject: [PATCH 023/690] Pipeline operator now returns a list of return values
 from each operator's finalize method

---
 src/toast/future_ops/pipeline.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/toast/future_ops/pipeline.py b/src/toast/future_ops/pipeline.py
index 4fea6fb26..570b5ab0d 100644
--- a/src/toast/future_ops/pipeline.py
+++ b/src/toast/future_ops/pipeline.py
@@ -99,9 +99,11 @@ def _exec(self, data, detectors=None, **kwargs):
         return
 
     def _finalize(self, data, **kwargs):
+        result = list()
         if self.operators is not None:
             for op in self.operators:
-                op.finalize(data)
+                result.append(op.finalize(data))
+        return result
 
     def _requires(self):
         # Work through the operator list in reverse order and prune intermediate

From 5154a40ce781fc85f206876198cad8d18d67f9df Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Thu, 19 Nov 2020 11:21:16 -0800
Subject: [PATCH 024/690] Clarify names of the shared_flags traits.

---
 src/toast/future_ops/pointing_healpix.py | 23 +++++++++++++++--------
 src/toast/future_ops/sim_satellite.py    |  8 +++++---
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/future_ops/pointing_healpix.py
index 0ecf4ce9b..93ee57d5d 100644
--- a/src/toast/future_ops/pointing_healpix.py
+++ b/src/toast/future_ops/pointing_healpix.py
@@ -67,11 +67,11 @@ class PointingHealpix(Operator):
         None, allow_none=True, help="Observation shared key for HWP angle"
     )
 
-    flags = Unicode(
+    shared_flags = Unicode(
         None, allow_none=True, help="Observation shared key for telescope flags to use"
     )
 
-    flag_mask = Int(0, help="Bit mask value for optional flagging")
+    shared_flag_mask = Int(0, help="Bit mask value for optional flagging")
 
     pixels = Unicode("pixels", help="Observation detdata key for output pixel indices")
 
@@ -97,6 +97,8 @@ class PointingHealpix(Operator):
         help="The observation key with a dictionary of pointing weight calibration for each det",
     )
 
+    overwrite = Bool(False, help="If True, regenerate pointing even if it exists")
+
     @traitlets.validate("nside")
     def _check_nside(self, proposal):
         check = proposal["value"]
@@ -127,7 +129,7 @@ def _check_mode(self, proposal):
             raise traitlets.TraitError("Invalid mode (must be 'I' or 'IQU')")
         return check
 
-    @traitlets.validate("flag_mask")
+    @traitlets.validate("shared_flag_mask")
     def _check_flag_mask(self, proposal):
         check = proposal["value"]
         if check < 0:
@@ -169,11 +171,16 @@ def _exec(self, data, detectors=None, **kwargs):
                 # Nothing to do for this observation
                 continue
 
+            if self.pixels in ob.detdata and self.weights in ob.detdata:
+                # The pointing already exists!
+ if not self.overwrite: + continue + # Get the flags if needed flags = None - if self.flags is not None: - flags = np.array(ob.shared[self.flags]) - flags &= self.flag_mask + if self.shared_flags is not None: + flags = np.array(ob.shared[self.shared_flags]) + flags &= self.shared_flag_mask # HWP angle if needed hwp_angle = None @@ -325,8 +332,8 @@ def _requires(self): } if self.cal is not None: req["meta"].append(self.cal) - if self.flags is not None: - req["shared"].append(self.flags) + if self.shared_flags is not None: + req["shared"].append(self.shared_flags) if self.hwp_angle is not None: req["shared"].append(self.hwp_angle) return req diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py index 589ed2e06..42e31c45f 100644 --- a/src/toast/future_ops/sim_satellite.py +++ b/src/toast/future_ops/sim_satellite.py @@ -318,7 +318,9 @@ class SimSatellite(Operator): times = Unicode("times", help="Observation shared key for timestamps") - flags = Unicode("flags", help="Observation shared key for common flags") + shared_flags = Unicode( + "shared_flags", help="Observation shared key for common flags" + ) hwp_angle = Unicode("hwp_angle", help="Observation shared key for HWP angle") @@ -417,7 +419,7 @@ def _exec(self, data, detectors=None, **kwargs): comm=ob.comm_col, ) ob.shared.create( - self.flags, + self.shared_flags, shape=(ob.n_local_samples,), dtype=np.uint8, comm=ob.comm_col, @@ -533,7 +535,7 @@ def _provides(self): return { "shared": [ self.times, - self.flags, + self.shared_flags, self.boresight, self.hwp_angle, self.position, From abf296cf4067dacde0a49a1c9fc6bf222d2d17eb Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 19 Nov 2020 11:51:40 -0800 Subject: [PATCH 025/690] Work in progress on porting noise covariance operators. --- src/toast/future_ops/mapmaker_utils.py | 524 +++++++++++++++++++++++++ src/toast/noise_sim.py | 4 - 2 files changed, 524 insertions(+), 4 deletions(-) create mode 100644 src/toast/future_ops/mapmaker_utils.py diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/future_ops/mapmaker_utils.py new file mode 100644 index 000000000..012cf8c01 --- /dev/null +++ b/src/toast/future_ops/mapmaker_utils.py @@ -0,0 +1,524 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +import numpy as np + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool + +from ..operator import Operator + +from ..timing import function_timer + +from ..pixels import PixelDistribution, PixelData + +from .._libtoast import ( + cov_accum_zmap, + cov_accum_diag_hits, + cov_accum_diag_invnpp, +) + + +@trait_docs +class BuildHitMap(Operator): + """Operator which builds a hitmap. + + Given the pointing matrix for each detector, accumulate the hit map. The PixelData + object containing the hit map is returned by the finalize() method. + + If any samples have compromised telescope pointing, those pixel indices should + have already been set to a negative value by the operator that generated the + pointing matrix. + + Although individual detector flags do not impact the pointing per se, they can be + used with this operator in order to produce a hit map that is consistent with other + pixel space products. 
+
+    """
+
+    # Class traits
+
+    API = Int(0, help="Internal interface version for this operator")
+
+    pixel_dist = Unicode(
+        None,
+        allow_none=True,
+        help="The Data key containing the submap distribution",
+    )
+
+    det_flags = Unicode(
+        None, allow_none=True, help="Observation detdata key for flags to use"
+    )
+
+    det_flag_mask = Int(0, help="Bit mask value for optional flagging")
+
+    pixels = Unicode("pixels", help="Observation detdata key for pixel indices")
+
+    weights = Unicode("weights", help="Observation detdata key for Stokes weights")
+
+    sync_type = Unicode(
+        "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'"
+    )
+
+    @traitlets.validate("det_flag_mask")
+    def _check_flag_mask(self, proposal):
+        check = proposal["value"]
+        if check < 0:
+            raise traitlets.TraitError("Flag mask should be a positive integer")
+        return check
+
+    @traitlets.validate("sync_type")
+    def _check_sync_type(self, proposal):
+        check = proposal["value"]
+        if check != "allreduce" and check != "alltoallv":
+            raise traitlets.TraitError("Invalid communication algorithm")
+        return check
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._hits = None
+
+    @function_timer
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+
+        if self.pixel_dist is None:
+            raise RuntimeError(
+                "You must set the 'pixel_dist' trait before calling exec()"
+            )
+
+        if self.pixel_dist not in data:
+            msg = "Data does not contain submap distribution '{}'".format(
+                self.pixel_dist
+            )
+            raise RuntimeError(msg)
+
+        dist = data[self.pixel_dist]
+
+        # On first call, get the pixel distribution and create our distributed hitmap
+        if self._hits is None:
+            self._hits = PixelData(dist, np.int32, n_value=1)
+
+        for ob in data.obs:
+            # Get the detectors we are using for this observation
+            dets = ob.select_local_detectors(detectors)
+            if len(dets) == 0:
+                # Nothing to do for this observation
+                continue
+
+            for det in dets:
+                # Get local submap and pixels
+                local_sm, local_pix = dist.global_pixel_to_submap(
+                    ob.detdata[self.pixels][det]
+                )
+
+                # Samples with telescope pointing problems are already flagged in
+                # the pointing operators by setting the pixel numbers to a negative
+                # value.  Here we optionally apply detector flags to the local
+                # pixel numbers to flag more samples.
+
+                # Apply the flags if needed
+                if self.det_flags is not None:
+                    flags = np.array(ob.detdata[self.det_flags])
+                    flags &= self.det_flag_mask
+                    local_pix[flags != 0] = -1
+
+                cov_accum_diag_hits(
+                    dist.n_submap,
+                    dist.n_pix_submap,
+                    1,
+                    local_sm.astype(np.int64),
+                    local_pix.astype(np.int64),
+                    self._hits.raw,
+                )
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        if self._hits is not None:
+            if self.sync_type == "alltoallv":
+                self._hits.sync_alltoallv()
+            else:
+                self._hits.sync_allreduce()
+        return self._hits
+
+    def _requires(self):
+        req = {
+            "meta": [self.pixel_dist],
+            "shared": list(),
+            "detdata": [self.pixels, self.weights],
+        }
+        if self.det_flags is not None:
+            req["detdata"].append(self.det_flags)
+        return req
+
+    def _provides(self):
+        prov = {"meta": list(), "shared": list(), "detdata": list()}
+        return prov
+
+    def _accelerators(self):
+        return list()
+
+
+@trait_docs
+class BuildInverseCovariance(Operator):
+    """Operator which builds a pixel-space diagonal inverse noise covariance.
+
+    Given the pointing matrix and noise model for each detector, accumulate the inverse
+    noise covariance:
+
+    .. math::
+        N_pp'^{-1} = \\left( P^T N_tt'^{-1} P \\right)
+
+    The PixelData object containing this is returned by the finalize() method.
+
+    If any samples have compromised telescope pointing, those pixel indices should
+    have already been set to a negative value by the operator that generated the
+    pointing matrix.  Individual detector flags can optionally be applied to
+    time samples when accumulating data.
+
+    """
+
+    # Class traits
+
+    API = Int(0, help="Internal interface version for this operator")
+
+    pixel_dist = Unicode(
+        None,
+        allow_none=True,
+        help="The Data key containing the submap distribution",
+    )
+
+    det_flags = Unicode(
+        None, allow_none=True, help="Observation detdata key for flags to use"
+    )
+
+    det_flag_mask = Int(0, help="Bit mask value for optional flagging")
+
+    pixels = Unicode("pixels", help="Observation detdata key for pixel indices")
+
+    weights = Unicode("weights", help="Observation detdata key for Stokes weights")
+
+    noise_model = Unicode(
+        "noise_model", help="Observation key containing the noise model"
+    )
+
+    sync_type = Unicode(
+        "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'"
+    )
+
+    @traitlets.validate("det_flag_mask")
+    def _check_flag_mask(self, proposal):
+        check = proposal["value"]
+        if check < 0:
+            raise traitlets.TraitError("Flag mask should be a positive integer")
+        return check
+
+    @traitlets.validate("sync_type")
+    def _check_sync_type(self, proposal):
+        check = proposal["value"]
+        if check != "allreduce" and check != "alltoallv":
+            raise traitlets.TraitError("Invalid communication algorithm")
+        return check
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._invcov = None
+
+    @function_timer
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+
+        if self.pixel_dist is None:
+            raise RuntimeError(
+                "You must set the 'pixel_dist' trait before calling exec()"
+            )
+
+        if self.pixel_dist not in data:
+            msg = "Data does not contain submap distribution '{}'".format(
+                self.pixel_dist
+            )
+            raise RuntimeError(msg)
+
+        dist = data[self.pixel_dist]
+
+        weight_nnz = None
+        cov_nnz = None
+
+        for ob in data.obs:
+            # Get the detectors we are using for this observation
+            dets = ob.select_local_detectors(detectors)
+            if len(dets) == 0:
+                # Nothing to do for this observation
+                continue
+
+            # Check that the noise model exists
+            if self.noise_model not in ob:
+                msg = "Noise model {} does not exist in observation {}".format(
+                    self.noise_model, ob.name
+                )
+                raise RuntimeError(msg)
+
+            noise = ob[self.noise_model]
+
+            for det in dets:
+                # The pixels and weights for this detector.
+                pix = ob.detdata[self.pixels]
+                wts = ob.detdata[self.weights]
+
+                # We require that the pointing matrix has the same number of
+                # non-zero elements for every detector and every observation.
+                # We check that here, and if this is the first observation and
+                # detector we have worked with we create the PixelData object.
+                if self._invcov is None:
+                    # We will store the lower triangle of the covariance.
+                    weight_nnz = len(wts.detector_shape)
+                    cov_nnz = weight_nnz * (weight_nnz + 1) // 2
+                    self._invcov = PixelData(dist, np.float64, n_value=cov_nnz)
+                else:
+                    if len(wts.detector_shape) != weight_nnz:
+                        msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format(
+                            ob.name, det, self.weights
+                        )
+                        raise RuntimeError(msg)
+
+                # Get local submap and pixels
+                local_sm, local_pix = dist.global_pixel_to_submap(pix[det])
+
+                # Get the detector weight from the noise model.
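+                # (For the simulated analytic noise model in noise_sim.py this is
+                # simply 1 / NET^2; for a generic Noise object it is estimated
+                # from the median white noise level of each PSD, as implemented
+                # in Noise._detector_weight() earlier in this series.)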
+                detweight = noise.detector_weight(det)
+
+                # Samples with telescope pointing problems are already flagged in
+                # the pointing operators by setting the pixel numbers to a negative
+                # value.  Here we optionally apply detector flags to the local
+                # pixel numbers to flag more samples.
+
+                # Apply the flags if needed
+                if self.det_flags is not None:
+                    flags = np.array(ob.detdata[self.det_flags])
+                    flags &= self.det_flag_mask
+                    local_pix[flags != 0] = -1
+
+                # Accumulate
+                cov_accum_diag_invnpp(
+                    dist.n_submap,
+                    dist.n_pix_submap,
+                    weight_nnz,
+                    local_sm.astype(np.int64),
+                    local_pix.astype(np.int64),
+                    wts.reshape(-1),
+                    detweight,
+                    self._invcov.raw,
+                )
+        return
+
+    def _finalize(self, data, **kwargs):
+        if self._invcov is not None:
+            if self.sync_type == "alltoallv":
+                self._invcov.sync_alltoallv()
+            else:
+                self._invcov.sync_allreduce()
+        return self._invcov
+
+    def _requires(self):
+        req = {
+            "meta": [self.pixel_dist, self.noise_model],
+            "shared": list(),
+            "detdata": [self.pixels, self.weights],
+        }
+        if self.det_flags is not None:
+            req["detdata"].append(self.det_flags)
+        return req
+
+    def _provides(self):
+        prov = {"meta": list(), "shared": list(), "detdata": list()}
+        return prov
+
+    def _accelerators(self):
+        return list()
+
+
+@trait_docs
+class BuildNoiseWeighted(Operator):
+    """Operator which builds a noise-weighted map.
+
+    Given the pointing matrix and noise model for each detector, accumulate the noise
+    weighted map:
+
+    .. math::
+        Z_p = P^T N_tt'^{-1} d
+
+    This is the timestream data weighted by the inverse diagonal time domain noise
+    covariance and projected into pixel space.  The PixelData object containing this
+    is returned by the finalize() method.
+
+    If any samples have compromised telescope pointing, those pixel indices should
+    have already been set to a negative value by the operator that generated the
+    pointing matrix.  Individual detector flags can optionally be applied to
+    time samples when accumulating data.
+
+    """
+
+    # Class traits
+
+    API = Int(0, help="Internal interface version for this operator")
+
+    pixel_dist = Unicode(
+        None,
+        allow_none=True,
+        help="The Data key containing the submap distribution",
+    )
+
+    det_data = Unicode(
+        None, allow_none=True, help="Observation detdata key for the timestream data"
+    )
+
+    det_flags = Unicode(
+        None, allow_none=True, help="Observation detdata key for flags to use"
+    )
+
+    det_flag_mask = Int(0, help="Bit mask value for optional flagging")
+
+    pixels = Unicode("pixels", help="Observation detdata key for pixel indices")
+
+    weights = Unicode("weights", help="Observation detdata key for Stokes weights")
+
+    noise_model = Unicode(
+        "noise_model", help="Observation key containing the noise model"
+    )
+
+    sync_type = Unicode(
+        "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'"
+    )
+
+    @traitlets.validate("det_flag_mask")
+    def _check_flag_mask(self, proposal):
+        check = proposal["value"]
+        if check < 0:
+            raise traitlets.TraitError("Flag mask should be a positive integer")
+        return check
+
+    @traitlets.validate("sync_type")
+    def _check_sync_type(self, proposal):
+        check = proposal["value"]
+        if check != "allreduce" and check != "alltoallv":
+            raise traitlets.TraitError("Invalid communication algorithm")
+        return check
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._zmap = None
+
+    @function_timer
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+
+        if self.pixel_dist is None:
+            raise RuntimeError(
+                "You must set the 'pixel_dist' trait before calling exec()"
+            )
+
+        if self.pixel_dist not in data:
+            msg = "Data does not contain submap distribution '{}'".format(
+                self.pixel_dist
+            )
+            raise RuntimeError(msg)
+
+        dist = data[self.pixel_dist]
+
+        for ob in data.obs:
+            # Get the detectors we are using for this observation
+            dets = ob.select_local_detectors(detectors)
+            if len(dets) == 0:
+                # Nothing to do for this observation
+                continue
+
+            # Check that the noise model exists
+            if self.noise_model not in ob:
+                msg = "Noise model {} does not exist in observation {}".format(
+                    self.noise_model, ob.name
+                )
+                raise RuntimeError(msg)
+
+            noise = ob[self.noise_model]
+
+            for det in dets:
+                # The pixels and weights for this detector.
+                pix = ob.detdata[self.pixels]
+                wts = ob.detdata[self.weights]
+                ddata = ob.detdata[self.det_data][det]
+
+                # We require that the pointing matrix has the same number of
+                # non-zero elements for every detector and every observation.
+                # We check that here, and if this is the first observation and
+                # detector we have worked with we create the PixelData object.
+                if self._zmap is None:
+                    self._zmap = PixelData(
+                        dist, np.float64, n_value=len(wts.detector_shape)
+                    )
+                else:
+                    if len(wts.detector_shape) != self._zmap.n_value:
+                        msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format(
+                            ob.name, det, self.weights
+                        )
+                        raise RuntimeError(msg)
+
+                # Get local submap and pixels
+                local_sm, local_pix = dist.global_pixel_to_submap(pix[det])
+
+                # Get the detector weight from the noise model.
+                detweight = noise.detector_weight(det)
+
+                # Samples with telescope pointing problems are already flagged in
+                # the pointing operators by setting the pixel numbers to a negative
+                # value.  Here we optionally apply detector flags to the local
+                # pixel numbers to flag more samples.
+ + # Apply the flags if needed + if self.det_flags is not None: + flags = np.array(ob.detdata[self.det_flags]) + flags &= self.det_flag_mask + local_pix[flags != 0] = -1 + + # Accumulate + cov_accum_zmap( + dist.n_submap, + dist.n_pix_submap, + self._zmap.n_value, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + wts.reshape(-1), + detweight, + ddata, + self._zmap.raw, + ) + return + + def _finalize(self, data, **kwargs): + if self._zmap is not None: + if self.sync_type == "alltoallv": + self._zmap.sync_alltoallv() + else: + self._zmap.sync_allreduce() + return self._zmap + + def _requires(self): + req = { + "meta": [self.pixel_dist, self.noise_model, self.det_data], + "shared": list(), + "detdata": [self.pixels, self.weights], + } + if self.det_flags is not None: + req["detdata"].append(self.det_flags) + return req + + def _provides(self): + prov = {"meta": list(), "shared": list(), "detdata": list()} + return prov + + def _accelerators(self): + return list() diff --git a/src/toast/noise_sim.py b/src/toast/noise_sim.py index 67c4f08a6..132e81272 100644 --- a/src/toast/noise_sim.py +++ b/src/toast/noise_sim.py @@ -83,10 +83,6 @@ def __init__(self, *, detectors, rate, fmin, fknee, alpha, NET, indices=None): # call the parent class constructor to store the psds super().__init__(detectors=detectors, freqs=freqs, psds=psds, indices=indices) - def rate(self, det): - """(float): the sample rate in Hz.""" - return self._rate[det] - def fmin(self, det): """(float): the minimum frequency in Hz, used as a high pass.""" return self._fmin[det] From c94a3a121903f6dfae45772eb97b0c2a8f3ba5ec Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 19 Nov 2020 13:48:08 -0800 Subject: [PATCH 026/690] Add basic unit test for covariance / hit generation --- src/toast/future_ops/CMakeLists.txt | 1 + src/toast/future_ops/__init__.py | 2 + src/toast/future_ops/mapmaker_utils.py | 35 +++-- src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/ops_mapmaker_utils.py | 185 +++++++++++++++++++++++++ src/toast/tests/runner.py | 2 + 6 files changed, 217 insertions(+), 9 deletions(-) create mode 100644 src/toast/tests/ops_mapmaker_utils.py diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/future_ops/CMakeLists.txt index 6f0a0e113..e1cea9d0e 100644 --- a/src/toast/future_ops/CMakeLists.txt +++ b/src/toast/future_ops/CMakeLists.txt @@ -10,5 +10,6 @@ install(FILES sim_satellite.py noise_model.py pointing_healpix.py + mapmaker_utils.py DESTINATION ${PYTHON_SITE}/toast/future_ops ) diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py index 0b9b9e014..a16dedf7e 100644 --- a/src/toast/future_ops/__init__.py +++ b/src/toast/future_ops/__init__.py @@ -15,3 +15,5 @@ from .noise_model import DefaultNoiseModel from .pointing_healpix import PointingHealpix + +from .mapmaker_utils import BuildHitMap, BuildInverseCovariance, BuildNoiseWeighted diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/future_ops/mapmaker_utils.py index 012cf8c01..12eaa87be 100644 --- a/src/toast/future_ops/mapmaker_utils.py +++ b/src/toast/future_ops/mapmaker_utils.py @@ -101,7 +101,7 @@ def _exec(self, data, detectors=None, **kwargs): # On first call, get the pixel distribution and create our distributed hitmap if self._hits is None: - self._hits = PixelData(dist, np.int32, n_value=1) + self._hits = PixelData(dist, np.int64, n_value=1) for ob in data.obs: # Get the detectors we are using for this observation @@ -276,11 +276,19 @@ def _exec(self, data, detectors=None, **kwargs): # detector we 
have worked with we create the PixelData object. if self._invcov is None: # We will store the lower triangle of the covariance. - weight_nnz = len(wts.detector_shape) + if len(wts.detector_shape) == 1: + weight_nnz = 1 + else: + weight_nnz = wts.detector_shape[1] cov_nnz = weight_nnz * (weight_nnz + 1) // 2 self._invcov = PixelData(dist, np.float64, n_value=cov_nnz) else: - if len(wts.detector_shape) != weight_nnz: + check_nnz = None + if len(wts.detector_shape) == 1: + check_nnz = 1 + else: + check_nnz = wts.detector_shape[1] + if check_nnz != weight_nnz: msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( ob.name, det, self.weights ) @@ -310,7 +318,7 @@ def _exec(self, data, detectors=None, **kwargs): weight_nnz, local_sm.astype(np.int64), local_pix.astype(np.int64), - wts.reshape(-1), + wts[det].reshape(-1), detweight, self._invcov.raw, ) @@ -430,6 +438,8 @@ def _exec(self, data, detectors=None, **kwargs): dist = data[self.pixel_dist] + weight_nnz = None + for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) @@ -457,11 +467,18 @@ def _exec(self, data, detectors=None, **kwargs): # We check that here, and if this is the first observation and # detector we have worked with we create the PixelData object. if self._zmap is None: - self._zmap = PixelData( - dist, np.float64, n_value=len(wts.detector_shape) - ) + if len(wts.detector_shape) == 1: + weight_nnz = 1 + else: + weight_nnz = wts.detector_shape[1] + self._zmap = PixelData(dist, np.float64, n_value=weight_nnz) else: - if len(wts.detector_shape) != self._zmap.n_value: + check_nnz = None + if len(wts.detector_shape) == 1: + check_nnz = 1 + else: + check_nnz = wts.detector_shape[1] + if check_nnz != weight_nnz: msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( ob.name, det, self.weights ) @@ -491,7 +508,7 @@ def _exec(self, data, detectors=None, **kwargs): self._zmap.n_value, local_sm.astype(np.int64), local_pix.astype(np.int64), - wts.reshape(-1), + wts[det].reshape(-1), detweight, ddata, self._zmap.raw, diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 232a46d72..7102866a4 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -20,6 +20,7 @@ install(FILES ops_sim_satellite.py ops_applygain.py ops_sim_tod_noise.py + ops_mapmaker_utils.py covariance.py ops_pointing_healpix.py ops_dipole.py diff --git a/src/toast/tests/ops_mapmaker_utils.py b/src/toast/tests/ops_mapmaker_utils.py new file mode 100644 index 000000000..690d48151 --- /dev/null +++ b/src/toast/tests/ops_mapmaker_utils.py @@ -0,0 +1,185 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np + +from astropy import units as u + +from .mpi import MPITestCase + +from ..vis import set_matplotlib_backend + +from .. import rng as rng + +from ..noise import Noise + +from .. 
import future_ops as ops + +from ..future_ops.sim_tod_noise import sim_noise_timestream + +from ._helpers import create_outdir, create_satellite_data + + +class MapmakerUtilsTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + + def test_hits(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + build_hits = ops.BuildHitMap(pixel_dist="pixel_dist") + hits = build_hits.apply(data) + + del data + return + + def test_inv_cov(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Construct a correlated analytic noise model for the detectors for each + # observation. + for ob in data.obs: + nse = ob[default_model.noise_model] + corr_freqs = { + "noise_{}".format(i): nse.freq(x) + for i, x in enumerate(ob.local_detectors) + } + corr_psds = { + "noise_{}".format(i): nse.psd(x) + for i, x in enumerate(ob.local_detectors) + } + corr_indices = { + "noise_{}".format(i): 100 + i for i, x in enumerate(ob.local_detectors) + } + corr_mix = dict() + for i, x in enumerate(ob.local_detectors): + dmix = np.random.uniform( + low=-1.0, high=1.0, size=len(ob.local_detectors) + ) + corr_mix[x] = { + "noise_{}".format(y): dmix[y] + for y in range(len(ob.local_detectors)) + } + ob["noise_model_corr"] = Noise( + detectors=ob.local_detectors, + freqs=corr_freqs, + psds=corr_psds, + mixmatrix=corr_mix, + indices=corr_indices, + ) + + # Simulate noise using both models + + sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + sim_noise.apply(data) + + sim_noise_corr = ops.SimNoise(noise_model="noise_model_corr", out="noise_corr") + sim_noise_corr.apply(data) + + # Build an inverse covariance from both + + build_invnpp = ops.BuildInverseCovariance( + pixel_dist="pixel_dist", noise_model="noise_model" + ) + invnpp = build_invnpp.apply(data) + + build_invnpp_corr = ops.BuildInverseCovariance( + pixel_dist="pixel_dist", noise_model="noise_model_corr" + ) + invnpp_corr = build_invnpp_corr.apply(data) + + del data + return + + def test_zmap(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Construct a correlated analytic noise model for the detectors for each + # observation. 
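+        # Every detector mixes all of the "noise_{}" streams constructed below
+        # with random uniform weights, giving a dense correlated mixing matrix.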
+ for ob in data.obs: + nse = ob[default_model.noise_model] + corr_freqs = { + "noise_{}".format(i): nse.freq(x) + for i, x in enumerate(ob.local_detectors) + } + corr_psds = { + "noise_{}".format(i): nse.psd(x) + for i, x in enumerate(ob.local_detectors) + } + corr_indices = { + "noise_{}".format(i): 100 + i for i, x in enumerate(ob.local_detectors) + } + corr_mix = dict() + for i, x in enumerate(ob.local_detectors): + dmix = np.random.uniform( + low=-1.0, high=1.0, size=len(ob.local_detectors) + ) + corr_mix[x] = { + "noise_{}".format(y): dmix[y] + for y in range(len(ob.local_detectors)) + } + ob["noise_model_corr"] = Noise( + detectors=ob.local_detectors, + freqs=corr_freqs, + psds=corr_psds, + mixmatrix=corr_mix, + indices=corr_indices, + ) + + # Simulate noise using both models + + sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + sim_noise.apply(data) + + sim_noise_corr = ops.SimNoise(noise_model="noise_model_corr", out="noise_corr") + sim_noise_corr.apply(data) + + # Build a noise weighted map from both + + build_zmap = ops.BuildNoiseWeighted( + pixel_dist="pixel_dist", noise_model="noise_model", det_data="noise" + ) + zmap = build_zmap.apply(data) + + build_zmap_corr = ops.BuildNoiseWeighted( + pixel_dist="pixel_dist", + noise_model="noise_model_corr", + det_data="noise_corr", + ) + zmap_corr = build_zmap_corr.apply(data) + + del data + return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 83fed9128..951b117b2 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -34,6 +34,7 @@ from . import ops_memory_counter as test_ops_memory_counter from . import ops_pointing_healpix as test_ops_pointing_healpix from . import ops_sim_tod_noise as test_ops_sim_tod_noise +from . import ops_mapmaker_utils as test_ops_mapmaker_utils # @@ -142,6 +143,7 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_memory_counter)) suite.addTest(loader.loadTestsFromModule(test_ops_pointing_healpix)) suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise)) + suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_utils)) # # suite.addTest(loader.loadTestsFromModule(testtod)) From ec078c987d15dddc700c087858eaa2999cfd7016 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Wed, 25 Nov 2020 07:22:49 -0800 Subject: [PATCH 027/690] Many small fixes. Noise covariance unit tests all passing. 
--- src/libtoast/src/toast_map_cov.cpp | 46 +- src/toast/_libtoast_map_cov.cpp | 31 +- src/toast/covariance.py | 57 +-- src/toast/future_ops/mapmaker_utils.py | 6 +- src/toast/noise.py | 2 +- src/toast/pixels.py | 118 +++-- src/toast/tests/covariance.py | 575 ++++--------------------- src/toast/tests/ops_mapmaker_utils.py | 174 +++++--- src/toast/tests/runner.py | 6 +- 9 files changed, 381 insertions(+), 634 deletions(-) diff --git a/src/libtoast/src/toast_map_cov.cpp b/src/libtoast/src/toast_map_cov.cpp index eb38dcd73..3e6922947 100644 --- a/src/libtoast/src/toast_map_cov.cpp +++ b/src/libtoast/src/toast_map_cov.cpp @@ -127,6 +127,17 @@ void toast::cov_accum_diag_invnpp(int64_t nsub, int64_t subsize, int64_t nnz, *covpointer += *wpointer2 * scaled_weight; } } + + // std::cout << "Accum to local pixel " << hpx << " with scale " << scale << + // ":" << std::endl; + // for (size_t j = 0; j < nnz; ++j) { + // std::cout << " " << weights[i * nnz + j]; + // } + // std::cout << std::endl; + // for (size_t j = 0; j < block; ++j) { + // std::cout << " " << invnpp[ipx + j]; + // } + // std::cout << std::endl; } } @@ -225,18 +236,31 @@ void toast::cov_eigendecompose_diag(int64_t nsub, int64_t subsize, int64_t nnz, // shortcut for NNZ == 1 if (!invert) { // Not much point in calling this! - for (int64_t i = 0; i < nsub; ++i) { - for (int64_t j = 0; j < subsize; ++j) { - cond[i * subsize + j] = 1.0; + if (cond != NULL) { + for (int64_t i = 0; i < nsub; ++i) { + for (int64_t j = 0; j < subsize; ++j) { + cond[i * subsize + j] = 1.0; + } } } } else { - for (int64_t i = 0; i < nsub; ++i) { - for (int64_t j = 0; j < subsize; ++j) { - int64_t dpx = (i * subsize) + j; - cond[dpx] = 1.0; - if (data[dpx] != 0) { - data[dpx] = 1.0 / data[dpx]; + if (cond != NULL) { + for (int64_t i = 0; i < nsub; ++i) { + for (int64_t j = 0; j < subsize; ++j) { + int64_t dpx = (i * subsize) + j; + cond[dpx] = 1.0; + if (data[dpx] != 0) { + data[dpx] = 1.0 / data[dpx]; + } + } + } + } else { + for (int64_t i = 0; i < nsub; ++i) { + for (int64_t j = 0; j < subsize; ++j) { + int64_t dpx = (i * subsize) + j; + if (data[dpx] != 0) { + data[dpx] = 1.0 / data[dpx]; + } } } } @@ -368,7 +392,9 @@ void toast::cov_eigendecompose_diag(int64_t nsub, int64_t subsize, int64_t nnz, } } } - cond[i] = rcond; + if (cond != NULL) { + cond[i] = rcond; + } } } } diff --git a/src/toast/_libtoast_map_cov.cpp b/src/toast/_libtoast_map_cov.cpp index 7e828470c..acbb04cc3 100644 --- a/src/toast/_libtoast_map_cov.cpp +++ b/src/toast/_libtoast_map_cov.cpp @@ -166,10 +166,10 @@ void init_map_cov(py::module & m) { }, py::arg("nsub"), py::arg("nsubpix"), py::arg("nnz"), py::arg("submap"), py::arg("subpix"), py::arg("weights"), py::arg("scale"), py::arg( "invnpp"), R"( - Accumulate block diagonal noise covariance and hits. + Accumulate block diagonal noise covariance. This uses a pointing matrix to accumulate the local pieces - of the inverse diagonal pixel covariance and hits. + of the inverse diagonal pixel covariance. Args: nsub (int): The number of locally stored submaps. 
@@ -270,17 +270,24 @@ void init_map_cov(py::module & m) { py::buffer_info info_cond = cond.request(); int64_t block = (int64_t)(nnz * (nnz + 1) / 2); size_t nb = (size_t)(info_data.size / block); - if (info_cond.size != nb) { - auto log = toast::Logger::get(); - std::ostringstream o; - o << "Buffer sizes are not consistent."; - log.error(o.str().c_str()); - throw std::runtime_error(o.str().c_str()); - } double * rawdata = reinterpret_cast (info_data.ptr); - double * rawcond = reinterpret_cast (info_cond.ptr); - toast::cov_eigendecompose_diag(nsub, nsubpix, nnz, rawdata, rawcond, - threshold, invert); + double * rawcond; + if (info_cond.size > 0) { + if (info_cond.size != nb) { + auto log = toast::Logger::get(); + std::ostringstream o; + o << "Buffer sizes are not consistent."; + log.error(o.str().c_str()); + throw std::runtime_error(o.str().c_str()); + } + rawcond = reinterpret_cast (info_cond.ptr); + toast::cov_eigendecompose_diag(nsub, nsubpix, nnz, rawdata, rawcond, + threshold, invert); + } else { + rawcond = NULL; + toast::cov_eigendecompose_diag(nsub, nsubpix, nnz, rawdata, rawcond, + threshold, invert); + } gt.stop("cov_eigendecompose_diag"); return; }, py::arg("nsub"), py::arg("nsubpix"), py::arg("nnz"), py::arg("data"), diff --git a/src/toast/covariance.py b/src/toast/covariance.py index 9762f0cd3..252111aad 100644 --- a/src/toast/covariance.py +++ b/src/toast/covariance.py @@ -56,8 +56,9 @@ def local_invert(n_submap_value, receive_locations, receive, reduce_buf): ) for lc in locs: receive[lc : lc + n_submap_value] = reduce_buf - if rcond is not None: - rcond.receive[lc : lc + (n_pix_submap * mapnnz)] = rcond.reduce_buf + if rcond is not None: + for lc in rcond._recv_locations[sm]: + rcond.receive[lc : lc + n_pix_submap] = rcond.reduce_buf return local_invert @@ -86,22 +87,16 @@ def covariance_invert(npp, threshold, rcond=None, use_alltoallv=False): """ mapnnz = int(((np.sqrt(8 * npp.n_value) - 1) / 2) + 0.5) nppdata = npp.raw - if nppdata is None: - nppdata = np.empty(shape=0, dtype=np.float64) if rcond is not None: - if rcond.distribution.n_pix != npp.distribution.n_pix: + if npp.distribution != rcond.distribution: raise RuntimeError( - "covariance matrix and condition number map must have same number " - "of pixels" - ) - if rcond.distribution.n_pix_submap != npp.distribution.n_pix_submap: - raise RuntimeError( - "covariance matrix and condition number map must have same submap size" + "covariance matrix and rcond must have same pixel distribution" ) if rcond.n_value != 1: raise RuntimeError("condition number map should have n_value = 1") if use_alltoallv: + myp = npp.distribution.comm.rank if rcond is not None: # Stage data to receive buffer rcond.forward_alltoallv() @@ -116,10 +111,10 @@ def covariance_invert(npp, threshold, rcond=None, use_alltoallv=False): else: rdata = rcond.raw cov_eigendecompose_diag( - 1, - n_pix_submap, + npp.distribution.n_local_submap, + npp.distribution.n_pix_submap, mapnnz, - reduce_buf, + nppdata, rdata, threshold, True, @@ -177,10 +172,8 @@ def covariance_multiply(npp1, npp2, use_alltoallv=False): """ mapnnz = int(((np.sqrt(8 * npp1.n_value) - 1) / 2) + 0.5) - if npp1.n_pix != npp2.n_pix: - raise RuntimeError("covariance matrices must have same number of pixels") - if npp1.n_pix_submap != npp2.n_pix_submap: - raise RuntimeError("covariance matrices must have same submap size") + if npp1.distribution != npp2.distribution: + raise RuntimeError("covariance matrices must have same pixel distribution") if npp1.n_value != npp2.n_value: raise 
RuntimeError("covariance matrices must have same n_values") @@ -189,7 +182,9 @@ def covariance_multiply(npp1, npp2, use_alltoallv=False): lmultiply = create_local_multiply(npp1.distribution.n_pix_submap, mapnnz, npp2) npp1.sync_alltoallv(local_func=lmultiply) else: - cov_mult_diag(npp1.n_submap, npp1.n_pix_submap, mapnnz, npp1data, npp2data) + cov_mult_diag( + npp1.n_local_submap, npp1.n_pix_submap, mapnnz, npp1data, npp2data + ) return @@ -218,7 +213,7 @@ def local_apply(n_submap_value, receive_locations, receive, reduce_buf): cov_apply_diag(1, n_pix_submap, mapnnz, reduce_buf, m_buf) - for lc in locs: + for lc in m._recv_locations[sm]: m.receive[lc : lc + (n_pix_submap * mapnnz)] = m.reduce_buf return local_apply @@ -244,10 +239,10 @@ def covariance_apply(npp, m, use_alltoallv=False): """ mapnnz = int(((np.sqrt(8 * npp.n_value) - 1) / 2) + 0.5) - if m.n_pix != npp.n_pix: - raise RuntimeError("covariance matrix and map must have same number of pixels") - if m.n_pix_submap != npp.n_pix_submap: - raise RuntimeError("covariance matrix and map must have same submap size") + if npp.distribution != m.distribution: + raise RuntimeError( + "covariance matrix and map must have same pixel distribution" + ) if m.n_value != mapnnz: raise RuntimeError("covariance matrix and map have incompatible NNZ values") @@ -258,7 +253,13 @@ def covariance_apply(npp, m, use_alltoallv=False): else: nppdata = npp.raw mdata = m.raw - cov_apply_diag(npp.n_submap, npp.n_pix_submap, mapnnz, nppdata, mdata) + cov_apply_diag( + npp.distribution.n_local_submap, + npp.distribution.n_pix_submap, + mapnnz, + nppdata, + mdata, + ) return @@ -294,10 +295,10 @@ def covariance_rcond(npp, use_alltoallv=False): nppdata = npp.raw rdata = rcond.raw cov_eigendecompose_diag( - 1, - n_pix_submap, + npp.distribution.n_local_submap, + npp.distribution.n_pix_submap, mapnnz, - reduce_buf, + nppdata, rdata, threshold, False, diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/future_ops/mapmaker_utils.py index 12eaa87be..52bb5b566 100644 --- a/src/toast/future_ops/mapmaker_utils.py +++ b/src/toast/future_ops/mapmaker_utils.py @@ -128,7 +128,7 @@ def _exec(self, data, detectors=None, **kwargs): local_pix[flags != 0] = -1 cov_accum_diag_hits( - dist.n_submap, + dist.n_local_submap, dist.n_pix_submap, 1, local_sm.astype(np.int64), @@ -313,7 +313,7 @@ def _exec(self, data, detectors=None, **kwargs): # Accumulate cov_accum_diag_invnpp( - dist.n_submap, + dist.n_local_submap, dist.n_pix_submap, weight_nnz, local_sm.astype(np.int64), @@ -503,7 +503,7 @@ def _exec(self, data, detectors=None, **kwargs): # Accumulate cov_accum_zmap( - dist.n_submap, + dist.n_local_submap, dist.n_pix_submap, self._zmap.n_value, local_sm.astype(np.int64), diff --git a/src/toast/noise.py b/src/toast/noise.py index 890560dd2..db564e8b2 100644 --- a/src/toast/noise.py +++ b/src/toast/noise.py @@ -165,7 +165,7 @@ def _detector_weight(self, det): noisevar = np.median(psd[ind]) for det in self.detectors: wt = self.weight(det, k) - if wt > 0.0: + if wt != 0.0: self._detweights[det] += wt * (1.0 / noisevar) return self._detweights[det] diff --git a/src/toast/pixels.py b/src/toast/pixels.py index bd5974408..8344abf16 100644 --- a/src/toast/pixels.py +++ b/src/toast/pixels.py @@ -73,6 +73,31 @@ def __init__(self, n_pix=None, n_submap=1000, local_submaps=None, comm=None): self._owned_submaps = None self._alltoallv_info = None + def __eq__(self, other): + local_eq = True + if self._n_pix != other._n_pix: + local_eq = False + if self._n_submap != other._n_submap: + 
local_eq = False + if self._n_pix_submap != other._n_pix_submap: + local_eq = False + if not np.array_equal(self._local_submaps, other._local_submaps): + local_eq = False + if self._comm is None and other._comm is not None: + local_eq = False + if self._comm is not None and other._comm is None: + local_eq = False + if self._comm is not None: + comp = MPI.Comm.Compare(self._comm, other._comm) + if comp not in (MPI.IDENT, MPI.CONGRUENT): + local_eq = False + if self._comm is not None: + local_eq = self._comm.allreduce(local_eq, op=MPI.LAND) + return local_eq + + def __ne__(self, other): + return not self.__eq__(other) + def clear(self): """Delete the underlying memory. @@ -171,8 +196,8 @@ def global_pixel_to_local(self, gl): local_sm, pixels = libtoast_global_to_local( gl, self._n_pix_submap, self._glob2loc ) - local_sm[:] *= self._n_pix_submap - pixels[:] += local_sm + local_sm *= self._n_pix_submap + pixels += local_sm return pixels def __repr__(self): @@ -204,16 +229,6 @@ def submap_owners(self): # Need to compute it. local_hit_submaps = np.zeros(self._n_submap, dtype=np.uint8) local_hit_submaps[self._local_submaps] = 1 - # print( - # "rank {} local_submaps = {}".format( - # self._comm.rank, self._local_submaps[:] - # ) - # ) - # print( - # "rank {} local_hit_submaps = {}".format( - # self._comm.rank, local_hit_submaps[:] - # ) - # ) hit_submaps = None if self._comm.rank == 0: @@ -223,9 +238,6 @@ def submap_owners(self): del local_hit_submaps if self._comm.rank == 0: - # print( - # "rank {} hit_submaps = {}".format(self._comm.rank, hit_submaps[:]) - # ) total_hit_submaps = np.sum(hit_submaps.astype(np.int32)) tdist = distribute_uniform(total_hit_submaps, self._comm.size) @@ -247,8 +259,6 @@ def submap_owners(self): del hit_submaps self._comm.Bcast(self._submap_owners, root=0) - # if self._comm.rank == 0: - # print("submap owners = {}".format(self._submap_owners[:])) return self._submap_owners @property @@ -261,7 +271,6 @@ def owned_submaps(self): self._owned_submaps = np.array( [x for x, y in enumerate(owners) if y == self._comm.rank], dtype=np.int32 ) - # print("rank {} owns submaps {}".format(self._comm.rank, self._owned_submaps[:])) return self._owned_submaps @property @@ -506,6 +515,18 @@ def __repr__(self): ) return val + def duplicate(self): + """Create a copy of the data with the same distribution. + + Returns: + (PixelData): A duplicate of the instance with copied data but the same + distribution. + + """ + dup = PixelData(self.distribution, self.dtype, n_value=self.n_value) + dup.raw[:] = self.raw + return dup + def comm_nsubmap(self, bytes): """Given a buffer size, compute the number of submaps to communicate. @@ -607,6 +628,7 @@ def local_reduction(n_submap_value, receive_locations, receive, reduce_buf): def setup_alltoallv(self): """Check that alltoallv buffers exist and create them if needed.""" if self._send_counts is None: + log = Logger.get() # Get the parameters in terms of submaps. ( send_counts, @@ -634,27 +656,41 @@ def setup_alltoallv(self): self._reduce_buf_raw = self.storage_class.zeros(self._n_submap_value) self.reduce_buf = self._reduce_buf_raw.array() - if self._dist.comm is None: - # For this case, point the receive member to the original data. This - # will allow codes processing locally owned submaps to work - # transparently in the serial case. 
-                self.receive = self.data
-            else:
-                # Check that our send and receive buffers do not exceed 32bit indices
-                # required by MPI
-                max_int = 2147483647
-                if scale * (self._recv_displ[-1] + self._recv_counts[-1]) > max_int:
-                    msg = "Alltoallv receive buffer size exceeds max 32bit integer"
-                    raise RuntimeError(msg)
-                if len(self.raw) > max_int:
-                    msg = "Alltoallv send buffer size exceeds max 32bit integer"
-                    raise RuntimeError(msg)
-
-                # Allocate a persistent receive buffer
-                self._receive_raw = self.storage_class.zeros(
-                    self._recv_displ[-1] + self._recv_counts[-1]
-                )
-                self.receive = self._receive_raw.array()
+            buf_check_fail = False
+            try:
+                if self._dist.comm is None:
+                    # For this case, point the receive member to the original data.
+                    # This will allow codes processing locally owned submaps to work
+                    # transparently in the serial case.
+                    self.receive = self.data
+                else:
+                    # Check that our send and receive buffers do not exceed 32bit
+                    # indices required by MPI
+                    max_int = 2147483647
+                    recv_buf_size = self._recv_displ[-1] + self._recv_counts[-1]
+                    if recv_buf_size > max_int:
+                        msg = "Proc {} Alltoallv receive buffer size exceeds max 32bit integer".format(
+                            self._dist.comm.rank
+                        )
+                        log.error(msg)
+                        buf_check_fail = True
+                    if len(self.raw) > max_int:
+                        msg = "Proc {} Alltoallv send buffer size exceeds max 32bit integer".format(
+                            self._dist.comm.rank
+                        )
+                        log.error(msg)
+                        buf_check_fail = True
+
+                    # Allocate a persistent receive buffer
+                    self._receive_raw = self.storage_class.zeros(recv_buf_size)
+                    self.receive = self._receive_raw.array()
+            except Exception:
+                buf_check_fail = True
+            if self._dist.comm is not None:
+                buf_check_fail = self._dist.comm.allreduce(buf_check_fail, op=MPI.LOR)
+            if buf_check_fail:
+                msg = "alltoallv buffer setup failed on one or more processes"
+                raise RuntimeError(msg)
 
     @function_timer
     def forward_alltoallv(self):
diff --git a/src/toast/tests/covariance.py b/src/toast/tests/covariance.py
index 6135d2d54..89c613e1c 100644
--- a/src/toast/tests/covariance.py
+++ b/src/toast/tests/covariance.py
@@ -2,27 +2,24 @@
 # All rights reserved. Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
 
-from .mpi import MPITestCase
-
 import os
 
 import numpy as np
-
 import numpy.testing as nt
 
+from astropy import units as u
+
 import healpy as hp
 
-from ..tod import AnalyticNoise, OpSimNoise
-from ..todmap import TODSatellite, OpPointingHpix, OpAccumDiag
-from ..todmap.todmap_math import cov_accum_diag
-from ..map import DistPixels, covariance_invert, covariance_rcond, covariance_multiply
+from .mpi import MPITestCase
+
+from ..
import future_ops as ops + +from ..pixels import PixelDistribution, PixelData + +from ..covariance import covariance_invert, covariance_multiply, covariance_apply -from ._helpers import ( - create_outdir, - create_distdata, - boresight_focalplane, - uniform_chunks, -) +from ._helpers import create_outdir, create_satellite_data class CovarianceTest(MPITestCase): @@ -30,497 +27,115 @@ def setUp(self): fixture_name = os.path.splitext(os.path.basename(__file__))[0] self.outdir = create_outdir(self.comm, fixture_name) - # Create one observation per group, and each observation will have - # a fixed number of detectors and one chunk per process. - - self.data = create_distdata(self.comm, obs_per_group=1) - - self.ndet = 1 - self.rate = 40.0 - self.hwprpm = 50 - - # Create detectors - ( - dnames, - dquat, - depsilon, - drate, - dnet, - dfmin, - dfknee, - dalpha, - ) = boresight_focalplane(self.ndet, samplerate=self.rate, net=7.0) - - # Samples per observation - self.totsamp = 240000 - - # Pixelization - - self.sim_nside = 32 - self.sim_npix = 12 * self.sim_nside ** 2 - - self.map_nside = 32 - self.map_npix = 12 * self.map_nside ** 2 - - # Scan strategy - - self.spinperiod = 10.0 - self.spinangle = 30.0 - self.precperiod = 50.0 - self.precangle = 65.0 - - # One chunk per process - chunks = uniform_chunks(self.totsamp, nchunk=self.data.comm.group_size) - - # Populate the single observation per group - - tod = TODSatellite( - self.data.comm.comm_group, - dquat, - self.totsamp, - detranks=1, - firsttime=0.0, - rate=self.rate, - spinperiod=self.spinperiod, - spinangle=self.spinangle, - precperiod=self.precperiod, - precangle=self.precangle, - sampsizes=chunks, - hwprpm=self.hwprpm, - ) - - tod.set_prec_axis() + def create_invnpp(self): + """Helper function to build a realistic inverse pixel covariance.""" - nse = AnalyticNoise( - rate=drate, - fmin=dfmin, - detectors=dnames, - fknee=dfknee, - alpha=dalpha, - NET=dnet, - ) + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) - self.data.obs[0]["tod"] = tod - self.data.obs[0]["noise"] = nse - - def tearDown(self): - del self.data - - def test_accum(self): - nsm = 2 - npix = 3 - nnz = 4 - block = int(nnz * (nnz + 1) / 2) - scale = 2.0 - nsamp = nsm * npix - - fake = DistPixels( - None, - comm=self.data.comm.comm_world, - npix=nsm * npix, - nnz=nnz, - dtype=np.float64, - npix_submap=npix, - local_submaps=np.arange(nsm, dtype=np.int64), - ) - check = fake.duplicate() - - hits = DistPixels( - None, - comm=self.data.comm.comm_world, - npix=nsm * npix, - nnz=1, - dtype=np.int64, - npix_submap=npix, - local_submaps=np.arange(nsm, dtype=np.int64), - ) - checkhits = hits.duplicate() - - invn = DistPixels( - None, - comm=self.data.comm.comm_world, - npix=nsm * npix, - nnz=block, - dtype=np.float64, - npix_submap=npix, - local_submaps=np.arange(nsm, dtype=np.int64), - ) - checkinvn = invn.duplicate() - - sm = np.zeros(nsamp, dtype=np.int64) - pix = np.zeros(nsamp, dtype=np.int64) - wt = np.zeros(nsamp * nnz, dtype=np.float64) - - for i in range(nsamp): - sm[i] = i % nsm - pix[i] = i % npix - for k in range(nnz): - wt[i * nnz + k] = float(k + 1) - - signal = np.random.normal(size=nsamp) - # - # print( - # nsm, - # npix, - # nnz, - # sm.dtype, - # pix.dtype, - # wt.dtype, - # scale, - # signal.dtype, - # invn.flatdata.dtype, - # hits.flatdata.dtype, - # fake.flatdata.dtype, - # flush=True, - # ) - - cov_accum_diag( - nsm, - npix, - nnz, - sm, - pix, - wt, - scale, - signal, - invn.flatdata, - hits.flatdata, - fake.flatdata, + # 
Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" ) + pointing.apply(data) - for i in range(nsamp): - checkhits.data[sm[i], pix[i], 0] += 1 - off = 0 - for j in range(nnz): - check.data[sm[i], pix[i], j] += scale * signal[i] * wt[i * nnz + j] - for k in range(j, nnz): - checkinvn.data[sm[i], pix[i], off] += ( - scale * wt[i * nnz + j] * wt[i * nnz + k] - ) - off += 1 - - # for i in range(nsamp): - # print("{}: {} {}".format(i, checkhits.data[sm[i], pix[i], 0], hits.data[sm[i], pix[i], 0])) - # off = 0 - # for j in range(nnz): - # print(" {}: {} {}".format(j, check.data[sm[i], pix[i], j], fake.data[sm[i], pix[i], j])) - # for k in range(j, nnz): - # print(" {}: {} {}".format(off, checkinvn.data[sm[i], pix[i], off], invn.data[sm[i], pix[i], off])) - # off += 1 - - nt.assert_equal(hits.data, checkhits.data) - nt.assert_almost_equal(fake.data, check.data) - nt.assert_almost_equal(invn.data, checkinvn.data) - - return - - def test_invert(self): - nsm = 2 - npix = 3 - nnz = 4 - scale = 2.0 - nsamp = nsm * npix - nelem = int(nnz * (nnz + 1) / 2) - threshold = 1.0e-6 - - invn = DistPixels( - None, - comm=self.data.comm.comm_world, - npix=nsm * npix, - nnz=nelem, - dtype=np.float64, - npix_submap=npix, - local_submaps=np.arange(nsm, dtype=np.int64), - ) - check = invn.duplicate() - - rowdata = 10.0 * np.arange(nnz, 0, -1) - - for i in range(nsm): - for j in range(npix): - off = 0 - for k in range(nnz): - for m in range(k, nnz): - invn.data[i, j, off] = rowdata[m - k] - check.data[i, j, off] = invn.data[i, j, off] - off += 1 - - # invert twice - covariance_invert(invn, threshold) - covariance_invert(invn, threshold) - - # for i in range(nsm): - # for j in range(npix): - # off = 0 - # print("sm {}, pix {}:".format(i, j)) - # for k in range(nnz): - # for m in range(k, nnz): - # print(" {} {}".format(fakedata[i,j,off], checkdata[i,j,off])) - # off += 1 - - nt.assert_almost_equal(invn.data, check.data) - return - - def test_invnpp(self): - op = OpSimNoise(realization=0) - op.exec(self.data) - - # make a simple pointing matrix - pointing = OpPointingHpix(nside=self.map_nside, nest=True, mode="IQU") - pointing.exec(self.data) - - # construct a distributed map to store the covariance and hits - - invnpp = DistPixels( - self.data, - nnz=6, - dtype=np.float64, - ) - invnpp.data.fill(0.0) + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) - hits = DistPixels( - self.data, - nnz=1, - dtype=np.int64, - ) - hits.data.fill(0) + # Simulate noise timestreams - # accumulate the inverse covariance. Use detector weights - # based on the analytic NET. 
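+        # Draw a noise timestream for each detector from the model created
+        # above, storing the result in the "noise" detdata key.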
+ sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + sim_noise.apply(data) - tod = self.data.obs[0]["tod"] - nse = self.data.obs[0]["noise"] - detweights = {} - for d in tod.local_dets: - detweights[d] = 1.0 / (self.rate * nse.NET(d) ** 2) + # Build an inverse covariance - build_invnpp = OpAccumDiag( - detweights=detweights, invnpp=invnpp, hits=hits, name="noise" + build_invnpp = ops.BuildInverseCovariance( + pixel_dist="pixel_dist", noise_model="noise_model" ) - build_invnpp.exec(self.data) - - # for i in range(invnpp.data.shape[0]): - # for j in range(invnpp.data.shape[1]): - # print("sm {}, pix {}: hits = {}".format(i, j, hits.data[i, j, 0])) - # for k in range(invnpp.data.shape[2]): - # print(" {}".format(invnpp.data[i, j, k])) + invnpp = build_invnpp.apply(data) - invnpp.allreduce() - hits.allreduce() + del data + return invnpp - # invert it - check = invnpp.duplicate() - covariance_invert(invnpp, 1.0e-14) - covariance_invert(invnpp, 1.0e-14) - - # Matrices that failed the rcond test are set to zero - nonzero = np.absolute(invnpp.data) > 1.0e-12 - if np.sum(nonzero) == 0: - raise Exception("All matrices failed the rcond test.") + def print_cov(self, mat): + for p in range(mat.distribution.n_local_submap * mat.distribution.n_pix_submap): + if mat.raw[p * mat.n_value] == 0: + continue + msg = "local pixel {}:".format(p) + for nv in range(mat.n_value): + msg += " {}".format(mat.raw[p * mat.n_value + nv]) + print(msg) - nt.assert_almost_equal(invnpp.data[nonzero], check.data[nonzero]) + def test_invert(self): + threshold = 1.0e-8 - return + invnpp = self.create_invnpp() - def test_distpix_init(self): - # make a simple pointing matrix - pointing = OpPointingHpix(nside=self.map_nside, nest=True, mode="IQU") - pointing.exec(self.data) + check = invnpp.duplicate() - # construct a distributed map to store the covariance and hits + rcond = PixelData(invnpp.distribution, np.float64, n_value=1) - invnpp = DistPixels( - self.data, - nnz=6, - dtype=np.float64, - ) + # Invert twice, using a different communication algorithm each way. + covariance_invert(invnpp, threshold, rcond=rcond, use_alltoallv=True) + covariance_invert(invnpp, threshold, use_alltoallv=False) - return + for sm in range(invnpp.distribution.n_local_submap): + good = np.where(rcond.data[sm] > threshold)[0] + nt.assert_almost_equal(invnpp.data[sm, good, :], check.data[sm, good, :]) def test_multiply(self): - # make a simple pointing matrix - pointing = OpPointingHpix(nside=self.map_nside, nest=True, mode="IQU") - pointing.exec(self.data) - - # construct a distributed map to store the covariance and hits - - invnpp = DistPixels( - self.data, - nnz=6, - dtype=np.float64, - ) - - hits = DistPixels( - self.data, - nnz=1, - dtype=np.int64, - ) - - # accumulate the inverse covariance. Use detector weights - # based on the analytic NET. 
- - tod = self.data.obs[0]["tod"] - nse = self.data.obs[0]["noise"] - detweights = {} - for d in tod.local_dets: - detweights[d] = 1.0 / (self.rate * nse.NET(d) ** 2) - - build_invnpp = OpAccumDiag(detweights=detweights, invnpp=invnpp, hits=hits) - build_invnpp.exec(self.data) - - invnpp.allreduce() - hits.allreduce() - - # invert it - check = invnpp.duplicate() - covariance_invert(invnpp, 1.0e-3) - - # multiply the two - covariance_multiply(check, invnpp) - - # check that the multiplied matrices are unit matrices - nsubmap, npix, nblock = check.data.shape - nnz = int(((np.sqrt(8 * nblock) - 1) / 2) + 0.5) - for i in range(nsubmap): - for j in range(npix): - if np.all(invnpp.data[i, j] == 0): - # Matrix failed to invert + threshold = 1.0e-15 + + # Build an inverse + invnpp = self.create_invnpp() + # print("invnpp:") + # self.print_cov(invnpp) + + # Get the covariance + npp = invnpp.duplicate() + covariance_invert(npp, threshold, use_alltoallv=True) + # print("npp:") + # self.print_cov(npp) + + # Multiply the two + covariance_multiply(npp, invnpp, use_alltoallv=True) + # print("identity:") + # self.print_cov(npp) + + for sm in range(npp.distribution.n_local_submap): + for spix in range(npp.distribution.n_pix_submap): + if npp.data[sm, spix, 0] == 0: continue - off = 0 - for k in range(nnz): - for m in range(k, nnz): - if k == m: - nt.assert_almost_equal(check.data[i, j, off], 1.0) - else: - nt.assert_almost_equal(check.data[i, j, off], 0.0) - off += 1 - - return - - def test_fitsio(self): - rank = 0 - if self.comm is not None: - rank = self.comm.rank - # make a simple pointing matrix - pointing = OpPointingHpix(nside=self.map_nside, nest=True, mode="IQU") - pointing.exec(self.data) - - # construct a distributed map to store the covariance and hits - - invnpp = DistPixels( - self.data, - nnz=6, - dtype=np.float64, - ) - - rcond = DistPixels( - self.data, - nnz=1, - dtype=np.float64, - ) - - hits = DistPixels( - self.data, - nnz=1, - dtype=np.int64, - ) - - # accumulate the inverse covariance. Use detector weights - # based on the analytic NET. - - tod = self.data.obs[0]["tod"] - nse = self.data.obs[0]["noise"] - detweights = {} - for d in tod.local_dets: - detweights[d] = 1.0 / (self.rate * nse.NET(d) ** 2) - - build_invnpp = OpAccumDiag(detweights=detweights, invnpp=invnpp, hits=hits) - build_invnpp.exec(self.data) + nt.assert_almost_equal(npp.data[sm, spix, 0], 1.0) + nt.assert_almost_equal(npp.data[sm, spix, 1], 0.0) + nt.assert_almost_equal(npp.data[sm, spix, 2], 0.0) + nt.assert_almost_equal(npp.data[sm, spix, 3], 1.0) + nt.assert_almost_equal(npp.data[sm, spix, 4], 0.0) + nt.assert_almost_equal(npp.data[sm, spix, 5], 1.0) - # self.assertTrue(False) + def test_apply(self): + threshold = 1.0e-15 - invnpp.allreduce() - hits.allreduce() - - # invert it - - covariance_invert(invnpp, 1.0e-3) - rcond = covariance_rcond(invnpp) - - check = invnpp.duplicate() - checkhits = hits.duplicate() - checkrcond = rcond.duplicate() + # Build an inverse + invnpp = self.create_invnpp() - # write this out... 
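+        # covariance_apply scales each pixel's Stokes vector by that pixel's
+        # symmetric 3x3 matrix block, so applying the inverse covariance and
+        # then the covariance should recover the original signal in every
+        # pixel where the inversion succeeded.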
+ # Get the covariance + npp = invnpp.duplicate() + covariance_invert(npp, threshold, use_alltoallv=True) - subsum = [ - np.sum(invnpp.data[x, :, :]) for x in range(len(invnpp.local_submaps)) - ] + # Random signal + sig = PixelData(npp.distribution, np.float64, n_value=3) + sig.raw[:] = np.random.normal(size=len(sig.raw)) - outfile = os.path.join(self.outdir, "covtest.fits") - if rank == 0: - if os.path.isfile(outfile): - os.remove(outfile) + check = sig.duplicate() - hitfile = os.path.join(self.outdir, "covtest_hits.fits") - if rank == 0: - if os.path.isfile(hitfile): - os.remove(hitfile) + # Apply inverse and then covariance and check that we recover the original. + covariance_apply(invnpp, sig) + covariance_apply(npp, sig) - rcondfile = os.path.join(self.outdir, "covtest_rcond.fits") - if rank == 0: - if os.path.isfile(rcondfile): - os.remove(rcondfile) - - invnpp.write_healpix_fits(outfile) - rcond.write_healpix_fits(rcondfile) - hits.write_healpix_fits(hitfile) - - invnpp.data.fill(0.0) - invnpp.read_healpix_fits(outfile) - - diffdata = invnpp.duplicate() - diffdata.data -= check.data - - difffile = os.path.join(self.outdir, "readwrite_diff.fits") - diffdata.write_healpix_fits(difffile) - - if rank == 0: - import matplotlib.pyplot as plt - - dat = hp.read_map(outfile) - outfile = "{}.png".format(outfile) - hp.mollview(dat, xsize=int(1600)) - plt.savefig(outfile) - plt.close() - - dat = hp.read_map(difffile) - outfile = "{}.png".format(difffile) - hp.mollview(dat, xsize=int(1600)) - plt.savefig(outfile) - plt.close() - - dat = hp.read_map(hitfile) - outfile = "{}.png".format(hitfile) - hp.mollview(dat, xsize=int(1600)) - plt.savefig(outfile) - plt.close() - - dat = hp.read_map(rcondfile) - outfile = "{}.png".format(rcondfile) - hp.mollview(dat, xsize=int(1600)) - plt.savefig(outfile) - plt.close() - - rcond.data.fill(0.0) - rcond.read_healpix_fits(rcondfile) - - nt.assert_almost_equal(rcond.data, checkrcond.data, decimal=6) - # nt.assert_almost_equal(invnpp.data, checkdata, decimal=6) - - hits.data.fill(0) - hits.read_healpix_fits(hitfile) - - nt.assert_equal(hits.data, checkhits.data) - - return + for sm in range(npp.distribution.n_local_submap): + for spix in range(npp.distribution.n_pix_submap): + if npp.data[sm, spix, 0] == 0: + continue + nt.assert_almost_equal(sig.data[sm, spix], check.data[sm, spix]) diff --git a/src/toast/tests/ops_mapmaker_utils.py b/src/toast/tests/ops_mapmaker_utils.py index 690d48151..d3972e2ba 100644 --- a/src/toast/tests/ops_mapmaker_utils.py +++ b/src/toast/tests/ops_mapmaker_utils.py @@ -5,20 +5,17 @@ import os import numpy as np +import numpy.testing as nt from astropy import units as u from .mpi import MPITestCase -from ..vis import set_matplotlib_backend - -from .. import rng as rng - from ..noise import Noise from .. 
import future_ops as ops
 
-from ..future_ops.sim_tod_noise import sim_noise_timestream
+from ..pixels import PixelDistribution, PixelData
 
 from ._helpers import create_outdir, create_satellite_data
 
@@ -27,6 +24,24 @@ class MapmakerUtilsTest(MPITestCase):
     def setUp(self):
         fixture_name = os.path.splitext(os.path.basename(__file__))[0]
         self.outdir = create_outdir(self.comm, fixture_name)
+        np.random.seed(123456)
+        self.mix_coeff = np.random.uniform(low=-1.0, high=1.0, size=1000)
+
+    def create_corr_noise(self, dets, nse):
+        corr_freqs = {"noise_{}".format(i): nse.freq(x) for i, x in enumerate(dets)}
+        corr_psds = {"noise_{}".format(i): nse.psd(x) for i, x in enumerate(dets)}
+        corr_indices = {"noise_{}".format(i): 100 + i for i, x in enumerate(dets)}
+        corr_mix = dict()
+        for i, x in enumerate(dets):
+            # Each detector gets its own row of mixing coefficients
+            dmix = self.mix_coeff[i * len(dets) : (i + 1) * len(dets)]
+            corr_mix[x] = {"noise_{}".format(y): dmix[y] for y in range(len(dets))}
+        return Noise(
+            detectors=dets,
+            freqs=corr_freqs,
+            psds=corr_psds,
+            mixmatrix=corr_mix,
+            indices=corr_indices,
+        )
 
     def test_hits(self):
         # Create a fake satellite data set for testing
@@ -41,6 +56,20 @@ def test_hits(self):
         build_hits = ops.BuildHitMap(pixel_dist="pixel_dist")
         hits = build_hits.apply(data)
 
+        # Manual check
+        check_hits = PixelData(data["pixel_dist"], np.int64, n_value=1)
+        for ob in data.obs:
+            for det in ob.local_detectors:
+                local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap(
+                    ob.detdata["pixels"][det]
+                )
+                for i in range(ob.n_local_samples):
+                    if local_pix[i] >= 0:
+                        check_hits.data[local_sm[i], local_pix[i], 0] += 1
+        check_hits.sync_allreduce()
+
+        nt.assert_equal(hits.data, check_hits.data)
+
         del data
         return
 
@@ -62,33 +91,7 @@ def test_inv_cov(self):
         # observation.
         for ob in data.obs:
             nse = ob[default_model.noise_model]
-            corr_freqs = {
-                "noise_{}".format(i): nse.freq(x)
-                for i, x in enumerate(ob.local_detectors)
-            }
-            corr_psds = {
-                "noise_{}".format(i): nse.psd(x)
-                for i, x in enumerate(ob.local_detectors)
-            }
-            corr_indices = {
-                "noise_{}".format(i): 100 + i for i, x in enumerate(ob.local_detectors)
-            }
-            corr_mix = dict()
-            for i, x in enumerate(ob.local_detectors):
-                dmix = np.random.uniform(
-                    low=-1.0, high=1.0, size=len(ob.local_detectors)
-                )
-                corr_mix[x] = {
-                    "noise_{}".format(y): dmix[y]
-                    for y in range(len(ob.local_detectors))
-                }
-            ob["noise_model_corr"] = Noise(
-                detectors=ob.local_detectors,
-                freqs=corr_freqs,
-                psds=corr_psds,
-                mixmatrix=corr_mix,
-                indices=corr_indices,
-            )
+            ob["noise_model_corr"] = self.create_corr_noise(ob.local_detectors, nse)
 
         # Simulate noise using both models
 
@@ -110,6 +113,50 @@ def test_inv_cov(self):
         )
         invnpp_corr = build_invnpp_corr.apply(data)
 
+        # Manual check
+
+        check_invnpp = PixelData(data["pixel_dist"], np.float64, n_value=6)
+        check_invnpp_corr = PixelData(data["pixel_dist"], np.float64, n_value=6)
+
+        for ob in data.obs:
+            noise = ob["noise_model"]
+            noise_corr = ob["noise_model_corr"]
+
+            for det in ob.local_detectors:
+                detweight = noise.detector_weight(det)
+                detweight_corr = noise_corr.detector_weight(det)
+
+                wt = ob.detdata["weights"][det]
+                local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap(
+                    ob.detdata["pixels"][det]
+                )
+                for i in range(ob.n_local_samples):
+                    if local_pix[i] < 0:
+                        continue
+                    off = 0
+                    for j in range(3):
+                        for k in range(j, 3):
+                            check_invnpp.data[local_sm[i], local_pix[i], off] += (
+                                detweight * wt[i, j] * wt[i, k]
+                            )
+                            check_invnpp_corr.data[local_sm[i], local_pix[i], off] += (
+                                detweight_corr * wt[i, j] * wt[i, k]
+                            )
+                            off += 1
+
+        
check_invnpp.sync_allreduce() + check_invnpp_corr.sync_allreduce() + + for sm in range(invnpp.distribution.n_local_submap): + for px in range(invnpp.distribution.n_pix_submap): + if invnpp.data[sm, px, 0] != 0: + nt.assert_almost_equal( + invnpp.data[sm, px], check_invnpp.data[sm, px] + ) + if invnpp_corr.data[sm, px, 0] != 0: + nt.assert_almost_equal( + invnpp_corr.data[sm, px], check_invnpp_corr.data[sm, px] + ) del data return @@ -131,33 +178,7 @@ def test_zmap(self): # observation. for ob in data.obs: nse = ob[default_model.noise_model] - corr_freqs = { - "noise_{}".format(i): nse.freq(x) - for i, x in enumerate(ob.local_detectors) - } - corr_psds = { - "noise_{}".format(i): nse.psd(x) - for i, x in enumerate(ob.local_detectors) - } - corr_indices = { - "noise_{}".format(i): 100 + i for i, x in enumerate(ob.local_detectors) - } - corr_mix = dict() - for i, x in enumerate(ob.local_detectors): - dmix = np.random.uniform( - low=-1.0, high=1.0, size=len(ob.local_detectors) - ) - corr_mix[x] = { - "noise_{}".format(y): dmix[y] - for y in range(len(ob.local_detectors)) - } - ob["noise_model_corr"] = Noise( - detectors=ob.local_detectors, - freqs=corr_freqs, - psds=corr_psds, - mixmatrix=corr_mix, - indices=corr_indices, - ) + ob["noise_model_corr"] = self.create_corr_noise(ob.local_detectors, nse) # Simulate noise using both models @@ -181,5 +202,40 @@ def test_zmap(self): ) zmap_corr = build_zmap_corr.apply(data) + # Manual check + + check_zmap = PixelData(data["pixel_dist"], np.float64, n_value=3) + check_zmap_corr = PixelData(data["pixel_dist"], np.float64, n_value=3) + + for ob in data.obs: + noise = ob["noise_model"] + noise_corr = ob["noise_model_corr"] + for det in ob.local_detectors: + wt = ob.detdata["weights"][det] + local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap( + ob.detdata["pixels"][det] + ) + + for i in range(ob.n_local_samples): + if local_pix[i] < 0: + continue + for j in range(3): + check_zmap.data[local_sm[i], local_pix[i], j] += ( + noise.detector_weight(det) + * ob.detdata["noise"][det, i] + * wt[i, j] + ) + check_zmap_corr.data[local_sm[i], local_pix[i], j] += ( + noise_corr.detector_weight(det) + * ob.detdata["noise_corr"][det, i] + * wt[i, j] + ) + + check_zmap.sync_allreduce() + check_zmap_corr.sync_allreduce() + + np.testing.assert_almost_equal(zmap.data, check_zmap.data) + np.testing.assert_almost_equal(zmap_corr.data, check_zmap_corr.data) + del data return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 951b117b2..54da2d06f 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -37,6 +37,8 @@ from . import ops_mapmaker_utils as test_ops_mapmaker_utils +from . import covariance as test_covariance + # # from . import cache as testcache # @@ -44,8 +46,6 @@ # # from . import psd_math as testpsdmath # -# from . import cov as testcov -# # from . import ops_dipole as testopsdipole # from . import ops_simnoise as testopssimnoise # from . import ops_sim_sss as testopssimsss @@ -145,6 +145,8 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise)) suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_utils)) + suite.addTest(loader.loadTestsFromModule(test_covariance)) + # # suite.addTest(loader.loadTestsFromModule(testtod)) # From 55a1fda302dec7becfa791e0e0799c40815e4254 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Wed, 25 Nov 2020 17:00:04 -0800 Subject: [PATCH 028/690] Initial port of binned mapmaking. Pipeline class improvements. 
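Data.split() now returns an OrderedDict that maps each value of the requested
observation key to a new Data object, rather than a list of (value, data)
tuples, and the group-consistent key lookup lives in the new
select_observations() helper.  A minimal sketch of the intended usage, in
which the "season" key and the operator names are illustrative placeholders
rather than part of this patch:

    # Split observations on an arbitrary metadata key.  Each returned Data
    # object shares the same communicator but holds only the matching
    # observations, in their original order.
    for season, subset in data.split("season", require=True).items():
        print("season =", season, "n_obs =", len(subset.obs))

    # The same selection can happen implicitly inside a Pipeline, which now
    # accepts an observation key / value filter and can run its operator
    # list one detector at a time:
    pipe = ops.Pipeline(
        operators=[pointing, binner],
        detector_sets=["SINGLE"],
        observation_key="season",
        observation_value="summer",
    )
    pipe.apply(data)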
---
 src/toast/data.py                        |  99 +++++---
 src/toast/future_ops/CMakeLists.txt      |   2 +
 src/toast/future_ops/__init__.py         |  13 +-
 src/toast/future_ops/clear.py            |  79 +++++++
 src/toast/future_ops/mapmaker_binning.py | 211 +++++++++++++++++
 src/toast/future_ops/mapmaker_utils.py   | 284 ++++++++++++++++++++++-
 src/toast/future_ops/pipeline.py         | 129 ++++++++--
 7 files changed, 760 insertions(+), 57 deletions(-)
 create mode 100644 src/toast/future_ops/clear.py
 create mode 100644 src/toast/future_ops/mapmaker_binning.py

diff --git a/src/toast/data.py b/src/toast/data.py
index 9df94ce56..037cf815e 100644
--- a/src/toast/data.py
+++ b/src/toast/data.py
@@ -139,46 +139,85 @@ def _get_optional(k, dt):
             wcomm.barrier()
         return
 
-    def split(self, key):
-        """Split the Data object.
+    def select_observations(self, key, require=False):
+        """Given an observation key, return lists of observations with unique values.
 
-        Split the Data object based on the value of `key` in the
-        observation dictionary.
+        In the returned dictionary, the order of observations for a given value is
+        preserved relative to the original list.  For a given observation, the value
+        across all processes in the group must match.
 
         Args:
-            key(str) : Observation key to use.
+            key (str):  The observation key
+            require (bool):  If True, the key must exist in every observation.
 
         Returns:
-            List of 2-tuples of the form (value, data)
+            (OrderedDict):  For each key value, the list of observations.
 
         """
-        # Build a superset of all values
-        values = set()
-        for obs in self.obs:
-            if key not in obs:
-                raise RuntimeError(
-                    'Cannot split data by "{}". Key is not '
-                    "defined for all observations.".format(key)
+        log = Logger.get()
+
+        group_rank = self.comm.group_rank
+        group_comm = self.comm.comm_group
+
+        selected = OrderedDict()
+
+        for ob in self.obs:
+            # The value on this process
+            proc_val = None
+            if key in ob:
+                proc_val = ob[key]
+
+            # Get the values from all processes in the group
+            group_vals = None
+            if group_comm is None:
+                group_vals = [proc_val]
+            else:
+                group_vals = group_comm.allgather(proc_val)
+
+            # Check for consistency
+            if group_vals.count(group_vals[0]) == len(group_vals):
+                # All entries equal
+                if proc_val is None:
+                    if require:
+                        msg = "Observation '{}' does not have key '{}'".format(
+                            ob.name, key
+                        )
+                        if group_rank == 0:
+                            log.error(msg)
+                        raise RuntimeError(msg)
+                    continue
+                if proc_val not in selected:
+                    selected[proc_val] = list()
+                selected[proc_val].append(ob)
+            else:
+                # Mismatch
+                msg = "Observation '{}', key '{}' has different values across the group".format(
+                    ob.name, key
                 )
-            values.add(obs[key])
-        all_values = None
-        if self._comm.comm_world is None:
-            all_values = [values]
-        else:
-            all_values = self._comm.comm_world.allgather(values)
-        for vals in all_values:
-            values = values.union(vals)
+                if group_rank == 0:
+                    log.error(msg)
+                raise RuntimeError(msg)
+        return selected
+
+    def split(self, key, require=False):
+        """Split the Data object.
+
+        Split the Data object based on the value of `key` in the observation dictionary.
+
+        Args:
+            key (str):  Observation key to use.
+            require (bool):  If True, require that all observations have the key.
+
+        Returns:
+            (OrderedDict):  For each key value, a new Data object.
+
+        """
+        selected = self.select_observations(key, require=require)
 
-        # Order the values alphabetically.
- values = sorted(list(values)) + datasplit = OrderedDict() - # Split the data - datasplit = [] - for value in values: + for value, obslist in selected.items(): new_data = Data(comm=self._comm) - for obs in self.obs: - if obs[key] == value: - new_data.obs.append(obs) - datasplit.append((value, new_data)) + for ob in obslist: + new_data.obs.append(ob) + datasplit[value] = new_data return datasplit diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/future_ops/CMakeLists.txt index e1cea9d0e..1306abb17 100644 --- a/src/toast/future_ops/CMakeLists.txt +++ b/src/toast/future_ops/CMakeLists.txt @@ -4,6 +4,7 @@ install(FILES __init__.py pipeline.py + clear.py memory_counter.py sim_hwp.py sim_tod_noise.py @@ -11,5 +12,6 @@ install(FILES noise_model.py pointing_healpix.py mapmaker_utils.py + mapmaker_binning.py DESTINATION ${PYTHON_SITE}/toast/future_ops ) diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py index a16dedf7e..09ebd4296 100644 --- a/src/toast/future_ops/__init__.py +++ b/src/toast/future_ops/__init__.py @@ -2,10 +2,12 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. -# import functions into our public API +# Import Operators into our public API from .memory_counter import MemoryCounter +from .clear import Clear + from .pipeline import Pipeline from .sim_satellite import SimSatellite @@ -16,4 +18,11 @@ from .pointing_healpix import PointingHealpix -from .mapmaker_utils import BuildHitMap, BuildInverseCovariance, BuildNoiseWeighted +from .mapmaker_utils import ( + BuildHitMap, + BuildInverseCovariance, + BuildNoiseWeighted, + CovarianceAndHits, +) + +from .mapmaker_binning import BinMap diff --git a/src/toast/future_ops/clear.py b/src/toast/future_ops/clear.py new file mode 100644 index 000000000..8f12cc413 --- /dev/null +++ b/src/toast/future_ops/clear.py @@ -0,0 +1,79 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, List + +from ..operator import Operator + + +@trait_docs +class Clear(Operator): + """Class to purge data from observations. + + This operator takes lists of shared, detdata, and meta keys to delete from + observations. 
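+
+    Keys that do not exist in a given observation are silently skipped.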
+
+    """
+
+    # Class traits
+
+    API = Int(0, help="Internal interface version for this operator")
+
+    meta = List(
+        None, allow_none=True, help="List of Observation dictionary keys to delete"
+    )
+
+    detdata = List(
+        None, allow_none=True, help="List of Observation detdata keys to delete"
+    )
+
+    shared = List(
+        None, allow_none=True, help="List of Observation shared keys to delete"
+    )
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+        for ob in data.obs:
+            if self.detdata is not None:
+                for key in self.detdata:
+                    # This ignores non-existent keys
+                    del ob.detdata[key]
+            if self.shared is not None:
+                for key in self.shared:
+                    # This ignores non-existent keys
+                    del ob.shared[key]
+            if self.meta is not None:
+                for key in self.meta:
+                    try:
+                        del ob[key]
+                    except KeyError:
+                        pass
+        return
+
+    def _finalize(self, data, **kwargs):
+        return None
+
+    def _requires(self):
+        # Although we could require nothing, since we are deleting keys only if they
+        # exist, providing these as requirements allows us to catch dependency issues
+        # in pipelines.
+        req = dict()
+        req["meta"] = list(self.meta)
+        req["detdata"] = list(self.detdata)
+        req["shared"] = list(self.shared)
+        return req
+
+    def _provides(self):
+        return dict()
+
+    def _accelerators(self):
+        # Eventually we can delete memory objects on devices...
+        return list()
diff --git a/src/toast/future_ops/mapmaker_binning.py b/src/toast/future_ops/mapmaker_binning.py
new file mode 100644
index 000000000..682ccefed
--- /dev/null
+++ b/src/toast/future_ops/mapmaker_binning.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import traitlets
+
+import numpy as np
+
+from ..utils import Logger
+
+from ..traits import trait_docs, Int, Unicode, Bool, Instance
+
+from ..operator import Operator
+
+from ..timing import function_timer
+
+from ..pixels import PixelDistribution, PixelData
+
+from ..covariance import covariance_apply
+
+from .pipeline import Pipeline
+
+from .clear import Clear
+
+from .mapmaker_utils import BuildHitMap, BuildNoiseWeighted, BuildInverseCovariance
+
+
+@trait_docs
+class BinMap(Operator):
+    """Operator which bins a map.
+
+    Given a noise model and a pointing operator, build the noise weighted map and
+    apply the noise covariance to get the resulting binned map.
+
+    """
+
+    # Class traits
+
+    API = Int(0, help="Internal interface version for this operator")
+
+    pixel_dist = Unicode(
+        "pixel_dist",
+        help="The Data key where the PixelDist object should be stored",
+    )
+
+    covariance = Unicode(
+        "covariance",
+        help="The Data key containing the noise covariance PixelData instance",
+    )
+
+    binned = Unicode(
+        "binned",
+        help="The Data key where the binned map should be stored",
+    )
+
+    det_data = Unicode(
+        None, allow_none=True, help="Observation detdata key for the timestream data"
+    )
+
+    det_flags = Unicode(
+        None, allow_none=True, help="Observation detdata key for flags to use"
+    )
+
+    det_flag_mask = Int(0, help="Bit mask value for optional flagging")
+
+    pointing = Instance(
+        klass=None,
+        allow_none=True,
+        help="This must be an instance of a pointing operator",
+    )
+
+    pixels = Unicode("pixels", help="Observation detdata key for pixel indices")
+
+    weights = Unicode("weights", help="Observation detdata key for Stokes weights")
+
+    noise_model = Unicode(
+        "noise_model", help="Observation key containing the noise model"
+    )
+
+    sync_type = Unicode(
+        "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'"
+    )
+
+    save_pointing = Bool(
+        False, help="If True, do not clear detector pointing matrices after use"
+    )
+
+    @traitlets.validate("det_flag_mask")
+    def _check_flag_mask(self, proposal):
+        check = proposal["value"]
+        if check < 0:
+            raise traitlets.TraitError("Flag mask should be a positive integer")
+        return check
+
+    @traitlets.validate("sync_type")
+    def _check_sync_type(self, proposal):
+        check = proposal["value"]
+        if check != "allreduce" and check != "alltoallv":
+            raise traitlets.TraitError("Invalid communication algorithm")
+        return check
+
+    @traitlets.validate("pointing")
+    def _check_pointing(self, proposal):
+        pntg = proposal["value"]
+        if pntg is not None:
+            if not isinstance(pntg, Operator):
+                raise traitlets.TraitError("pointing should be an Operator instance")
+            if not pntg.has_trait("pixels"):
+                raise traitlets.TraitError(
+                    "pointing operator should have a 'pixels' trait"
+                )
+            if not pntg.has_trait("weights"):
+                raise traitlets.TraitError(
+                    "pointing operator should have a 'weights' trait"
+                )
+            if not pntg.has_trait("create_dist"):
+                raise traitlets.TraitError(
+                    "pointing operator should have a 'create_dist' trait"
+                )
+        return pntg
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    @function_timer
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+
+        if self.covariance not in data:
+            msg = "Data does not contain noise covariance '{}'".format(self.covariance)
+            raise RuntimeError(msg)
+
+        cov = data[self.covariance]
+
+        # Check that the detector data is set
+        if self.det_data is None:
+            raise RuntimeError("You must set the det_data trait before calling exec()")
+
+        # Sanity check that the covariance pixel distribution agrees
+        if cov.distribution != data[self.pixel_dist]:
+            raise RuntimeError(
+                "Pixel distribution '{}' does not match the one used by covariance '{}'".format(
+                    self.pixel_dist, self.covariance
+                )
+            )
+
+        # Set outputs of the pointing operator
+
+        self.pointing.pixels = self.pixels
+        self.pointing.weights = self.weights
+        self.pointing.create_dist = None
+
+        # Set up clearing of the pointing matrices
+
+        clear_pointing = Clear(detdata=[self.pixels, self.weights])
+
+        # Noise weighted map
+
+        build_zmap = BuildNoiseWeighted(
+            pixel_dist=self.pixel_dist,
+            pixels=self.pixels,
+            weights=self.weights,
+            noise_model=self.noise_model,
+            
det_data=self.det_data,
+            det_flags=self.det_flags,
+            det_flag_mask=self.det_flag_mask,
+            sync_type=self.sync_type,
+        )
+
+        # Build a pipeline to expand pointing and accumulate
+
+        accum = None
+        if self.save_pointing:
+            # Process all detectors at once
+            accum = Pipeline(detector_sets=["ALL"])
+            accum.operators = [self.pointing, build_zmap]
+        else:
+            # Process one detector at a time and clear pointing after each one.
+            accum = Pipeline(detector_sets=["SINGLE"])
+            accum.operators = [self.pointing, build_zmap, clear_pointing]
+
+        pipe_out = accum.apply(data, detectors=detectors)
+
+        # Extract the results
+        binned_map = pipe_out[1]
+
+        # Apply the covariance
+        covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv"))
+
+        # Store products
+        data[self.binned] = binned_map
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        return
+
+    def _requires(self):
+        req = {
+            "meta": [self.noise_model, self.pixel_dist, self.covariance],
+            "shared": list(),
+            "detdata": [self.det_data],
+        }
+        if self.det_flags is not None:
+            req["detdata"].append(self.det_flags)
+        return req
+
+    def _provides(self):
+        prov = {"meta": [self.binned], "shared": list(), "detdata": list()}
+        if self.save_pointing:
+            prov["detdata"].extend([self.pixels, self.weights])
+        return prov
+
+    def _accelerators(self):
+        return list()
diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/future_ops/mapmaker_utils.py
index 52bb5b566..2244c396c 100644
--- a/src/toast/future_ops/mapmaker_utils.py
+++ b/src/toast/future_ops/mapmaker_utils.py
@@ -8,7 +8,7 @@
 
 from ..utils import Logger
 
-from ..traits import trait_docs, Int, Unicode, Bool
+from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float
 
 from ..operator import Operator
 
@@ -22,6 +22,8 @@
     cov_accum_diag_invnpp,
 )
 
+from ..covariance import covariance_invert
+
+from .pipeline import Pipeline
+
+from .clear import Clear
+
 
 @trait_docs
 class BuildHitMap(Operator):
@@ -58,8 +60,6 @@ class BuildHitMap(Operator):
 
     pixels = Unicode("pixels", help="Observation detdata key for pixel indices")
 
-    weights = Unicode("weights", help="Observation detdata key for Stokes weights")
-
     sync_type = Unicode(
         "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'"
     )
@@ -98,6 +98,10 @@ def _exec(self, data, detectors=None, **kwargs):
             raise RuntimeError(msg)
 
         dist = data[self.pixel_dist]
+        if data.comm.world_rank == 0:
+            log.debug(
+                "Building hit map with pixel_distribution {}".format(self.pixel_dist)
+            )
 
         # On first call, get the pixel distribution and create our distributed hitmap
         if self._hits is None:
@@ -245,6 +249,12 @@ def _exec(self, data, detectors=None, **kwargs):
             raise RuntimeError(msg)
 
         dist = data[self.pixel_dist]
+        if data.comm.world_rank == 0:
+            log.debug(
+                "Building inverse covariance with pixel_distribution {}".format(
+                    self.pixel_dist
+                )
+            )
 
         weight_nnz = None
         cov_nnz = None
@@ -437,6 +447,12 @@ def _exec(self, data, detectors=None, **kwargs):
             raise RuntimeError(msg)
 
         dist = data[self.pixel_dist]
+        if data.comm.world_rank == 0:
+            log.debug(
+                "Building noise weighted map with pixel_distribution {}".format(
+                    self.pixel_dist
+                )
+            )
 
         weight_nnz = None
 
@@ -456,6 +472,12 @@ def _exec(self, data, detectors=None, **kwargs):
 
             noise = ob[self.noise_model]
 
+            # Check that the detector data is set
+            if self.det_data is None:
+                raise RuntimeError(
+                    "You must set the det_data trait before calling exec()"
+                )
+
             for det in dets:
                 # The pixels and weights for this detector.
pix = ob.detdata[self.pixels] @@ -539,3 +561,259 @@ def _provides(self): def _accelerators(self): return list() + + +@trait_docs +class CovarianceAndHits(Operator): + """Operator which builds the pixel-space diagonal noise covariance and hit map. + + Frequently the first step in map making is to determine what pixels on the sky + have been covered and build the diagonal noise covariance. During the construction + of the covariance we can cut pixels that are poorly conditioned. + + This operator runs the pointing operator and builds the PixelDist instance + describing how submaps are distributed among processes. It builds the hit map + and the inverse covariance and then inverts this with a threshold on the condition + number in each pixel. + + NOTE: The pointing operator must have the "pixels", "weights", and "create_dist" + traits, which will be set by this operator during execution. + + Output PixelData objects are stored in the Data dictionary. + + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + pixel_dist = Unicode( + "pixel_dist", + help="The Data key where the PixelDist object should be stored", + ) + + covariance = Unicode( + "covariance", + help="The Data key where the covariance should be stored", + ) + + hits = Unicode( + "hits", + help="The Data key where the hit map should be stored", + ) + + rcond = Unicode( + "rcond", + help="The Data key where the inverse condition number should be stored", + ) + + det_flags = Unicode( + None, allow_none=True, help="Observation detdata key for flags to use" + ) + + det_flag_mask = Int(0, help="Bit mask value for optional flagging") + + pointing = Instance( + klass=None, + allow_none=True, + help="This must be an instance of a pointing operator", + ) + + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") + + weights = Unicode("weights", help="Observation detdata key for Stokes weights") + + noise_model = Unicode( + "noise_model", help="Observation key containing the noise model" + ) + + rcond_threshold = Float( + 1.0e-8, help="Minimum value for inverse condition number cut." 
+    )
+
+    sync_type = Unicode(
+        "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'"
+    )
+
+    save_pointing = Bool(
+        False, help="If True, do not clear detector pointing matrices after use"
+    )
+
+    @traitlets.validate("det_flag_mask")
+    def _check_flag_mask(self, proposal):
+        check = proposal["value"]
+        if check < 0:
+            raise traitlets.TraitError("Flag mask should be a positive integer")
+        return check
+
+    @traitlets.validate("sync_type")
+    def _check_sync_type(self, proposal):
+        check = proposal["value"]
+        if check != "allreduce" and check != "alltoallv":
+            raise traitlets.TraitError("Invalid communication algorithm")
+        return check
+
+    @traitlets.validate("pointing")
+    def _check_pointing(self, proposal):
+        pntg = proposal["value"]
+        if pntg is not None:
+            if not isinstance(pntg, Operator):
+                raise traitlets.TraitError("pointing should be an Operator instance")
+            if not pntg.has_trait("pixels"):
+                raise traitlets.TraitError(
+                    "pointing operator should have a 'pixels' trait"
+                )
+            if not pntg.has_trait("weights"):
+                raise traitlets.TraitError(
+                    "pointing operator should have a 'weights' trait"
+                )
+            if not pntg.has_trait("create_dist"):
+                raise traitlets.TraitError(
+                    "pointing operator should have a 'create_dist' trait"
+                )
+        return pntg
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._invcov = None
+
+    @function_timer
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+
+        if self.pixel_dist is None:
+            raise RuntimeError(
+                "You must set the 'pixel_dist' trait before calling exec()"
+            )
+
+        # Set outputs of the pointing operator
+
+        self.pointing.pixels = self.pixels
+        self.pointing.weights = self.weights
+        self.pointing.create_dist = None
+
+        # Set up clearing of the pointing matrices
+
+        clear_pointing = Clear(detdata=[self.pixels, self.weights])
+
+        # If we do not have a pixel distribution yet, we must make one pass through
+        # the pointing to build this first.
+
+        if self.pixel_dist not in data:
+            if detectors is not None:
+                msg = "A subset of detectors is specified, but the pixel distribution\n"
+                msg += "does not yet exist- and creating this requires all detectors.\n"
+                msg += "Either pre-create the pixel distribution with all detectors\n"
+                msg += "or run this operator with all detectors."
+                raise RuntimeError(msg)
+
+            msg = "Creating pixel distribution '{}' in Data".format(self.pixel_dist)
+            if data.comm.world_rank == 0:
+                log.debug(msg)
+
+            # Turn on creation of the pixel distribution
+            self.pointing.create_dist = self.pixel_dist
+
+            # Compute the pointing matrix
+
+            pixel_dist_pipe = None
+            if self.save_pointing:
+                # We are keeping the pointing, which means we need to run all detectors
+                # at once so they all end up in the detdata for all observations.
+                pixel_dist_pipe = Pipeline(detector_sets=["ALL"])
+                pixel_dist_pipe.operators = [
+                    self.pointing,
+                ]
+            else:
+                # Run one detector at a time and discard.
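+                # A Pipeline with detector_sets=["SINGLE"] runs its operator
+                # list once per detector, so the Clear step frees each
+                # detector's pointing before the next detector is expanded.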
+ pixel_dist_pipe = Pipeline(detector_sets=["SINGLE"]) + pixel_dist_pipe.operators = [ + self.pointing, + clear_pointing, + ] + pipe_out = pixel_dist_pipe.apply(data, detectors=detectors) + + # Turn pixel distribution creation off again + self.pointing.create_dist = None + + # Hit map operator + + build_hits = BuildHitMap( + pixel_dist=self.pixel_dist, + pixels=self.pixels, + det_flags=self.det_flags, + det_flag_mask=self.det_flag_mask, + sync_type=self.sync_type, + ) + + # Inverse covariance + + build_invcov = BuildInverseCovariance( + pixel_dist=self.pixel_dist, + pixels=self.pixels, + weights=self.weights, + noise_model=self.noise_model, + det_flags=self.det_flags, + det_flag_mask=self.det_flag_mask, + sync_type=self.sync_type, + ) + + # Build a pipeline to expand pointing and accumulate + + accum = None + if self.save_pointing: + # Process all detectors at once + accum = Pipeline(detector_sets=["ALL"]) + accum.operators = [self.pointing, build_hits, build_invcov] + else: + # Process one detector at a time and clear pointing after each one. + accum = Pipeline(detector_sets=["SINGLE"]) + accum.operators = [self.pointing, build_hits, build_invcov, clear_pointing] + + pipe_out = accum.apply(data, detectors=detectors) + + # Extract the results + hits = pipe_out[1] + cov = pipe_out[2] + + # Invert the covariance + rcond = PixelData(cov.distribution, np.float64, n_value=1) + covariance_invert( + cov, + self.rcond_threshold, + rcond=rcond, + use_alltoallv=(self.sync_type == "alltoallv"), + ) + + # Store products + data[self.hits] = hits + data[self.covariance] = cov + data[self.rcond] = rcond + + return + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + req = { + "meta": [self.noise_model], + "shared": list(), + "detdata": list(), + } + if self.det_flags is not None: + req["detdata"].append(self.det_flags) + return req + + def _provides(self): + prov = { + "meta": [self.pixel_dist, self.hits, self.covariance, self.rcond], + "shared": list(), + "detdata": list(), + } + if self.save_pointing: + prov["detdata"].extend([self.pixels, self.weights]) + return prov + + def _accelerators(self): + return list() diff --git a/src/toast/future_ops/pipeline.py b/src/toast/future_ops/pipeline.py index 570b5ab0d..c6cb41a48 100644 --- a/src/toast/future_ops/pipeline.py +++ b/src/toast/future_ops/pipeline.py @@ -16,7 +16,9 @@ class Pipeline(Operator): """Class representing a sequence of Operators. This runs a list of other operators over sets of detectors (default is all - detectors in one shot). + detectors in one shot). By default all observations are passed to each operator, + but the `observation_key` and `observation_value` traits can be used to run the + operators on only observations which have a matching key / value pair. """ @@ -28,7 +30,19 @@ class Pipeline(Operator): detector_sets = List( ["ALL"], - help="List of detector sets. 'ALL' and 'SINGLE' are also valid values.", + help="List of detector sets. 
['ALL'] and ['SINGLE'] are also valid values.", + ) + + observation_key = Unicode( + None, + allow_none=True, + help="Only process observations which have this key defined", + ) + + observation_value = Unicode( + None, + allow_none=True, + help="Only process observations where the key has this value", ) @traitlets.validate("detector_sets") @@ -64,6 +78,17 @@ def _check_operators(self, proposal): ) return ops + @traitlets.validate("observation_value") + def _check_observation_value(self, proposal): + val = proposal["value"] + if val is None: + return val + if self.observation_key is None: + raise traitlets.TraitError( + "observation_key must be set before observation_value" + ) + return val + def __init__(self, **kwargs): super().__init__(**kwargs) @@ -76,23 +101,79 @@ def _exec(self, data, detectors=None, **kwargs): # All our operators support CUDA. Stage any required data pass - if detectors is not None: - msg = "Use the 'detector_sets' option to control a Pipeline" - log.error(msg) - raise RuntimeError(msg) - - for dset in self.detector_sets: - if dset == "ALL": - for op in self.operators: - op.exec(data) - elif dset == "SINGLE": - # We are running one detector at a time - raise NotImplementedError("SINGLE detectors not implemented yet") + # Select the observations we will use + data_sets = [data] + if self.observation_key is not None: + data_sets = list() + split_data = data.split(self.observation_key) + if self.observation_value is None: + # We are using all values of the key + for val, d in split_data.items(): + data_sets.append(d) else: - # We are running sets of detectors at once. We first go through all - # observations and find the set of detectors used by each row of the - # process grid. - raise NotImplementedError("detector sets not implemented yet") + # We are using only one value of the key + if self.observation_value not in split_data: + msg = "input data has no observations where '{}' == '{}'".format( + self.observation_key, self.observation_value + ) + if data.comm.world_rank == 0: + log.warning(msg) + else: + data_sets.append(split_data[self.observation_value]) + + for ds_indx, ds in enumerate(data_sets): + if len(ds.obs) == 0: + # No observations for this group + msg = "data set {}, group {} has no observations".format( + ds_indx, ds.comm.group + ) + if data.comm.group_rank == 0: + log.warning(msg) + for det_set in self.detector_sets: + if det_set == "ALL": + # If this is given, then there should be only one entry + if len(self.detector_sets) != 1: + raise RuntimeError( + "If using 'ALL' for a detector set, there should only be one set" + ) + for op in self.operators: + op.exec(ds, detectors=detectors) + elif det_set == "SINGLE": + # If this is given, then there should be only one entry + if len(self.detector_sets) != 1: + raise RuntimeError( + "If using 'SINGLE' for a detector set, there should only be one set" + ) + + # We are running one detector at a time. We will loop over all + # detectors in the superset of detectors across all observations. + all_local_dets = set() + for ob in ds.obs: + for det in ob.local_detectors: + all_local_dets.add(det) + + # If we were given a more restrictive list, prune the global list + selected_dets = list(all_local_dets) + if detectors is not None: + selected_dets = list() + for det in all_local_dets: + if det in detectors: + selected_dets.append(det) + + for det in selected_dets: + for op in self.operators: + op.exec(ds, detectors=[det]) + else: + # We are running sets of detectors at once. 
For this detector
+                # set, we prune to just the restricted list passed to exec().
+                selected_set = det_set
+                if detectors is not None:
+                    selected_set = list()
+                    for det in det_set:
+                        if det in detectors:
+                            selected_set.append(det)
+                for op in self.operators:
+                    op.exec(ds, detectors=selected_set)
 
         # Copy to / from accelerator...
 
@@ -116,8 +197,10 @@ def _requires(self):
             oreq = op.requires()
             oprov = op.provides()
             for k in keys:
-                req[k] |= oreq[k]
-                req[k] -= oprov[k]
+                if k in oreq:
+                    req[k] |= oreq[k]
+                if k in oprov:
+                    req[k] -= oprov[k]
         for k in keys:
             req[k] = list(req[k])
         return req
@@ -132,8 +215,10 @@ def _provides(self):
             oreq = op.requires()
             oprov = op.provides()
             for k in keys:
-                prov[k] |= oprov[k]
-                prov[k] -= oreq[k]
+                if k in oprov:
+                    prov[k] |= oprov[k]
+                if k in oreq:
+                    prov[k] -= oreq[k]
         for k in keys:
             prov[k] = list(prov[k])
         return prov

From 5dd306fde47cd1088e4a6058e7f2fcc6f0de9465 Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Mon, 30 Nov 2020 12:46:30 -0800
Subject: [PATCH 029/690] Large work in progress on porting all mapmaking tools.

---
 src/toast/future_ops/CMakeLists.txt         |    3 +
 src/toast/future_ops/__init__.py            |    6 +
 src/toast/future_ops/clear.py               |    9 +-
 src/toast/future_ops/copy.py                |  201 ++
 src/toast/future_ops/mapmaker.py            | 2001 ++-----------------
 src/toast/future_ops/mapmaker_binning.py    |   43 +-
 src/toast/future_ops/mapmaker_projection.py |  203 ++
 src/toast/future_ops/mapmaker_templates.py  |  126 ++
 src/toast/future_ops/mapmaker_utils.py      |   41 +-
 src/toast/future_ops/noise_weight.py        |   82 +
 src/toast/future_ops/scan_map.py            |  162 ++
 src/toast/templates/template.py             |  415 ++++
 12 files changed, 1353 insertions(+), 1939 deletions(-)
 create mode 100644 src/toast/future_ops/copy.py
 create mode 100644 src/toast/future_ops/mapmaker_projection.py
 create mode 100644 src/toast/future_ops/mapmaker_templates.py
 create mode 100644 src/toast/future_ops/noise_weight.py
 create mode 100644 src/toast/future_ops/scan_map.py
 create mode 100644 src/toast/templates/template.py

diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/future_ops/CMakeLists.txt
index 1306abb17..69d5daa35 100644
--- a/src/toast/future_ops/CMakeLists.txt
+++ b/src/toast/future_ops/CMakeLists.txt
@@ -5,12 +5,15 @@ install(FILES
     __init__.py
     pipeline.py
     clear.py
+    copy.py
     memory_counter.py
     sim_hwp.py
    sim_tod_noise.py
     sim_satellite.py
     noise_model.py
+    noise_weight.py
     pointing_healpix.py
+    scan_map.py
     mapmaker_utils.py
     mapmaker_binning.py
     DESTINATION ${PYTHON_SITE}/toast/future_ops
diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py
index 09ebd4296..404b995b1 100644
--- a/src/toast/future_ops/__init__.py
+++ b/src/toast/future_ops/__init__.py
@@ -8,6 +8,8 @@
 
 from .clear import Clear
 
+from .copy import Copy
+
 from .pipeline import Pipeline
 
 from .sim_satellite import SimSatellite
@@ -16,8 +18,12 @@
 
 from .noise_model import DefaultNoiseModel
 
+from .noise_weight import NoiseWeight
+
 from .pointing_healpix import PointingHealpix
 
+from .scan_map import ScanMap
+
 from .mapmaker_utils import (
     BuildHitMap,
     BuildInverseCovariance,
diff --git a/src/toast/future_ops/clear.py b/src/toast/future_ops/clear.py
index 8f12cc413..125ca4546 100644
--- a/src/toast/future_ops/clear.py
+++ b/src/toast/future_ops/clear.py
@@ -66,9 +66,12 @@ def _requires(self):
         # exist, providing these as requirements allows us to catch dependency issues
         # in pipelines.
req = dict() - req["meta"] = list(self.meta) - req["detdata"] = list(self.detdata) - req["shared"] = list(self.shared) + if self.meta is not None: + req["meta"] = list(self.meta) + if self.detdata is not None: + req["detdata"] = list(self.detdata) + if self.shared is not None: + req["shared"] = list(self.shared) return req def _provides(self): diff --git a/src/toast/future_ops/copy.py b/src/toast/future_ops/copy.py new file mode 100644 index 000000000..2f6c8034f --- /dev/null +++ b/src/toast/future_ops/copy.py @@ -0,0 +1,201 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +from ..utils import Logger + +from ..mpi import MPI + +from ..traits import trait_docs, Int, Unicode, List + +from ..operator import Operator + + +@trait_docs +class Copy(Operator): + """Class to copy data. + + This operator takes lists of shared, detdata, and meta keys to copy to a new + location in each observation. + + Each list contains tuples specifying the input and output key names. + + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + meta = List( + None, allow_none=True, help="List of tuples of Observation meta keys to copy" + ) + + detdata = List( + None, allow_none=True, help="List of tuples of Observation detdata keys to copy" + ) + + shared = List( + None, allow_none=True, help="List of tuples of Observation shared keys to copy" + ) + + @traitlets.validate("meta") + def _check_meta(self, proposal): + val = proposal["value"] + if val is None: + return val + for v in val: + if not isinstance(v, (tuple, list)): + raise traitlets.TraitError("trait should be a list of tuples") + if len(v) != 2: + raise traitlets.TraitError("key tuples should have 2 values") + if not isinstance(v[0], str) or not isinstance(v[1], str): + raise traitlets.TraitError("key tuples should have string values") + return val + + @traitlets.validate("detdata") + def _check_detdata(self, proposal): + val = proposal["value"] + if val is None: + return val + for v in val: + if not isinstance(v, (tuple, list)): + raise traitlets.TraitError("trait should be a list of tuples") + if len(v) != 2: + raise traitlets.TraitError("key tuples should have 2 values") + if not isinstance(v[0], str) or not isinstance(v[1], str): + raise traitlets.TraitError("key tuples should have string values") + return val + + @traitlets.validate("shared") + def _check_shared(self, proposal): + val = proposal["value"] + if val is None: + return val + for v in val: + if not isinstance(v, (tuple, list)): + raise traitlets.TraitError("trait should be a list of tuples") + if len(v) != 2: + raise traitlets.TraitError("key tuples should have 2 values") + if not isinstance(v[0], str) or not isinstance(v[1], str): + raise traitlets.TraitError("key tuples should have string values") + return val + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + for ob in data.obs: + if self.meta is not None: + for in_key, out_key in self.meta: + if out_key in ob: + # The key exists- issue a warning before overwriting. 
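+                        # Meta values are assigned by reference below, so both
+                        # keys end up pointing at the same underlying object.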
+                        msg = "Observation key {} already exists- overwriting".format(
+                            out_key
+                        )
+                        log.warning(msg)
+                    ob[out_key] = ob[in_key]
+
+            if self.shared is not None:
+                for in_key, out_key in self.shared:
+                    if out_key in ob.shared:
+                        if ob.shared[in_key].comm is not None:
+                            comp = MPI.Comm.Compare(
+                                ob.shared[out_key].comm, ob.shared[in_key].comm
+                            )
+                            if comp not in (MPI.IDENT, MPI.CONGRUENT):
+                                msg = "Cannot copy to existing shared key {} with a different communicator".format(
+                                    out_key
+                                )
+                                log.error(msg)
+                                raise RuntimeError(msg)
+                        if ob.shared[out_key].dtype != ob.shared[in_key].dtype:
+                            msg = "Cannot copy to existing shared key {} with different dtype".format(
+                                out_key
+                            )
+                            log.error(msg)
+                            raise RuntimeError(msg)
+                        if ob.shared[out_key].shape != ob.shared[in_key].shape:
+                            msg = "Cannot copy to existing shared key {} with different shape".format(
+                                out_key
+                            )
+                            log.error(msg)
+                            raise RuntimeError(msg)
+                    else:
+                        ob.shared.create(
+                            out_key,
+                            shape=ob.shared[in_key].shape,
+                            dtype=ob.shared[in_key].dtype,
+                            comm=ob.shared[in_key].comm,
+                        )
+                    # Only one process per node copies the shared data.
+                    if (
+                        ob.shared[in_key].nodecomm is None
+                        or ob.shared[in_key].nodecomm.rank == 0
+                    ):
+                        ob.shared[out_key]._flat[:] = ob.shared[in_key]._flat
+
+            if self.detdata is not None:
+                for in_key, out_key in self.detdata:
+                    if out_key in ob.detdata:
+                        # The key exists- verify that dimensions match
+                        if (
+                            ob.detdata[out_key].detectors
+                            != ob.detdata[in_key].detectors
+                        ):
+                            msg = "Cannot copy to existing detdata key {} with different detectors".format(
+                                out_key
+                            )
+                            log.error(msg)
+                            raise RuntimeError(msg)
+                        if ob.detdata[out_key].dtype != ob.detdata[in_key].dtype:
+                            msg = "Cannot copy to existing detdata key {} with different dtype".format(
+                                out_key
+                            )
+                            log.error(msg)
+                            raise RuntimeError(msg)
+                        if ob.detdata[out_key].shape != ob.detdata[in_key].shape:
+                            msg = "Cannot copy to existing detdata key {} with different shape".format(
+                                out_key
+                            )
+                            log.error(msg)
+                            raise RuntimeError(msg)
+                    else:
+                        ob.detdata.create(
+                            out_key,
+                            detshape=ob.detdata[in_key].detector_shape,
+                            dtype=ob.detdata[in_key].dtype,
+                            detectors=ob.detdata[in_key].detectors,
+                        )
+                    ob.detdata[out_key][:] = ob.detdata[in_key][:]
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        return None
+
+    def _requires(self):
+        req = dict()
+        if self.meta is not None:
+            req["meta"] = [x[0] for x in self.meta]
+        if self.detdata is not None:
+            req["detdata"] = [x[0] for x in self.detdata]
+        if self.shared is not None:
+            req["shared"] = [x[0] for x in self.shared]
+        return req
+
+    def _provides(self):
+        prov = dict()
+        if self.meta is not None:
+            prov["meta"] = [x[1] for x in self.meta]
+        if self.detdata is not None:
+            prov["detdata"] = [x[1] for x in self.detdata]
+        if self.shared is not None:
+            prov["shared"] = [x[1] for x in self.shared]
+        return prov
+
+    def _accelerators(self):
+        # Eventually we can copy memory objects on devices...
+        return list()
diff --git a/src/toast/future_ops/mapmaker.py b/src/toast/future_ops/mapmaker.py
index bf46327cf..69aca411d 100644
--- a/src/toast/future_ops/mapmaker.py
+++ b/src/toast/future_ops/mapmaker.py
@@ -1,1927 +1,166 @@
-from collections import OrderedDict
-import os
-import sys
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
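
[Editor's note] The rewritten mapmaker.py below delegates the heavy lifting to
separate projection/binning/template operators, but the algebra it implements is
ordinary generalized least squares. As orientation, here is a dense toy version
of the equations quoted in the MapMaker docstring further down; every name in
this sketch is a small random stand-in, not part of the TOAST API:

    import numpy as np

    rng = np.random.default_rng(0)
    nsamp, npix, namp = 200, 20, 10
    P = rng.random((nsamp, npix))         # toy pointing matrix
    M = rng.random((nsamp, namp))         # toy template matrix
    Ninv = np.diag(1.0 / rng.uniform(0.5, 2.0, nsamp))  # diagonal noise weights
    d = rng.normal(size=nsamp)            # toy timestream data

    # Z = I - P (P^T N^-1 P)^-1 P^T N^-1 projects the sky estimate out of the data.
    B = np.linalg.solve(P.T @ Ninv @ P, P.T @ Ninv)
    Z = np.eye(nsamp) - P @ B

    # Solve [M^T N^-1 Z M] a = M^T N^-1 Z d for the template amplitudes
    # (the template prior term M_p is omitted in this sketch).
    a = np.linalg.solve(M.T @ Ninv @ Z @ M, M.T @ Ninv @ Z @ d)

    # Bin the cleaned data: (P^T N^-1 P)^-1 P^T N^-1 (d - M a)
    map_estimate = B @ (d - M @ a)
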
-import numpy as np -import scipy.linalg -import scipy.signal - -from ..operator import Operator -from ..mpi import MPI - -from ..timing import gather_timers, GlobalTimers, function_timer, Timer -from ..utils import Logger, Environment -from .sim_det_map import OpSimScan -from .todmap_math import OpAccumDiag, OpScanScale, OpScanMask -from ..tod import OpCacheClear, OpCacheCopy, OpCacheInit, OpFlagsApply, OpFlagGaps -from ..map import covariance_apply, covariance_invert, DistPixels, covariance_rcond -from .. import qarray as qa - -from .._libtoast import add_offsets_to_signal, project_signal_offsets - - -XAXIS, YAXIS, ZAXIS = np.eye(3) - -temporary_names = set() - - -def get_temporary_name(): - i = 0 - while True: - name = "temporary{:03}".format(i) - if name not in temporary_names: - break - i += 1 - temporary_names.add(name) - return name - - -def free_temporary_name(name): - temporary_names.remove(name) - - -class TOASTMatrix: - def apply(self, vector, inplace=False): - """Every TOASTMatrix can apply itself to a distributed vectors - of signal, map or template offsets as is appropriate. - """ - raise NotImplementedError("Virtual apply not implemented in derived class") - - def apply_transpose(self, vector, inplace=False): - """Every TOASTMatrix can apply itself to a distributed vectors - of signal, map or template offsets as is appropriate. - """ - raise NotImplementedError( - "Virtual apply_transpose not implemented in derived class" - ) - - -class TOASTVector: - def dot(self, other): - raise NotImplementedError("Virtual dot not implemented in derived class") - - -class UnitMatrix(TOASTMatrix): - def apply(self, vector, inplace=False): - if inplace: - outvec = vector - else: - outvec = vector.copy() - return outvec - - -class TODTemplate: - """Parent class for all templates that can be registered with - TemplateMatrix - """ - - name = None - namplitude = 0 - comm = None - - def __init___(self, *args, **kwargs): - raise NotImplementedError("Derived class must implement __init__()") - - def add_to_signal(self, signal, amplitudes): - """signal += F.a""" - raise NotImplementedError("Derived class must implement add_to_signal()") - - def project_signal(self, signal, amplitudes): - """a += F^T.signal""" - raise NotImplementedError("Derived class must implement project_signal()") - - def add_prior(self, amplitudes_in, amplitudes_out): - """a' += C_a^{-1}.a""" - # Not all TODTemplates implement the prior - return - - def apply_precond(self, amplitudes_in, amplitudes_out): - """a' = M^{-1}.a""" - raise NotImplementedError("Derived class must implement apply_precond()") - - -class SubharmonicTemplate(TODTemplate): - """This class represents sub-harmonic noise fluctuations. - - Sub-harmonic means that the characteristic frequency of the noise - modes is lower than 1/T where T is the length of the interval - being fitted. 
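
[Editor's note] The "hierarchy of subharmonic modes" built by _get_templates()
below is the Legendre polynomial basis on x in [-1, 1], generated with Bonnet's
recursion, which is exactly the three-term update coded there:

.. math::
    n P_n(x) = (2n - 1)\, x\, P_{n-1}(x) - (n - 1)\, P_{n-2}(x),
    \qquad P_0(x) = 1, \quad P_1(x) = x
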
- """ - - name = "subharmonic" - - def __init__( - self, - data, - detweights, - order=1, - intervals=None, - common_flags=None, - common_flag_mask=1, - flags=None, - flag_mask=1, - ): - self.data = data - self.detweights = detweights - self.order = order - self.intervals = intervals - self.common_flags = common_flags - self.common_flag_mask = common_flag_mask - self.flags = flags - self.flag_mask = flag_mask - self._last_nsamp = None - self._last_templates = None - self.get_steps_and_preconditioner() - - def get_steps_and_preconditioner(self): - """Assign each template an amplitude""" - self.templates = [] - self.slices = [] - self.preconditioners = [] - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - common_flags = tod.local_common_flags(self.common_flags) - common_flags = (common_flags & self.common_flag_mask) != 0 - if (self.intervals is not None) and (self.intervals in obs): - intervals = obs[self.intervals] - else: - intervals = None - local_intervals = tod.local_intervals(intervals) - slices = {} # this observation - preconditioners = {} # this observation - for ival in local_intervals: - todslice = slice(ival.first, ival.last + 1) - for idet, det in enumerate(tod.local_dets): - ind = slice(self.namplitude, self.namplitude + self.order + 1) - self.templates.append([ind, iobs, det, todslice]) - self.namplitude += self.order + 1 - preconditioner = self._get_preconditioner( - det, tod, todslice, common_flags, self.detweights[iobs][det] - ) - if det not in preconditioners: - preconditioners[det] = [] - slices[det] = [] - preconditioners[det].append(preconditioner) - slices[det].append(ind) - self.slices.append(slices) - self.preconditioners.append(preconditioners) - return - - def _get_preconditioner(self, det, tod, todslice, common_flags, detweight): - """Calculate the preconditioner for the given interval and detector""" - flags = tod.local_flags(det, self.flags)[todslice] - good = (flags & self.flag_mask) == 0 - good[common_flags[todslice]] = False - norder = self.order + 1 - preconditioner = np.zeros([norder, norder]) - templates = self._get_templates(todslice) - for row in range(norder): - for col in range(row, norder): - preconditioner[row, col] = np.dot( - templates[row][good], templates[col][good] - ) - preconditioner[row, col] *= detweight - if row != col: - preconditioner[col, row] = preconditioner[row, col] - preconditioner = np.linalg.inv(preconditioner) - return preconditioner - - def add_to_signal(self, signal, amplitudes): - subharmonic_amplitudes = amplitudes[self.name] - for ibase, (ind, iobs, det, todslice) in enumerate(self.templates): - templates = self._get_templates(todslice) - amps = subharmonic_amplitudes[ind] - for template, amplitude in zip(templates, amps): - signal[iobs, det, todslice] += template * amplitude - return - - def _get_templates(self, todslice): - """Develop hierarchy of subharmonic modes matching the given length - - The basis functions are (orthogonal) Legendre polynomials - """ - nsamp = todslice.stop - todslice.start - if nsamp != self._last_nsamp: - templates = np.zeros([self.order + 1, nsamp]) - r = np.linspace(-1, 1, nsamp) - for order in range(self.order + 1): - if order == 0: - templates[order] = 1 - elif order == 1: - templates[order] = r - else: - templates[order] = ( - (2 * order - 1) * r * templates[order - 1] - - (order - 1) * templates[order - 2] - ) / order - self._last_nsamp = nsamp - self._last_templates = templates - return self._last_templates - - def project_signal(self, signal, amplitudes): - 
subharmonic_amplitudes = amplitudes[self.name] - for ibase, (ind, iobs, det, todslice) in enumerate(self.templates): - templates = self._get_templates(todslice) - amps = subharmonic_amplitudes[ind] - for order, template in enumerate(templates): - amps[order] = np.dot(signal[iobs, det, todslice], template) - pass - - def apply_precond(self, amplitudes_in, amplitudes_out): - """Standard diagonal preconditioner accounting for the fact that - the templates are not orthogonal in the presence of flagging and masking - """ - subharmonic_amplitudes_in = amplitudes_in[self.name] - subharmonic_amplitudes_out = amplitudes_out[self.name] - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - slices = self.slices[iobs][det] - preconditioners = self.preconditioners[iobs][det] - for ind, preconditioner in zip(slices, preconditioners): - subharmonic_amplitudes_out[ind] = np.dot( - preconditioner, subharmonic_amplitudes_in[ind] - ) - return - - -class Fourier2DTemplate(TODTemplate): - """This class represents atmospheric fluctuations in front of the - focalplane as 2D Fourier modes.""" - - name = "Fourier2D" - - def __init__( - self, - data, - detweights, - focalplane_radius=None, # degrees - order=1, - fit_subharmonics=True, - intervals=None, - common_flags=None, - common_flag_mask=1, - flags=None, - flag_mask=1, - correlation_length=10, - correlation_amplitude=10, - ): - self.data = data - self.comm = data.comm.comm_group - self.detweights = detweights - self.focalplane_radius = focalplane_radius - self.order = order - self.fit_subharmonics = fit_subharmonics - self.norder = order + 1 - self.nmode = (2 * order) ** 2 + 1 - if self.fit_subharmonics: - self.nmode += 2 - self.intervals = intervals - self.common_flags = common_flags - self.common_flag_mask = common_flag_mask - self.flags = flags - self.flag_mask = flag_mask - self._get_templates() - self.correlation_length = correlation_length - self.correlation_amplitude = correlation_amplitude - if correlation_length: - self._get_prior() - return - - @function_timer - def _get_prior(self): - """Evaluate C_a^{-1} for the 2D polynomial coefficients based - on the correlation length. - """ - if self.correlation_length: - # Correlation length is given in seconds and we cannot assume - # that each observation has the same sampling rate. Therefore, - # we will build the filter for each observation - self.filters = [] # all observations - self.preconditioners = [] # all observations - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - times = tod.local_times() - corr = ( - np.exp((times[0] - times) / self.correlation_length) - * self.correlation_amplitude - ) - ihalf = times.size // 2 - corr[ihalf + 1 :] = corr[ihalf - 1 : 0 : -1] - fcorr = np.fft.rfft(corr) - invcorr = np.fft.irfft(1 / fcorr) - self.filters.append(invcorr) - # Scale the filter by the prescribed correlation strength - # and the number of modes at each angular scale - self.filter_scale = np.zeros(self.nmode) - self.filter_scale[0] = 1 - offset = 1 - if self.fit_subharmonics: - self.filter_scale[1:3] = 2 - offset += 2 - self.filter_scale[offset:] = 4 - self.filter_scale *= self.correlation_amplitude - return - - @function_timer - def _get_templates(self): - """Evaluate and normalize the polynomial templates. - - Each template corresponds to a fixed value for each detector - and depends on the position of the detector. 
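
[Editor's note] Concretely, evaluate_template() below spans one constant mode,
optionally the two linear "subharmonic" modes theta/r and phi/r, and all cross
products of cos/sin(k pi theta / r) with cos/sin(l pi phi / r) for k, l = 1..n,
which is where the mode count used in the constructor comes from:

.. math::
    N_{\mathrm{mode}} = 1 + (2n)^2 \quad (+\,2 \ \text{when subharmonics are fit})
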
- """ - self.templates = [] - - def evaluate_template(theta, phi, radius): - values = np.zeros(self.nmode) - values[0] = 1 - offset = 1 - if self.fit_subharmonics: - values[1:3] = theta / radius, phi / radius - offset += 2 - if self.order > 0: - rinv = np.pi / radius - orders = np.arange(self.order) + 1 - thetavec = np.zeros(self.order * 2) - phivec = np.zeros(self.order * 2) - thetavec[::2] = np.cos(orders * theta * rinv) - thetavec[1::2] = np.sin(orders * theta * rinv) - phivec[::2] = np.cos(orders * phi * rinv) - phivec[1::2] = np.sin(orders * phi * rinv) - values[offset:] = np.outer(thetavec, phivec).ravel() - return values - - self.norms = [] - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - common_flags = tod.local_common_flags(self.common_flags) - common_flags = (common_flags & self.common_flag_mask) != 0 - nsample = tod.total_samples - obs_templates = {} - focalplane = obs["focalplane"] - if self.focalplane_radius: - radius = np.radians(self.focalplane_radius) - else: - try: - radius = np.radians(focalplane.radius) - except AttributeError: - # Focalplane is just a dictionary - radius = np.radians(obs["fpradius"]) - norms = np.zeros([nsample, self.nmode]) - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - for det in tod.local_dets: - flags = tod.local_flags(det, self.flags) - good = ((flags & self.flag_mask) | common_flags) == 0 - detweight = self.detweights[iobs][det] - det_quat = focalplane[det]["quat"] - x, y, z = qa.rotate(det_quat, ZAXIS) - theta, phi = np.arcsin([x, y]) - obs_templates[det] = evaluate_template(theta, phi, radius) - norms[todslice] += np.outer(good, obs_templates[det] ** 2 * detweight) - self.comm.allreduce(norms) - good = norms != 0 - norms[good] = 1 / norms[good] - self.norms.append(norms.ravel()) - self.templates.append(obs_templates) - self.namplitude += nsample * self.nmode - - self.norms = np.hstack(self.norms) - - return - - @function_timer - def add_to_signal(self, signal, amplitudes): - """signal += F.a""" - poly_amplitudes = amplitudes[self.name] - amplitude_offset = 0 - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - nsample = tod.total_samples - # For each observation, sample indices start from 0 - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - obs_amplitudes = poly_amplitudes[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode])[todslice] - for det in tod.local_dets: - templates = self.templates[iobs][det] - signal[iobs, det, todslice] += np.sum(obs_amplitudes * templates, 1) - amplitude_offset += nsample * self.nmode - return - - @function_timer - def project_signal(self, signal, amplitudes): - """a += F^T.signal""" - poly_amplitudes = amplitudes[self.name] - amplitude_offset = 0 - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - nsample = tod.total_samples - # For each observation, sample indices start from 0 - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - obs_amplitudes = poly_amplitudes[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode]) - if self.comm is not None: - my_amplitudes = np.zeros_like(obs_amplitudes) - else: - my_amplitudes = obs_amplitudes - for det in tod.local_dets: - templates = self.templates[iobs][det] - my_amplitudes[todslice] += np.outer( - signal[iobs, det, todslice], templates - ) - if self.comm 
is not None: - self.comm.allreduce(my_amplitudes) - obs_amplitudes += my_amplitudes - amplitude_offset += nsample * self.nmode - return - - def add_prior(self, amplitudes_in, amplitudes_out): - """a' += C_a^{-1}.a""" - if self.correlation_length: - poly_amplitudes_in = amplitudes_in[self.name] - poly_amplitudes_out = amplitudes_out[self.name] - amplitude_offset = 0 - for obs, noisefilter in zip(self.data.obs, self.filters): - tod = obs["tod"] - nsample = tod.total_samples - obs_amplitudes_in = poly_amplitudes_in[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode]) - obs_amplitudes_out = poly_amplitudes_out[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode]) - # import pdb - # import matplotlib.pyplot as plt - # pdb.set_trace() - for mode in range(self.nmode): - scale = self.filter_scale[mode] - obs_amplitudes_out[:, mode] += scipy.signal.convolve( - obs_amplitudes_in[:, mode], - noisefilter * scale, - mode="same", - ) - amplitude_offset += nsample * self.nmode - return - - def apply_precond(self, amplitudes_in, amplitudes_out): - """a' = M^{-1}.a""" - poly_amplitudes_in = amplitudes_in[self.name] - poly_amplitudes_out = amplitudes_out[self.name] - poly_amplitudes_out[:] = poly_amplitudes_in * self.norms - return - - -class OffsetTemplate(TODTemplate): - """This class represents noise fluctuations as a step function""" - - name = "offset" - - def __init__( - self, - data, - detweights, - step_length=1000000, - intervals=None, - use_noise_prior=True, - common_flags=None, - common_flag_mask=1, - flags=None, - flag_mask=1, - precond_width=20, - ): - self.data = data - self.detweights = detweights - self.step_length = step_length - self.intervals = intervals - self.common_flags = common_flags - self.common_flag_mask = common_flag_mask - self.flags = flags - self.flag_mask = flag_mask - self.precond_width = precond_width - self.get_steps() - self.use_noise_prior = use_noise_prior - if self.use_noise_prior: - self.get_filters_and_preconditioners() - return - - @function_timer - def get_filters_and_preconditioners(self): - """Compute and store the filter and associated preconditioner - for every detector and every observation - """ - log = Logger.get() - self.filters = [] # all observations - self.preconditioners = [] # all observations - for iobs, obs in enumerate(self.data.obs): - if "noise" not in obs: - # If the observations do not include noise PSD:s, we - # we cannot build filters. - if len(self.filters) > 0: - log.warning( - 'Observation "{}" does not have noise information' - "".format(obs["name"]) - ) - continue - tod = obs["tod"] - # Determine the binning for the noise prior - times = tod.local_times() - dtime = np.amin(np.diff(times)) - fsample = 1 / dtime - obstime = times[-1] - times[0] - tbase = self.step_length - fbase = 1 / tbase - powmin = np.floor(np.log10(1 / obstime)) - 1 - powmax = min(np.ceil(np.log10(1 / tbase)) + 2, fsample) - freq = np.logspace(powmin, powmax, 1000) - # Now build the filter for each detector - noise = obs["noise"] - noisefilters = {} # this observation - preconditioners = {} # this observation - for det in tod.local_dets: - offset_psd = self._get_offset_psd(noise, freq, det) - # Store real space filters for every interval and every detector. 
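
[Editor's note] The spectrum assembled in _get_offset_psd() below is the baseline
(offset) PSD from Keihänen et al. (2010) cited there, with the infinite sum over
Fourier aliases truncated at |m| <= 1, matching the loop in the code:

.. math::
    P_a(f) \approx f_b \sum_{m=-1}^{1} P(f + m f_b)
        \left[ \frac{\sin \pi (f t_b + m)}{\pi (f t_b + m)} \right]^2

where t_b is the baseline step length and f_b = 1 / t_b.
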
- ( - noisefilters[det], - preconditioners[det], - ) = self._get_noisefilter_and_preconditioner( - freq, offset_psd, self.offset_slices[iobs][det] - ) - self.filters.append(noisefilters) - self.preconditioners.append(preconditioners) - return - - @function_timer - def _get_offset_psd(self, noise, freq, det): - psdfreq = noise.freq(det) - psd = noise.psd(det) - rate = noise.rate(det) - # Remove the white noise component from the PSD - psd = psd.copy() * np.sqrt(rate) - psd -= np.amin(psd[psdfreq > 1.0]) - psd[psd < 1e-30] = 1e-30 - - # The calculation of `offset_psd` is from Keihänen, E. et al: - # "Making CMB temperature and polarization maps with Madam", - # A&A 510:A57, 2010 - logfreq = np.log(psdfreq) - logpsd = np.log(psd) - - def interpolate_psd(x): - result = np.zeros(x.size) - good = np.abs(x) > 1e-10 - logx = np.log(np.abs(x[good])) - logresult = np.interp(logx, logfreq, logpsd) - result[good] = np.exp(logresult) - return result - - def g(x): - bad = np.abs(x) < 1e-10 - good = np.logical_not(bad) - arg = np.pi * x[good] - result = bad.astype(np.float64) - result[good] = (np.sin(arg) / arg) ** 2 - return result - - tbase = self.step_length - fbase = 1 / tbase - offset_psd = interpolate_psd(freq) * g(freq * tbase) - for m in range(1, 2): - offset_psd += interpolate_psd(freq + m * fbase) * g(freq * tbase + m) - offset_psd += interpolate_psd(freq - m * fbase) * g(freq * tbase - m) - offset_psd *= fbase - return offset_psd - - @function_timer - def _get_noisefilter_and_preconditioner(self, freq, offset_psd, offset_slices): - logfreq = np.log(freq) - logpsd = np.log(offset_psd) - logfilter = np.log(1 / offset_psd) - - def interpolate(x, psd): - result = np.zeros(x.size) - good = np.abs(x) > 1e-10 - logx = np.log(np.abs(x[good])) - logresult = np.interp(logx, logfreq, psd) - result[good] = np.exp(logresult) - return result - - def truncate(noisefilter, lim=1e-4): - icenter = noisefilter.size // 2 - ind = np.abs(noisefilter[:icenter]) > np.abs(noisefilter[0]) * lim - icut = np.argwhere(ind)[-1][0] - if icut % 2 == 0: - icut += 1 - noisefilter = np.roll(noisefilter, icenter) - noisefilter = noisefilter[icenter - icut : icenter + icut + 1] - return noisefilter - - noisefilters = [] - preconditioners = [] - for offset_slice, sigmasqs in offset_slices: - nstep = offset_slice.stop - offset_slice.start - filterlen = nstep * 2 + 1 - filterfreq = np.fft.rfftfreq(filterlen, self.step_length) - noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter))) - noisefilters.append(noisefilter) - # Build the band-diagonal preconditioner - if self.precond_width <= 1: - # Compute C_a prior - preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd))) - else: - # Compute Cholesky decomposition prior - wband = min(self.precond_width, noisefilter.size // 2) - precond_width = max(wband, min(self.precond_width, nstep)) - icenter = noisefilter.size // 2 - preconditioner = np.zeros([precond_width, nstep], dtype=np.float64) - preconditioner[0] = sigmasqs - preconditioner[:wband, :] += np.repeat( - noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1 - ) - lower = True - scipy.linalg.cholesky_banded( - preconditioner, overwrite_ab=True, lower=lower, check_finite=True - ) - preconditioners.append((preconditioner, lower)) - return noisefilters, preconditioners - - @function_timer - def get_steps(self): - """Divide each interval into offset steps""" - self.offset_templates = [] - self.offset_slices = [] # slices in all observations - for iobs, obs in enumerate(self.data.obs): - tod = 
obs["tod"] - common_flags = tod.local_common_flags(self.common_flags) - common_flags = (common_flags & self.common_flag_mask) != 0 - if (self.intervals is not None) and (self.intervals in obs): - intervals = obs[self.intervals] - else: - intervals = None - local_intervals = tod.local_intervals(intervals) - times = tod.local_times() - offset_slices = {} # slices in this observation - for ival in local_intervals: - length = times[ival.last] - times[ival.first] - nbase = int(np.ceil(length / self.step_length)) - # Divide the interval into steps, allowing for irregular sampling - todslices = [] - start_times = np.arange(nbase) * self.step_length + ival.start - start_indices = np.searchsorted(times, start_times) - stop_indices = np.hstack([start_indices[1:], [ival.last]]) - todslices = [] - for istart, istop in zip(start_indices, stop_indices): - todslices.append(slice(istart, istop)) - for idet, det in enumerate(tod.local_dets): - istart = self.namplitude - sigmasqs = [] - for todslice in todslices: - sigmasq = self._get_sigmasq( - tod, det, todslice, common_flags, self.detweights[iobs][det] - ) - # Register the baseline offset - self.offset_templates.append( - [self.namplitude, iobs, det, todslice, sigmasq] - ) - sigmasqs.append(sigmasq) - self.namplitude += 1 - # Keep a record of ranges of offsets that correspond - # to one detector and one interval. - # This is the domain we apply the noise filter in. - if det not in offset_slices: - offset_slices[det] = [] - offset_slices[det].append( - (slice(istart, self.namplitude), sigmasqs) - ) - self.offset_slices.append(offset_slices) - return - - @function_timer - def _get_sigmasq(self, tod, det, todslice, common_flags, detweight): - """calculate a rough estimate of the baseline variance - for diagonal preconditioner - """ - flags = tod.local_flags(det, self.flags)[todslice] - good = (flags & self.flag_mask) == 0 - good[common_flags[todslice]] = False - ngood = np.sum(good) - sigmasq = 1 - if detweight != 0: - sigmasq /= detweight - if ngood != 0: - sigmasq /= ngood - return sigmasq - - @function_timer - def add_to_signal(self, signal, amplitudes): - offset_amplitudes = amplitudes[self.name] - last_obs = None - last_det = None - last_ref = None - todslices = [] - itemplates = [] - for itemplate, iobs, det, todslice, sigmasq in self.offset_templates: - if iobs != last_obs or det != last_det: - if len(todslices) != 0: - add_offsets_to_signal( - last_ref, todslices, offset_amplitudes, np.array(itemplates) - ) - todslices = [] - itemplates = [] - last_obs = iobs - last_det = det - last_ref = signal[iobs, det, :] - todslices.append(todslice) - itemplates.append(itemplate) - if len(todslices) != 0: - add_offsets_to_signal( - last_ref, todslices, offset_amplitudes, np.array(itemplates) - ) - return - - @function_timer - def project_signal(self, signal, amplitudes): - offset_amplitudes = amplitudes[self.name] - last_obs = None - last_det = None - last_ref = None - todslices = [] - itemplates = [] - for itemplate, iobs, det, todslice, sqsigma in self.offset_templates: - if iobs != last_obs or det != last_det: - if len(todslices) != 0: - project_signal_offsets( - last_ref, todslices, offset_amplitudes, np.array(itemplates) - ) - todslices = [] - itemplates = [] - last_obs = iobs - last_det = det - last_ref = signal[iobs, det, :] - todslices.append(todslice) - itemplates.append(itemplate) - if len(todslices) != 0: - project_signal_offsets( - last_ref, todslices, offset_amplitudes, np.array(itemplates) - ) - return - - @function_timer - def add_prior(self, 
amplitudes_in, amplitudes_out): - if not self.use_noise_prior: - return - offset_amplitudes_in = amplitudes_in[self.name] - offset_amplitudes_out = amplitudes_out[self.name] - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - slices = self.offset_slices[iobs][det] - filters = self.filters[iobs][det] - for (offsetslice, sigmasqs), noisefilter in zip(slices, filters): - amps_in = offset_amplitudes_in[offsetslice] - # scipy.signal.convolve will use either `convolve` or `fftconvolve` - # depending on the size of the inputs - amps_out = scipy.signal.convolve(amps_in, noisefilter, mode="same") - offset_amplitudes_out[offsetslice] += amps_out - return - - @function_timer - def apply_precond(self, amplitudes_in, amplitudes_out): - offset_amplitudes_in = amplitudes_in[self.name] - offset_amplitudes_out = amplitudes_out[self.name] - if self.use_noise_prior: - # C_a preconditioner - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - slices = self.offset_slices[iobs][det] - preconditioners = self.preconditioners[iobs][det] - for (offsetslice, sigmasqs), preconditioner in zip( - slices, preconditioners - ): - amps_in = offset_amplitudes_in[offsetslice] - if self.precond_width <= 1: - # Use C_a prior - # scipy.signal.convolve will use either `convolve` or `fftconvolve` - # depending on the size of the inputs - amps_out = scipy.signal.convolve( - amps_in, preconditioner, mode="same" - ) - else: - # Use pre-computed Cholesky decomposition - amps_out = scipy.linalg.cho_solve_banded( - preconditioner, - amps_in, - overwrite_b=False, - check_finite=True, - ) - offset_amplitudes_out[offsetslice] = amps_out - else: - # Diagonal preconditioner - offset_amplitudes_out[:] = offset_amplitudes_in - for itemplate, iobs, det, todslice, sigmasq in self.offset_templates: - offset_amplitudes_out[itemplate] *= sigmasq - return - - -class TemplateMatrix(TOASTMatrix): - def __init__(self, data, comm, templates=None): - """Initialize the template matrix with a given baseline length""" - self.data = data - self.comm = comm - self.templates = [] - for template in templates: - self.register_template(template) - return - - @function_timer - def register_template(self, template): - """Add template to the list of templates to fit""" - self.templates.append(template) - - @function_timer - def apply(self, amplitudes): - """Compute and return y = F.a""" - new_signal = self.zero_signal() - for template in self.templates: - template.add_to_signal(new_signal, amplitudes) - return new_signal - - @function_timer - def apply_transpose(self, signal): - """Compute and return a = F^T.y""" - new_amplitudes = self.zero_amplitudes() - for template in self.templates: - template.project_signal(signal, new_amplitudes) - return new_amplitudes - - @function_timer - def add_prior(self, amplitudes, new_amplitudes): - """Compute a' += C_a^{-1}.a""" - for template in self.templates: - template.add_prior(amplitudes, new_amplitudes) - return - - @function_timer - def apply_precond(self, amplitudes): - """Compute a' = M^{-1}.a""" - new_amplitudes = self.zero_amplitudes() - for template in self.templates: - template.apply_precond(amplitudes, new_amplitudes) - return new_amplitudes - - @function_timer - def zero_amplitudes(self): - """Return a null amplitudes object""" - new_amplitudes = TemplateAmplitudes(self.templates, self.comm) - return new_amplitudes - - @function_timer - def zero_signal(self): - """Return a distributed vector of signal set to zero. 
-
-        The zero signal object will use the same TOD objects but different cache prefix
-        """
-        new_signal = Signal(self.data, temporary=True, init_val=0)
-        return new_signal
-
-    @function_timer
-    def clean_signal(self, signal, amplitudes, in_place=True):
-        """Clean the given distributed signal vector by subtracting
-        the templates multiplied by the given amplitudes.
-        """
-        # DEBUG begin
-        """
-        import pdb
-        import matplotlib.pyplot as plt
-        plt.figure(figsize=[18, 12])
-        for sig in [signal]:
-            tod = sig.data.obs[0]["tod"]
-            for idet, det in enumerate(tod.local_dets):
-                plt.subplot(2, 2, idet + 1)
-                plt.plot(tod.local_signal(det, sig.name), label=sig.name, zorder=50)
-        """
-        # DEBUG end
-        if in_place:
-            outsignal = signal
-        else:
-            outsignal = signal.copy()
-        template_tod = self.apply(amplitudes)
-        outsignal -= template_tod
-        # DEBUG begin
-        """
-        for sig, zorder in [(template_tod, 100), (outsignal, 0)]:
-            tod = sig.data.obs[0]["tod"]
-            for idet, det in enumerate(tod.local_dets):
-                plt.subplot(2, 2, idet + 1)
-                plt.plot(tod.local_signal(det, sig.name), label=sig.name, zorder=zorder)
-        plt.legend(loc="best")
-        plt.savefig("test.png")
-        plt.close()
-        #pdb.set_trace()
-        """
-        # DEBUG end
-        return outsignal
+import traitlets
+import numpy as np
 
-class TemplateAmplitudes(TOASTVector):
-    """TemplateAmplitudes objects hold local and shared template amplitudes"""
+from ..utils import Logger
 
-    def __init__(self, templates, comm):
-        self.comm = comm
-        self.amplitudes = OrderedDict()
-        self.comms = OrderedDict()
-        for template in templates:
-            self.amplitudes[template.name] = np.zeros(template.namplitude)
-            self.comms[template.name] = template.comm
-        return
+from ..traits import trait_docs, Int, Unicode, Bool, Instance
 
-    @function_timer
-    def __str__(self):
-        result = "template amplitudes:"
-        for name, values in self.amplitudes.items():
-            result += '\n"{}" : \n{}'.format(name, values)
-        return result
+from ..operator import Operator
 
-    @function_timer
-    def dot(self, other):
-        """Compute the dot product between the two amplitude vectors"""
-        total = 0
-        for name, values in self.amplitudes.items():
-            comm = self.comms[name]
-            if comm is None or comm.rank == 0:
-                total += np.dot(values, other.amplitudes[name])
-        if self.comm is not None:
-            total = self.comm.allreduce(total, op=MPI.SUM)
-        return total
+from ..timing import function_timer
 
-    @function_timer
-    def __getitem__(self, key):
-        return self.amplitudes[key]
+from ..pixels import PixelDistribution, PixelData
 
-    @function_timer
-    def __setitem__(self, key, value):
-        self.amplitudes[name][:] = value
-        return
+from .pipeline import Pipeline
 
-    @function_timer
-    def copy(self):
-        new_amplitudes = TemplateAmplitudes([], self.comm)
-        for name, values in self.amplitudes.items():
-            new_amplitudes.amplitudes[name] = self.amplitudes[name].copy()
-            new_amplitudes.comms[name] = self.comms[name]
-        return new_amplitudes
+from .clear import Clear
 
-    @function_timer
-    def __iadd__(self, other):
-        """Add the provided amplitudes to this one"""
-        if isinstance(other, TemplateAmplitudes):
-            for name, values in self.amplitudes.items():
-                values += other.amplitudes[name]
-        else:
-            for name, values in self.amplitudes.items():
-                values += other
-        return self
+from .copy import Copy
 
-    @function_timer
-    def __isub__(self, other):
-        """Subtract the provided amplitudes from this one"""
-        if isinstance(other, TemplateAmplitudes):
-            for name, values in self.amplitudes.items():
-                values -= other.amplitudes[name]
-        else:
-            for name, values in self.amplitudes.items():
-                values -= other
-        return 
self +from .scan_map import ScanMap - @function_timer - def __imul__(self, other): - """Scale the amplitudes""" - for name, values in self.amplitudes.items(): - values *= other - return self - @function_timer - def __itruediv__(self, other): - """Divide the amplitudes""" - for name, values in self.amplitudes.items(): - values /= other - return self +@trait_docs +class MapMaker(Operator): + """Operator for making maps. + This operator first solves for a maximum likelihood set of template amplitudes + that model the timestream contributions from noise, systematics, etc: -class TemplateCovariance(TOASTMatrix): - def __init__(self): - pass + .. math:: + \left[ M^T N^{-1} Z M + M_p \right] a = M^T N^{-1} Z d + Where `a` are the solved amplitudes and `d` is the input data. `N` is the diagonal + time domain noise covariance. `M` is a matrix of templates that project from the + amplitudes into the time domain, and the `Z` operator is given by: -class ProjectionMatrix(TOASTMatrix): - """Projection matrix: + .. math:: Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} - = I - P B, - where - `P` is the pointing matrix - `N` is the noise matrix and - `B` is the binning operator - """ - - def __init__( - self, - data, - comm, - detweights, - nnz, - white_noise_cov_matrix, - common_flag_mask=1, - flag_mask=1, - ): - self.data = data - self.comm = comm - self.detweights = detweights - self.dist_map = DistPixels(data, comm=self.comm, nnz=nnz, dtype=np.float64) - self.white_noise_cov_matrix = white_noise_cov_matrix - self.common_flag_mask = common_flag_mask - self.flag_mask = flag_mask - - @function_timer - def apply(self, signal): - """Return Z.y""" - self.bin_map(signal.name) - new_signal = signal.copy() - scanned_signal = Signal(self.data, temporary=True, init_val=0) - self.scan_map(scanned_signal.name) - new_signal -= scanned_signal - return new_signal - - @function_timer - def bin_map(self, name): - if self.dist_map.data is not None: - self.dist_map.data.fill(0.0) - # FIXME: OpAccumDiag should support separate detweights for each observation - build_dist_map = OpAccumDiag( - zmap=self.dist_map, - name=name, - detweights=self.detweights[0], - common_flag_mask=self.common_flag_mask, - flag_mask=self.flag_mask, - ) - build_dist_map.exec(self.data) - self.dist_map.allreduce() - covariance_apply(self.white_noise_cov_matrix, self.dist_map) - return - - @function_timer - def scan_map(self, name): - scansim = OpSimScan(input_map=self.dist_map, out=name) - scansim.exec(self.data) - return + Where `P` is the pointing matrix. This operator takes a "Projection" instance + as one of its traits, and that operator performs: -class NoiseMatrix(TOASTMatrix): - def __init__( - self, comm, detweights, weightmap=None, common_flag_mask=1, flag_mask=1 - ): - self.comm = comm - self.detweights = detweights - self.weightmap = weightmap - self.common_flag_mask = common_flag_mask - self.flag_mask = flag_mask - - @function_timer - def apply(self, signal, in_place=False): - """Multiplies the signal with N^{-1}. + .. math:: - Note that the quality flags cause the corresponding diagonal - elements of N^{-1} to be zero. 
- """ - if in_place: - new_signal = signal - else: - new_signal = signal.copy() - for iobs, detweights in enumerate(self.detweights): - for det, detweight in detweights.items(): - new_signal[iobs, det, :] *= detweight - # Set flagged samples to zero - new_signal.apply_flags(self.common_flag_mask, self.flag_mask) - # Scale the signal with the weight map - new_signal.apply_weightmap(self.weightmap) - return new_signal + PROJ = M^T N^{-1} Z - def apply_transpose(self, signal): - # Symmetric matrix - return self.apply(signal) + This projection operator is then used to compute the right hand side of the solver + and for each calculation of the left hand side. + After solving for the template amplitudes, a final map of the signal estimate is + computed using a simple binning: -class PointingMatrix(TOASTMatrix): - def __init__(self): - pass + .. math:: + MAP = ({P'}^T N^{-1} P')^{-1} {P'}^T N^{-1} (y - M a) + Where the "prime" indicates that this final map might be computed using a different + pointing matrix than the one used to solve for the template amplitudes. -class Signal(TOASTVector): - """Signal class wraps the TOAST data object but represents only - one cached signal flavor. """ - def __init__(self, data, name=None, init_val=None, temporary=False): - self.data = data - self.temporary = temporary - if self.temporary: - self.name = get_temporary_name() - else: - self.name = name - if init_val is not None: - cacheinit = OpCacheInit(name=self.name, init_val=init_val) - cacheinit.exec(data) - return - - def __del__(self): - if self.temporary: - cacheclear = OpCacheClear(self.name) - cacheclear.exec(self.data) - free_temporary_name(self.name) - return - - @function_timer - def apply_flags(self, common_flag_mask, flag_mask): - """Set the signal at flagged samples to zero""" - flags_apply = OpFlagsApply( - name=self.name, common_flag_mask=common_flag_mask, flag_mask=flag_mask - ) - flags_apply.exec(self.data) - return - - @function_timer - def apply_weightmap(self, weightmap): - """Scale the signal with the provided weight map""" - if weightmap is None: - return - scanscale = OpScanScale(distmap=weightmap, name=self.name) - scanscale.exec(self.data) - return - - @function_timer - def copy(self): - """Return a new Signal object with independent copies of the - signal vectors. 
- """ - new_signal = Signal(self.data, temporary=True) - copysignal = OpCacheCopy(self.name, new_signal.name, force=True) - copysignal.exec(self.data) - return new_signal - - @function_timer - def __getitem__(self, key): - """Return a reference to a slice of TOD cache""" - iobs, det, todslice = key - tod = self.data.obs[iobs]["tod"] - return tod.local_signal(det, self.name)[todslice] - - @function_timer - def __setitem__(self, key, value): - """Set slice of TOD cache""" - iobs, det, todslice = key - tod = self.data.obs[iobs]["tod"] - tod.local_signal(det, self.name)[todslice] = value - return - - @function_timer - def __iadd__(self, other): - """Add the provided Signal object to this one""" - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - if isinstance(other, Signal): - self[iobs, det, :] += other[iobs, det, :] - else: - self[iobs, det, :] += other - return self - - @function_timer - def __isub__(self, other): - """Subtract the provided Signal object from this one""" - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - if isinstance(other, Signal): - self[iobs, det, :] -= other[iobs, det, :] - else: - self[iobs, det, :] -= other - return self - - @function_timer - def __imul__(self, other): - """Scale the signal""" - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - self[iobs, det, :] *= other - return self - - @function_timer - def __itruediv__(self, other): - """Divide the signal""" - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - for det in tod.local_dets: - self[iobs, det, :] /= other - return self - - -class PCGSolver: - """Solves `x` in A.x = b""" - - def __init__( - self, - comm, - templates, - noise, - projection, - signal, - niter_min=3, - niter_max=100, - convergence_limit=1e-12, - ): - self.comm = comm - if comm is None: - self.rank = 0 - else: - self.rank = comm.rank - self.templates = templates - self.noise = noise - self.projection = projection - self.signal = signal - self.niter_min = niter_min - self.niter_max = niter_max - self.convergence_limit = convergence_limit - - self.rhs = self.templates.apply_transpose( - self.noise.apply(self.projection.apply(self.signal)) - ) - # print("RHS {}: {}".format(self.signal.name, self.rhs)) # DEBUG - return - - @function_timer - def apply_lhs(self, amplitudes): - """Return A.x""" - new_amplitudes = self.templates.apply_transpose( - self.noise.apply(self.projection.apply(self.templates.apply(amplitudes))) - ) - self.templates.add_prior(amplitudes, new_amplitudes) - return new_amplitudes - - @function_timer - def solve(self): - """Standard issue PCG solution of A.x = b - - Returns: - x : the least squares solution - """ + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + projection = Instance( + klass=None, + allow_none=True, + help="This must be an instance of a projection operator", + ) + + map_binning = Instance( + klass=None, + allow_none=True, + help="Binning operator for final map making. 
Default uses same operator as projection.",
+    )
+
+    @traitlets.validate("projection")
+    def _check_projection(self, proposal):
+        proj = proposal["value"]
+        if proj is not None:
+            if not isinstance(proj, Operator):
+                raise traitlets.TraitError("projection should be an Operator instance")
+            # Check that this operator has the traits we expect
+            for trt in ["template_matrix", "det_data", "binning"]:
+                if not proj.has_trait(trt):
+                    msg = "projection operator should have a '{}' trait".format(trt)
+                    raise traitlets.TraitError(msg)
+        return proj
+
+    @traitlets.validate("map_binning")
+    def _check_binning(self, proposal):
+        bin = proposal["value"]
+        if bin is not None:
+            if not isinstance(bin, Operator):
+                raise traitlets.TraitError("map_binning should be an Operator instance")
+            # Check that this operator has the traits we expect
+            for trt in ["det_data", "binned"]:
+                if not bin.has_trait(trt):
+                    msg = "map_binning operator should have a '{}' trait".format(trt)
+                    raise traitlets.TraitError(msg)
+        return bin
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    @function_timer
+    def _exec(self, data, detectors=None, **kwargs):
         log = Logger.get()
-        timer0 = Timer()
-        timer0.start()
-        timer = Timer()
-        timer.start()
-        # Initial guess is zero amplitudes
-        guess = self.templates.zero_amplitudes()
-        # print("guess:", guess)  # DEBUG
-        # print("RHS:", self.rhs)  # DEBUG
-        residual = self.rhs.copy()
-        # print("residual(1):", residual)  # DEBUG
-        residual -= self.apply_lhs(guess)
-        # print("residual(2):", residual)  # DEBUG
-        precond_residual = self.templates.apply_precond(residual)
-        proposal = precond_residual.copy()
-        sqsum = precond_residual.dot(residual)
-        init_sqsum, best_sqsum, last_best = sqsum, sqsum, sqsum
-        if self.rank == 0:
-            log.info("Initial residual: {}".format(init_sqsum))
-        # Iterate to convergence
-        for iiter in range(self.niter_max):
-            if not np.isfinite(sqsum):
-                raise RuntimeError("Residual is not finite")
-            alpha = sqsum
-            alpha /= proposal.dot(self.apply_lhs(proposal))
-            alpha_proposal = proposal.copy()
-            alpha_proposal *= alpha
-            guess += alpha_proposal
-            residual -= self.apply_lhs(alpha_proposal)
-            del alpha_proposal
-            # Prepare for next iteration
-            precond_residual = self.templates.apply_precond(residual)
-            beta = 1 / sqsum
-            # Check for convergence
-            sqsum = precond_residual.dot(residual)
-            if self.rank == 0:
-                timer.report_clear(
-                    "Iter = {:4} relative residual: {:12.4e}".format(
-                        iiter, sqsum / init_sqsum
-                    )
-                )
-            if sqsum < init_sqsum * self.convergence_limit or sqsum < 1e-30:
-                if self.rank == 0:
-                    timer0.report_clear(
-                        "PCG converged after {} iterations".format(iiter)
-                    )
-                break
-            best_sqsum = min(sqsum, best_sqsum)
-            if iiter % 10 == 0 and iiter >= self.niter_min:
-                if last_best < best_sqsum * 2:
-                    if self.rank == 0:
-                        timer0.report_clear(
-                            "PCG stalled after {} iterations".format(iiter)
-                        )
-                    break
-                last_best = best_sqsum
-            # Select the next direction
-            beta *= sqsum
-            proposal *= beta
-            proposal += precond_residual
-        # log.info("{} : Solution: {}".format(self.rank, guess))  # DEBUG
-        return guess
 
-class OpMapMaker(Operator):
-
-    # Choose one bit in the common flags for storing gap information
-    gap_bit = 2 ** 7
-    # Choose one bit in the quality flags for storing processing mask
-    mask_bit = 2 ** 7
-
-    def __init__(
-        self,
-        nside=64,
-        nnz=3,
-        name=None,
-        outdir="out",
-        outprefix="",
-        write_hits=True,
-        
zip_maps=False, - write_wcov_inv=True, - write_wcov=True, - write_binned=True, - write_destriped=True, - write_rcond=True, - rcond_limit=1e-3, - baseline_length=100000, - maskfile=None, - weightmapfile=None, - common_flag_mask=1, - flag_mask=1, - intervals="intervals", - subharmonic_order=None, - fourier2D_order=None, - fourier2D_subharmonics=False, - iter_min=3, - iter_max=100, - use_noise_prior=True, - precond_width=20, - pixels="pixels", - ): - self.nside = nside - self.npix = 12 * self.nside ** 2 - self.name = name - self.nnz = nnz - self.ncov = self.nnz * (self.nnz + 1) // 2 - self.outdir = outdir - self.outprefix = outprefix - self.write_hits = write_hits - self.zip_maps = zip_maps - self.write_wcov_inv = write_wcov_inv - self.write_wcov = write_wcov - self.write_binned = write_binned - self.write_destriped = write_destriped - self.write_rcond = write_rcond - self.rcond_limit = rcond_limit - self.baseline_length = baseline_length - self.maskfile = maskfile - self.weightmap = None - self.weightmapfile = weightmapfile - self.common_flag_mask = common_flag_mask - self.flag_mask = flag_mask - self.intervals = intervals - self.subharmonic_order = subharmonic_order - self.fourier2D_order = fourier2D_order - self.fourier2D_subharmonics = fourier2D_subharmonics - self.iter_min = iter_min - self.iter_max = iter_max - self.use_noise_prior = use_noise_prior - self.precond_width = precond_width - self.pixels = pixels - - def report_timing(self): - # gt.stop_all() - all_timers = gather_timers(comm=self.comm) - names = OrderedDict() - names["OpMapMaker.exec"] = OrderedDict( - [ - ("OpMapMaker.flag_gaps", None), - ("OpMapMaker.get_detweights", None), - ("OpMapMaker.initialize_binning", None), - ("OpMapMaker.bin_map", None), - ("OpMapMaker.load_mask", None), - ("OpMapMaker.load_weightmap", None), - ("OpMapMaker.get_templatematrix", None), - ("OpMapMaker.get_noisematrix", None), - ("OpMapMaker.get_projectionmatrix", None), - ("OpMapMaker.get_solver", None), - ( - "PCGSolver.solve", - OrderedDict( - [ - ("TemplateMatrix.zero_amplitudes", None), - ("PCGSolver.apply_lhs", None), - ("TemplateMatrix.apply_precond", None), - ] - ), - ), - ("TemplateMatrix.clean_signal", None), - ] - ) - names["OpMapMaker.exec"]["PCGSolver.solve"][ - "PCGSolver.apply_lhs" - ] = OrderedDict( - [ - ( - "TemplateMatrix.apply_transpose", - OrderedDict( - [ - ("OffsetTemplate.project_signal", None), - ("SubharmonicTemplate.project_signal", None), - ("fourier2DTemplate.project_signal", None), - ] - ), - ), - ("NoiseMatrix.apply", None), - ( - "ProjectionMatrix.apply", - OrderedDict( - [ - ( - "ProjectionMatrix.bin_map", - OrderedDict( - [ - ( - "OpAccumDiag.exec", - OrderedDict( - [ - ( - "OpAccumDiag.exec.apply_flags", - None, - ), - ( - "OpAccumDiag.exec.global_to_local", - None, - ), - ("cov_accum_zmap", None), - ] - ), - ), - ("covariance_apply", None), - ] - ), - ), - ( - "ProjectionMatrix.scan_map", - OrderedDict( - [ - ( - "OpSimScan.exec", - OrderedDict( - [ - ( - "OpSimScan.exec.global_to_local", - None, - ), - ("OpSimScan.exec.scan_map", None), - ] - ), - ) - ] - ), - ), - ] - ), - ), - ( - "TemplateMatrix.apply", - OrderedDict( - [ - ("OffsetTemplate.add_to_signal", None), - ("SubharmonicTemplate.add_to_signal", None), - ("fourier2DTemplate.add_to_signal", None), - ] - ), - ), - ("TemplateMatrix.add_prior", None), - ] - ) - if self.rank == 0: - print("all_timers:", all_timers) # DEBUG - - def report_line(name, indent): - full_name = name - if full_name not in all_timers: - full_name += " (function_timer)" - if 
full_name not in all_timers: - return - t = all_timers[full_name]["time_max"] - print(indent, "{:.<60}{:8.1f}".format(name, t)) - return - - def report(names, indent): - if names is None: - return - if isinstance(names, str): - report_line(names, indent) - else: - for name, entries in names.items(): - report_line(name, indent) - report(entries, " " * 8 + indent) - - report(names, "-") - print(flush=True) - return - - @function_timer - def get_noisematrix(self, data): - timer = Timer() - timer.start() - noise = NoiseMatrix( - self.comm, - self.detweights, - self.weightmap, - common_flag_mask=(self.common_flag_mask | self.gap_bit), - flag_mask=(self.flag_mask | self.mask_bit), - ) - if self.rank == 0: - timer.report_clear("Initialize projection matrix") - return noise - - @function_timer - def get_projectionmatrix(self, data): - timer = Timer() - timer.start() - projection = ProjectionMatrix( - data, - self.comm, - self.detweights, - self.nnz, - self.white_noise_cov_matrix, - common_flag_mask=(self.common_flag_mask | self.gap_bit), - # Do not add mask_bit here since it is not - # included in the white noise matrices - flag_mask=self.flag_mask, - ) - if self.rank == 0: - timer.report_clear("Initialize projection matrix") - return projection - - @function_timer - def get_templatematrix(self, data): - timer = Timer() - timer.start() - log = Logger.get() - templatelist = [] - if self.baseline_length is not None: - if self.rank == 0: - log.info( - "Initializing offset template, step_length = {}".format( - self.baseline_length - ) - ) - templatelist.append( - OffsetTemplate( - data, - self.detweights, - step_length=self.baseline_length, - intervals=self.intervals, - common_flag_mask=(self.common_flag_mask | self.gap_bit), - flag_mask=(self.flag_mask | self.mask_bit), - use_noise_prior=self.use_noise_prior, - precond_width=self.precond_width, - ) - ) - if self.subharmonic_order is not None: - if self.rank == 0: - log.info( - "Initializing subharmonic template, order = {}".format( - self.subharmonic_order - ) - ) - templatelist.append( - SubharmonicTemplate( - data, - self.detweights, - order=self.subharmonic_order, - intervals=self.intervals, - common_flag_mask=(self.common_flag_mask | self.gap_bit), - flag_mask=(self.flag_mask | self.mask_bit), - ) - ) - if self.fourier2D_order is not None: - log.info( - "Initializing fourier2D template, order = {}, subharmonics = {}".format( - self.fourier2D_order, - self.fourier2D_subharmonics, - ) - ) - templatelist.append( - Fourier2DTemplate( - data, - self.detweights, - order=self.fourier2D_order, - fit_subharmonics=self.fourier2D_subharmonics, - intervals=self.intervals, - common_flag_mask=(self.common_flag_mask | self.gap_bit), - flag_mask=(self.flag_mask | self.mask_bit), - ) - ) - if len(templatelist) == 0: - if self.rank == 0: - log.info("No templates to fit, no destriping done.") - templates = None - else: - templates = TemplateMatrix(data, self.comm, templatelist) - if self.rank == 0: - timer.report_clear("Initialize templates") - return templates - - @function_timer - def get_solver(self, data, templates, noise, projection, signal): - timer = Timer() - timer.start() - solver = PCGSolver( - self.comm, - templates, - noise, - projection, - signal, - niter_min=self.iter_min, - niter_max=self.iter_max, - ) - if self.rank == 0: - timer.report_clear("Initialize PCG solver") - return solver - - @function_timer - def load_mask(self, data): - """Load processing mask and generate appropriate flag bits""" - if self.maskfile is None: - return - log = 
Logger.get() - timer = Timer() - timer.start() - if self.rank == 0 and not os.path.isfile(self.maskfile): - raise RuntimeError( - "Processing mask does not exist: {}".format(self.maskfile) - ) - distmap = DistPixels(data, comm=self.comm, nnz=1, dtype=np.float32) - distmap.read_healpix_fits(self.maskfile) - if self.rank == 0: - timer.report_clear("Read processing mask from {}".format(self.maskfile)) - - scanmask = OpScanMask(distmap=distmap, flagmask=self.mask_bit) - scanmask.exec(data) - - if self.rank == 0: - timer.report_clear("Apply processing mask") - - return - - @function_timer - def load_weightmap(self, data): - """Load weight map""" - if self.weightmapfile is None: - return - log = Logger.get() - timer = Timer() - timer.start() - if self.rank == 0 and not os.path.isfile(self.weightmapfile): + # Set up projection + if self.projection is None: raise RuntimeError( - "Weight map does not exist: {}".format(self.weightmapfile) + "You must set the projection trait before calling exec()" ) - self.weightmap = DistPixels(data, comm=self.comm, nnz=1, dtype=np.float32) - self.weightmap.read_healpix_fits(self.weightmapfile) - if self.rank == 0: - timer.report_clear("Read weight map from {}".format(self.weightmapfile)) - return - @function_timer - def exec(self, data, comm=None): - log = Logger.get() - timer = Timer() + self.projection.det_data = self.det_data - # Initialize objects - if comm is None: - self.comm = data.comm.comm_world - else: - self.comm = comm - if self.comm is None: - self.rank = 0 - else: - self.rank = self.comm.rank - self.flag_gaps(data) - self.get_detweights(data) - self.initialize_binning(data) - if self.write_binned: - self.bin_map(data, "binned") - self.load_mask(data) - self.load_weightmap(data) + # Check map binning + if self.map_binning is None: + self.map_binning = self.projection.binning - # Solve template amplitudes + # Get the template matrix used in the projection + template_matrix = self.projection.template_matrix - templates = self.get_templatematrix(data) - if templates is None: - return - noise = self.get_noisematrix(data) - projection = self.get_projectionmatrix(data) - signal = Signal(data, name=self.name) - solver = self.get_solver(data, templates, noise, projection, signal) - timer.start() - amplitudes = solver.solve() - if self.rank == 0: - timer.report_clear("Solve amplitudes") + # Get the zero-valued starting amplitudes - # Clean TOD - templates.clean_signal(signal, amplitudes) - if self.rank == 0: - timer.report_clear("Clean TOD") + # Compute the RHS - if self.write_destriped: - self.bin_map(data, "destriped") + # Solve for amplitudes return - @function_timer - def flag_gaps(self, data): - """Add flag bits between the intervals""" - timer = Timer() - timer.start() - flag_gaps = OpFlagGaps(common_flag_value=self.gap_bit, intervals=self.intervals) - flag_gaps.exec(data) - if self.rank == 0: - timer.report_clear("Flag gaps") + def _finalize(self, data, **kwargs): return - @function_timer - def bin_map(self, data, suffix): - log = Logger.get() - timer = Timer() - - dist_map = DistPixels(data, comm=self.comm, nnz=self.nnz, dtype=np.float64) - if dist_map.data is not None: - dist_map.data.fill(0.0) - # FIXME: OpAccumDiag should support separate detweights for each observation - build_dist_map = OpAccumDiag( - zmap=dist_map, - name=self.name, - detweights=self.detweights[0], - common_flag_mask=(self.common_flag_mask | self.gap_bit), - flag_mask=self.flag_mask, - ) - build_dist_map.exec(data) - dist_map.allreduce() - if self.rank == 0: - 
timer.report_clear(" Build noise-weighted map") + def _requires(self): + # This operator require everything that its sub-operators needs. + req = self.projection.requires() + if self.map_binning is not None: + req.update(self.map_binning.requires()) + req["detdata"].append(self.det_data) + return req - covariance_apply(self.white_noise_cov_matrix, dist_map) - if self.rank == 0: - timer.report_clear(" Apply noise covariance") - - fname = os.path.join(self.outdir, self.outprefix + suffix + ".fits") - if self.zip_maps: - fname += ".gz" - dist_map.write_healpix_fits(fname) - if self.rank == 0: - timer.report_clear(" Write map to {}".format(fname)) - - return - - @function_timer - def get_detweights(self, data): - """Each observation will have its own detweight dictionary""" - timer = Timer() - timer.start() - self.detweights = [] - for obs in data.obs: - tod = obs["tod"] - if "noise" in obs: - noise = obs["noise"] - else: - noise = None - detweights = {} - for det in tod.local_dets: - if noise is None: - noisevar = 1 - else: - # Determine an approximate white noise level, - # accounting for the fact that the PSD may have a - # transfer function roll-off near Nyquist - freq = noise.freq(det) - psd = noise.psd(det) - rate = noise.rate(det) - ind = np.logical_and(freq > rate * 0.2, freq < rate * 0.4) - noisevar = np.median(psd[ind]) - detweights[det] = 1 / noisevar - self.detweights.append(detweights) - if self.rank == 0: - timer.report_clear("Get detector weights") - return - - @function_timer - def initialize_binning(self, data): - log = Logger.get() - timer = Timer() - timer.start() - - if self.rank == 0: - os.makedirs(self.outdir, exist_ok=True) - - self.white_noise_cov_matrix = DistPixels( - data, comm=self.comm, nnz=self.ncov, dtype=np.float64 - ) - if self.white_noise_cov_matrix.data is not None: - self.white_noise_cov_matrix.data.fill(0.0) - - hits = DistPixels(data, comm=self.comm, nnz=1, dtype=np.int64) - if hits.data is not None: - hits.data.fill(0) - - # compute the hits and covariance once, since the pointing and noise - # weights are fixed. 
- # FIXME: OpAccumDiag should support separate weights for each observation - - build_wcov = OpAccumDiag( - detweights=self.detweights[0], - invnpp=self.white_noise_cov_matrix, - hits=hits, - common_flag_mask=(self.common_flag_mask | self.gap_bit), - flag_mask=self.flag_mask, - ) - build_wcov.exec(data) - - if self.comm is not None: - self.comm.Barrier() - if self.rank == 0: - timer.report_clear("Accumulate N_pp'^1") - - self.white_noise_cov_matrix.allreduce() - - if self.comm is not None: - self.comm.Barrier() - if self.rank == 0: - timer.report_clear("All reduce N_pp'^1") - - if self.write_hits: - hits.allreduce() - fname = os.path.join(self.outdir, self.outprefix + "hits.fits") - if self.zip_maps: - fname += ".gz" - hits.write_healpix_fits(fname) - if self.rank == 0: - log.info("Wrote hits to {}".format(fname)) - if self.rank == 0: - timer.report_clear("Write hits") - - if self.write_wcov_inv: - fname = os.path.join(self.outdir, self.outprefix + "invnpp.fits") - if self.zip_maps: - fname += ".gz" - self.white_noise_cov_matrix.write_healpix_fits(fname) - if self.rank == 0: - log.info("Wrote inverse white noise covariance to {}".format(fname)) - if self.rank == 0: - timer.report_clear("Write N_pp'^1") - - if self.write_rcond: - # Reciprocal condition numbers - rcond = covariance_rcond(self.white_noise_cov_matrix) - if self.rank == 0: - timer.report_clear("Compute reciprocal condition numbers") - fname = os.path.join(self.outdir, self.outprefix + "rcond.fits") - if self.zip_maps: - fname += ".gz" - rcond.write_healpix_fits(fname) - if self.rank == 0: - log.info("Wrote reciprocal condition numbers to {}".format(fname)) - if self.rank == 0: - timer.report_clear("Write rcond") - - # Invert the white noise covariance in each pixel - covariance_invert(self.white_noise_cov_matrix, self.rcond_limit) - if self.rank == 0: - timer.report_clear("Invert N_pp'^1") - - if self.write_wcov: - fname = os.path.join(self.outdir, self.outprefix + "npp.fits") - if self.zip_maps: - fname += ".gz" - self.white_noise_cov_matrix.write_healpix_fits(fname) - if self.rank == 0: - log.info("Wrote white noise covariance to {}".format(fname)) - if self.rank == 0: - timer.report_clear("Write N_pp'") + def _provides(self): + prov = dict() + if self.map_binning is not None: + prov["meta"] = [self.map_binning.binned] + else: + prov["meta"] = [self.projection.binning.binned] + return prov - return + def _accelerators(self): + return list() diff --git a/src/toast/future_ops/mapmaker_binning.py b/src/toast/future_ops/mapmaker_binning.py index 682ccefed..c0cc87f16 100644 --- a/src/toast/future_ops/mapmaker_binning.py +++ b/src/toast/future_ops/mapmaker_binning.py @@ -65,10 +65,6 @@ class BinMap(Operator): help="This must be an instance of a pointing operator", ) - pixels = Unicode("pixels", help="Observation detdata key for pixel indices") - - weights = Unicode("weights", help="Observation detdata key for Stokes weights") - noise_model = Unicode( "noise_model", help="Observation key containing the noise model" ) @@ -89,7 +85,7 @@ def _check_flag_mask(self, proposal): return check @traitlets.validate("sync_type") - def _check_flag_mask(self, proposal): + def _check_sync_type(self, proposal): check = proposal["value"] if check != "allreduce" and check != "alltoallv": raise traitlets.TraitError("Invalid communication algorithm") @@ -101,18 +97,11 @@ def _check_pointing(self, proposal): if pntg is not None: if not isinstance(pntg, Operator): raise traitlets.TraitError("pointing should be an Operator instance") - if not 
pntg.has_trait("pixels"): - raise traitlets.TraitError( - "pointing operator should have a 'pixels' trait" - ) - if not pntg.has_trait("weights"): - raise traitlets.TraitError( - "pointing operator should have a 'weights' trait" - ) - if not pntg.has_trait("create_dist"): - raise traitlets.TraitError( - "pointing operator should have a 'create_dist' trait" - ) + # Check that this operator has the traits we expect + for trt in ["pixels", "weights", "create_dist"]: + if not pntg.has_trait(trt): + msg = "pointing operator should have a '{}' trait".format(trt) + raise traitlets.TraitError(msg) return pntg def __init__(self, **kwargs): @@ -142,20 +131,18 @@ def _exec(self, data, detectors=None, **kwargs): # Set outputs of the pointing operator - self.pointing.pixels = self.pixels - self.pointing.weights = self.weights self.pointing.create_dist = None # Set up clearing of the pointing matrices - clear_pointing = Clear(detdata=[self.pixels, self.weights]) + clear_pointing = Clear(detdata=[self.pointing.pixels, self.pointing.weights]) # Noise weighted map build_zmap = BuildNoiseWeighted( pixel_dist=self.pixel_dist, - pixels=self.pixels, - weights=self.weights, + pixels=self.pointing.pixels, + weights=self.pointing.weights, noise_model=self.noise_model, det_data=self.det_data, det_flags=self.det_flags, @@ -184,7 +171,7 @@ def _exec(self, data, detectors=None, **kwargs): covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv")) # Store products - data[self.binned] = binned + data[self.binned] = binned_map return @@ -192,11 +179,9 @@ def _finalize(self, data, **kwargs): return def _requires(self): - req = { - "meta": [self.noise_model, self.pixel_dist, self.covariance], - "shared": list(), - "detdata": [self.det_data], - } + req = self.pointing.requires() + req["meta"].extend([self.noise_model, self.pixel_dist, self.covariance]) + req["detdata"].extend([self.det_data]) if self.det_flags is not None: req["detdata"].append(self.det_flags) return req @@ -204,7 +189,7 @@ def _requires(self): def _provides(self): prov = {"meta": [self.binned], "shared": list(), "detdata": list()} if self.save_pointing: - prov["detdata"].extend([self.pixels, self.weights]) + prov["detdata"].extend([self.pointing.pixels, self.pointing.weights]) return prov def _accelerators(self): diff --git a/src/toast/future_ops/mapmaker_projection.py b/src/toast/future_ops/mapmaker_projection.py new file mode 100644 index 000000000..3ada2cf1f --- /dev/null +++ b/src/toast/future_ops/mapmaker_projection.py @@ -0,0 +1,203 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +import numpy as np + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool + +from ..operator import Operator + +from ..timing import function_timer + +from ..pixels import PixelDistribution, PixelData + +from .pipeline import Pipeline + +from .clear import Clear + +from .copy import Copy + +from .scan_map import ScanMap + + +@trait_docs +class Projection(Operator): + """Operator for map-making projection to template amplitudes. + + This operator performs: + + .. math:: + a = M^T N^{-1} Z d + + Where `d` is a set of timestreams and `a` are the projected amplitudes. `N` is + the time domain diagonal noise covariance and `M` is a set of templates. The `Z` + matrix is given by: + + .. 
math:: + Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} + + Where `P` is the pointing matrix. In terms of the binning operation this is: + + .. math:: + Z = I - P B + + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + binning = Instance( + klass=Operator, + allow_none=True, + help="This must be an instance of a binning operator", + ) + + template_matrix = Instance( + klass=Operator, + allow_none=True, + help="This must be an instance of a template matrix operator", + ) + + @traitlets.validate("binning") + def _check_binning(self, proposal): + bin = proposal["value"] + if bin is not None: + if not isinstance(bin, Operator): + raise traitlets.TraitError("binning should be an Operator instance") + # Check that this operator has the traits we expect + for trt in ["pointing", "det_data", "binned"]: + if not bin.has_trait(trt): + msg = "binning operator should have a '{}' trait".format(trt) + raise traitlets.TraitError(msg) + return bin + + @traitlets.validate("template_matrix") + def _check_matrix(self, proposal): + mat = proposal["value"] + if mat is not None: + if not isinstance(mat, Operator): + raise traitlets.TraitError( + "template_matrix should be an Operator instance" + ) + # Check that this operator has the traits we expect + for trt in ["templates", "amplitudes", "det_data", "transpose"]: + if not mat.has_trait(trt): + msg = "template_matrix operator should have a '{}' trait".format( + trt + ) + raise traitlets.TraitError(msg) + return mat + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + # Check that the detector data is set + if self.det_data is None: + raise RuntimeError("You must set the det_data trait before calling exec()") + + # Set data input for binning + self.binning.det_data = self.det_data + + # Use the same pointing operator as the binning + pointing = self.binning.pointing + + # Set up operator for optional clearing of the pointing matrices + clear_pointing = Clear(detdata=[pointing.pixels, pointing.weights]) + + # Name of the temporary detdata created + det_temp = "temp_projection" + + # Copy data operator + copy_det = Copy( + detdata=[ + (self.det_data, det_temp), + ] + ) + + # Set up map-scanning operator + scan_map = ScanMap( + pixels=pointing.pixels, + weights=pointing.weights, + map_key=self.binning.binned, + det_data=det_temp, + subtract=True, + ) + + # Set up noise weighting operator + noise_weight = NoiseWeight( + noise_model=self.binning.noise_model, det_data=det_temp + ) + + # Set up template matrix operator + + self.template_matrix.transpose = True + self.template_matrix.det_data = det_temp + + # Create a pipeline that projects the binned map and applies noise + # weights and templates. + + proj_pipe = None + if self.binning.save_pointing: + # Process all detectors at once + proj_pipe = Pipeline(detector_sets=["ALL"]) + proj_pipe.operators = [ + copy_det, + pointing, + scan_map, + noise_weight, + self.template_matrix, + ] + else: + # Process one detector at a time and clear pointing after each one. + proj_pipe = Pipeline(detector_sets=["SINGLE"]) + proj_pipe.operators = [ + copy_det, + pointing, + scan_map, + clear_pointing, + noise_weight, + self.template_matrix, + ] + + # Compute the binned map. + + self.binning.apply(data, detectors=detectors) + + # Project and apply template matrix. 
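+ # As a sketch of what the pipeline above computes for each detector + # (see the class docstring): a = M^T N^{-1} (d - P B d), where B d is + # the binned map produced by self.binning above.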
+ + proj_pipe.apply(data, detectors=detectors) + + return + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + # This operator requires everything that its sub-operators need. + req = self.binning.requires() + req.update(self.template_matrix.requires()) + req["detdata"].append(self.det_data) + return req + + def _provides(self): + prov = {"meta": list(), "shared": list(), "detdata": list()} + if self.binning.save_pointing: + prov["detdata"].extend( + [self.binning.pointing.pixels, self.binning.pointing.weights] + ) + return prov + + def _accelerators(self): + return list() diff --git a/src/toast/future_ops/mapmaker_templates.py b/src/toast/future_ops/mapmaker_templates.py new file mode 100644 index 000000000..b022206c4 --- /dev/null +++ b/src/toast/future_ops/mapmaker_templates.py @@ -0,0 +1,126 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool, List + +from ..operator import Operator + +from ..timing import function_timer + +from ..templates.template import Template + + +@trait_docs +class TemplateMatrix(Operator): + """Operator for projecting or accumulating template amplitudes.""" + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + templates = List( + None, allow_none=True, help="This should be a list of Template instances" + ) + + amplitudes = Unicode(None, allow_none=True, help="Data key for template amplitudes") + + transpose = Bool(False, help="If True, apply the transpose.") + + @traitlets.validate("templates") + def _check_templates(self, proposal): + temps = proposal["value"] + if temps is None: + return temps + for tp in temps: + if not isinstance(tp, Template): + raise traitlets.TraitError( + "templates must be a list of Template instances or None" + ) + return temps + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._initialized = False + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + # Check that the detector data is set + if self.det_data is None: + raise RuntimeError("You must set the det_data trait before calling exec()") + + # Check that amplitudes is set + if self.amplitudes is None: + raise RuntimeError( + "You must set the amplitudes trait before calling exec()" + ) + + # On the first call, we initialize all templates using the Data instance. + if not self._initialized: + for tmpl in self.templates: + tmpl.data = data + self._initialized = True + + # Set the data we are using + for tmpl in self.templates: + tmpl.det_data = self.det_data + + if self.transpose: + if self.amplitudes not in data: + # The output template amplitudes do not yet exist. Create these with + # all zero values. 
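+ # A sketch of the container created below; the "offsets" key is purely + # illustrative, since the real keys are the names of the Template + # instances: + # data[self.amplitudes] = {"offsets": <zero Amplitudes>, ...}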
+ data[self.amplitudes] = dict() + for tmpl in self.templates: + data[self.amplitudes][tmpl.name] = tmpl.zeros() + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + for d in dets: + for tmpl in self.templates: + tmpl.project_signal(d, data[self.amplitudes][tmpl.name]) + else: + if self.amplitudes not in data: + msg = "Template amplitudes '{}' do not exist in data".format( + self.amplitudes + ) + log.error(msg) + raise RuntimeError(msg) + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + for d in dets: + for tmpl in self.templates: + tmpl.add_to_signal(d, data[self.amplitudes][tmpl.name]) + return + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + req = dict() + if self.transpose: + req["detdata"] = [self.det_data] + return req + + def _provides(self): + prov = dict() + if not self.transpose: + prov["detdata"] = [self.det_data] + return prov + + def _accelerators(self): + return list() diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/future_ops/mapmaker_utils.py index 2244c396c..8fa6a87c8 100644 --- a/src/toast/future_ops/mapmaker_utils.py +++ b/src/toast/future_ops/mapmaker_utils.py @@ -72,7 +72,7 @@ def _check_flag_mask(self, proposal): return check @traitlets.validate("sync_type") - def _check_flag_mask(self, proposal): + def _check_sync_type(self, proposal): check = proposal["value"] if check != "allreduce" and check != "alltoallv": raise traitlets.TraitError("Invalid communication algorithm") @@ -223,7 +223,7 @@ def _check_flag_mask(self, proposal): return check @traitlets.validate("sync_type") - def _check_flag_mask(self, proposal): + def _check_sync_type(self, proposal): check = proposal["value"] if check != "allreduce" and check != "alltoallv": raise traitlets.TraitError("Invalid communication algorithm") @@ -421,7 +421,7 @@ def _check_flag_mask(self, proposal): return check @traitlets.validate("sync_type") - def _check_flag_mask(self, proposal): + def _check_sync_type(self, proposal): check = proposal["value"] if check != "allreduce" and check != "alltoallv": raise traitlets.TraitError("Invalid communication algorithm") @@ -446,6 +446,10 @@ def _exec(self, data, detectors=None, **kwargs): ) raise RuntimeError(msg) + # Check that the detector data is set + if self.det_data is None: + raise RuntimeError("You must set the det_data trait before calling exec()") + dist = data[self.pixel_dist] if data.comm.world_rank == 0: log.debug( @@ -472,12 +476,6 @@ def _exec(self, data, detectors=None, **kwargs): noise = ob[self.noise_model] - # Check that the detector data is set - if self.det_data is None: - raise RuntimeError( - "You must set the det_data trait before calling exec()" - ) - for det in dets: # The pixels and weights for this detector. 
pix = ob.detdata[self.pixels] @@ -619,10 +617,6 @@ class CovarianceAndHits(Operator): help="This must be an instance of a pointing operator", ) - pixels = Unicode("pixels", help="Observation detdata key for pixel indices") - - weights = Unicode("weights", help="Observation detdata key for Stokes weights") - noise_model = Unicode( "noise_model", help="Observation key containing the noise model" ) @@ -647,7 +641,7 @@ def _check_flag_mask(self, proposal): return check @traitlets.validate("sync_type") - def _check_flag_mask(self, proposal): + def _check_sync_type(self, proposal): check = proposal["value"] if check != "allreduce" and check != "alltoallv": raise traitlets.TraitError("Invalid communication algorithm") @@ -688,13 +682,11 @@ def _exec(self, data, detectors=None, **kwargs): # Set outputs of the pointing operator - self.pointing.pixels = self.pixels - self.pointing.weights = self.weights self.pointing.create_dist = None # Set up clearing of the pointing matrices - clear_pointing = Clear(detdata=[self.pixels, self.weights]) + clear_pointing = Clear(detdata=[self.pointing.pixels, self.pointing.weights]) # If we do not have a pixel distribution yet, we must make one pass through # the pointing to build this first. @@ -725,7 +717,7 @@ def _exec(self, data, detectors=None, **kwargs): self.pointing, ] else: - # Run one detector at time and discard. + # Run one detector at a time and discard. pixel_dist_pipe = Pipeline(detector_sets=["SINGLE"]) pixel_dist_pipe.operators = [ self.pointing, @@ -740,7 +732,7 @@ def _exec(self, data, detectors=None, **kwargs): build_hits = BuildHitMap( pixel_dist=self.pixel_dist, - pixels=self.pixels, + pixels=self.pointing.pixels, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, sync_type=self.sync_type, @@ -750,8 +742,8 @@ def _exec(self, data, detectors=None, **kwargs): build_invcov = BuildInverseCovariance( pixel_dist=self.pixel_dist, - pixels=self.pixels, - weights=self.weights, + pixels=self.pointing.pixels, + weights=self.pointing.weights, noise_model=self.noise_model, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, @@ -796,11 +788,8 @@ def _finalize(self, data, **kwargs): return def _requires(self): - req = { - "meta": [self.noise_model], - "shared": list(), - "detdata": list(), - } + req = self.pointing.requires() + req["meta"].append(self.noise_model) if self.det_flags is not None: req["detdata"].append(self.det_flags) return req diff --git a/src/toast/future_ops/noise_weight.py b/src/toast/future_ops/noise_weight.py new file mode 100644 index 000000000..bdfc7d40e --- /dev/null +++ b/src/toast/future_ops/noise_weight.py @@ -0,0 +1,82 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import numpy as np + +import traitlets + +from ..utils import Environment, Logger + +from ..timing import function_timer, Timer + +from ..noise_sim import AnalyticNoise + +from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity + +from ..operator import Operator + + +@trait_docs +class NoiseWeight(Operator): + """Apply diagonal noise weighting to detector data. + + This simple operator takes the detector weight from the specified noise model and + applies it to the timestream values. 
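+ + A minimal sketch of the intended effect on one detector (the data key and + detector name here are hypothetical; the noise model is assumed to return an + inverse-variance weight): + + w = noise.detector_weight("det_0") # e.g. 1 / sigma**2 + ob.detdata["signal"]["det_0"] *= w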
+ + """ + + # Class traits + + API = traitlets.Int(0, help="Internal interface version for this operator") + + noise_model = traitlets.Unicode( + "noise_model", help="The observation key for storing the noise model" + ) + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + + # Check that the noise model exists + if self.noise_model not in ob: + msg = "Noise model {} does not exist in observation {}".format( + self.noise_model, ob.name + ) + raise RuntimeError(msg) + + noise = ob[self.noise_model] + + for d in dets: + # Get the detector weight from the noise model. + detweight = noise.detector_weight(det) + + # Apply + ob.detdata[self.det_data][d] *= detweight + return + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + req = {"meta": [self.noise_model], "detdata": [self.det_data]} + return req + + def _provides(self): + return dict() + + def _accelerators(self): + return list() diff --git a/src/toast/future_ops/scan_map.py b/src/toast/future_ops/scan_map.py new file mode 100644 index 000000000..921b1e423 --- /dev/null +++ b/src/toast/future_ops/scan_map.py @@ -0,0 +1,162 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +import numpy as np + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool + +from ..operator import Operator + +from ..timing import function_timer + +from ..pixels import PixelDistribution, PixelData + +from .._libtoast import scan_map_float64, scan_map_float32 + + +@trait_docs +class ScanMap(Operator): + """Operator which uses the pointing matrix to scan timestream values from a map. + + The map must be a PixelData instance with either float32 or float64 values. The + values can either be accumulated or subtracted from the input timestream, and the + input timestream can be optionally zeroed out beforehand. 
+ + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") + + weights = Unicode("weights", help="Observation detdata key for Stokes weights") + + map_key = Unicode( + None, + allow_none=True, + help="The Data key where the map is located", + ) + + subtract = Bool( + False, help="If True, subtract the map timestream instead of accumulating" + ) + + zero = Bool(False, help="If True, zero the data before accumulating / subtracting") + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + # Check that the detector data is set + if self.det_data is None: + raise RuntimeError("You must set the det_data trait before calling exec()") + + # Check that the map is set + if self.map_key is None: + raise RuntimeError("You must set the map_key trait before calling exec()") + if self.map_key not in data: + msg = "The map_key '{}' does not exist in the data".format(self.map_key) + raise RuntimeError(msg) + + map_data = data[self.map_key] + if not isinstance(map_data, PixelData): + raise RuntimeError("The map to scan must be a PixelData instance") + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + + # Temporary array, re-used for all detectors + maptod_raw = AlignedF64.zeros(ob.n_local_samples) + maptod = maptod_raw.array() + + for det in dets: + # The pixels, weights, and data. + pix = ob.detdata[self.pixels][det] + wts = ob.detdata[self.weights][det] + ddata = ob.detdata[self.det_data][det] + + # Get local submap and pixels + local_sm, local_pix = dist.global_pixel_to_submap(pix) + + # We support projecting from either float64 or float32 maps. + + maptod[:] = 0.0 + + if map_data.dtype.char == "d": + scan_map_float64( + map_data.distribution.n_pix_submap, + map_data.n_value, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + map_data.raw, + wts.astype(np.float64).reshape(-1), + maptod, + ) + elif map_data.dtype.char == "f": + scan_map_float32( + map_data.distribution.n_pix_submap, + map_data.n_value, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + map_data.raw, + wts.astype(np.float64).reshape(-1), + maptod, + ) + else: + raise RuntimeError( + "Projection supports only float32 and float64 binned maps" + ) + + # zero-out if needed + if self.zero: + ddata[:] = 0.0 + + # Add or subtract. Note that the map scanned timestream will have + # zeros anywhere that the pointing is bad, but those samples (and + # any other detector flags) should be handled at other steps of the + # processing. 
+ if self.subtract: + ddata -= maptod + else: + ddata += maptod + + del maptod + maptod_raw.clear() + del maptod_raw + + return + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + req = { + "meta": [self.map_key], + "shared": list(), + "detdata": [self.pixels, self.weights, self.det_data], + } + return req + + def _provides(self): + prov = {"meta": list(), "shared": list(), "detdata": list()} + return prov + + def _accelerators(self): + return list() diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py new file mode 100644 index 000000000..239f9dfb4 --- /dev/null +++ b/src/toast/templates/template.py @@ -0,0 +1,415 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + + +import traitlets + +import numpy as np + +from ..utils import ( + Logger, + AlignedF32, + AlignedF64, +) + +from ..mpi import MPI + +from ..traits import TraitConfig, Unicode, Instance + +from ..data import Data + + +class Template(TraitConfig): + """Base class for timestream templates. + + A template defines a mapping to / from timestream values to a set of template + amplitudes. These amplitudes are usually quantities being solved as part of the + map-making. Examples of templates might be destriping baseline offsets, + azimuthally binned ground pickup, etc. + + The template amplitude data may be distributed in a variety of ways. For some + types of templates, each process may have its own unique set of amplitudes based + on the data that they have locally. In other cases, every process may have a full + local copy of all template amplitudes. There might also be cases where each + process has a non-unique subset of amplitude values (similar to the way that + pixel domain quantities are distributed). + + """ + + # Note: The TraitConfig base class defines a "name" attribute. + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + data = Instance( + klass=Data, + allow_none=True, + help="This must be an instance of a Data class (or None)", + ) + + @traitlets.validate("data") + def _check_data(self, proposal): + dat = proposal["value"] + if dat is not None: + if not isinstance(dat, Data): + raise traitlets.TraitError("data should be a Data instance") + # Call the instance initialization. + self.initialize(dat) + return dat + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _initialize(self, newdata): + raise NotImplementedError("Derived class must implement _initialize()") + + def initialize(self, newdata): + """Initialize instance after the data trait has been set. + + Templates use traits to set their properties, which allows them to be + configured easily with the constructor overrides and enables them to be built + from config files. However, the `data` trait may not be set at construction + time and this trait is likely used to compute the number of template amplitudes + that will be used and other parameters. This explicit initialize method is + called whenever the `data` trait is set. + + """ + self._initialize(newdata) + + def _zeros(self): + raise NotImplementedError("Derived class must implement _zeros()") + + def zeros(self): + """Return an Amplitudes object filled with zeros. + + This returns an Amplitudes instance with appropriate dimensions for this + template. This will raise an exception if called before the `data` trait + is set. + + Returns: + (Amplitudes): Zero amplitudes. 
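+ + A typical calling pattern is sketched below (construction arguments are + omitted and depend on the derived template class): + + tmpl.data = data # triggers initialize() + amps = tmpl.zeros() # zero Amplitudes sized for this template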
+ + """ + if self.data is None: + raise RuntimeError("You must set the data trait before using a template") + return self._zeros() + + def _add_to_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _add_to_signal()") + + def add_to_signal(self, detector, amplitudes): + """Accumulate the projected amplitudes to a timestream. + + This performs the operation: + + .. math:: + s += F \\cdot a + + Where `s` is the det_data signal, `F` is the template and `a` is the amplitudes. + + Args: + detector (str): The detector name. + amplitudes (Amplitudes): The Amplitude values for this template. + + Returns: + None + + """ + if self.data is None: + raise RuntimeError("You must set the data trait before using a template") + return self._add_to_signal(detector, amplitudes) + + def _project_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _project_signal()") + + def project_signal(self, detector, amplitudes): + """Project a timestream into template amplitudes. + + This performs: + + .. math:: + a += F^T \\cdot s + + Where `s` is the det_data signal, `F` is the template and `a` is the amplitudes. + + Args: + detector (str): The detector name. + amplitudes (Amplitudes): The Amplitude values for this template. + + Returns: + None + + """ + if self.data is None: + raise RuntimeError("You must set the data trait before using a template") + self._project_signal(detector, amplitudes) + + def _add_prior(self, amplitudes_in, amplitudes_out): + # Not all Templates implement the prior + return + + def add_prior(self, amplitudes_in, amplitudes_out): + """Apply the inverse amplitude covariance as a prior. + + This performs: + + .. math:: + a' += {C_a}^{-1} \\cdot a + + Args: + amplitudes_in (Amplitudes): The input Amplitude values for this template. + amplitudes_out (Amplitudes): The input Amplitude values for this template. + + Returns: + None + + """ + if self.data is None: + raise RuntimeError("You must set the data trait before using a template") + self._add_prior(amplitudes_in, amplitudes_out) + + def _apply_precond(self, amplitudes_in, amplitudes_out): + raise NotImplementedError("Derived class must implement _apply_precond()") + + def apply_precond(self, amplitudes_in, amplitudes_out): + """Apply the template preconditioner. + + This performs: + + .. math:: + a' += M^{-1} \\cdot a + + Args: + amplitudes_in (Amplitudes): The input Amplitude values for this template. + amplitudes_out (Amplitudes): The input Amplitude values for this template. + + Returns: + None + + """ + if self.data is None: + raise RuntimeError("You must set the data trait before using a template") + self._apply_precond(amplitudes_in, amplitudes_out) + + @classmethod + def get_class_config_path(cls): + return "/templates/{}".format(cls.__qualname__) + + def get_config_path(self): + if self.name is None: + return None + return "/templates/{}".format(self.name) + + @classmethod + def get_class_config(cls, input=None): + """Return a dictionary of the default traits of an Template class. + + This returns a new or appended dictionary. The class instance properties are + contained in a dictionary found in result["templates"][cls.name]. + + If the specified named location in the input config already exists then an + exception is raised. + + Args: + input (dict): The optional input dictionary to update. + + Returns: + (dict): The created or updated dictionary. 
+ + """ + return super().get_class_config(section="templates", input=input) + + def get_config(self, input=None): + """Return a dictionary of the current traits of a Template *instance*. + + This returns a new or appended dictionary. The operator instance properties are + contained in a dictionary found in result["templates"][self.name]. + + If the specified named location in the input config already exists then an + exception is raised. + + Args: + input (dict): The optional input dictionary to update. + + Returns: + (dict): The created or updated dictionary. + + """ + return super().get_config(section="templates", input=input) + + @classmethod + def translate(cls, props): + """Given a config dictionary, modify it to match the current API.""" + # For templates, the derived classes should implement this method as needed + # and then call super().translate(props) to trigger this method. Here we strip + # the 'API' key from the config. + props = super().translate(props) + if "API" in props: + del props["API"] + return props + + +class Amplitudes(object): + """Class for distributed template amplitudes. + + In the general case, template amplitudes exist as sparse, non-unique values across + all processes. This object provides methods for describing the local distribution + of amplitudes and for doing global reductions. + + Args: + comm (mpi4py.MPI.Comm): The MPI communicator or None. + n_global (int): The number of global values across all + + """ + + def __init__(self, comm, n_global, local_indices=None): + self._comm = comm + + @property + def comm(self): + return _comm + + def _n_vales(self): + raise NotImplementedError("Derived classes must implement _n_values()") + + def n_values(self): + """Returns the total number of amplitudes.""" + return self._n_values() + + def _n_local(self): + raise NotImplementedError("Derived classes must implement _n_local()") + + def n_local(self): + """Returns the number of locally stored amplitudes.""" + return self._n_local() + + def _get_global_values(self, offset, buffer): + raise NotImplementedError("Derived classes must implement _get_global_values()") + + def get_global_values(self, offset, buffer): + """For the given range of global values, populate the buffer. + + This function takes the provided buffer for the global sample offset and fills + it with any local values that fall in that sample range. Other values should be + set to zero. This is used in synchronization / reduction. + + Args: + offset (int): The global sample offset. + buffer (array): A pre-existing 1D array of amplitudes. + + Returns: + None + + """ + return self._global_values(offset, buffer) + + def _set_global_values(self, offset, buffer): + raise NotImplementedError("Derived classes must implement _set_global_values()") + + def set_global_values(self, offset, buffer): + """For the given range of global values, set local values. + + This function takes the provided buffer for the global sample offset and uses + it to set any local values that fall in that sample range. This is used in + synchronization / reduction. + + Args: + offset (int): The global sample offset. + buffer (array): A 1D array of amplitudes. + + Returns: + None + + """ + return self._global_values(offset, buffer) + + def sync(self, comm_bytes=10000000): + """Perform an Allreduce across all processes. + + If a derived class has only locally unique amplitudes on each process (for + example, destriping baseline offsets), then they should override this method + and make it a no-op. 
+ + Args: + comm_bytes (int): The maximum number of bytes to communicate in each + call to Allreduce. + + Returns: + None + + """ + log = Logger.get() + dt = np.dtype(self.local.dtype) + + storage_class = None + if dt.char == "f": + storage_class = AlignedF32 + elif dt.char == "d": + storage_class = AlignedF64 + elif dt.char == "F": + raise NotImplementedError("No support yet for complex numbers") + elif dt.char == "D": + raise NotImplementedError("No support yet for complex numbers") + else: + msg = "Unsupported data typecode '{}'".format(dt.char) + log.error(msg) + raise ValueError(msg) + + item_size = self.local.dtype.itemsize + n_comm = int(comm_bytes / item_size) + n_total = self.n_values() + + # Create a persistent buffer for the reduction + + send_raw = storage_class.zeros(n_comm) + send_buffer = send_raw.array() + recv_raw = storage_class.zeros(n_comm) + recv_buffer = recv_raw.array() + + # Buffered Allreduce + + comm_offset = 0 + while comm_offset < n_total: + if comm_offset + n_comm > n_total: + n_comm = n_total - comm_offset + self.get_global_values(comm_offset, send_buffer) + self._comm.Allreduce(send_buffer, recv_buffer, op=MPI.SUM) + self.set_global_values(comm_offset, recv_buffer) + comm_offset += n_comm + + # Cleanup + + del send_buffer + del recv_buffer + send_raw.clear() + recv_raw.clear() + del send_raw + del recv_raw + + def _local_dot(self, other): + """Perform a dot product with the local values of another Amplitudes object. + + It is safe to assume that the calling code has verified that the other + Amplitudes instance has a matching data distribution. + + """ + raise NotImplementedError("Derived classes must implement _local_dot()") + + def dot(self, other): + """Perform a dot product with another Amplitudes object. + + The other instance must have the same data distribution. + + Args: + other (Amplitudes): The other instance. + + Returns: + (float): The dot product. + + """ + if other.n_values() != self.n_values(): + raise RuntimeError("Amplitudes must have the same number of values") + if other.n_local() != self.n_local(): + raise RuntimeError("Amplitudes must have the same number of local values") + local_result = self._local_dot(other) + result = local_result + if self._comm is not None: + result = self._comm.allreduce(result, op=MPI.SUM) + return result From 3a522dea7b46d12e306761e813a34dc7f86a1813 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 06:34:09 -0800 Subject: [PATCH 030/690] Cleanup of template amplitudes --- src/toast/CMakeLists.txt | 1 + src/toast/future_ops/mapmaker.py | 97 +++++++++-- src/toast/observation_data.py | 46 +---- src/toast/templates/CMakeLists.txt | 8 + src/toast/templates/__init__.py | 7 + src/toast/templates/template.py | 266 ++++++++++++++++++----------- src/toast/utils.py | 58 ++++++- 7 files changed, 330 insertions(+), 153 deletions(-) create mode 100644 src/toast/templates/CMakeLists.txt create mode 100644 src/toast/templates/__init__.py diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index efa2b10da..dc10dab98 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -120,3 +120,4 @@ add_subdirectory(todmap) add_subdirectory(fod) add_subdirectory(pipeline_tools) add_subdirectory(future_ops) +add_subdirectory(templates) diff --git a/src/toast/future_ops/mapmaker.py b/src/toast/future_ops/mapmaker.py index 69aca411d..75f42f873 100644 --- a/src/toast/future_ops/mapmaker.py +++ b/src/toast/future_ops/mapmaker.py @@ -35,9 +35,10 @@ class MapMaker(Operator): .. 
math:: \left[ M^T N^{-1} Z M + M_p \right] a = M^T N^{-1} Z d - Where `a` are the solved amplitudes and `d` is the input data. `N` is the diagonal - time domain noise covariance. `M` is a matrix of templates that project from the - amplitudes into the time domain, and the `Z` operator is given by: + Where `a` are the solved amplitudes and `d` is the input data. `N` is the + diagonal time domain noise covariance. `M` is a matrix of templates that + project from the amplitudes into the time domain, and the `Z` operator is given + by: .. math:: Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} @@ -46,11 +47,11 @@ as one of its traits, and that operator performs: .. math:: PROJ = M^T N^{-1} Z - This projection operator is then used to compute the right hand side of the solver - and for each calculation of the left hand side. + This projection operator is then used to compute the right hand side of the + solver and for each calculation of the left hand side. After solving for the template amplitudes, a final map of the signal estimate is computed using a simple binning: @@ -120,14 +120,12 @@ def _exec(self, data, detectors=None, **kwargs): if self.det_data is None: raise RuntimeError("You must set the det_data trait before calling exec()") - # Set up projection + # Check projection if self.projection is None: raise RuntimeError( "You must set the projection trait before calling exec()" ) - self.projection.det_data = self.det_data - # Check map binning if self.map_binning is None: self.map_binning = self.projection.binning @@ -135,14 +133,91 @@ def _exec(self, data, detectors=None, **kwargs): # Get the template matrix used in the projection template_matrix = self.projection.template_matrix - # Get the zero-valued starting amplitudes - # Compute the RHS + if template_matrix.amplitudes in data: + # Clear any existing amplitudes, so that it will be created. 
+ del data[template_matrix.amplitudes] + + self.projection.det_data = self.det_data + self.projection.apply(data) + + # Copy structure of RHS and zero as starting point for the solver + # Solve for amplitudes return + @function_timer + def _solve(self): + """Standard issue PCG solution of A.x = b + + Returns: + x : the least squares solution + """ + log = Logger.get() + timer0 = Timer() + timer0.start() + timer = Timer() + timer.start() + # Initial guess is zero amplitudes + guess = self.templates.zero_amplitudes() + # print("guess:", guess) # DEBUG + # print("RHS:", self.rhs) # DEBUG + residual = self.rhs.copy() + # print("residual(1):", residual) # DEBUG + residual -= self.apply_lhs(guess) + # print("residual(2):", residual) # DEBUG + precond_residual = self.templates.apply_precond(residual) + proposal = precond_residual.copy() + sqsum = precond_residual.dot(residual) + init_sqsum, best_sqsum, last_best = sqsum, sqsum, sqsum + if self.rank == 0: + log.info("Initial residual: {}".format(init_sqsum)) + # Iterate to convergence + for iiter in range(self.niter_max): + if not np.isfinite(sqsum): + raise RuntimeError("Residual is not finite") + alpha = sqsum + alpha /= proposal.dot(self.apply_lhs(proposal)) + alpha_proposal = proposal.copy() + alpha_proposal *= alpha + guess += alpha_proposal + residual -= self.apply_lhs(alpha_proposal) + del alpha_proposal + # Prepare for next iteration + precond_residual = self.templates.apply_precond(residual) + beta = 1 / sqsum + # Check for convergence + sqsum = precond_residual.dot(residual) + if self.rank == 0: + timer.report_clear( + "Iter = {:4} relative residual: {:12.4e}".format( + iiter, sqsum / init_sqsum + ) + ) + if sqsum < init_sqsum * self.convergence_limit or sqsum < 1e-30: + if self.rank == 0: + timer0.report_clear( + "PCG converged after {} iterations".format(iiter) + ) + break + best_sqsum = min(sqsum, best_sqsum) + if iiter % 10 == 0 and iiter >= self.niter_min: + if last_best < best_sqsum * 2: + if self.rank == 0: + timer0.report_clear( + "PCG stalled after {} iterations".format(iiter) + ) + break + last_best = best_sqsum + # Select the next direction + beta *= sqsum + proposal *= beta + proposal += precond_residual + # log.info("{} : Solution: {}".format(self.rank, guess)) # DEBUG + return guess + def _finalize(self, data, **kwargs): return diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py index decd7355a..4b68cc313 100644 --- a/src/toast/observation_data.py +++ b/src/toast/observation_data.py @@ -24,6 +24,7 @@ AlignedU64, AlignedF32, AlignedF64, + dtype_to_aligned, ) from .intervals import IntervalList @@ -81,51 +82,9 @@ def __init__(self, detectors, shape, dtype): self._name2idx = {y: x for x, y in enumerate(self._detectors)} # construct a new dtype in case the parameter given is shortcut string - ttype = np.dtype(dtype) - + self._dtype = np.dtype(dtype) + self._storage_class, self.itemsize = dtype_to_aligned(dtype) - self.itemsize = 0 - self._storage_class = None - if ttype.char == "b": - self._storage_class = AlignedI8 - self.itemsize = 1 - elif ttype.char == "B": - self._storage_class = AlignedU8 - self.itemsize = 1 - elif ttype.char == "h": - self._storage_class = AlignedI16 - self.itemsize = 2 - elif ttype.char == "H": - self._storage_class = AlignedU16 - self.itemsize = 2 - elif ttype.char == "i": - self._storage_class = AlignedI32 - self.itemsize = 4 - elif ttype.char == "I": - self._storage_class = AlignedU32 - self.itemsize = 4 - elif (ttype.char == "q") or (ttype.char == "l"): - self._storage_class = 
AlignedI64 - self.itemsize = 8 - elif (ttype.char == "Q") or (ttype.char == "L"): - self._storage_class = AlignedU64 - self.itemsize = 8 - elif ttype.char == "f": - self._storage_class = AlignedF32 - self.itemsize = 4 - elif ttype.char == "d": - self._storage_class = AlignedF64 - self.itemsize = 8 - elif ttype.char == "F": - raise NotImplementedError("No support yet for complex numbers") - elif ttype.char == "D": - raise NotImplementedError("No support yet for complex numbers") - else: - msg = "Unsupported data typecode '{}'".format(ttype.char) - log.error(msg) - raise ValueError(msg) - self._dtype = ttype - # Verify that our shape contains only integral values self._flatshape = len(self._detectors) for d in shape: diff --git a/src/toast/templates/CMakeLists.txt b/src/toast/templates/CMakeLists.txt new file mode 100644 index 000000000..86fd06b68 --- /dev/null +++ b/src/toast/templates/CMakeLists.txt @@ -0,0 +1,8 @@ + +# Install the python files + +install(FILES + __init__.py + template.py + DESTINATION ${PYTHON_SITE}/toast/templates +) diff --git a/src/toast/templates/__init__.py b/src/toast/templates/__init__.py new file mode 100644 index 000000000..4dc58a800 --- /dev/null +++ b/src/toast/templates/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +# Import Templates into our public API + +from .template import Template, Amplitudes diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index 239f9dfb4..a83fff4a9 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -10,6 +10,7 @@ from ..utils import ( Logger, AlignedF32, AlignedF64, + dtype_to_aligned, ) @@ -64,11 +64,11 @@ def initialize(self, newdata): """Initialize instance after the data trait has been set. Templates use traits to set their properties, which allows them to be - configured easily with the constructor overrides and enables them to be built - from config files. However, the `data` trait may not be set at construction - time and this trait is likely used to compute the number of template amplitudes - that will be used and other parameters. This explicit initialize method is - called whenever the `data` trait is set. + configured easily with the constructor or afterwards and enables them to be + built from config files. However, the `data` trait may not be set at + construction time and this trait is likely used to compute the number of + template amplitudes that will be used and other parameters. This explicit + initialize method is called whenever the `data` trait is set. """ self._initialize(newdata) @@ -253,72 +253,155 @@ class Amplitudes(object): all processes. This object provides methods for describing the local distribution of amplitudes and for doing global reductions. + If n_global == n_local, then every process has a full copy of the amplitude + values. If the two arguments are different, then each process has a subset of + values. If local_indices is None, then each process has a unique set of values + and the total number across all processes must sum to n_global. If local_indices + is given, then it is the explicit locations of the local values within the global + set. + Args: comm (mpi4py.MPI.Comm): The MPI communicator or None. - n_global (int): The number of global values across all + n_global (int): The number of global values across all processes. + n_local (int): The number of values on this process. 
+ local_indices (array): If not None, the explicit indices of the local + amplitudes within the global array. + dtype (dtype): The amplitude dtype. """ - def __init__(self, comm, n_global, local_indices=None): + def __init__(self, comm, n_global, n_local, local_indices=None, dtype=np.float64): self._comm = comm + self._n_global = n_global + self._n_local = n_local + self._local_indices = local_indices + self._dtype = np.dtype(dtype) + self._storage_class, self._itemsize = dtype_to_aligned(dtype) + self._full = False + self._global_first = None + self._global_last = None + if self._n_global == self._n_local: + self._full = True + self._global_first = 0 + self._global_last = self._n_local - 1 + else: + if self._local_indices is None: + check = [self._n_local] + rank = 0 + if self._comm is not None: + check = self._comm.allgather(self._n_local) + rank = self._comm.rank + if np.sum(check) != self._n_global: + msg = "Total amplitudes on all processes does not equal n_global" + raise RuntimeError(msg) + self._global_first = 0 + for i in range(rank): + self._global_first += check[i] + self._global_last = self._global_first + self._n_local - 1 + else: + if len(self._local_indices) != self._n_local: + msg = "Length of local_indices must match n_local" + raise RuntimeError(msg) + self._global_first = self._local_indices[0] + self._global_last = self._local_indices[-1] + self._raw = self._storage_class.zeros(self._n_local) + self.local = self._raw.array() + + def clear(self): + """Delete the underlying memory. + + This will forcibly delete the C-allocated memory and invalidate all python + references to this object. DO NOT CALL THIS unless you are sure all references + are no longer being used and you are about to delete the object. + + """ + if hasattr(self, "local"): + del self.local + if hasattr(self, "_raw"): + self._raw.clear() + del self._raw + + def __del__(self): + self.clear() @property def comm(self): return self._comm - def _n_values(self): - raise NotImplementedError("Derived classes must implement _n_values()") - - def n_values(self): - """Returns the total number of amplitudes.""" - return self._n_values() - - def _n_local(self): - raise NotImplementedError("Derived classes must implement _n_local()") + @property + def n_global(self): + """The total number of amplitudes.""" + return self._n_global + @property def n_local(self): - """Returns the number of locally stored amplitudes.""" - return self._n_local() - - def _get_global_values(self, offset, buffer): - raise NotImplementedError("Derived classes must implement _get_global_values()") - - def get_global_values(self, offset, buffer): - """For the given range of global values, populate the buffer. - - This function takes the provided buffer for the global sample offset and fills - it with any local values that fall in that sample range. Other values should be - set to zero. This is used in synchronization / reduction. - - Args: - offset (int): The global sample offset. - buffer (array): A pre-existing 1D array of amplitudes. - - Returns: - None - - """ - return self._get_global_values(offset, buffer) - - def _set_global_values(self, offset, buffer): - raise NotImplementedError("Derived classes must implement _set_global_values()") - - def set_global_values(self, offset, buffer): - """For the given range of global values, set local values. - - This function takes the provided buffer for the global sample offset and uses - it to set any local values that fall in that sample range. This is used in - synchronization / reduction. 
- - Args: - offset (int): The global sample offset. - buffer (array): A 1D array of amplitudes. - - Returns: - None - - """ - return self._set_global_values(offset, buffer) + """The number of locally stored amplitudes.""" + return self._n_local + + def _get_global_values(self, comm_offset, send_buffer): + n_buf = len(send_buffer) + if self._full: + # Shortcut if we have all global amplitudes locally + send_buffer[:] = self.local[comm_offset : comm_offset + n_buf] + else: + # Need to compute our overlap with the global range. + send_buffer[:] = 0 + if (self._global_last < comm_offset) or ( + self._global_first >= comm_offset + n_buf + ): + # No overlap with our local data + return + if self._local_indices is None: + local_off = 0 + buf_off = 0 + if comm_offset > self._global_first: + local_off = comm_offset - self._global_first + else: + buf_off = self._global_first - comm_offset + n_copy = None + if comm_offset + n_buf > self._global_last: + n_copy = self._n_local - local_off + else: + n_copy = n_buf - buf_off + send_buffer[buf_off : buf_off + n_copy] = self.local[ + local_off : local_off + n_copy + ] + else: + # Need to efficiently do the lookup. Pull existing techniques from + # old code when we need this. + raise NotImplementedError("sync of explicitly indexed amplitudes") + + def _set_global_values(self, comm_offset, recv_buffer): + n_buf = len(recv_buffer) + if self._full: + # Shortcut if we have all global amplitudes locally + self.local[comm_offset : comm_offset + n_buf] = recv_buffer + else: + # Need to compute our overlap with the global range. + if (self._global_last < comm_offset) or ( + self._global_first >= comm_offset + n_buf + ): + # No overlap with our local data + return + if self._local_indices is None: + local_off = 0 + buf_off = 0 + if comm_offset > self._global_first: + local_off = comm_offset - self._global_first + else: + buf_off = self._global_first - comm_offset + n_copy = None + if comm_offset + n_buf > self._global_last: + n_copy = self._n_local - local_off + else: + n_copy = n_buf - buf_off + self.local[local_off : local_off + n_copy] = recv_buffer[ + buf_off : buf_off + n_copy + ] + else: + # Need to efficiently do the lookup. Pull existing techniques from + # old code when we need this. + raise NotImplementedError("sync of explicitly indexed amplitudes") def sync(self, comm_bytes=10000000): """Perform an Allreduce across all processes. 
@@ -335,32 +418,18 @@ def sync(self, comm_bytes=10000000): None """ + if self._comm is None: + return log = Logger.get() - dt = np.dtype(self.local.dtype) - - storage_class = None - if dt.char == "f": - storage_class = AlignedF32 - elif dt.char == "d": - storage_class = AlignedF64 - elif dt.char == "F": - raise NotImplementedError("No support yet for complex numbers") - elif dt.char == "D": - raise NotImplementedError("No support yet for complex numbers") - else: - msg = "Unsupported data typecode '{}'".format(dt.char) - log.error(msg) - raise ValueError(msg) - item_size = self.local.dtype.itemsize - n_comm = int(comm_bytes / item_size) - n_total = self.n_values() + n_comm = int(comm_bytes / self._itemsize) + n_total = self._n_global - # Create a persistent buffer for the reduction + # Create persistent buffers for the reduction - send_raw = storage_class.zeros(n_comm) + send_raw = self._storage_class.zeros(n_comm) send_buffer = send_raw.array() - recv_raw = storage_class.zeros(n_comm) + recv_raw = self._storage_class.zeros(n_comm) recv_buffer = recv_raw.array() # Buffered Allreduce @@ -369,13 +438,12 @@ def sync(self, comm_bytes=10000000): while comm_offset < n_total: if comm_offset + n_comm > n_total: n_comm = n_total - comm_offset - self.get_global_values(comm_offset, send_buffer) + self._get_global_values(comm_offset, send_buffer) self._comm.Allreduce(send_buffer, recv_buffer, op=MPI.SUM) - self.set_global_values(comm_offset, recv_buffer) + self._set_global_values(comm_offset, recv_buffer) comm_offset += n_comm # Cleanup - del send_buffer del recv_buffer send_raw.clear() @@ -383,15 +451,6 @@ def sync(self, comm_bytes=10000000): del send_raw del recv_raw - def _local_dot(self, other): - """Perform a dot product with the local values of another Amplitudes object. - - It is safe to assume that the calling code has verified that the other - Amplitudes instance has a matching data distribution. - - """ - raise NotImplementedError("Derived classes must implement _local_dot()") - def dot(self, other): """Perform a dot product with another Amplitudes object. @@ -404,12 +463,25 @@ def dot(self, other): (float): The dot product. """ - if other.n_values() != self.n_values(): + if other.n_global != self.n_global: raise RuntimeError("Amplitudes must have the same number of values") - if other.n_local() != self.n_local(): + if other.n_local != self.n_local: raise RuntimeError("Amplitudes must have the same number of local values") - local_result = self._local_dot(other) - result = local_result - if self._comm is not None: - result = self._comm.allreduce(result, op=MPI.SUM) + local_result = np.dot(self.local, other.local) + result = None + if self._full: + # Every process has a copy of all amplitudes, so we are done + result = local_result + else: + if self._comm is None: + # Only one process + result = local_result + else: + if self._local_indices is None: + # Every process has a unique set of amplitudes. Reduce. + result = self._comm.allreduce(local_result, op=MPI.SUM) + else: + # More complicated, since we need to reduce each amplitude only + # once. Implement techniques from other existing code when needed. + raise NotImplementedError("dot of explicitly indexed amplitudes") return result diff --git a/src/toast/utils.py b/src/toast/utils.py index d31bf81d0..515b2a41d 100644 --- a/src/toast/utils.py +++ b/src/toast/utils.py @@ -355,8 +355,7 @@ def ensure_buffer_f64(data): def name_UID(name): - """Return a unique integer for a specified name string. 
- """ + """Return a unique integer for a specified name string.""" bdet = name.encode("utf-8") dhash = hashlib.md5() dhash.update(bdet) @@ -406,3 +405,58 @@ def rate_from_times(timestamps, mean=False): else: dt = np.median(np.diff(timestamps)) return (1.0 / dt, dt, dt_min, dt_max, dt_std) + + +def dtype_to_aligned(dt): + """For a numpy dtype, return the equivalent internal Aligned storage class. + + Args: + dt (dtype): The numpy dtype. + + Returns: + (tuple): The (storage class, item size). + + """ + log = Logger.get() + itemsize = None + storage_class = None + ttype = np.dtype(dt) + if ttype.char == "b": + storage_class = AlignedI8 + itemsize = 1 + elif ttype.char == "B": + storage_class = AlignedU8 + itemsize = 1 + elif ttype.char == "h": + storage_class = AlignedI16 + itemsize = 2 + elif ttype.char == "H": + storage_class = AlignedU16 + itemsize = 2 + elif ttype.char == "i": + storage_class = AlignedI32 + itemsize = 4 + elif ttype.char == "I": + storage_class = AlignedU32 + itemsize = 4 + elif (ttype.char == "q") or (ttype.char == "l"): + storage_class = AlignedI64 + itemsize = 8 + elif (ttype.char == "Q") or (ttype.char == "L"): + storage_class = AlignedU64 + itemsize = 8 + elif ttype.char == "f": + storage_class = AlignedF32 + itemsize = 4 + elif ttype.char == "d": + storage_class = AlignedF64 + itemsize = 8 + elif ttype.char == "F": + raise NotImplementedError("No support yet for complex numbers") + elif ttype.char == "D": + raise NotImplementedError("No support yet for complex numbers") + else: + msg = "Unsupported data typecode '{}'".format(ttype.char) + log.error(msg) + raise ValueError(msg) + return (storage_class, itemsize) From 194f9b51aa72d22d2b5ef1725e56b7e4fe9f5263 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 06:57:27 -0800 Subject: [PATCH 031/690] Move python bindings into a subdirectory --- src/toast/CMakeLists.txt | 36 +++++++++---------- .../{_libtoast_atm.cpp => _libtoast/atm.cpp} | 0 .../common.cpp} | 2 +- .../common.hpp} | 0 .../fod_psd.cpp} | 0 .../map_cov.cpp} | 0 .../math_fft.cpp} | 0 .../math_healpix.cpp} | 0 .../math_qarray.cpp} | 0 .../math_rng.cpp} | 0 .../math_sf.cpp} | 0 .../{_libtoast.cpp => _libtoast/module.cpp} | 2 +- .../{_libtoast.hpp => _libtoast/module.hpp} | 2 +- .../pixels.cpp} | 0 .../{_libtoast_sys.cpp => _libtoast/sys.cpp} | 2 +- .../tod_filter.cpp} | 0 .../tod_pointing.cpp} | 0 .../tod_simnoise.cpp} | 0 .../todmap_mapmaker.cpp} | 0 .../todmap_scanning.cpp} | 0 20 files changed, 22 insertions(+), 22 deletions(-) rename src/toast/{_libtoast_atm.cpp => _libtoast/atm.cpp} (100%) rename src/toast/{_libtoast_common.cpp => _libtoast/common.cpp} (97%) rename src/toast/{_libtoast_common.hpp => _libtoast/common.hpp} (100%) rename src/toast/{_libtoast_fod_psd.cpp => _libtoast/fod_psd.cpp} (100%) rename src/toast/{_libtoast_map_cov.cpp => _libtoast/map_cov.cpp} (100%) rename src/toast/{_libtoast_math_fft.cpp => _libtoast/math_fft.cpp} (100%) rename src/toast/{_libtoast_math_healpix.cpp => _libtoast/math_healpix.cpp} (100%) rename src/toast/{_libtoast_math_qarray.cpp => _libtoast/math_qarray.cpp} (100%) rename src/toast/{_libtoast_math_rng.cpp => _libtoast/math_rng.cpp} (100%) rename src/toast/{_libtoast_math_sf.cpp => _libtoast/math_sf.cpp} (100%) rename src/toast/{_libtoast.cpp => _libtoast/module.cpp} (98%) rename src/toast/{_libtoast.hpp => _libtoast/module.hpp} (99%) rename src/toast/{_libtoast_pixels.cpp => _libtoast/pixels.cpp} (100%) rename src/toast/{_libtoast_sys.cpp => _libtoast/sys.cpp} (99%) rename 
src/toast/{_libtoast_tod_filter.cpp => _libtoast/tod_filter.cpp} (100%)
 rename src/toast/{_libtoast_tod_pointing.cpp => _libtoast/tod_pointing.cpp} (100%)
 rename src/toast/{_libtoast_tod_simnoise.cpp => _libtoast/tod_simnoise.cpp} (100%)
 rename src/toast/{_libtoast_todmap_mapmaker.cpp => _libtoast/todmap_mapmaker.cpp} (100%)
 rename src/toast/{_libtoast_todmap_scanning.cpp => _libtoast/todmap_scanning.cpp} (100%)

diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt
index dc10dab98..2798ae79b 100644
--- a/src/toast/CMakeLists.txt
+++ b/src/toast/CMakeLists.txt
@@ -28,23 +28,23 @@ set(PYTHON_SITE "lib/python${PYTHON_MAJORMINOR}/site-packages")
 
 # Create a module for the serial toast library
 pybind11_add_module(_libtoast MODULE
-    _libtoast_common.cpp
-    _libtoast.cpp
-    _libtoast_sys.cpp
-    _libtoast_math_sf.cpp
-    _libtoast_math_rng.cpp
-    _libtoast_math_qarray.cpp
-    _libtoast_math_fft.cpp
-    _libtoast_math_healpix.cpp
-    _libtoast_fod_psd.cpp
-    _libtoast_tod_filter.cpp
-    _libtoast_tod_pointing.cpp
-    _libtoast_tod_simnoise.cpp
-    _libtoast_todmap_scanning.cpp
-    _libtoast_map_cov.cpp
-    _libtoast_pixels.cpp
-    _libtoast_todmap_mapmaker.cpp
-    _libtoast_atm.cpp
+    _libtoast/common.cpp
+    _libtoast/module.cpp
+    _libtoast/sys.cpp
+    _libtoast/math_sf.cpp
+    _libtoast/math_rng.cpp
+    _libtoast/math_qarray.cpp
+    _libtoast/math_fft.cpp
+    _libtoast/math_healpix.cpp
+    _libtoast/fod_psd.cpp
+    _libtoast/tod_filter.cpp
+    _libtoast/tod_pointing.cpp
+    _libtoast/tod_simnoise.cpp
+    _libtoast/todmap_scanning.cpp
+    _libtoast/map_cov.cpp
+    _libtoast/pixels.cpp
+    _libtoast/todmap_mapmaker.cpp
+    _libtoast/atm.cpp
 )
 
 if(OpenMP_CXX_FOUND)
@@ -69,7 +69,7 @@ endif(CHOLMOD_FOUND)
 
 # Include path to the toast headers
 target_include_directories(_libtoast BEFORE PRIVATE
-    "${CMAKE_CURRENT_SOURCE_DIR}"
+    "${CMAKE_CURRENT_SOURCE_DIR}/_libtoast"
     "${CMAKE_CURRENT_SOURCE_DIR}/../libtoast/include"
 )
 
diff --git a/src/toast/_libtoast_atm.cpp b/src/toast/_libtoast/atm.cpp
similarity index 100%
rename from src/toast/_libtoast_atm.cpp
rename to src/toast/_libtoast/atm.cpp
diff --git a/src/toast/_libtoast_common.cpp b/src/toast/_libtoast/common.cpp
similarity index 97%
rename from src/toast/_libtoast_common.cpp
rename to src/toast/_libtoast/common.cpp
index 090a4add9..6b37bba0c 100644
--- a/src/toast/_libtoast_common.cpp
+++ b/src/toast/_libtoast/common.cpp
@@ -3,7 +3,7 @@
 // All rights reserved.  Use of this source code is governed by
 // a BSD-style license that can be found in the LICENSE file.
 
-#include <_libtoast_common.hpp>
+#include <common.hpp>
 
 // FIXME:  we could use configure checks to detect whether we are on a 64bit
 // system and whether "l" and "L" are equivalent to "q" and "Q".
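Only the C++ include directives and the CMake include path change in this commit; the compiled extension keeps its module name, so Python-side imports are unaffected. A quick hedged sanity check, assuming a built source tree (pointing_matrix_healpix is one of the bindings registered in _libtoast/tod_pointing.cpp):

# Not part of the patch: verify the reorganized bindings still import as before.
import toast._libtoast as lt

print(lt.__file__)  # location of the compiled module is unchanged
assert hasattr(lt, "pointing_matrix_healpix")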
diff --git a/src/toast/_libtoast_common.hpp b/src/toast/_libtoast/common.hpp
similarity index 100%
rename from src/toast/_libtoast_common.hpp
rename to src/toast/_libtoast/common.hpp
diff --git a/src/toast/_libtoast_fod_psd.cpp b/src/toast/_libtoast/fod_psd.cpp
similarity index 100%
rename from src/toast/_libtoast_fod_psd.cpp
rename to src/toast/_libtoast/fod_psd.cpp
diff --git a/src/toast/_libtoast_map_cov.cpp b/src/toast/_libtoast/map_cov.cpp
similarity index 100%
rename from src/toast/_libtoast_map_cov.cpp
rename to src/toast/_libtoast/map_cov.cpp
diff --git a/src/toast/_libtoast_math_fft.cpp b/src/toast/_libtoast/math_fft.cpp
similarity index 100%
rename from src/toast/_libtoast_math_fft.cpp
rename to src/toast/_libtoast/math_fft.cpp
diff --git a/src/toast/_libtoast_math_healpix.cpp b/src/toast/_libtoast/math_healpix.cpp
similarity index 100%
rename from src/toast/_libtoast_math_healpix.cpp
rename to src/toast/_libtoast/math_healpix.cpp
diff --git a/src/toast/_libtoast_math_qarray.cpp b/src/toast/_libtoast/math_qarray.cpp
similarity index 100%
rename from src/toast/_libtoast_math_qarray.cpp
rename to src/toast/_libtoast/math_qarray.cpp
diff --git a/src/toast/_libtoast_math_rng.cpp b/src/toast/_libtoast/math_rng.cpp
similarity index 100%
rename from src/toast/_libtoast_math_rng.cpp
rename to src/toast/_libtoast/math_rng.cpp
diff --git a/src/toast/_libtoast_math_sf.cpp b/src/toast/_libtoast/math_sf.cpp
similarity index 100%
rename from src/toast/_libtoast_math_sf.cpp
rename to src/toast/_libtoast/math_sf.cpp
diff --git a/src/toast/_libtoast.cpp b/src/toast/_libtoast/module.cpp
similarity index 98%
rename from src/toast/_libtoast.cpp
rename to src/toast/_libtoast/module.cpp
index 4cb11c916..60389c4bc 100644
--- a/src/toast/_libtoast.cpp
+++ b/src/toast/_libtoast/module.cpp
@@ -3,7 +3,7 @@
 // All rights reserved.  Use of this source code is governed by
 // a BSD-style license that can be found in the LICENSE file.
 
-#include <_libtoast.hpp>
+#include <module.hpp>
 
 using size_container = py::detail::any_container <ssize_t>;
 
diff --git a/src/toast/_libtoast.hpp b/src/toast/_libtoast/module.hpp
similarity index 99%
rename from src/toast/_libtoast.hpp
rename to src/toast/_libtoast/module.hpp
index f30b7643c..5a35267b5 100644
--- a/src/toast/_libtoast.hpp
+++ b/src/toast/_libtoast/module.hpp
@@ -6,7 +6,7 @@
 #ifndef LIBTOAST_HPP
 #define LIBTOAST_HPP
 
-#include <_libtoast_common.hpp>
+#include <common.hpp>
 
 PYBIND11_MAKE_OPAQUE(toast::AlignedI8);
 PYBIND11_MAKE_OPAQUE(toast::AlignedU8);
diff --git a/src/toast/_libtoast_pixels.cpp b/src/toast/_libtoast/pixels.cpp
similarity index 100%
rename from src/toast/_libtoast_pixels.cpp
rename to src/toast/_libtoast/pixels.cpp
diff --git a/src/toast/_libtoast_sys.cpp b/src/toast/_libtoast/sys.cpp
similarity index 99%
rename from src/toast/_libtoast_sys.cpp
rename to src/toast/_libtoast/sys.cpp
index d5cb1be62..3fd114f68 100644
--- a/src/toast/_libtoast_sys.cpp
+++ b/src/toast/_libtoast/sys.cpp
@@ -3,7 +3,7 @@
 // All rights reserved.  Use of this source code is governed by
 // a BSD-style license that can be found in the LICENSE file.
-#include <_libtoast.hpp> +#include void init_sys(py::module & m) { diff --git a/src/toast/_libtoast_tod_filter.cpp b/src/toast/_libtoast/tod_filter.cpp similarity index 100% rename from src/toast/_libtoast_tod_filter.cpp rename to src/toast/_libtoast/tod_filter.cpp diff --git a/src/toast/_libtoast_tod_pointing.cpp b/src/toast/_libtoast/tod_pointing.cpp similarity index 100% rename from src/toast/_libtoast_tod_pointing.cpp rename to src/toast/_libtoast/tod_pointing.cpp diff --git a/src/toast/_libtoast_tod_simnoise.cpp b/src/toast/_libtoast/tod_simnoise.cpp similarity index 100% rename from src/toast/_libtoast_tod_simnoise.cpp rename to src/toast/_libtoast/tod_simnoise.cpp diff --git a/src/toast/_libtoast_todmap_mapmaker.cpp b/src/toast/_libtoast/todmap_mapmaker.cpp similarity index 100% rename from src/toast/_libtoast_todmap_mapmaker.cpp rename to src/toast/_libtoast/todmap_mapmaker.cpp diff --git a/src/toast/_libtoast_todmap_scanning.cpp b/src/toast/_libtoast/todmap_scanning.cpp similarity index 100% rename from src/toast/_libtoast_todmap_scanning.cpp rename to src/toast/_libtoast/todmap_scanning.cpp From f737b49d8d914e722d6e783abfb6a6be723b722a Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 07:08:24 -0800 Subject: [PATCH 032/690] Move operator class into ops directory --- src/toast/CMakeLists.txt | 1 - src/toast/_libtoast/atm.cpp | 2 +- src/toast/_libtoast/fod_psd.cpp | 2 +- src/toast/_libtoast/map_cov.cpp | 2 +- src/toast/_libtoast/math_fft.cpp | 2 +- src/toast/_libtoast/math_healpix.cpp | 2 +- src/toast/_libtoast/math_qarray.cpp | 2 +- src/toast/_libtoast/math_rng.cpp | 2 +- src/toast/_libtoast/math_sf.cpp | 2 +- src/toast/_libtoast/pixels.cpp | 2 +- src/toast/_libtoast/tod_filter.cpp | 2 +- src/toast/_libtoast/tod_pointing.cpp | 2 +- src/toast/_libtoast/tod_simnoise.cpp | 2 +- src/toast/_libtoast/todmap_mapmaker.cpp | 2 +- src/toast/_libtoast/todmap_scanning.cpp | 2 +- src/toast/future_ops/CMakeLists.txt | 1 + src/toast/future_ops/__init__.py | 2 ++ src/toast/future_ops/clear.py | 2 +- src/toast/future_ops/copy.py | 2 +- src/toast/future_ops/mapmaker.py | 4 ++-- src/toast/future_ops/mapmaker_binning.py | 4 ++-- src/toast/future_ops/mapmaker_projection.py | 4 ++-- src/toast/future_ops/mapmaker_templates.py | 4 ++-- src/toast/future_ops/mapmaker_utils.py | 4 ++-- src/toast/future_ops/memory_counter.py | 2 +- src/toast/future_ops/noise_model.py | 2 +- src/toast/future_ops/noise_weight.py | 2 +- src/toast/{ => future_ops}/operator.py | 4 ++-- src/toast/future_ops/pipeline.py | 2 +- src/toast/future_ops/pointing_healpix.py | 4 ++-- src/toast/future_ops/scan_map.py | 4 ++-- src/toast/future_ops/sim_ground.py | 13 +++++-------- src/toast/future_ops/sim_satellite.py | 4 ++-- src/toast/future_ops/sim_tod_noise.py | 4 ++-- 34 files changed, 48 insertions(+), 49 deletions(-) rename src/toast/{ => future_ops}/operator.py (99%) diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 2798ae79b..e30299914 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -99,7 +99,6 @@ install(FILES observation.py observation_data.py observation_view.py - operator.py vis.py rng.py qarray.py diff --git a/src/toast/_libtoast/atm.cpp b/src/toast/_libtoast/atm.cpp index 9ff1daf1a..ca8d20c7f 100644 --- a/src/toast/_libtoast/atm.cpp +++ b/src/toast/_libtoast/atm.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. 
-#include <_libtoast.hpp> +#include void init_atm(py::module & m) { diff --git a/src/toast/_libtoast/fod_psd.cpp b/src/toast/_libtoast/fod_psd.cpp index cfa002896..54375c219 100644 --- a/src/toast/_libtoast/fod_psd.cpp +++ b/src/toast/_libtoast/fod_psd.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_fod_psd(py::module & m) { diff --git a/src/toast/_libtoast/map_cov.cpp b/src/toast/_libtoast/map_cov.cpp index acbb04cc3..52e3342c1 100644 --- a/src/toast/_libtoast/map_cov.cpp +++ b/src/toast/_libtoast/map_cov.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_map_cov(py::module & m) { diff --git a/src/toast/_libtoast/math_fft.cpp b/src/toast/_libtoast/math_fft.cpp index 60945cf0a..e58baead9 100644 --- a/src/toast/_libtoast/math_fft.cpp +++ b/src/toast/_libtoast/math_fft.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_math_fft(py::module & m) { diff --git a/src/toast/_libtoast/math_healpix.cpp b/src/toast/_libtoast/math_healpix.cpp index 233c560f8..d6292ad3f 100644 --- a/src/toast/_libtoast/math_healpix.cpp +++ b/src/toast/_libtoast/math_healpix.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_math_healpix(py::module & m) { diff --git a/src/toast/_libtoast/math_qarray.cpp b/src/toast/_libtoast/math_qarray.cpp index e9962e284..4e4b8cca5 100644 --- a/src/toast/_libtoast/math_qarray.cpp +++ b/src/toast/_libtoast/math_qarray.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_math_qarray(py::module & m) { diff --git a/src/toast/_libtoast/math_rng.cpp b/src/toast/_libtoast/math_rng.cpp index d9c035e65..eb7c9bd3c 100644 --- a/src/toast/_libtoast/math_rng.cpp +++ b/src/toast/_libtoast/math_rng.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_math_rng(py::module & m) { diff --git a/src/toast/_libtoast/math_sf.cpp b/src/toast/_libtoast/math_sf.cpp index 765e01c53..e1d7f8126 100644 --- a/src/toast/_libtoast/math_sf.cpp +++ b/src/toast/_libtoast/math_sf.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_math_sf(py::module & m) { diff --git a/src/toast/_libtoast/pixels.cpp b/src/toast/_libtoast/pixels.cpp index 98a25cd13..8f39f5aa3 100644 --- a/src/toast/_libtoast/pixels.cpp +++ b/src/toast/_libtoast/pixels.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include template diff --git a/src/toast/_libtoast/tod_filter.cpp b/src/toast/_libtoast/tod_filter.cpp index 8886a1ad7..897386d7a 100644 --- a/src/toast/_libtoast/tod_filter.cpp +++ b/src/toast/_libtoast/tod_filter.cpp @@ -3,7 +3,7 @@ // All rights reserved. 
Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_tod_filter(py::module & m) { diff --git a/src/toast/_libtoast/tod_pointing.cpp b/src/toast/_libtoast/tod_pointing.cpp index 1e333404d..ad0923cf0 100644 --- a/src/toast/_libtoast/tod_pointing.cpp +++ b/src/toast/_libtoast/tod_pointing.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_tod_pointing(py::module & m) { diff --git a/src/toast/_libtoast/tod_simnoise.cpp b/src/toast/_libtoast/tod_simnoise.cpp index bf73572b4..dd73fa479 100644 --- a/src/toast/_libtoast/tod_simnoise.cpp +++ b/src/toast/_libtoast/tod_simnoise.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void init_tod_simnoise(py::module & m) { diff --git a/src/toast/_libtoast/todmap_mapmaker.cpp b/src/toast/_libtoast/todmap_mapmaker.cpp index f0e50d077..679f71e7b 100644 --- a/src/toast/_libtoast/todmap_mapmaker.cpp +++ b/src/toast/_libtoast/todmap_mapmaker.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. -#include <_libtoast.hpp> +#include void apply_flags_to_pixels(py::array_t common_flags, diff --git a/src/toast/_libtoast/todmap_scanning.cpp b/src/toast/_libtoast/todmap_scanning.cpp index a19677dd9..589403e83 100644 --- a/src/toast/_libtoast/todmap_scanning.cpp +++ b/src/toast/_libtoast/todmap_scanning.cpp @@ -3,7 +3,7 @@ // All rights reserved. Use of this source code is governed by // a BSD-style license that can be found in the LICENSE file. 
-#include <_libtoast.hpp> +#include template diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/future_ops/CMakeLists.txt index 69d5daa35..d1513d2ba 100644 --- a/src/toast/future_ops/CMakeLists.txt +++ b/src/toast/future_ops/CMakeLists.txt @@ -3,6 +3,7 @@ install(FILES __init__.py + operator.py pipeline.py clear.py copy.py diff --git a/src/toast/future_ops/__init__.py b/src/toast/future_ops/__init__.py index 404b995b1..617e2a34c 100644 --- a/src/toast/future_ops/__init__.py +++ b/src/toast/future_ops/__init__.py @@ -4,6 +4,8 @@ # Import Operators into our public API +from .operator import Operator + from .memory_counter import MemoryCounter from .clear import Clear diff --git a/src/toast/future_ops/clear.py b/src/toast/future_ops/clear.py index 125ca4546..bf0bbd4ac 100644 --- a/src/toast/future_ops/clear.py +++ b/src/toast/future_ops/clear.py @@ -8,7 +8,7 @@ from ..traits import trait_docs, Int, Unicode, List -from ..operator import Operator +from .operator import Operator @trait_docs diff --git a/src/toast/future_ops/copy.py b/src/toast/future_ops/copy.py index 2f6c8034f..6d294480f 100644 --- a/src/toast/future_ops/copy.py +++ b/src/toast/future_ops/copy.py @@ -10,7 +10,7 @@ from ..traits import trait_docs, Int, Unicode, List -from ..operator import Operator +from .operator import Operator @trait_docs diff --git a/src/toast/future_ops/mapmaker.py b/src/toast/future_ops/mapmaker.py index 75f42f873..c600766ed 100644 --- a/src/toast/future_ops/mapmaker.py +++ b/src/toast/future_ops/mapmaker.py @@ -10,12 +10,12 @@ from ..traits import trait_docs, Int, Unicode, Bool -from ..operator import Operator - from ..timing import function_timer from ..pixels import PixelDistribution, PixelData +from .operator import Operator + from .pipeline import Pipeline from .clear import Clear diff --git a/src/toast/future_ops/mapmaker_binning.py b/src/toast/future_ops/mapmaker_binning.py index c0cc87f16..2d2e0f120 100644 --- a/src/toast/future_ops/mapmaker_binning.py +++ b/src/toast/future_ops/mapmaker_binning.py @@ -10,12 +10,12 @@ from ..traits import trait_docs, Int, Unicode, Bool -from ..operator import Operator - from ..timing import function_timer from ..pixels import PixelDistribution, PixelData +from .operator import Operator + from .pipeline import Pipeline from .mapmaker_utils import BuildHitMap, BuildNoiseWeighted, BuildInverseCovariance diff --git a/src/toast/future_ops/mapmaker_projection.py b/src/toast/future_ops/mapmaker_projection.py index 3ada2cf1f..b2fb0f9da 100644 --- a/src/toast/future_ops/mapmaker_projection.py +++ b/src/toast/future_ops/mapmaker_projection.py @@ -10,12 +10,12 @@ from ..traits import trait_docs, Int, Unicode, Bool -from ..operator import Operator - from ..timing import function_timer from ..pixels import PixelDistribution, PixelData +from .operator import Operator + from .pipeline import Pipeline from .clear import Clear diff --git a/src/toast/future_ops/mapmaker_templates.py b/src/toast/future_ops/mapmaker_templates.py index b022206c4..eb877c269 100644 --- a/src/toast/future_ops/mapmaker_templates.py +++ b/src/toast/future_ops/mapmaker_templates.py @@ -8,10 +8,10 @@ from ..traits import trait_docs, Int, Unicode, Bool, List -from ..operator import Operator - from ..timing import function_timer +from .operator import Operator + @trait_docs class TemplateMatrix(Operator): diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/future_ops/mapmaker_utils.py index 8fa6a87c8..a283820f5 100644 --- a/src/toast/future_ops/mapmaker_utils.py +++ 
b/src/toast/future_ops/mapmaker_utils.py @@ -10,8 +10,6 @@ from ..traits import trait_docs, Int, Unicode, Bool, Instance -from ..operator import Operator - from ..timing import function_timer from ..pixels import PixelDistribution, PixelData @@ -22,6 +20,8 @@ cov_accum_diag_invnpp, ) +from .operator import Operator + from .clear import Clear diff --git a/src/toast/future_ops/memory_counter.py b/src/toast/future_ops/memory_counter.py index 10458782a..4b256542b 100644 --- a/src/toast/future_ops/memory_counter.py +++ b/src/toast/future_ops/memory_counter.py @@ -14,7 +14,7 @@ from ..traits import trait_docs, Int, Bool -from ..operator import Operator +from .operator import Operator @trait_docs diff --git a/src/toast/future_ops/noise_model.py b/src/toast/future_ops/noise_model.py index da1c83853..c013ae7cc 100644 --- a/src/toast/future_ops/noise_model.py +++ b/src/toast/future_ops/noise_model.py @@ -14,7 +14,7 @@ from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity -from ..operator import Operator +from .operator import Operator @trait_docs diff --git a/src/toast/future_ops/noise_weight.py b/src/toast/future_ops/noise_weight.py index bdfc7d40e..77807a23a 100644 --- a/src/toast/future_ops/noise_weight.py +++ b/src/toast/future_ops/noise_weight.py @@ -14,7 +14,7 @@ from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity -from ..operator import Operator +from .operator import Operator @trait_docs diff --git a/src/toast/operator.py b/src/toast/future_ops/operator.py similarity index 99% rename from src/toast/operator.py rename to src/toast/future_ops/operator.py index 4530c41d4..7f8b97893 100644 --- a/src/toast/operator.py +++ b/src/toast/future_ops/operator.py @@ -3,9 +3,9 @@ # a BSD-style license that can be found in the LICENSE file. -from .utils import Logger +from ..utils import Logger -from .traits import TraitConfig +from ..traits import TraitConfig class Operator(TraitConfig): diff --git a/src/toast/future_ops/pipeline.py b/src/toast/future_ops/pipeline.py index c6cb41a48..a37ff5fd4 100644 --- a/src/toast/future_ops/pipeline.py +++ b/src/toast/future_ops/pipeline.py @@ -8,7 +8,7 @@ from ..traits import trait_docs, Int, Unicode, List -from ..operator import Operator +from .operator import Operator @trait_docs diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/future_ops/pointing_healpix.py index 93ee57d5d..1b5e0f166 100644 --- a/src/toast/future_ops/pointing_healpix.py +++ b/src/toast/future_ops/pointing_healpix.py @@ -12,8 +12,6 @@ from ..healpix import HealpixPixels -from ..operator import Operator - from ..timing import function_timer from .. 
import qarray as qa @@ -22,6 +20,8 @@ from .._libtoast import pointing_matrix_healpix +from .operator import Operator + @trait_docs class PointingHealpix(Operator): diff --git a/src/toast/future_ops/scan_map.py b/src/toast/future_ops/scan_map.py index 921b1e423..36a3a9eca 100644 --- a/src/toast/future_ops/scan_map.py +++ b/src/toast/future_ops/scan_map.py @@ -10,14 +10,14 @@ from ..traits import trait_docs, Int, Unicode, Bool -from ..operator import Operator - from ..timing import function_timer from ..pixels import PixelDistribution, PixelData from .._libtoast import scan_map_float64, scan_map_float32 +from .operator import Operator + @trait_docs class ScanMap(Operator): diff --git a/src/toast/future_ops/sim_ground.py b/src/toast/future_ops/sim_ground.py index 1c4d0295a..c7d918c4c 100644 --- a/src/toast/future_ops/sim_ground.py +++ b/src/toast/future_ops/sim_ground.py @@ -18,8 +18,6 @@ from ..tod import Interval, TOD, regular_intervals, AnalyticNoise -from ..operator import Operator - from ..observation import Observation from ..config import ObjectConfig @@ -28,6 +26,8 @@ from ..healpix import ang2vec +from .operator import Operator + from .sim_hwp import simulate_hwp_angle @@ -263,13 +263,11 @@ def finalize(self, data): return def requires(self): - """List of Observation keys directly used by this Operator. - """ + """List of Observation keys directly used by this Operator.""" return list() def provides(self): - """List of Observation keys generated by this Operator. - """ + """List of Observation keys generated by this Operator.""" prov = [ "TIMESTAMPS", "BORESIGHT_RADEC", @@ -282,6 +280,5 @@ def provides(self): return prov def accelerators(self): - """List of accelerators supported by this Operator. - """ + """List of accelerators supported by this Operator.""" return list() diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/future_ops/sim_satellite.py index 42e31c45f..e50620fa6 100644 --- a/src/toast/future_ops/sim_satellite.py +++ b/src/toast/future_ops/sim_satellite.py @@ -26,14 +26,14 @@ from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity -from ..operator import Operator - from ..observation import Observation from ..instrument import Telescope from ..healpix import ang2vec +from .operator import Operator + from .sim_hwp import simulate_hwp_response diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/future_ops/sim_tod_noise.py index 40e7c516b..a9bbcf6f5 100644 --- a/src/toast/future_ops/sim_tod_noise.py +++ b/src/toast/future_ops/sim_tod_noise.py @@ -16,12 +16,12 @@ from ..fft import FFTPlanReal1DStore -from ..operator import Operator - from ..utils import rate_from_times, Logger, AlignedF64 from .._libtoast import tod_sim_noise_timestream +from .operator import Operator + @function_timer def sim_noise_timestream( From ca0bb4aa7ecf972fa28d640647c9d226e3acd463 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 07:08:58 -0800 Subject: [PATCH 033/690] Rename future_ops --> ops --- src/toast/{future_ops => ops}/CMakeLists.txt | 0 src/toast/{future_ops => ops}/__init__.py | 0 src/toast/{future_ops => ops}/clear.py | 0 src/toast/{future_ops => ops}/copy.py | 0 src/toast/{future_ops => ops}/mapmaker.py | 0 src/toast/{future_ops => ops}/mapmaker_binning.py | 0 src/toast/{future_ops => ops}/mapmaker_projection.py | 0 src/toast/{future_ops => ops}/mapmaker_templates.py | 0 src/toast/{future_ops => ops}/mapmaker_utils.py | 0 src/toast/{future_ops => ops}/memory_counter.py | 0 src/toast/{future_ops => ops}/noise_model.py | 0 
src/toast/{future_ops => ops}/noise_weight.py | 0 src/toast/{future_ops => ops}/operator.py | 0 src/toast/{future_ops => ops}/pipeline.py | 0 src/toast/{future_ops => ops}/pointing_healpix.py | 0 src/toast/{future_ops => ops}/scan_map.py | 0 src/toast/{future_ops => ops}/sim_ground.py | 0 src/toast/{future_ops => ops}/sim_hwp.py | 0 src/toast/{future_ops => ops}/sim_satellite.py | 0 src/toast/{future_ops => ops}/sim_tod_noise.py | 0 20 files changed, 0 insertions(+), 0 deletions(-) rename src/toast/{future_ops => ops}/CMakeLists.txt (100%) rename src/toast/{future_ops => ops}/__init__.py (100%) rename src/toast/{future_ops => ops}/clear.py (100%) rename src/toast/{future_ops => ops}/copy.py (100%) rename src/toast/{future_ops => ops}/mapmaker.py (100%) rename src/toast/{future_ops => ops}/mapmaker_binning.py (100%) rename src/toast/{future_ops => ops}/mapmaker_projection.py (100%) rename src/toast/{future_ops => ops}/mapmaker_templates.py (100%) rename src/toast/{future_ops => ops}/mapmaker_utils.py (100%) rename src/toast/{future_ops => ops}/memory_counter.py (100%) rename src/toast/{future_ops => ops}/noise_model.py (100%) rename src/toast/{future_ops => ops}/noise_weight.py (100%) rename src/toast/{future_ops => ops}/operator.py (100%) rename src/toast/{future_ops => ops}/pipeline.py (100%) rename src/toast/{future_ops => ops}/pointing_healpix.py (100%) rename src/toast/{future_ops => ops}/scan_map.py (100%) rename src/toast/{future_ops => ops}/sim_ground.py (100%) rename src/toast/{future_ops => ops}/sim_hwp.py (100%) rename src/toast/{future_ops => ops}/sim_satellite.py (100%) rename src/toast/{future_ops => ops}/sim_tod_noise.py (100%) diff --git a/src/toast/future_ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt similarity index 100% rename from src/toast/future_ops/CMakeLists.txt rename to src/toast/ops/CMakeLists.txt diff --git a/src/toast/future_ops/__init__.py b/src/toast/ops/__init__.py similarity index 100% rename from src/toast/future_ops/__init__.py rename to src/toast/ops/__init__.py diff --git a/src/toast/future_ops/clear.py b/src/toast/ops/clear.py similarity index 100% rename from src/toast/future_ops/clear.py rename to src/toast/ops/clear.py diff --git a/src/toast/future_ops/copy.py b/src/toast/ops/copy.py similarity index 100% rename from src/toast/future_ops/copy.py rename to src/toast/ops/copy.py diff --git a/src/toast/future_ops/mapmaker.py b/src/toast/ops/mapmaker.py similarity index 100% rename from src/toast/future_ops/mapmaker.py rename to src/toast/ops/mapmaker.py diff --git a/src/toast/future_ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py similarity index 100% rename from src/toast/future_ops/mapmaker_binning.py rename to src/toast/ops/mapmaker_binning.py diff --git a/src/toast/future_ops/mapmaker_projection.py b/src/toast/ops/mapmaker_projection.py similarity index 100% rename from src/toast/future_ops/mapmaker_projection.py rename to src/toast/ops/mapmaker_projection.py diff --git a/src/toast/future_ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py similarity index 100% rename from src/toast/future_ops/mapmaker_templates.py rename to src/toast/ops/mapmaker_templates.py diff --git a/src/toast/future_ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py similarity index 100% rename from src/toast/future_ops/mapmaker_utils.py rename to src/toast/ops/mapmaker_utils.py diff --git a/src/toast/future_ops/memory_counter.py b/src/toast/ops/memory_counter.py similarity index 100% rename from src/toast/future_ops/memory_counter.py rename 
to src/toast/ops/memory_counter.py diff --git a/src/toast/future_ops/noise_model.py b/src/toast/ops/noise_model.py similarity index 100% rename from src/toast/future_ops/noise_model.py rename to src/toast/ops/noise_model.py diff --git a/src/toast/future_ops/noise_weight.py b/src/toast/ops/noise_weight.py similarity index 100% rename from src/toast/future_ops/noise_weight.py rename to src/toast/ops/noise_weight.py diff --git a/src/toast/future_ops/operator.py b/src/toast/ops/operator.py similarity index 100% rename from src/toast/future_ops/operator.py rename to src/toast/ops/operator.py diff --git a/src/toast/future_ops/pipeline.py b/src/toast/ops/pipeline.py similarity index 100% rename from src/toast/future_ops/pipeline.py rename to src/toast/ops/pipeline.py diff --git a/src/toast/future_ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py similarity index 100% rename from src/toast/future_ops/pointing_healpix.py rename to src/toast/ops/pointing_healpix.py diff --git a/src/toast/future_ops/scan_map.py b/src/toast/ops/scan_map.py similarity index 100% rename from src/toast/future_ops/scan_map.py rename to src/toast/ops/scan_map.py diff --git a/src/toast/future_ops/sim_ground.py b/src/toast/ops/sim_ground.py similarity index 100% rename from src/toast/future_ops/sim_ground.py rename to src/toast/ops/sim_ground.py diff --git a/src/toast/future_ops/sim_hwp.py b/src/toast/ops/sim_hwp.py similarity index 100% rename from src/toast/future_ops/sim_hwp.py rename to src/toast/ops/sim_hwp.py diff --git a/src/toast/future_ops/sim_satellite.py b/src/toast/ops/sim_satellite.py similarity index 100% rename from src/toast/future_ops/sim_satellite.py rename to src/toast/ops/sim_satellite.py diff --git a/src/toast/future_ops/sim_tod_noise.py b/src/toast/ops/sim_tod_noise.py similarity index 100% rename from src/toast/future_ops/sim_tod_noise.py rename to src/toast/ops/sim_tod_noise.py From b37a07563ba00ad160157782005994c4a32ffd9e Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 07:14:34 -0800 Subject: [PATCH 034/690] Move old source into a separate directory --- src/toast/CMakeLists.txt | 2 -- src/toast/{ => unported}/cache.py | 0 src/toast/{ => unported}/fod/CMakeLists.txt | 0 src/toast/{ => unported}/fod/__init__.py | 0 src/toast/{ => unported}/fod/noise_estimation.py | 0 src/toast/{ => unported}/fod/psd_math.py | 0 src/toast/{ => unported}/map/CMakeLists.txt | 0 src/toast/{ => unported}/map/__init__.py | 0 src/toast/{ => unported}/map/cov.py | 0 src/toast/{ => unported}/map/pixels.py | 0 src/toast/{ => unported}/pipeline_tools/CMakeLists.txt | 0 src/toast/{ => unported}/pipeline_tools/__init__.py | 0 src/toast/{ => unported}/pipeline_tools/atm.py | 0 src/toast/{ => unported}/pipeline_tools/binning.py | 0 src/toast/{ => unported}/pipeline_tools/classes.py | 0 src/toast/{ => unported}/pipeline_tools/debug.py | 0 src/toast/{ => unported}/pipeline_tools/dipole.py | 0 src/toast/{ => unported}/pipeline_tools/dist.py | 0 src/toast/{ => unported}/pipeline_tools/export.py | 0 src/toast/{ => unported}/pipeline_tools/filters.py | 0 src/toast/{ => unported}/pipeline_tools/gain.py | 0 src/toast/{ => unported}/pipeline_tools/madam.py | 0 src/toast/{ => unported}/pipeline_tools/mapmaker.py | 0 src/toast/{ => unported}/pipeline_tools/noise.py | 0 src/toast/{ => unported}/pipeline_tools/pointing.py | 0 src/toast/{ => unported}/pipeline_tools/sky_signal.py | 0 src/toast/{ => unported}/pipeline_tools/sss.py | 0 src/toast/{ => unported}/pipeline_tools/todground.py | 0 src/toast/{ => 
unported}/pipeline_tools/todsatellite.py | 0 src/toast/{ => unported}/tod/CMakeLists.txt | 0 src/toast/{ => unported}/tod/__init__.py | 0 src/toast/{ => unported}/tod/applygain.py | 0 src/toast/{ => unported}/tod/conviqt_example.txt | 0 src/toast/{ => unported}/tod/gainscrambler.py | 0 src/toast/{ => unported}/tod/interval.py | 0 src/toast/{ => unported}/tod/memorycounter.py | 0 src/toast/{ => unported}/tod/noise.py | 0 src/toast/{ => unported}/tod/polyfilter.py | 0 src/toast/{ => unported}/tod/sim_det_noise.py | 0 src/toast/{ => unported}/tod/sim_focalplane.py | 0 src/toast/{ => unported}/tod/sim_interval.py | 0 src/toast/{ => unported}/tod/sim_noise.py | 0 src/toast/{ => unported}/tod/spt3g.py | 0 src/toast/{ => unported}/tod/spt3g_utils.py | 0 src/toast/{ => unported}/tod/tidas.py | 0 src/toast/{ => unported}/tod/tidas_utils.py | 0 src/toast/{ => unported}/tod/tod.py | 0 src/toast/{ => unported}/tod/tod_math.py | 0 src/toast/{ => unported}/todmap/CMakeLists.txt | 0 src/toast/{ => unported}/todmap/__init__.py | 0 src/toast/{ => unported}/todmap/atm.py | 0 src/toast/{ => unported}/todmap/conviqt.py | 0 src/toast/{ => unported}/todmap/groundfilter.py | 0 src/toast/{ => unported}/todmap/madam.py | 0 src/toast/{ => unported}/todmap/mapmaker.py | 0 src/toast/{ => unported}/todmap/mapsampler.py | 0 src/toast/{ => unported}/todmap/pointing.py | 0 src/toast/{ => unported}/todmap/pointing_math.py | 0 src/toast/{ => unported}/todmap/pysm.py | 0 src/toast/{ => unported}/todmap/sim_det_atm.py | 0 src/toast/{ => unported}/todmap/sim_det_dipole.py | 0 src/toast/{ => unported}/todmap/sim_det_map.py | 0 src/toast/{ => unported}/todmap/sim_det_pysm.py | 0 src/toast/{ => unported}/todmap/sim_tod.py | 0 src/toast/{ => unported}/todmap/sss.py | 0 src/toast/{ => unported}/todmap/todmap_math.py | 0 66 files changed, 2 deletions(-) rename src/toast/{ => unported}/cache.py (100%) rename src/toast/{ => unported}/fod/CMakeLists.txt (100%) rename src/toast/{ => unported}/fod/__init__.py (100%) rename src/toast/{ => unported}/fod/noise_estimation.py (100%) rename src/toast/{ => unported}/fod/psd_math.py (100%) rename src/toast/{ => unported}/map/CMakeLists.txt (100%) rename src/toast/{ => unported}/map/__init__.py (100%) rename src/toast/{ => unported}/map/cov.py (100%) rename src/toast/{ => unported}/map/pixels.py (100%) rename src/toast/{ => unported}/pipeline_tools/CMakeLists.txt (100%) rename src/toast/{ => unported}/pipeline_tools/__init__.py (100%) rename src/toast/{ => unported}/pipeline_tools/atm.py (100%) rename src/toast/{ => unported}/pipeline_tools/binning.py (100%) rename src/toast/{ => unported}/pipeline_tools/classes.py (100%) rename src/toast/{ => unported}/pipeline_tools/debug.py (100%) rename src/toast/{ => unported}/pipeline_tools/dipole.py (100%) rename src/toast/{ => unported}/pipeline_tools/dist.py (100%) rename src/toast/{ => unported}/pipeline_tools/export.py (100%) rename src/toast/{ => unported}/pipeline_tools/filters.py (100%) rename src/toast/{ => unported}/pipeline_tools/gain.py (100%) rename src/toast/{ => unported}/pipeline_tools/madam.py (100%) rename src/toast/{ => unported}/pipeline_tools/mapmaker.py (100%) rename src/toast/{ => unported}/pipeline_tools/noise.py (100%) rename src/toast/{ => unported}/pipeline_tools/pointing.py (100%) rename src/toast/{ => unported}/pipeline_tools/sky_signal.py (100%) rename src/toast/{ => unported}/pipeline_tools/sss.py (100%) rename src/toast/{ => unported}/pipeline_tools/todground.py (100%) rename src/toast/{ => 
unported}/pipeline_tools/todsatellite.py (100%) rename src/toast/{ => unported}/tod/CMakeLists.txt (100%) rename src/toast/{ => unported}/tod/__init__.py (100%) rename src/toast/{ => unported}/tod/applygain.py (100%) rename src/toast/{ => unported}/tod/conviqt_example.txt (100%) rename src/toast/{ => unported}/tod/gainscrambler.py (100%) rename src/toast/{ => unported}/tod/interval.py (100%) rename src/toast/{ => unported}/tod/memorycounter.py (100%) rename src/toast/{ => unported}/tod/noise.py (100%) rename src/toast/{ => unported}/tod/polyfilter.py (100%) rename src/toast/{ => unported}/tod/sim_det_noise.py (100%) rename src/toast/{ => unported}/tod/sim_focalplane.py (100%) rename src/toast/{ => unported}/tod/sim_interval.py (100%) rename src/toast/{ => unported}/tod/sim_noise.py (100%) rename src/toast/{ => unported}/tod/spt3g.py (100%) rename src/toast/{ => unported}/tod/spt3g_utils.py (100%) rename src/toast/{ => unported}/tod/tidas.py (100%) rename src/toast/{ => unported}/tod/tidas_utils.py (100%) rename src/toast/{ => unported}/tod/tod.py (100%) rename src/toast/{ => unported}/tod/tod_math.py (100%) rename src/toast/{ => unported}/todmap/CMakeLists.txt (100%) rename src/toast/{ => unported}/todmap/__init__.py (100%) rename src/toast/{ => unported}/todmap/atm.py (100%) rename src/toast/{ => unported}/todmap/conviqt.py (100%) rename src/toast/{ => unported}/todmap/groundfilter.py (100%) rename src/toast/{ => unported}/todmap/madam.py (100%) rename src/toast/{ => unported}/todmap/mapmaker.py (100%) rename src/toast/{ => unported}/todmap/mapsampler.py (100%) rename src/toast/{ => unported}/todmap/pointing.py (100%) rename src/toast/{ => unported}/todmap/pointing_math.py (100%) rename src/toast/{ => unported}/todmap/pysm.py (100%) rename src/toast/{ => unported}/todmap/sim_det_atm.py (100%) rename src/toast/{ => unported}/todmap/sim_det_dipole.py (100%) rename src/toast/{ => unported}/todmap/sim_det_map.py (100%) rename src/toast/{ => unported}/todmap/sim_det_pysm.py (100%) rename src/toast/{ => unported}/todmap/sim_tod.py (100%) rename src/toast/{ => unported}/todmap/sss.py (100%) rename src/toast/{ => unported}/todmap/todmap_math.py (100%) diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index e30299914..19878569d 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -83,7 +83,6 @@ install(FILES mpi.py timing.py traits.py - cache.py config.py pixels.py pixels_io.py @@ -106,7 +105,6 @@ install(FILES healpix.py weather.py schedule.py - spt3g.py "RELEASE" DESTINATION ${PYTHON_SITE}/toast ) diff --git a/src/toast/cache.py b/src/toast/unported/cache.py similarity index 100% rename from src/toast/cache.py rename to src/toast/unported/cache.py diff --git a/src/toast/fod/CMakeLists.txt b/src/toast/unported/fod/CMakeLists.txt similarity index 100% rename from src/toast/fod/CMakeLists.txt rename to src/toast/unported/fod/CMakeLists.txt diff --git a/src/toast/fod/__init__.py b/src/toast/unported/fod/__init__.py similarity index 100% rename from src/toast/fod/__init__.py rename to src/toast/unported/fod/__init__.py diff --git a/src/toast/fod/noise_estimation.py b/src/toast/unported/fod/noise_estimation.py similarity index 100% rename from src/toast/fod/noise_estimation.py rename to src/toast/unported/fod/noise_estimation.py diff --git a/src/toast/fod/psd_math.py b/src/toast/unported/fod/psd_math.py similarity index 100% rename from src/toast/fod/psd_math.py rename to src/toast/unported/fod/psd_math.py diff --git a/src/toast/map/CMakeLists.txt 
b/src/toast/unported/map/CMakeLists.txt similarity index 100% rename from src/toast/map/CMakeLists.txt rename to src/toast/unported/map/CMakeLists.txt diff --git a/src/toast/map/__init__.py b/src/toast/unported/map/__init__.py similarity index 100% rename from src/toast/map/__init__.py rename to src/toast/unported/map/__init__.py diff --git a/src/toast/map/cov.py b/src/toast/unported/map/cov.py similarity index 100% rename from src/toast/map/cov.py rename to src/toast/unported/map/cov.py diff --git a/src/toast/map/pixels.py b/src/toast/unported/map/pixels.py similarity index 100% rename from src/toast/map/pixels.py rename to src/toast/unported/map/pixels.py diff --git a/src/toast/pipeline_tools/CMakeLists.txt b/src/toast/unported/pipeline_tools/CMakeLists.txt similarity index 100% rename from src/toast/pipeline_tools/CMakeLists.txt rename to src/toast/unported/pipeline_tools/CMakeLists.txt diff --git a/src/toast/pipeline_tools/__init__.py b/src/toast/unported/pipeline_tools/__init__.py similarity index 100% rename from src/toast/pipeline_tools/__init__.py rename to src/toast/unported/pipeline_tools/__init__.py diff --git a/src/toast/pipeline_tools/atm.py b/src/toast/unported/pipeline_tools/atm.py similarity index 100% rename from src/toast/pipeline_tools/atm.py rename to src/toast/unported/pipeline_tools/atm.py diff --git a/src/toast/pipeline_tools/binning.py b/src/toast/unported/pipeline_tools/binning.py similarity index 100% rename from src/toast/pipeline_tools/binning.py rename to src/toast/unported/pipeline_tools/binning.py diff --git a/src/toast/pipeline_tools/classes.py b/src/toast/unported/pipeline_tools/classes.py similarity index 100% rename from src/toast/pipeline_tools/classes.py rename to src/toast/unported/pipeline_tools/classes.py diff --git a/src/toast/pipeline_tools/debug.py b/src/toast/unported/pipeline_tools/debug.py similarity index 100% rename from src/toast/pipeline_tools/debug.py rename to src/toast/unported/pipeline_tools/debug.py diff --git a/src/toast/pipeline_tools/dipole.py b/src/toast/unported/pipeline_tools/dipole.py similarity index 100% rename from src/toast/pipeline_tools/dipole.py rename to src/toast/unported/pipeline_tools/dipole.py diff --git a/src/toast/pipeline_tools/dist.py b/src/toast/unported/pipeline_tools/dist.py similarity index 100% rename from src/toast/pipeline_tools/dist.py rename to src/toast/unported/pipeline_tools/dist.py diff --git a/src/toast/pipeline_tools/export.py b/src/toast/unported/pipeline_tools/export.py similarity index 100% rename from src/toast/pipeline_tools/export.py rename to src/toast/unported/pipeline_tools/export.py diff --git a/src/toast/pipeline_tools/filters.py b/src/toast/unported/pipeline_tools/filters.py similarity index 100% rename from src/toast/pipeline_tools/filters.py rename to src/toast/unported/pipeline_tools/filters.py diff --git a/src/toast/pipeline_tools/gain.py b/src/toast/unported/pipeline_tools/gain.py similarity index 100% rename from src/toast/pipeline_tools/gain.py rename to src/toast/unported/pipeline_tools/gain.py diff --git a/src/toast/pipeline_tools/madam.py b/src/toast/unported/pipeline_tools/madam.py similarity index 100% rename from src/toast/pipeline_tools/madam.py rename to src/toast/unported/pipeline_tools/madam.py diff --git a/src/toast/pipeline_tools/mapmaker.py b/src/toast/unported/pipeline_tools/mapmaker.py similarity index 100% rename from src/toast/pipeline_tools/mapmaker.py rename to src/toast/unported/pipeline_tools/mapmaker.py diff --git a/src/toast/pipeline_tools/noise.py 
b/src/toast/unported/pipeline_tools/noise.py similarity index 100% rename from src/toast/pipeline_tools/noise.py rename to src/toast/unported/pipeline_tools/noise.py diff --git a/src/toast/pipeline_tools/pointing.py b/src/toast/unported/pipeline_tools/pointing.py similarity index 100% rename from src/toast/pipeline_tools/pointing.py rename to src/toast/unported/pipeline_tools/pointing.py diff --git a/src/toast/pipeline_tools/sky_signal.py b/src/toast/unported/pipeline_tools/sky_signal.py similarity index 100% rename from src/toast/pipeline_tools/sky_signal.py rename to src/toast/unported/pipeline_tools/sky_signal.py diff --git a/src/toast/pipeline_tools/sss.py b/src/toast/unported/pipeline_tools/sss.py similarity index 100% rename from src/toast/pipeline_tools/sss.py rename to src/toast/unported/pipeline_tools/sss.py diff --git a/src/toast/pipeline_tools/todground.py b/src/toast/unported/pipeline_tools/todground.py similarity index 100% rename from src/toast/pipeline_tools/todground.py rename to src/toast/unported/pipeline_tools/todground.py diff --git a/src/toast/pipeline_tools/todsatellite.py b/src/toast/unported/pipeline_tools/todsatellite.py similarity index 100% rename from src/toast/pipeline_tools/todsatellite.py rename to src/toast/unported/pipeline_tools/todsatellite.py diff --git a/src/toast/tod/CMakeLists.txt b/src/toast/unported/tod/CMakeLists.txt similarity index 100% rename from src/toast/tod/CMakeLists.txt rename to src/toast/unported/tod/CMakeLists.txt diff --git a/src/toast/tod/__init__.py b/src/toast/unported/tod/__init__.py similarity index 100% rename from src/toast/tod/__init__.py rename to src/toast/unported/tod/__init__.py diff --git a/src/toast/tod/applygain.py b/src/toast/unported/tod/applygain.py similarity index 100% rename from src/toast/tod/applygain.py rename to src/toast/unported/tod/applygain.py diff --git a/src/toast/tod/conviqt_example.txt b/src/toast/unported/tod/conviqt_example.txt similarity index 100% rename from src/toast/tod/conviqt_example.txt rename to src/toast/unported/tod/conviqt_example.txt diff --git a/src/toast/tod/gainscrambler.py b/src/toast/unported/tod/gainscrambler.py similarity index 100% rename from src/toast/tod/gainscrambler.py rename to src/toast/unported/tod/gainscrambler.py diff --git a/src/toast/tod/interval.py b/src/toast/unported/tod/interval.py similarity index 100% rename from src/toast/tod/interval.py rename to src/toast/unported/tod/interval.py diff --git a/src/toast/tod/memorycounter.py b/src/toast/unported/tod/memorycounter.py similarity index 100% rename from src/toast/tod/memorycounter.py rename to src/toast/unported/tod/memorycounter.py diff --git a/src/toast/tod/noise.py b/src/toast/unported/tod/noise.py similarity index 100% rename from src/toast/tod/noise.py rename to src/toast/unported/tod/noise.py diff --git a/src/toast/tod/polyfilter.py b/src/toast/unported/tod/polyfilter.py similarity index 100% rename from src/toast/tod/polyfilter.py rename to src/toast/unported/tod/polyfilter.py diff --git a/src/toast/tod/sim_det_noise.py b/src/toast/unported/tod/sim_det_noise.py similarity index 100% rename from src/toast/tod/sim_det_noise.py rename to src/toast/unported/tod/sim_det_noise.py diff --git a/src/toast/tod/sim_focalplane.py b/src/toast/unported/tod/sim_focalplane.py similarity index 100% rename from src/toast/tod/sim_focalplane.py rename to src/toast/unported/tod/sim_focalplane.py diff --git a/src/toast/tod/sim_interval.py b/src/toast/unported/tod/sim_interval.py similarity index 100% rename from 
src/toast/tod/sim_interval.py rename to src/toast/unported/tod/sim_interval.py diff --git a/src/toast/tod/sim_noise.py b/src/toast/unported/tod/sim_noise.py similarity index 100% rename from src/toast/tod/sim_noise.py rename to src/toast/unported/tod/sim_noise.py diff --git a/src/toast/tod/spt3g.py b/src/toast/unported/tod/spt3g.py similarity index 100% rename from src/toast/tod/spt3g.py rename to src/toast/unported/tod/spt3g.py diff --git a/src/toast/tod/spt3g_utils.py b/src/toast/unported/tod/spt3g_utils.py similarity index 100% rename from src/toast/tod/spt3g_utils.py rename to src/toast/unported/tod/spt3g_utils.py diff --git a/src/toast/tod/tidas.py b/src/toast/unported/tod/tidas.py similarity index 100% rename from src/toast/tod/tidas.py rename to src/toast/unported/tod/tidas.py diff --git a/src/toast/tod/tidas_utils.py b/src/toast/unported/tod/tidas_utils.py similarity index 100% rename from src/toast/tod/tidas_utils.py rename to src/toast/unported/tod/tidas_utils.py diff --git a/src/toast/tod/tod.py b/src/toast/unported/tod/tod.py similarity index 100% rename from src/toast/tod/tod.py rename to src/toast/unported/tod/tod.py diff --git a/src/toast/tod/tod_math.py b/src/toast/unported/tod/tod_math.py similarity index 100% rename from src/toast/tod/tod_math.py rename to src/toast/unported/tod/tod_math.py diff --git a/src/toast/todmap/CMakeLists.txt b/src/toast/unported/todmap/CMakeLists.txt similarity index 100% rename from src/toast/todmap/CMakeLists.txt rename to src/toast/unported/todmap/CMakeLists.txt diff --git a/src/toast/todmap/__init__.py b/src/toast/unported/todmap/__init__.py similarity index 100% rename from src/toast/todmap/__init__.py rename to src/toast/unported/todmap/__init__.py diff --git a/src/toast/todmap/atm.py b/src/toast/unported/todmap/atm.py similarity index 100% rename from src/toast/todmap/atm.py rename to src/toast/unported/todmap/atm.py diff --git a/src/toast/todmap/conviqt.py b/src/toast/unported/todmap/conviqt.py similarity index 100% rename from src/toast/todmap/conviqt.py rename to src/toast/unported/todmap/conviqt.py diff --git a/src/toast/todmap/groundfilter.py b/src/toast/unported/todmap/groundfilter.py similarity index 100% rename from src/toast/todmap/groundfilter.py rename to src/toast/unported/todmap/groundfilter.py diff --git a/src/toast/todmap/madam.py b/src/toast/unported/todmap/madam.py similarity index 100% rename from src/toast/todmap/madam.py rename to src/toast/unported/todmap/madam.py diff --git a/src/toast/todmap/mapmaker.py b/src/toast/unported/todmap/mapmaker.py similarity index 100% rename from src/toast/todmap/mapmaker.py rename to src/toast/unported/todmap/mapmaker.py diff --git a/src/toast/todmap/mapsampler.py b/src/toast/unported/todmap/mapsampler.py similarity index 100% rename from src/toast/todmap/mapsampler.py rename to src/toast/unported/todmap/mapsampler.py diff --git a/src/toast/todmap/pointing.py b/src/toast/unported/todmap/pointing.py similarity index 100% rename from src/toast/todmap/pointing.py rename to src/toast/unported/todmap/pointing.py diff --git a/src/toast/todmap/pointing_math.py b/src/toast/unported/todmap/pointing_math.py similarity index 100% rename from src/toast/todmap/pointing_math.py rename to src/toast/unported/todmap/pointing_math.py diff --git a/src/toast/todmap/pysm.py b/src/toast/unported/todmap/pysm.py similarity index 100% rename from src/toast/todmap/pysm.py rename to src/toast/unported/todmap/pysm.py diff --git a/src/toast/todmap/sim_det_atm.py b/src/toast/unported/todmap/sim_det_atm.py 
similarity index 100% rename from src/toast/todmap/sim_det_atm.py rename to src/toast/unported/todmap/sim_det_atm.py diff --git a/src/toast/todmap/sim_det_dipole.py b/src/toast/unported/todmap/sim_det_dipole.py similarity index 100% rename from src/toast/todmap/sim_det_dipole.py rename to src/toast/unported/todmap/sim_det_dipole.py diff --git a/src/toast/todmap/sim_det_map.py b/src/toast/unported/todmap/sim_det_map.py similarity index 100% rename from src/toast/todmap/sim_det_map.py rename to src/toast/unported/todmap/sim_det_map.py diff --git a/src/toast/todmap/sim_det_pysm.py b/src/toast/unported/todmap/sim_det_pysm.py similarity index 100% rename from src/toast/todmap/sim_det_pysm.py rename to src/toast/unported/todmap/sim_det_pysm.py diff --git a/src/toast/todmap/sim_tod.py b/src/toast/unported/todmap/sim_tod.py similarity index 100% rename from src/toast/todmap/sim_tod.py rename to src/toast/unported/todmap/sim_tod.py diff --git a/src/toast/todmap/sss.py b/src/toast/unported/todmap/sss.py similarity index 100% rename from src/toast/todmap/sss.py rename to src/toast/unported/todmap/sss.py diff --git a/src/toast/todmap/todmap_math.py b/src/toast/unported/todmap/todmap_math.py similarity index 100% rename from src/toast/todmap/todmap_math.py rename to src/toast/unported/todmap/todmap_math.py From a5bb5b3eff42133548acd8fadc91d3a28d17b203 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 07:22:16 -0800 Subject: [PATCH 035/690] Move more unported source out of the way --- src/toast/CMakeLists.txt | 7 +------ src/toast/tests/CMakeLists.txt | 15 --------------- src/toast/{ => unported}/tests/binned.py | 0 src/toast/{ => unported}/tests/cache.py | 0 src/toast/{ => unported}/tests/map_ground.py | 0 src/toast/{ => unported}/tests/map_satellite.py | 0 src/toast/{ => unported}/tests/ops_applygain.py | 0 src/toast/{ => unported}/tests/ops_dipole.py | 0 .../{ => unported}/tests/ops_gainscrambler.py | 0 .../{ => unported}/tests/ops_groundfilter.py | 0 src/toast/{ => unported}/tests/ops_madam.py | 0 src/toast/{ => unported}/tests/ops_mapmaker.py | 0 src/toast/{ => unported}/tests/ops_polyfilter.py | 0 src/toast/{ => unported}/tests/ops_sim_atm.py | 0 src/toast/{ => unported}/tests/ops_sim_pysm.py | 0 src/toast/{ => unported}/tests/ops_sim_sss.py | 0 src/toast/{ => unported}/tests/psd_math.py | 0 src/toast/{ => unported}/tests/spt3g.py | 0 src/toast/{ => unported}/tests/tidas.py | 0 src/toast/{ => unported}/tests/tod.py | 0 src/toast/{ => unported}/tests/tod_satellite.py | 0 21 files changed, 1 insertion(+), 21 deletions(-) rename src/toast/{ => unported}/tests/binned.py (100%) rename src/toast/{ => unported}/tests/cache.py (100%) rename src/toast/{ => unported}/tests/map_ground.py (100%) rename src/toast/{ => unported}/tests/map_satellite.py (100%) rename src/toast/{ => unported}/tests/ops_applygain.py (100%) rename src/toast/{ => unported}/tests/ops_dipole.py (100%) rename src/toast/{ => unported}/tests/ops_gainscrambler.py (100%) rename src/toast/{ => unported}/tests/ops_groundfilter.py (100%) rename src/toast/{ => unported}/tests/ops_madam.py (100%) rename src/toast/{ => unported}/tests/ops_mapmaker.py (100%) rename src/toast/{ => unported}/tests/ops_polyfilter.py (100%) rename src/toast/{ => unported}/tests/ops_sim_atm.py (100%) rename src/toast/{ => unported}/tests/ops_sim_pysm.py (100%) rename src/toast/{ => unported}/tests/ops_sim_sss.py (100%) rename src/toast/{ => unported}/tests/psd_math.py (100%) rename src/toast/{ => unported}/tests/spt3g.py (100%) rename src/toast/{ => 
unported}/tests/tidas.py (100%) rename src/toast/{ => unported}/tests/tod.py (100%) rename src/toast/{ => unported}/tests/tod_satellite.py (100%) diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 19878569d..39328070e 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -111,10 +111,5 @@ install(FILES # Process the sub directories add_subdirectory(tests) -add_subdirectory(tod) -add_subdirectory(map) -add_subdirectory(todmap) -add_subdirectory(fod) -add_subdirectory(pipeline_tools) -add_subdirectory(future_ops) +add_subdirectory(ops) add_subdirectory(templates) diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 7102866a4..c6f54a026 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -18,26 +18,11 @@ install(FILES timing.py pixels.py ops_sim_satellite.py - ops_applygain.py ops_sim_tod_noise.py ops_mapmaker_utils.py covariance.py ops_pointing_healpix.py - ops_dipole.py - ops_groundfilter.py sim_focalplane.py - ops_polyfilter.py ops_memory_counter.py - ops_gainscrambler.py - psd_math.py - ops_madam.py - ops_mapmaker.py - map_satellite.py - map_ground.py - binned.py - ops_sim_pysm.py - tidas.py - ops_sim_atm.py - ops_sim_sss.py DESTINATION ${PYTHON_SITE}/toast/tests ) diff --git a/src/toast/tests/binned.py b/src/toast/unported/tests/binned.py similarity index 100% rename from src/toast/tests/binned.py rename to src/toast/unported/tests/binned.py diff --git a/src/toast/tests/cache.py b/src/toast/unported/tests/cache.py similarity index 100% rename from src/toast/tests/cache.py rename to src/toast/unported/tests/cache.py diff --git a/src/toast/tests/map_ground.py b/src/toast/unported/tests/map_ground.py similarity index 100% rename from src/toast/tests/map_ground.py rename to src/toast/unported/tests/map_ground.py diff --git a/src/toast/tests/map_satellite.py b/src/toast/unported/tests/map_satellite.py similarity index 100% rename from src/toast/tests/map_satellite.py rename to src/toast/unported/tests/map_satellite.py diff --git a/src/toast/tests/ops_applygain.py b/src/toast/unported/tests/ops_applygain.py similarity index 100% rename from src/toast/tests/ops_applygain.py rename to src/toast/unported/tests/ops_applygain.py diff --git a/src/toast/tests/ops_dipole.py b/src/toast/unported/tests/ops_dipole.py similarity index 100% rename from src/toast/tests/ops_dipole.py rename to src/toast/unported/tests/ops_dipole.py diff --git a/src/toast/tests/ops_gainscrambler.py b/src/toast/unported/tests/ops_gainscrambler.py similarity index 100% rename from src/toast/tests/ops_gainscrambler.py rename to src/toast/unported/tests/ops_gainscrambler.py diff --git a/src/toast/tests/ops_groundfilter.py b/src/toast/unported/tests/ops_groundfilter.py similarity index 100% rename from src/toast/tests/ops_groundfilter.py rename to src/toast/unported/tests/ops_groundfilter.py diff --git a/src/toast/tests/ops_madam.py b/src/toast/unported/tests/ops_madam.py similarity index 100% rename from src/toast/tests/ops_madam.py rename to src/toast/unported/tests/ops_madam.py diff --git a/src/toast/tests/ops_mapmaker.py b/src/toast/unported/tests/ops_mapmaker.py similarity index 100% rename from src/toast/tests/ops_mapmaker.py rename to src/toast/unported/tests/ops_mapmaker.py diff --git a/src/toast/tests/ops_polyfilter.py b/src/toast/unported/tests/ops_polyfilter.py similarity index 100% rename from src/toast/tests/ops_polyfilter.py rename to src/toast/unported/tests/ops_polyfilter.py diff --git 
a/src/toast/tests/ops_sim_atm.py b/src/toast/unported/tests/ops_sim_atm.py similarity index 100% rename from src/toast/tests/ops_sim_atm.py rename to src/toast/unported/tests/ops_sim_atm.py diff --git a/src/toast/tests/ops_sim_pysm.py b/src/toast/unported/tests/ops_sim_pysm.py similarity index 100% rename from src/toast/tests/ops_sim_pysm.py rename to src/toast/unported/tests/ops_sim_pysm.py diff --git a/src/toast/tests/ops_sim_sss.py b/src/toast/unported/tests/ops_sim_sss.py similarity index 100% rename from src/toast/tests/ops_sim_sss.py rename to src/toast/unported/tests/ops_sim_sss.py diff --git a/src/toast/tests/psd_math.py b/src/toast/unported/tests/psd_math.py similarity index 100% rename from src/toast/tests/psd_math.py rename to src/toast/unported/tests/psd_math.py diff --git a/src/toast/tests/spt3g.py b/src/toast/unported/tests/spt3g.py similarity index 100% rename from src/toast/tests/spt3g.py rename to src/toast/unported/tests/spt3g.py diff --git a/src/toast/tests/tidas.py b/src/toast/unported/tests/tidas.py similarity index 100% rename from src/toast/tests/tidas.py rename to src/toast/unported/tests/tidas.py diff --git a/src/toast/tests/tod.py b/src/toast/unported/tests/tod.py similarity index 100% rename from src/toast/tests/tod.py rename to src/toast/unported/tests/tod.py diff --git a/src/toast/tests/tod_satellite.py b/src/toast/unported/tests/tod_satellite.py similarity index 100% rename from src/toast/tests/tod_satellite.py rename to src/toast/unported/tests/tod_satellite.py From ee874001a90f067d59537279de501ec78b619372 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 1 Dec 2020 07:49:28 -0800 Subject: [PATCH 036/690] Fix imports --- src/toast/__init__.py | 2 -- src/toast/config.py | 10 ++++----- src/toast/instrument.py | 30 ------------------------- src/toast/ops/CMakeLists.txt | 2 +- src/toast/ops/__init__.py | 2 +- src/toast/ops/mapmaker.py | 4 ++-- src/toast/ops/mapmaker_binning.py | 4 ++-- src/toast/ops/mapmaker_projection.py | 4 ++-- src/toast/ops/mapmaker_utils.py | 4 ++-- src/toast/tests/_helpers.py | 2 +- src/toast/tests/config.py | 2 +- src/toast/tests/covariance.py | 2 +- src/toast/tests/ops_mapmaker_utils.py | 2 +- src/toast/tests/ops_memory_counter.py | 2 +- src/toast/tests/ops_pointing_healpix.py | 2 +- src/toast/tests/ops_sim_satellite.py | 2 +- src/toast/tests/ops_sim_tod_noise.py | 4 ++-- 17 files changed, 23 insertions(+), 57 deletions(-) diff --git a/src/toast/__init__.py b/src/toast/__init__.py index 4c28b0e17..75df72834 100644 --- a/src/toast/__init__.py +++ b/src/toast/__init__.py @@ -76,8 +76,6 @@ from .config import load_config -from .operator import Operator - from .instrument import Telescope, Focalplane, Site from .instrument_sim import fake_hexagon_focalplane diff --git a/src/toast/config.py b/src/toast/config.py index cccf3c441..367582a0f 100644 --- a/src/toast/config.py +++ b/src/toast/config.py @@ -23,11 +23,9 @@ from .instrument import Focalplane, Telescope from . import instrument -from .operator import Operator - from .traits import TraitConfig -from . import future_ops as ops +from . 
import ops as ops def build_config(objects): @@ -43,10 +41,10 @@ def build_config(objects): """ conf = OrderedDict() for o in objects: - if not isinstance(o, Operator): - raise RuntimeError("The object list should contain Operator instances") + if not isinstance(o, TraitConfig): + raise RuntimeError("The object list should contain TraitConfig instances") if o.name is None: - raise RuntimeError("Cannot buid config from Operators without a name") + raise RuntimeError("Cannot buid config from objects without a name") conf = o.get_config(input=conf) return conf diff --git a/src/toast/instrument.py b/src/toast/instrument.py index f9af16231..c743bbe1d 100644 --- a/src/toast/instrument.py +++ b/src/toast/instrument.py @@ -11,8 +11,6 @@ from .timing import function_timer, Timer -from .tod import AnalyticNoise - from .utils import Logger, Environment, name_UID from . import qarray @@ -188,34 +186,6 @@ def detector_quats(self): self._detquats[detname] = detdata["quat"] return self._detquats - @property - def noise(self): - if self._noise is None: - fmin = {} - fknee = {} - alpha = {} - NET = {} - rates = {} - for detname in self.detectors: - detdata = self.detector_data[detname] - if "fsample" in detdata: - rates[detname] = detdata["fsample"] - else: - rates[detname] = self.sample_rate - fmin[detname] = detdata["fmin"] - fknee[detname] = detdata["fknee"] - alpha[detname] = detdata["alpha"] - NET[detname] = detdata["NET"] - self._noise = AnalyticNoise( - rate=rates, - fmin=fmin, - detectors=self.detectors, - fknee=fknee, - alpha=alpha, - NET=NET, - ) - return self._noise - def __repr__(self): value = " Date: Tue, 1 Dec 2020 11:05:39 -0800 Subject: [PATCH 037/690] Begin work on madam operator port. --- src/toast/ops/CMakeLists.txt | 3 + src/toast/ops/__init__.py | 4 + src/toast/ops/madam.py | 304 +++++++++++++++++++++++++++++++++++ 3 files changed, 311 insertions(+) create mode 100644 src/toast/ops/madam.py diff --git a/src/toast/ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt index 1ceb304c3..0ba5eb2cb 100644 --- a/src/toast/ops/CMakeLists.txt +++ b/src/toast/ops/CMakeLists.txt @@ -17,5 +17,8 @@ install(FILES scan_map.py mapmaker_utils.py mapmaker_binning.py + mapmaker_projection.py + mapmaker_templates.py + madam.py DESTINATION ${PYTHON_SITE}/toast/ops ) diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index 5146b43f8..8c571e035 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -34,3 +34,7 @@ ) from .mapmaker_binning import BinMap + +from .mapmaker import MapMaker + +from .madam import Madam diff --git a/src/toast/ops/madam.py b/src/toast/ops/madam.py new file mode 100644 index 000000000..12963740a --- /dev/null +++ b/src/toast/ops/madam.py @@ -0,0 +1,304 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
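
[For reference, a minimal standalone sketch of the configuration pattern that build_config() above relies on, written with plain traitlets. MiniOp and mini_build_config are invented stand-ins for TraitConfig-derived operators and are not part of the toast API; this only illustrates the "named object -> nested config dict" round trip.]

from collections import OrderedDict
from traitlets import HasTraits, Int, Unicode

class MiniOp(HasTraits):
    """Invented stand-in for a TraitConfig-derived operator."""
    name = Unicode("miniop", help="Unique name used as the config key")
    nside = Int(64, help="Healpix NSIDE resolution")

def mini_build_config(objects):
    # Mirror of the loop above: every object must have a name, and the
    # nested config is keyed on that name.
    conf = OrderedDict()
    for o in objects:
        if o.name is None:
            raise RuntimeError("Cannot build config from objects without a name")
        conf[o.name] = OrderedDict(o.trait_values())
    return conf

# The resulting dict can then be dumped to TOML / JSON and used later to
# re-instantiate objects with identical trait values.
print(mini_build_config([MiniOp(name="pointing", nside=128)]))
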
+ +from ..mpi import MPI, use_mpi + +import traitlets + +import numpy as np + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool, Dict + +from ..timing import function_timer + +from .operator import Operator + +from .clear import Clear + +from .copy import Copy + + +madam = None +if use_mpi: + try: + import libmadam_wrapper as madam + except ImportError: + madam = None + + +@trait_docs +class Madam(Operator): + """Operator which passes data to libmadam for map-making.""" + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + params = Dict(dict(), help="Parameters to pass to madam") + + times = Unicode("times", help="Observation shared key for timestamps") + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + det_flags = Unicode( + None, allow_none=True, help="Observation detdata key for flags to use" + ) + + det_flag_mask = Int(0, help="Bit mask value for optional detector flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional shared flagging") + + pixels = Unicode("pixels", help="Observation detdata key for output pixel indices") + + weights = Unicode("weights", help="Observation detdata key for output weights") + + view = Unicode(None, allow_none=True, help="Use this view of the data in all observations") + + pixels_nested = Bool(True, help="True if pixel indices are in NESTED ordering") + + det_out = Unicode( + None, + allow_none=True, + help="Observation detdata key for output destriped timestreams", + ) + + noise_model = Unicode( + "noise_model", help="Observation key containing the noise model" + ) + + purge = Bool( + False, help="If True, clear all observation data after copying to madam buffers" + ) + + purge_det_data = Bool( + False, + help="If True, clear all observation detector data after copying to madam buffers", + ) + + purge_pointing = Bool( + False, + help="If True, clear all observation detector pointing data after copying to madam buffers", + ) + + purge_flags = Bool( + False, + help="If True, clear all observation detector flags after copying to madam buffers", + ) + + mcmode = Bool( + False, + help="If true, Madam will store auxiliary information such as pixel matrices and noise filter.", + ) + + conserve_memory = Int(0, help="Stagger the Madam buffer staging on each node.") + + translate_timestamps = Bool( + False, help="Translate timestamps to enforce monotonity." 
+ ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self._cached = False + + @classmethod + def available(cls): + """(bool): True if libmadam is found in the library search path.""" + return (madam is not None and madam.available) + + @function_timer + def _exec(self, data, detectors=None): + log = Logger.get() + + if not self.available: + raise RuntimeError("libmadam is not available") + + if len(data.obs) == 0: + raise RuntimeError( + "Madam requires every supplied data object to " + "contain at least one observation" + ) + + # Check that the detector data is set + if self.det_data is None: + raise RuntimeError("You must set the det_data trait before calling exec()") + + # Check that the pointing is set + if self.pixels is None or self.weights is None: + raise RuntimeError( + "You must set the pixels and weights before calling exec()" + ) + + # Check purging + if self.purge: + # Purging everything + self.purge_det_data = True + self.purge_pointing = True + self.purge_flags = True + + # Madam-compatible data buffers + self._madam_timestamps = None + self._madam_pixels = None + self._madam_pixweights = None + self._madam_signal = None + + + + return + + def __del__(self): + if self._cached: + madam.clear_caches() + self._cached = False + + def _finalize(self, data, **kwargs): + return + + def _requires(self): + req = { + "meta": [self.noise_model], + "shared": [ + self.times, + ], + "detdata": [ + self.det_data, + self.pixels, + self.weights + ], + "intervals": list() + } + if self.view is not None: + req["intervals"].append(self.view) + if self.shared_flags is not None: + req["shared"].append(self.shared_flags) + if self.det_flags is not None: + req["detdata"].append(self.det_flags) + return req + + def _provides(self): + prov = {"detdata": list()} + if self.det_out is not None: + prov["detdata"].append(self.det_out) + return prov + + def _accelerators(self): + return list() + + @function_timer + def _prepare(self, data, detectors): + """Examine the data and determine quantities needed to set up Madam data""" + log = Logger.get() + timer = Timer() + timer.start() + + # Madam requires a fixed set of detectors and pointing matrix non-zeros. + # Here we find the superset of local detectors used, and also the number + # of pointing matrix elements. + + nsamp = 0 + all_dets = set() + nnz_full = None + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + all_dets.add(dets) + + # Are we using a view of the data? If so, we will only consider data in + # those valid intervals. + if self.view is not None: + if self.view not in ob.intervals: + msg = "View '{}' does not exist in observation {}".format(self.view, ob.name) + raise RuntimeError(msg) + + nsamp += ob.n_local_samples + + # Check that the detector data and pointing exists in the observation + if self.det_data not in ob.detdata: + msg = "Detector data '{}' does not exist in observation '{}'".format(self.det_data, ob.name) + raise RuntimeError(msg) + if self.pixels not in ob.detdata: + msg = "Detector pixels '{}' does not exist in observation '{}'".format(self.pixels, ob.name) + raise RuntimeError(msg) + if self.weights not in ob.detdata: + msg = "Detector data '{}' does not exist in observation '{}'".format(self.weights, ob.name) + raise RuntimeError(msg) + + # Get the number of pointing weights and verify that it is constant + # across observations. 
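
[A self-contained illustration of the nnz bookkeeping that follows; the array shapes are invented. The trailing dimension of the per-detector pointing weights gives the number of pointing-matrix non-zeros per sample, and it must agree across all observations passed to Madam.]

import numpy as np

def weights_nnz(wts):
    # A 1-D detector array means one weight per sample (temperature only);
    # otherwise the trailing dimension is the per-sample non-zero count.
    return 1 if wts.ndim == 1 else wts.shape[-1]

nnz_full = None
for wts in (np.ones((100, 3)), np.ones((50, 3))):  # two IQU observations
    ob_nnz = weights_nnz(wts)
    if nnz_full is None:
        nnz_full = ob_nnz
    elif ob_nnz != nnz_full:
        raise RuntimeError(f"{ob_nnz} pointing weights per sample, not {nnz_full}")
assert nnz_full == 3
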
+ ob_nnz = None + if len(ob.detdata[self.weights].detector_shape) == 1: + # The pointing weights just have one dimension (samples) + ob_full = 1 + else: + ob_full = ob.detdata[self.weights].detector_shape[-1] + + if nnz_full is None: + nnz_full = ob_nnz + elif ob_nnz != nnz_full: + msg = "observation '{}' has {} pointing weights per sample, not {}".format(ob.name, ob_nnz, nnz_full) + raise RuntimeError(msg) + + all_dets = list(all_dets) + ndet = len(all_dets) + + nnz = None + nnz_stride = None + if "temperature_only" in self.params and self.params["temperature_only"] in [ + "T", + "True", + "TRUE", + "true", + True, + ]: + # User has requested a temperature-only map. + if nnz_full not in [1, 3]: + raise RuntimeError( + "Madam cannot make a temperature map with nnz == {}".format(nnz_full) + ) + nnz = 1 + nnz_stride = nnz_full + else: + nnz = nnz_full + nnz_stride = 1 + + if "nside_map" not in self.params: + raise RuntimeError( + "Madam 'nside_map' must be set in the parameter dictionary" + ) + nside = int(self.params["nside_map"]) + + if data.comm.world_rank == 0 and "path_output" in self.params: + os.makedirs(self.params["path_output"], exist_ok=True) + + # Inspect the valid intervals across all observations to + # determine the number of samples per detector + + obs_period_ranges, psdfreqs, periods, nsamp = self._get_period_ranges( + dets, nsamp + ) + + self._comm.Barrier() + if self._rank == 0: + log.debug() + timer.report_clear("Collect dataset dimensions") + + return ( + all_dets, + nsamp, + ndet, + nnz, + nnz_full, + nnz_stride, + periods, + obs_period_ranges, + psdfreqs, + nside, + ) From de2e3a31ef2df06a0426b3d71ee18fe1f73c3439 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 7 Dec 2020 10:47:40 -0800 Subject: [PATCH 038/690] Work in progress on madam and ground sim --- pipelines/toast_ground_schedule.py | 2 +- src/toast/instrument.py | 1 - src/toast/observation.py | 41 +- src/toast/ops/__init__.py | 2 +- src/toast/ops/madam.py | 575 ++++- src/toast/ops/pointing_healpix.py | 93 +- src/toast/ops/sim_ground.py | 593 ++++-- src/toast/ops/sim_satellite.py | 4 +- src/toast/qarray.py | 90 + src/toast/schedule.py | 3112 +++------------------------- src/toast/schedule_build.py | 2882 ++++++++++++++++++++++++++ 11 files changed, 4292 insertions(+), 3103 deletions(-) create mode 100644 src/toast/schedule_build.py diff --git a/pipelines/toast_ground_schedule.py b/pipelines/toast_ground_schedule.py index 41aced87d..17df07c5d 100644 --- a/pipelines/toast_ground_schedule.py +++ b/pipelines/toast_ground_schedule.py @@ -14,7 +14,7 @@ from toast.mpi import get_world from toast.timing import GlobalTimers -from toast.schedule import run_scheduler +from toast.schedule_build import run_scheduler def main(): diff --git a/src/toast/instrument.py b/src/toast/instrument.py index c743bbe1d..94737e1a6 100644 --- a/src/toast/instrument.py +++ b/src/toast/instrument.py @@ -230,7 +230,6 @@ class Site(object): """ def __init__(self, name, lat, lon, alt_m, id=None, weather=None): - self.name = name # Strings get interpreted correctly pyEphem. # Floats must be in radians diff --git a/src/toast/observation.py b/src/toast/observation.py index 9770538be..fd6705379 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -582,7 +582,46 @@ def redistribute(self, process_rows): if process_rows == self.dist.process_rows: # Nothing to do! 
return - pass + newdist = DistDetSamp( + self._samples, + self._telescope.focalplane.detectors, + self._sample_sets, + self._detector_sets, + self._comm, + process_rows, + ) + + if newdist.comm_rank == 0: + # check that all processes have some data, otherwise print warning + for d in range(newdist.process_rows): + if len(newdist.dets[d]) == 0: + msg = "WARNING: process row rank {} has no detectors" + " assigned in new distribution.".format(d) + log.warning(msg) + for r in range(newdist.process_cols): + if newdist.samps[r][1] <= 0: + msg = "WARNING: process column rank {} has no data assigned " + "in new distribution.".format(r) + log.warning(msg) + + # Redistribute shared data + + newshared = SharedDataMgr( + self._comm, + newdist.comm_row, + newdist.comm_col, + ) + + # Redistribute detector data + + newdetdata = DetDataMgr( + newdist.dets[self.dist.comm_col_rank], + newdist.samps[self.dist.comm_row_rank][1], + ) + + # Redistribute intervals + + newintervals = IntervalMgr(self._comm, newdist.comm_row, newdist.comm_col) # Accelerator use diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index 8c571e035..e55d42a4d 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -37,4 +37,4 @@ from .mapmaker import MapMaker -from .madam import Madam +# from .madam import Madam diff --git a/src/toast/ops/madam.py b/src/toast/ops/madam.py index 12963740a..4863bfc90 100644 --- a/src/toast/ops/madam.py +++ b/src/toast/ops/madam.py @@ -20,6 +20,8 @@ from .copy import Copy +from .memory_counter import MemoryCounter + madam = None if use_mpi: @@ -61,7 +63,9 @@ class Madam(Operator): weights = Unicode("weights", help="Observation detdata key for output weights") - view = Unicode(None, allow_none=True, help="Use this view of the data in all observations") + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) pixels_nested = Bool(True, help="True if pixel indices are in NESTED ordering") @@ -75,10 +79,6 @@ class Madam(Operator): "noise_model", help="Observation key containing the noise model" ) - purge = Bool( - False, help="If True, clear all observation data after copying to madam buffers" - ) - purge_det_data = Bool( False, help="If True, clear all observation detector data after copying to madam buffers", @@ -89,22 +89,25 @@ class Madam(Operator): help="If True, clear all observation detector pointing data after copying to madam buffers", ) - purge_flags = Bool( - False, - help="If True, clear all observation detector flags after copying to madam buffers", - ) - mcmode = Bool( False, help="If true, Madam will store auxiliary information such as pixel matrices and noise filter.", ) - conserve_memory = Int(0, help="Stagger the Madam buffer staging on each node.") + copy_groups = Int( + 1, + help="The processes on each node are split into this number of groups to copy data in turns", + ) translate_timestamps = Bool( False, help="Translate timestamps to enforce monotonity." ) + noise_scale = Unicode( + "noise_scale", + help="Observation key with optional scaling factor for noise PSDs", + ) + def __init__(self, **kwargs): super().__init__(**kwargs) self._cached = False @@ -112,7 +115,32 @@ def __init__(self, **kwargs): @classmethod def available(cls): """(bool): True if libmadam is found in the library search path.""" - return (madam is not None and madam.available) + return madam is not None and madam.available + + def clear(self): + """Delete the underlying memory. 
+ + This will forcibly delete the C-allocated memory and invalidate all python + references to the buffers. DO NOT CALL THIS unless you are sure all references + are no longer being used. + + """ + if self._cached: + madam.clear_caches() + self._cached = False + for atr in ["timestamps", "signal", "pixels", "pixweights"]: + atrname = "_madam_{}".format(atr) + rawname = "{}_raw".format(atrname) + if hasattr(self, atrname): + delattr(self, atrname) + raw = getattr(self, rawname) + if raw is not None: + raw.clear() + setattr(self, rawname, None) + setattr(self, atrname, None) + + def __del__(self): + self.clear() @function_timer def _exec(self, data, detectors=None): @@ -137,20 +165,44 @@ def _exec(self, data, detectors=None): "You must set the pixels and weights before calling exec()" ) - # Check purging - if self.purge: - # Purging everything - self.purge_det_data = True - self.purge_pointing = True - self.purge_flags = True - - # Madam-compatible data buffers - self._madam_timestamps = None - self._madam_pixels = None - self._madam_pixweights = None - self._madam_signal = None + # Check input parameters and compute the sizes of Madam data objects + ( + all_dets, + nsamp, + ndet, + nnz, + nnz_full, + nnz_stride, + interval_starts, + psd_freqs, + nside, + ) = self._prepare(data, detectors) + psdinfo, signal_type, pixels_dtype, weight_dtype = self._stage_data( + nsamp, + ndet, + nnz, + nnz_full, + nnz_stride, + obs_period_ranges, + psdfreqs, + dets, + nside, + ) + # self._destripe(pars, dets, periods, psdinfo) + # + # self._unstage_data( + # nsamp, + # nnz, + # nnz_full, + # obs_period_ranges, + # dets, + # signal_type, + # pixels_dtype, + # nside, + # weight_dtype, + # ) return @@ -168,12 +220,8 @@ def _requires(self): "shared": [ self.times, ], - "detdata": [ - self.det_data, - self.pixels, - self.weights - ], - "intervals": list() + "detdata": [self.det_data, self.pixels, self.weights], + "intervals": list(), } if self.view is not None: req["intervals"].append(self.view) @@ -194,58 +242,129 @@ def _accelerators(self): @function_timer def _prepare(self, data, detectors): - """Examine the data and determine quantities needed to set up Madam data""" + """Examine the data and determine quantities needed to set up Madam buffers""" log = Logger.get() timer = Timer() timer.start() + if "nside_map" not in self.params: + raise RuntimeError( + "Madam 'nside_map' must be set in the parameter dictionary" + ) + nside = int(self.params["nside_map"]) + # Madam requires a fixed set of detectors and pointing matrix non-zeros. # Here we find the superset of local detectors used, and also the number # of pointing matrix elements. nsamp = 0 + + # Madam uses monolithic data buffers and specifies contiguous data intervals + # in that buffer. The starting sample index is used to mark the transition + # between data intervals. + interval_starts = list() + + # This quantity is only used for printing the fraction of samples in valid + # ranges specified by the View. Only samples actually in the view are copied + # to Madam buffers. + nsamp_valid = 0 + all_dets = set() nnz_full = None + psd_freqs = None + for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) all_dets.add(dets) - # Are we using a view of the data? If so, we will only consider data in - # those valid intervals. 
- if self.view is not None: - if self.view not in ob.intervals: - msg = "View '{}' does not exist in observation {}".format(self.view, ob.name) - raise RuntimeError(msg) - - nsamp += ob.n_local_samples + # Check that the timestamps exist. + if self.times not in ob.shared: + msg = ( + "Shared timestamps '{}' does not exist in observation '{}'".format( + self.times, ob.name + ) + ) + raise RuntimeError(msg) # Check that the detector data and pointing exists in the observation if self.det_data not in ob.detdata: - msg = "Detector data '{}' does not exist in observation '{}'".format(self.det_data, ob.name) + msg = "Detector data '{}' does not exist in observation '{}'".format( + self.det_data, ob.name + ) raise RuntimeError(msg) if self.pixels not in ob.detdata: - msg = "Detector pixels '{}' does not exist in observation '{}'".format(self.pixels, ob.name) + msg = "Detector pixels '{}' does not exist in observation '{}'".format( + self.pixels, ob.name + ) raise RuntimeError(msg) if self.weights not in ob.detdata: - msg = "Detector data '{}' does not exist in observation '{}'".format(self.weights, ob.name) + msg = "Detector data '{}' does not exist in observation '{}'".format( + self.weights, ob.name + ) raise RuntimeError(msg) + # Check that the noise model exists, and that the PSD frequencies are the + # same across all observations (required by Madam). + if self.noise_model not in ob: + msg = "Noise model '{}' not in observation '{}'".format( + self.noise_model, ob.name + ) + raise RuntimeError(msg) + if psd_freqs is None: + psd_freqs = np.array( + ob[self.noise_model].freq(ob.local_detectors[0]), dtype=np.float64 + ) + else: + check_freqs = ob[self.noise_model].freq(ob.local_detectors[0]) + if not np.allclose(psd_freqs, check_freqs): + raise RuntimeError( + "All PSDs passed to Madam must have the same frequency binning." + ) + # Get the number of pointing weights and verify that it is constant # across observations. ob_nnz = None - if len(ob.detdata[self.weights].detector_shape) == 1: - # The pointing weights just have one dimension (samples) - ob_full = 1 - else: - ob_full = ob.detdata[self.weights].detector_shape[-1] + if len(ob.detdata[self.weights].detector_shape) == 1: + # The pointing weights just have one dimension (samples) + ob_nnz = 1 + else: + ob_nnz = ob.detdata[self.weights].detector_shape[-1] if nnz_full is None: nnz_full = ob_nnz elif ob_nnz != nnz_full: - msg = "observation '{}' has {} pointing weights per sample, not {}".format(ob.name, ob_nnz, nnz_full) + msg = "observation '{}' has {} pointing weights per sample, not {}".format( + ob.name, ob_nnz, nnz_full + ) raise RuntimeError(msg) + # Are we using a view of the data? If so, we will only be copying data in + # those valid intervals. + if self.view is not None: + if self.view not in ob.intervals: + msg = "View '{}' does not exist in observation {}".format( + self.view, ob.name + ) + raise RuntimeError(msg) + # Go through all the intervals that will be used for our data view + # and accumulate the number of samples. 
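
[A tiny standalone version of the accumulation that follows; the interval values are invented. Valid view ranges are packed end-to-end into one monolithic buffer, with interval_starts recording each range's offset, and intervals counted inclusively as last - first + 1.]

intervals = [(0, 99), (150, 249)]  # inclusive (first, last) sample ranges

interval_starts = []
nsamp_valid = 0
for first, last in intervals:
    interval_starts.append(nsamp_valid)
    nsamp_valid += last - first + 1

assert interval_starts == [0, 100]
assert nsamp_valid == 200
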
+ for intvw in ob.intervals[self.view]: + interval_starts.append(nsamp_valid) + nsamp_valid += intvw.last - intvw.first + 1 + else: + interval_starts.append(nsamp_valid) + nsamp_valid += ob.n_local_samples + nsamp += ob.n_local_samples + + if data.comm.world_rank == 0: + log.info( + "Madam: {:.2f} % of samples are included in valid " + "intervals.".format(nsamp_valid * 100.0 / nsamp) + ) + + nsamp = nsamp_valid + all_dets = list(all_dets) ndet = len(all_dets) @@ -261,7 +380,9 @@ def _prepare(self, data, detectors): # User has requested a temperature-only map. if nnz_full not in [1, 3]: raise RuntimeError( - "Madam cannot make a temperature map with nnz == {}".format(nnz_full) + "Madam cannot make a temperature map with nnz == {}".format( + nnz_full + ) ) nnz = 1 nnz_stride = nnz_full @@ -269,23 +390,13 @@ def _prepare(self, data, detectors): nnz = nnz_full nnz_stride = 1 - if "nside_map" not in self.params: - raise RuntimeError( - "Madam 'nside_map' must be set in the parameter dictionary" - ) - nside = int(self.params["nside_map"]) - if data.comm.world_rank == 0 and "path_output" in self.params: os.makedirs(self.params["path_output"], exist_ok=True) # Inspect the valid intervals across all observations to # determine the number of samples per detector - obs_period_ranges, psdfreqs, periods, nsamp = self._get_period_ranges( - dets, nsamp - ) - - self._comm.Barrier() + data.comm.comm_world.Barrier() if self._rank == 0: log.debug() timer.report_clear("Collect dataset dimensions") @@ -293,12 +404,348 @@ def _prepare(self, data, detectors): return ( all_dets, nsamp, - ndet, nnz, nnz_full, nnz_stride, - periods, - obs_period_ranges, - psdfreqs, + interval_starts, + psd_freqs, nside, ) + + @function_timer + def _stage_data( + self, + data, + all_dets, + nsamp, + nnz, + nnz_full, + nnz_stride, + interval_starts, + psd_freqs, + nside, + ): + """Create madam-compatible buffers. + + Collect the data into Madam buffers. If we are purging TOAST data to save + memory, then optionally limit the number of processes that are copying at once. + + """ + log = Logger.get() + + # Memory counting operator + mem_count = MemoryCounter(silent=True) + + nodecomm = data.comm.comm_world.Split_type( + MPI.COMM_TYPE_SHARED, data.comm.world_rank + ) + + # Determine how many processes per node should copy at once. + n_copy_groups = 1 + if self.purge_det_data or self.purge_pointing: + # We will be purging some data- see if we should reduce the number of + # processes copying in parallel (if we are not purging data, there + # is no benefit to staggering the copy). + if self.copy_groups > 0: + n_copy_groups = min(self.copy_groups, nodecomm.size) + + # self._comm.Barrier() + # timer_tot = Timer() + # timer_tot.start() + + # Copy timestamps and PSDs all at once, since they are never purged. 
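
[The timestamp translation staged below can be summarized with a standalone sketch; the values are invented. Each observation's chunk is shifted so the concatenated buffer is monotonic, with a fixed one-second gap inserted between chunks.]

import numpy as np

chunks = [np.arange(5.0), np.arange(5.0)]  # both observations restart at t=0

buf = np.zeros(sum(len(c) for c in chunks))
offset = 0
time_offset = 0.0
for c in chunks:
    slc = slice(offset, offset + len(c))
    buf[slc] = c
    # Shift this chunk so it starts where the previous one left off.
    off = buf[offset] - time_offset
    buf[slc] -= off
    time_offset = buf[slc][-1] + 1.0
    offset += len(c)

assert np.all(np.diff(buf) > 0)  # buf is now [0..4, 5..9]
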
+ + timestamp_storage = dtype_to_aligned(madam.TIMESTAMP_TYPE) + self._madam_timestamps_raw = timestamp_storage.zeros(nsamp) + self._madam_timestamps = self._madam_timestamps_raw.array() + psds = dict() + + interval = 0 + time_offset = 0.0 + + for ob in data.obs: + if self.view is not None: + for vw in ob.view[self.view].shared[self.times]: + offset = interval_starts[interval] + slc = slice(offset, offset + len(vw), 1) + self._madam_timestamps[slc] = vw + if self.translate_timestamps: + off = self._madam_timestamps[offset] - time_offset + self._madam_timestamps[slc] -= off + time_offset = self._madam_timestamps[slc][-1] + 1.0 + interval += 1 + else: + offset = interval_starts[interval] + slc = slice(offset, offset + ob.n_local_samples, 1) + self._madam_timestamps[slc] = ob.shared[self.times] + if self.translate_timestamps: + off = self._madam_timestamps[offset] - time_offset + self._madam_timestamps[slc] -= off + time_offset = self._madam_timestamps[slc][-1] + 1.0 + interval += 1 + + # Get the noise object for this observation and create new + # entries in the dictionary when the PSD actually changes + nse = ob[self.noise_model] + nse_scale = 1.0 + if self.noise_scale is not None: + if self.noise_scale in ob: + nse_scale = float(ob[self.noise_scale]) + + for det in all_dets: + if det not in ob.local_detectors: + continue + psd = nse.psd(det) * nse_scale ** 2 + if det not in psds: + psds[det] = [(0.0, psd)] + else: + if not np.allclose(psds[det][-1][1], psd): + psds[det] += [(ob.shared[self.times][0], psd)] + + def copy_local(detdata_name, madam_dtype, dnnz, do_flags=False, do_purge=False): + """Helper function to create a madam buffer from a local detdata key.""" + storage = dtype_to_aligned(madam_dtype) + n_all_det = len(all_dets) + raw = storage.zeros(nsamp * n_all_det) + wrapped = raw.array() + interval = 0 + for ob in data.obs: + if self.view is not None: + for vw in ob.view[self.view].detdata[detdeta_name]: + offset = interval_starts[interval] + flags = None + if do_flags: + if ( + self.shared_flags is not None + or self.det_flags is not None + ): + # Using flags + flags = np.zeros(len(vw), dtype=np.uint8) + if self.shared_flags is not None: + flags |= ( + ob.view[self.view].shared[self.shared_flags] + & self.shared_flag_mask + ) + + for idet, det in enumerate(all_dets): + if det not in ob.local_detectors: + continue + slc = slice( + (idet * nsamp + offset) * dnnz, + (idet * nsamp + offset + len(vw)) * dnnz, + 1, + ) + if dnnz > 1: + wrapped[slc] = vw[idet].flatten()[::nnz_stride] + else: + wrapped[slc] = vw[idet].flatten() + detflags = None + if do_flags: + if self.det_flags is None: + detflags = flags + else: + detflags = np.copy(flags) + detflags |= ( + ob.view[self.view].detdata[self.det_flags][idet] + & self.det_flag_mask + ) + # The do_flags option should only be true if we are + # processing the pixel indices (which is how madam + # effectively implements flagging). So we will set + # all flagged samples to "-1" + if detflags is not None: + # sanity check + if nnz != 1: + raise RuntimeError( + "Internal error on madam copy. Only pixel indices should be flagged." 
+ ) + wrapped[slc][detflags != 0] = -1 + interval += 1 + else: + offset = interval_starts[interval] + flags = None + if do_flags: + if self.shared_flags is not None or self.det_flags is not None: + # Using flags + flags = np.zeros(ob.n_local_samples, dtype=np.uint8) + if self.shared_flags is not None: + flags |= ( + ob.shared[self.shared_flags] & self.shared_flag_mask + ) + for idet, det in enumerate(all_dets): + if det not in ob.local_detectors: + continue + slc = slice( + (idet * nsamp + offset) * dnnz, + (idet * nsamp + offset + ob.n_local_samples) * dnnz, + 1, + ) + if dnnz > 1: + wrapped[slc] = ob.detdata[detdata_name][idet].flatten()[ + ::nnz_stride + ] + else: + wrapped[slc] = ob.detdata[detdata_name][idet].flatten() + detflags = None + if do_flags: + if self.det_flags is None: + detflags = flags + else: + detflags = np.copy(flags) + detflags |= ( + ob.detdata[self.det_flags][idet] + & self.det_flag_mask + ) + # The do_flags option should only be true if we are + # processing the pixel indices (which is how madam + # effectively implements flagging). So we will set + # all flagged samples to "-1" + if detflags is not None: + # sanity check + if dnnz != 1: + raise RuntimeError( + "Internal error on madam copy. Only pixel indices should be flagged." + ) + wrapped[slc][detflags != 0] = -1 + interval += 1 + if do_purge: + del ob.detdata[detdata_name] + return raw, wrapped + + def copy_in_turns(detdata_name, madam_dtype, dnnz, do_flags): + """When purging data, take turns copying it.""" + raw = None + wrapped = None + for copying in range(n_copy_groups): + if nodecomm.rank % n_copy_groups == copying: + # Our turn to copy data + raw, wrapped = copy_local( + detdata_name, + madam_dtype, + dnnz, + do_flags=do_flags, + do_purge=True, + ) + nodecomm.barrier() + return raw, wrapped + + # Copy the signal + + if self.purge_det_data: + self._madam_signal_raw, self._madam_signal = copy_in_turns( + self.det_data, madam.SIGNAL_TYPE, 1, do_flags=False + ) + else: + self._madam_signal_raw, self._madam_signal = copy_local( + self.det_data, madam.SIGNAL_TYPE, 1, do_flags=False, do_purge=False + ) + + # Copy the pointing + + if self.purge_pointing: + self._madam_pixels_raw, self._madam_pixels = copy_in_turns( + self.pixels, madam.PIXEL_TYPE, 1, do_flags=True + ) + self._madam_weights_raw, self._madam_weights = copy_in_turns( + self.weights, madam.WEIGHT_TYPE, nnz, do_flags=False + ) + else: + self._madam_pixels_raw, self._madam_pixels = copy_local( + self.pixels, madam.PIXEL_TYPE, 1, do_flags=True, do_purge=False + ) + self._madam_weights_raw, self._madam_weights = copy_local( + self.weights, madam.WEIGHT_TYPE, nnz, do_flags=False, do_purge=False + ) + + # Madam uses constant detector weights? + + # # detweights is either a dictionary of weights specified at + # # construction time, or else we use uniform weighting. 
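
[For the commented-out PSD packing that follows, a runnable miniature of the intended layout; detector names and PSD values are invented. Each detector's list of (start_time, psd) pairs is flattened into the npsd / psdstarts / psdvals arrays that a Madam-style destriper expects.]

import numpy as np

psds = {
    "d0": [(0.0, np.ones(4))],
    "d1": [(0.0, np.ones(4)), (100.0, 2.0 * np.ones(4))],  # PSD changes at t=100
}
detectors = sorted(psds.keys())

npsd = np.array([len(psds[d]) for d in detectors], dtype=np.int64)
psdstarts = np.array([t for d in detectors for t, _ in psds[d]])
psdvals = np.hstack([p for d in detectors for _, p in psds[d]])

assert npsd.tolist() == [1, 2]
assert psdstarts.tolist() == [0.0, 0.0, 100.0]
assert psdvals.size == 12
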
+ # detw = {} + # if self._detw is None: + # for idet, det in enumerate(detectors): + # detw[det] = 1.0 + # else: + # detw = self._detw + # + # detweights = np.zeros(ndet, dtype=np.float64) + # for idet, det in enumerate(detectors): + # detweights[idet] = detw[det] + # + # if len(psds) > 0: + # npsdbin = len(psdfreqs) + # + # npsd = np.zeros(ndet, dtype=np.int64) + # psdstarts = [] + # psdvals = [] + # for idet, det in enumerate(detectors): + # if det not in psds: + # raise RuntimeError("Every detector must have at least " "one PSD") + # psdlist = psds[det] + # npsd[idet] = len(psdlist) + # for psdstart, psd in psdlist: + # psdstarts.append(psdstart) + # psdvals.append(psd) + # npsdtot = np.sum(npsd) + # psdstarts = np.array(psdstarts, dtype=np.float64) + # psdvals = np.hstack(psdvals).astype(madam.PSD_TYPE) + # npsdval = psdvals.size + # else: + # npsd = np.ones(ndet, dtype=np.int64) + # npsdtot = np.sum(npsd) + # psdstarts = np.zeros(npsdtot) + # npsdbin = 10 + # fsample = 10.0 + # psdfreqs = np.arange(npsdbin) * fsample / npsdbin + # npsdval = npsdbin * npsdtot + # psdvals = np.ones(npsdval) + # psdinfo = (detweights, npsd, psdstarts, psdfreqs, psdvals) + # if self._rank == 0 and self._verbose: + # timer_tot.report_clear("Collect PSD info") + # return psdinfo, signal_dtype, pixels_dtype, weight_dtype + + # def _unstage_data(self): + # pass + # + # @function_timer + # def _destripe(self, pars, dets, periods, psdinfo): + # """Destripe the buffered data""" + # if self._verbose: + # memreport("just before calling libmadam.destripe", self._comm) + # if self._cached: + # # destripe + # outpath = "" + # if "path_output" in self.params: + # outpath = self.params["path_output"] + # outpath = outpath.encode("ascii") + # madam.destripe_with_cache( + # self._comm, + # self._madam_timestamps, + # self._madam_pixels, + # self._madam_pixweights, + # self._madam_signal, + # outpath, + # ) + # else: + # (detweights, npsd, psdstarts, psdfreqs, psdvals) = psdinfo + # + # # destripe + # madam.destripe( + # self._comm, + # pars, + # dets, + # detweights, + # self._madam_timestamps, + # self._madam_pixels, + # self._madam_pixweights, + # self._madam_signal, + # periods, + # npsd, + # psdstarts, + # psdfreqs, + # psdvals, + # ) + # + # if self._mcmode: + # self._cached = True + # return diff --git a/src/toast/ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py index 1b5e0f166..a6a46b2b3 100644 --- a/src/toast/ops/pointing_healpix.py +++ b/src/toast/ops/pointing_healpix.py @@ -97,6 +97,18 @@ class PointingHealpix(Operator): help="The observation key with a dictionary of pointing weight calibration for each det", ) + coord_in = Unicode( + None, + allow_none=True, + help="The input boresight coordinate system ('C', 'E', 'G')", + ) + + coord_out = Unicode( + None, + allow_none=True, + help="The output boresight coordinate system ('C', 'E', 'G')", + ) + overwrite = Bool(False, help="If True, regenerate pointing even if it exists") @traitlets.validate("nside") @@ -136,20 +148,47 @@ def _check_flag_mask(self, proposal): raise traitlets.TraitError("Flag mask should be a positive integer") return check - def __init__(self, **kwargs): - super().__init__(**kwargs) - - # Initialize the healpix pixels object - self.hpix = HealpixPixels(self.nside) + @traitlets.validate("coord_in") + def _check_coord_in(self, proposal): + check = proposal["value"] + if check is not None: + if check not in ["E", "C", "G"]: + raise traitlets.TraitError("coordinate system must be 'E', 'C', or 'G'") + return check - self._nnz = 1 - if 
self.mode == "IQU": - self._nnz = 3 + @traitlets.validate("coord_out") + def _check_coord_out(self, proposal): + check = proposal["value"] + if check is not None: + if check not in ["E", "C", "G"]: + raise traitlets.TraitError("coordinate system must be 'E', 'C', or 'G'") + return check - self._n_pix = 12 * self.nside ** 2 - self._n_pix_submap = 12 * self.nside_submap ** 2 - self._n_submap = (self.nside // self.nside_submap) ** 2 + def __init__(self, **kwargs): + super().__init__(**kwargs) + @traitlets.observe("nside", "nside_submap", "mode") + def _reset_hpix(self, change): + # (Re-)initialize the healpix pixels object when one of these traits change. + # Current values: + nside = self.nside + nside_submap = self.nside_submap + nnz = self._nnz + + # Update to the trait that changed + if change["name"] == "nside": + nside = change["new"] + if change["name"] == "nside_submap": + nside_submap = change["new"] + if change["name"] == "mode": + if change["new"] == "IQU": + nnz = 3 + else: + nnz = 1 + self.hpix = HealpixPixels(nside) + self._n_pix = 12 * nside ** 2 + self._n_pix_submap = 12 * nside_submap ** 2 + self._n_submap = (nside // nside_submap) ** 2 self._local_submaps = None @function_timer @@ -160,6 +199,31 @@ def _exec(self, data, detectors=None, **kwargs): if self._local_submaps is None and self.create_dist is not None: self._local_submaps = np.zeros(self._n_submap, dtype=np.bool) + coord_rot = None + if self.coord_in is None: + if self.coord_out is not None: + msg = "Input and output coordinate systems should both be None or valid" + raise RuntimeError(msg) + else: + if self.coord_out is None: + msg = "Input and output coordinate systems should both be None or valid" + raise RuntimeError(msg) + if self.coord_in == "C": + if self.coord_out == "E": + coord_rot = qa.equ2ecl + elif self.coord_out == "G": + coord_rot = qa.equ2gal + elif self.coord_in == "E": + if self.coord_out == "G": + coord_rot = qa.ecl2gal + elif self.coord_out == "C": + coord_rot = qa.inv(qa.equ2ecl) + elif self.coord_in == "G": + if self.coord_out == "C": + coord_rot = qa.inv(qa.equ2gal) + if self.coord_out == "E": + coord_rot = qa.inv(qa.ecl2gal) + # We do the calculation over buffers of timestream samples to reduce memory # overhead from temporary arrays. tod_buffer_length = env.tod_buffer_length() @@ -188,7 +252,12 @@ def _exec(self, data, detectors=None, **kwargs): hwp_angle = ob.shared[self.hwp_angle] # Boresight pointing quaternions - boresight = ob.shared[self.boresight] + in_boresight = ob.shared[self.boresight] + + # Coordinate transform if needed + boresight = in_boresight + if coord_rot is not None: + boresight = qa.mult(coord_rot, in_boresight) # Focalplane for this observation focalplane = ob.telescope.focalplane diff --git a/src/toast/ops/sim_ground.py b/src/toast/ops/sim_ground.py index c7d918c4c..481be6f43 100644 --- a/src/toast/ops/sim_ground.py +++ b/src/toast/ops/sim_ground.py @@ -2,12 +2,18 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +import traitlets + import numpy as np from scipy.constants import degree import healpy as hp +from astropy import units as u + +import ephem + from .. 
import qarray as qa from ..utils import Environment, name_UID, Logger, rate_from_times @@ -16,11 +22,13 @@ from ..timing import function_timer, Timer -from ..tod import Interval, TOD, regular_intervals, AnalyticNoise +from ..intervals import Interval, regular_intervals -from ..observation import Observation +from ..noise_sim import AnalyticNoise -from ..config import ObjectConfig +from ..traits import trait_docs, Int, Unicode, Float, Bool, Instance, Quantity + +from ..observation import Observation from ..instrument import Telescope @@ -28,257 +36,472 @@ from .operator import Operator -from .sim_hwp import simulate_hwp_angle +from .sim_hwp import simulate_hwp_response +@trait_docs class SimGround(Operator): - """Simulate a generic ground-based telescope. + """Simulate a generic ground-based telescope scanning. - This uses an observing schedule to simulate observations of a ground based - telescope. + This simulates ground-based pointing in constant elevation scans for a telescope + located at a particular site and using an pre-created schedule. - Args: - config (dict): Configuration parameters. + The created observations define several interval lists to describe regions where + the telescope is scanning left, right or in a turnaround or El-nod. A shared + flag array is also created with bits sets for these same properties. """ - def __init__(self, config): - super().__init__(config) - self._parse() + # Class traits - @classmethod - def defaults(cls): - """(Class method) Return options supported by the operator and their defaults. + API = Int(0, help="Internal interface version for this operator") - This returns an ObjectConfig instance, and each entry should have a help - string. + schedule = Instance(klass=Schedule, allow_none=True, help="The observing schedule") - Returns: - (ObjectConfig): The options. + scan_rate = Quantity(1.0 * u.degree / u.second, help="The sky scanning rate") - """ - opts = ObjectConfig() + scan_rate_el = Quantity( + None, allow_none=True, help="The sky elevation scanning rate" + ) - opts.add("class", "toast.future_ops.SimGround", "The class name") + scan_accel = Quantity( + 0.1 * u.degree / u.second ** 2, + help="Mount scanning rate acceleration for turnarounds", + ) - opts.add("API", 0, "(Internal interface version for this operator)") + scan_accel_el = Quantity( + None, allow_none=True, help="Mount elevation rate acceleration." 
+ ) - opts.add("telescope", None, "This should be an instance of a Telescope") + cosecant_modulation = Bool( + False, help="Modulate the scan rate according to 1/sin(az) for uniform depth" + ) - opts.add("start_time", 0.0, "The mission start time in seconds") + sun_angle_min = Quantity( + 90.0 * u.degree, help="Minimum angular distance for the scan and the Sun" + ) - opts.add("hwp_rpm", None, "The rate (in RPM) of the HWP rotation") + el_mod_step = Quantity( + 0.0 * u.degree, help="Amount to step elevation after each left-right scan pair" + ) - opts.add( - "hwp_step_deg", None, "For stepped HWP, the angle in degrees of each step" - ) + el_mod_rate = Quantity( + 0.0 * u.Hz, help="Modulate elevation continuously at this rate" + ) - opts.add( - "hwp_step_time_m", - None, - "For stepped HWP, the time in minutes between steps", - ) + el_mod_amplitude = Quantity(1.0 * u.degree, help="Range of elevation modulation") + + el_mod_sine = Bool( + False, help="Modulate elevation with a sine wave instead of a triangle wave" + ) + + distribute_time = Bool( + False, + help="Distribute observation data along the time axis rather than detector axis", + ) + + times = Unicode("times", help="Observation shared key for timestamps") + + shared_flags = Unicode("flags", help="Observation shared key for common flags") + + hwp_angle = Unicode("hwp_angle", help="Observation shared key for HWP angle") + + boresight_azel = Unicode( + "boresight_radec", help="Observation shared key for boresight AZ/EL" + ) + + boresight_radec = Unicode( + "boresight_radec", help="Observation shared key for boresight RA/DEC" + ) + + position = Unicode("position", help="Observation shared key for position") + + velocity = Unicode("velocity", help="Observation shared key for velocity") + + hwp_rpm = Float(None, allow_none=True, help="The rate (in RPM) of the HWP rotation") + + hwp_step = Quantity( + None, allow_none=True, help="For stepped HWP, the angle of each step" + ) + + hwp_step_time = Quantity( + None, allow_none=True, help="For stepped HWP, the time between steps" + ) + + start_with_elnod = Bool(True, help="Perform an el-nod before the scan") + + end_with_elnod = Bool(False, help="Perform an el-nod after the scan") + + el_nod = List(None, allow_none=True, help="List of relative el_nods") + + scanning_interval = Unicode("scanning", help="Interval name for scanning") + + turnaround_interval = Unicode("turnaround", help="Interval name for turnarounds") + + scan_left_interval = Unicode("scan_left", help="Interval name for left-going scans") + + scan_right_interval = Unicode( + "scan_right", help="Interval name for right-going scans" + ) + + el_nod_interval = Unicode("elnod", help="Interval name for elnods") + + sun_up_interval = Unicode( + "sun_up", help="Interval name for times when the sun is up" + ) + + sun_close_interval = Unicode( + "sun_close", help="Interval name for times when the sun is close" + ) + + @traitlets.validate("schedule") + def _check_schedule(self, proposal): + sch = proposal["value"] + if sch is not None: + if not isinstance(sch, Schedule): + raise traitlets.TraitError("schedule must be an instance of a Schedule") + if sch.telescope is None: + raise traitlets.TraitError("schedule must have a telescope") + if sch.ceslist is None: + raise traitlets.TraitError("schedule must have a list of CESs") + tele = sch.telescope + try: + dets = tele.focalplane.detectors + except Exception: + raise traitlets.TraitError( + "schedule telescope must be a Telescope instance with a focalplane" + ) + return sch + + def __init__(self, 
**kwargs): + super().__init__(**kwargs) + self._AU = 149597870.7 + self._radperday = 0.01720209895 + self._radpersec = self._radperday / 86400.0 + self._earthspeed = self._radpersec * self._AU - boresight_angle = (0,) - firsttime = (0.0,) - rate = (100.0,) - site_lon = (0,) - site_lat = (0,) - site_alt = (0,) - el = (None,) - azmin = (None,) - azmax = (None,) - el_nod = (None,) - start_with_elnod = (True,) - end_with_elnod = (False,) - scanrate = (1,) - scanrate_el = (None,) - scan_accel = (0.1,) - scan_accel_el = (None,) - CES_start = (None,) - CES_stop = (None,) - el_min = (0,) - sun_angle_min = (90,) - sampsizes = (None,) - sampbreaks = (None,) - coord = ("C",) - report_timing = (True,) - hwprpm = (None,) - hwpstep = (None,) - hwpsteptime = (None,) - cosecant_modulation = (False,) - - return opts - - def _parse(self): - if "telescope" not in self.config: - raise RuntimeError("Satellite simulations require a telescope") - try: - dets = self.config["telescope"].focalplane.detectors - except: - raise RuntimeError("'telescope' option should be an instance of Telescope") - if "start_time" not in self.config: - self.config["start_time"] = 0.0 - if "observation_time_h" not in self.config: - raise RuntimeError("Time span of each observation must be specified") - if "gap_time_h" not in self.config: - self.config["gap_time_h"] = 0.0 - if "n_observation" not in self.config: - raise RuntimeError("Number of observations must be specified") - - def exec(self, data, detectors=None): - """Create observations containing simulated satellite pointing. - - Observations will be appended to the Data object. - - Args: - data (toast.Data): The distributed data. - detectors (list): A list of detector names or indices. If None, this - indicates a list of all detectors. - - Returns: - None - - """ + def _exec(self, data, detectors=None, **kwargs): log = Logger.get() - focalplane = self.config["telescope"].focalplane + if self.schedule is None: + raise RuntimeError( + "The schedule attribute must be set before calling exec()" + ) + focalplane = self.schedule.telescope.focalplane comm = data.comm # List of detectors in this pipeline - pipedets = list() - for d in focalplane.detectors: - if d not in detectors: - continue - pipedets.append(d) - - if comm.group_size > len(pipedets): - if comm.world_rank == 0: - log.error("process group is too large for the number of detectors") - comm.comm_world.Abort() - - # Distribute the observations uniformly among groups - - groupdist = distribute_uniform(self.config["n_observation"], comm.ngroups) - - # Compute global time and sample ranges of all observations - - obsrange = regular_intervals( - self.config["n_observation"], - self.config["start_time"], - 0, - focalplane.sample_rate, - 3600 * self.config["observation_time_h"], - 3600 * self.config["gap_time_h"], + pipedets = None + if detectors is None: + pipedets = focalplane.detectors + else: + pipedets = list() + for det in focalplane.detectors: + if det in detectors: + pipedets.append(det) + + # Distribute the observations among groups in a load balanced way based on + # the duration of each CES. 
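
[A toy version of the load-balanced split described above, not the real toast.dist.distribute_discrete used below: contiguous observations are assigned to process groups so that total CES duration is roughly even, while guaranteeing every group receives at least one observation.]

import numpy as np

def distribute_by_weight(sizes, ngroups):
    sizes = np.asarray(sizes, dtype=np.float64)
    target = sizes.sum() / ngroups
    dist = []  # (first_obs, n_obs) for each group
    first = 0
    accum = 0.0
    for i, s in enumerate(sizes):
        accum += s
        remaining = len(sizes) - (i + 1)
        groups_left = ngroups - len(dist)
        if groups_left > 1 and remaining >= groups_left - 1:
            # Close the group once it reaches the average load, or when
            # exactly one observation per remaining group is left.
            if accum >= target or remaining == groups_left - 1:
                dist.append((first, i + 1 - first))
                first = i + 1
                accum = 0.0
    dist.append((first, len(sizes) - first))
    return dist

# Durations (seconds) of four hypothetical CESs, split over two groups:
assert distribute_by_weight([3600, 600, 600, 3600], 2) == [(0, 2), (2, 2)]
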
+ + num_obs = len(self.schedule.ceslist) + obs_sizes = np.array( + [int(x.stop_time - x.start_time) + 1 for x in self.schedule.ceslist] ) + groupdist = distribute_discrete(obs_sizes, comm.ngroups) + + # Set the size of the process grid for each group + + det_ranks = comm.group_size + if self.distribute_time: + det_ranks = 1 + # Every process group creates its observations group_firstobs = groupdist[comm.group][0] group_numobs = groupdist[comm.group][1] - for ob in range(group_firstobs, group_firstobs + group_numobs): - obname = "science_{:05d}".format(ob) - obs = Observation( - self.config["telescope"], - name=obname, - UID=name_UID(obname), - samples=obsrange[ob].samples, - detector_ranks=comm.group_size, - mpicomm=comm.comm_group, + for obindx in range(group_firstobs, group_firstobs + group_numobs): + # The CES for this observation + ces = self.schedule.ceslist[obindx] + + # Set the boresight pointing based on the given scan parameters + + timer = Timer() + if self._report_timing: + if mpicomm is not None: + mpicomm.Barrier() + timer.start() + + self._times = np.array([]) + self._commonflags = np.array([], dtype=np.uint8) + self._az = np.array([]) + self._el = np.array([]) + + nsample_elnod = 0 + if start_with_elnod: + # Begin with an el-nod + nsample_elnod = self.simulate_elnod( + self._firsttime, azmin * degree, el * degree + ) + if nsample_elnod > 0: + t_elnod = self._times[-1] - self._times[0] + # Shift the time stamps so that the CES starts at the prescribed time + self._times -= t_elnod + self._firsttime -= t_elnod + + nsample_ces = self.simulate_scan(samples) + + if end_with_elnod and self._elnod_az is not None: + # Append en el-nod after the CES + self._elnod_az[:] = self._az[-1] + nsample_elnod = self.simulate_elnod( + self._times[-1], self._az[-1], self._el[-1] + ) + self._lasttime = self._times[-1] + samples = self._times.size + + if self._report_timing: + if mpicomm is not None: + mpicomm.Barrier() + if mpicomm is None or mpicomm.rank == 0: + timer.report_clear("TODGround: simulate scan") + + # Create a list of subscans that excludes the turnarounds. + # All processes in the group still have all samples. + + self.subscans = [] + self._subscan_min_length = 10 # in samples + for istart, istop in zip(self._stable_starts, self._stable_stops): + if istop - istart < self._subscan_min_length or istart < nsample_elnod: + self._commonflags[istart:istop] |= self.TURNAROUND + continue + start = self._firsttime + istart / self._rate + stop = self._firsttime + istop / self._rate + self.subscans.append( + Interval(start=start, stop=stop, first=istart, last=istop - 1) + ) + + if len(self._stable_stops) > 0: + self._commonflags[self._stable_stops[-1] :] |= self.TURNAROUND + + if np.sum((self._commonflags & self.TURNAROUND) == 0) == 0 and do_ces: + raise RuntimeError( + "The entire TOD is flagged as turnaround. Samplerate too low " + "({} Hz) or scanrate too high ({} deg/s)?" 
+ "".format(rate, scanrate) + ) + + if self._report_timing: + if mpicomm is not None: + mpicomm.Barrier() + if mpicomm is None or mpicomm.rank == 0: + timer.report_clear("TODGround: list valid intervals") + + self._fp = detectors + self._detlist = sorted(list(self._fp.keys())) + + # call base class constructor to distribute data + + props = { + "site_lon": site_lon, + "site_lat": site_lat, + "site_alt": site_alt, + "azmin": azmin, + "azmax": azmax, + "el": el, + "scanrate": scanrate, + "scan_accel": scan_accel, + "el_min": el_min, + "sun_angle_min": sun_angle_min, + } + super().__init__( + mpicomm, + self._detlist, + samples, + sampsizes=[samples], + sampbreaks=None, + meta=props, + **kwargs ) - # Create standard shared objects. + if self._report_timing: + if mpicomm is not None: + mpicomm.Barrier() + if mpicomm is None or mpicomm.rank == 0: + timer.report_clear("TODGround: call base class constructor") + + self.translate_pointing() + + self.crop_vectors() + + if self._report_timing: + if mpicomm is not None: + mpicomm.Barrier() + if mpicomm is None or mpicomm.rank == 0: + timer.report_clear("TODGround: translate scan pointing") + + # If HWP parameters are specified, simulate and cache HWP angle + + simulate_hwp(self, hwprpm, hwpstep, hwpsteptime) + + # Check that we do not have too many processes for our data distribution. + + if self.distribute_time: + # We are distributing data by scan sets + if comm.group_size > len(self.schedule.ceslist): + msg = "process group is too large for the number of CESs" + if comm.world_rank == 0: + log.error(msg) + raise RuntimeError(msg) + else: + # We are distributing data by detector sets. + if comm.group_size > len(pipedets): + msg = "process group is too large for the number of detectors" + if comm.world_rank == 0: + log.error(msg) + raise RuntimeError(msg) + + ob = Observation( + self.schedule.telescope, + obsrange[obindx].samples, + name=ces.name, + UID=name_UID(ces.name), + comm=comm.comm_group, + process_rows=det_ranks, + ) - obs.create_times() - obs.create_common_flags() + # Create shared objects for timestamps, common flags, position, + # and velocity. + ob.shared.create( + self.times, + shape=(ob.n_local_samples,), + dtype=np.float64, + comm=ob.comm_col, + ) + ob.shared.create( + self.shared_flags, + shape=(ob.n_local_samples,), + dtype=np.uint8, + comm=ob.comm_col, + ) + ob.shared.create( + self.position, + shape=(ob.n_local_samples, 3), + dtype=np.float64, + comm=ob.comm_col, + ) + ob.shared.create( + self.velocity, + shape=(ob.n_local_samples, 3), + dtype=np.float64, + comm=ob.comm_col, + ) # Rank zero of each grid column creates the data stamps = None - if obs.grid_comm_col is None or obs.grid_comm_col.rank == 0: - start_abs = obs.local_samples[0] + obsrange[ob].first + position = None + velocity = None + if ob.comm_col_rank == 0: + start_abs = ob.local_index_offset + obsrange[obindx].first start_time = ( - obsrange[ob].start + float(start_abs) / focalplane.sample_rate + obsrange[obindx].start + float(start_abs) / focalplane.sample_rate ) stop_time = ( - start_time + float(obs.local_samples[1]) / focalplane.sample_rate + start_time + float(ob.n_local_samples) / focalplane.sample_rate ) stamps = np.linspace( start_time, stop_time, - num=obs.local_samples[1], + num=ob.n_local_samples, endpoint=False, dtype=np.float64, ) - obs.times().set(stamps, offset=(0,), fromrank=0) + # For this simple class, assume that the Earth is located + # along the X axis at time == 0.0s. We also just use the + # mean values for distance and angular speed. 
Classes for + # real experiments should obviously use ephemeris data. + rad = np.fmod( + (start_time - self.start_time.to_value(u.second)) * self._radpersec, + 2.0 * np.pi, + ) + ang = radinc * np.arange(ob.n_local_samples, dtype=np.float64) + rad + x = self._AU * np.cos(ang) + y = self._AU * np.sin(ang) + z = np.zeros_like(x) + position = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3)) + + ang = ( + radinc * np.arange(ob.n_local_samples, dtype=np.float64) + + rad + + (0.5 * np.pi) + ) + x = self._earthspeed * np.cos(ang) + y = self._earthspeed * np.sin(ang) + z = np.zeros_like(x) + velocity = np.ravel(np.column_stack((x, y, z))).reshape((-1, 3)) - # Create boresight - start_abs = obs.local_samples[0] + obsrange[ob].first + ob.shared[self.times].set(stamps, offset=(0,), fromrank=0) + ob.shared[self.position].set(position, offset=(0, 0), fromrank=0) + ob.shared[self.velocity].set(velocity, offset=(0, 0), fromrank=0) + + # Create boresight pointing + start_abs = ob.local_index_offset + obsrange[obindx].first degday = 360.0 / 365.25 q_prec = None - if obs.grid_comm_col is None or obs.grid_comm_col.rank == 0: + if ob.comm_col_rank == 0: q_prec = slew_precession_axis( first_samp=start_abs, - n_samp=obs.local_samples[1], + n_samp=ob.n_local_samples, sample_rate=focalplane.sample_rate, deg_day=degday, ) satellite_scanning( - obs, + ob, + self.boresight, sample_offset=start_abs, q_prec=q_prec, - spin_period_m=self.config["spin_period_m"], - spin_angle_deg=self.config["spin_angle_deg"], - prec_period_m=self.config["prec_period_m"], - prec_angle_deg=self.config["prec_angle_deg"], + spin_period_m=self.spin_period.to_value(u.minute), + spin_angle_deg=self.spin_angle.to_value(u.degree), + prec_period_m=self.prec_period.to_value(u.minute), + prec_angle_deg=self.prec_angle.to_value(u.degree), ) # Set HWP angle - simulate_hwp_angle( - obs, - obsrange[ob].start, - self.config["hwp_rpm"], - self.config["hwp_step_deg"], - self.config["hwp_step_time_m"], + simulate_hwp_response( + ob, + ob_time_key=self.times, + ob_angle_key=self.hwp_angle, + ob_mueller_key=None, + hwp_start=obsrange[obindx].start * u.second, + hwp_rpm=self.hwp_rpm, + hwp_step=self.hwp_step, + hwp_step_time=self.hwp_step_time, ) - data.obs.append(obs) + data.obs.append(ob) return - def finalize(self, data): - """Perform any final operations / communication. - - This calls the finalize() method on all operators in sequence. - - Args: - data (toast.Data): The distributed data. 
-
-        Returns:
-            None
-
-        """
+    def _finalize(self, data, **kwargs):
         return
 
-    def requires(self):
-        """List of Observation keys directly used by this Operator."""
-        return list()
-
-    def provides(self):
-        """List of Observation keys generated by this Operator."""
-        prov = [
-            "TIMESTAMPS",
-            "BORESIGHT_RADEC",
-            "BORESIGHT_RESPONSE",
-            "COMMON_FLAGS",
-            "HWP_ANGLE",
-            "POSITION",
-            "VELOCITY",
-        ]
-        return prov
-
-    def accelerators(self):
-        """List of accelerators supported by this Operator."""
+    def _requires(self):
+        return dict()
+
+    def _provides(self):
+        return {
+            "shared": [
+                self.times,
+                self.shared_flags,
+                self.boresight,
+                self.hwp_angle,
+                self.position,
+                self.velocity,
+            ]
+        }
+
+    def _accelerators(self):
         return list()
diff --git a/src/toast/ops/sim_satellite.py b/src/toast/ops/sim_satellite.py
index e50620fa6..ff22f2a96 100644
--- a/src/toast/ops/sim_satellite.py
+++ b/src/toast/ops/sim_satellite.py
@@ -318,9 +318,7 @@ class SimSatellite(Operator):
 
     times = Unicode("times", help="Observation shared key for timestamps")
 
-    shared_flags = Unicode(
-        "shared_flags", help="Observation shared key for common flags"
-    )
+    shared_flags = Unicode("flags", help="Observation shared key for common flags")
 
     hwp_angle = Unicode("hwp_angle", help="Observation shared key for HWP angle")
 
diff --git a/src/toast/qarray.py b/src/toast/qarray.py
index 145c57119..9f315550f 100644
--- a/src/toast/qarray.py
+++ b/src/toast/qarray.py
@@ -506,3 +506,93 @@ def to_position(q):
     else:
         return (float(theta[0]), float(phi[0]))
     return (theta.array(), phi.array())
+
+
+# J2000 coordinate transforms
+
+# RA, DEC to galactic coordinates
+
+_coordmat_J2000radec2gal = None
+_equ2gal = None
+
+
+def equ2gal():
+    """Return the equatorial to galactic coordinate transform quaternion."""
+    global _coordmat_J2000radec2gal
+    global _equ2gal
+    if _coordmat_J2000radec2gal is None:
+        _coordmat_J2000radec2gal = np.array(
+            [
+                -0.054875539726,
+                -0.873437108010,
+                -0.483834985808,
+                0.494109453312,
+                -0.444829589425,
+                0.746982251810,
+                -0.867666135858,
+                -0.198076386122,
+                0.455983795705,
+            ]
+        ).reshape([3, 3])
+    if _equ2gal is None:
+        _equ2gal = from_rotmat(_coordmat_J2000radec2gal)
+    return _equ2gal
+
+
+# RA, DEC to (geocentric) ecliptic coordinates
+
+_coordmat_J2000radec2ecl = None
+_equ2ecl = None
+
+
+def equ2ecl():
+    """Return the equatorial to ecliptic coordinate transform quaternion."""
+    global _coordmat_J2000radec2ecl
+    global _equ2ecl
+    if _coordmat_J2000radec2ecl is None:
+        _coordmat_J2000radec2ecl = np.array(
+            [
+                1.0,
+                0.0,
+                0.0,
+                0.0,
+                0.917482062069182,
+                0.397777155931914,
+                0.0,
+                -0.397777155931914,
+                0.917482062069182,
+            ]
+        ).reshape([3, 3])
+    if _equ2ecl is None:
+        _equ2ecl = from_rotmat(_coordmat_J2000radec2ecl)
+    return _equ2ecl
+
+
+# Ecliptic coordinates (geocentric) to galactic
+# (use the same rotation as HEALPix, to avoid confusion)
+
+_coordmat_J2000ecl2gal = None
+_ecl2gal = None
+
+
+def ecl2gal():
+    """Return the ecliptic to galactic coordinate transform quaternion."""
+    global _coordmat_J2000ecl2gal
+    global _ecl2gal
+    if _coordmat_J2000ecl2gal is None:
+        _coordmat_J2000ecl2gal = np.array(
+            [
+                -0.054882486,
+                -0.993821033,
+                -0.096476249,
+                0.494116468,
+                -0.110993846,
+                0.862281440,
+                -0.867661702,
+                -0.000346354,
+                0.497154957,
+            ]
+        ).reshape([3, 3])
+    if _ecl2gal is None:
+        _ecl2gal = from_rotmat(_coordmat_J2000ecl2gal)
+    return _ecl2gal
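
The three transforms added to qarray.py are ordinary quaternions, so they
compose with the rest of the module. A quick consistency check, a sketch
assuming only the functions visible in the hunk above (rotate() is the
existing toast.qarray vector rotation):

    import numpy as np
    import toast.qarray as qa

    # Equatorial -> ecliptic -> galactic should agree with the direct
    # equatorial -> galactic rotation, up to the precision of the
    # hard-coded matrix entries above.
    vec = np.array([0.3, -0.4, 0.866])
    direct = qa.rotate(qa.equ2gal(), vec)
    via_ecl = qa.rotate(qa.ecl2gal(), qa.rotate(qa.equ2ecl(), vec))
    assert np.allclose(direct, via_ecl, atol=1.0e-6)

diff --git a/src/toast/schedule.py b/src/toast/schedule.py
index 69cf985f0..2b67c2390 100644
--- a/src/toast/schedule.py
+++ 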
b/src/toast/schedule.py @@ -1,2882 +1,324 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# Copyright (c) 2019-2020 by the parties listed in the AUTHORS file. # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. -""" -This script creates a CES schedule file that can be used as input -to toast_ground_sim.py -""" - -import argparse -from datetime import datetime, timezone, timedelta -import dateutil.parser import os import sys -import traceback import numpy as np -from scipy.constants import degree -from matplotlib import cm - -import ephem -import healpy as hp - -from .utils import Logger -from . import qarray as qa -from .timing import function_timer - - -XAXIS, YAXIS, ZAXIS = np.eye(3) - - -class TooClose(Exception): - pass - - -class SunTooClose(TooClose): - pass - - -class MoonTooClose(TooClose): - pass +from .timing import function_timer, Timer -class Patch(object): +from .utils import Logger, Environment - hits = 0 - partial_hits = 0 - rising_hits = 0 - setting_hits = 0 - time = 0 - rising_time = 0 - setting_time = 0 - step = -1 - az_min = 0 - az_max = 2 * np.pi - _area = None - current_el_min = 0 - current_el_max = 0 - el_min0 = 0 - el_max0 = np.pi / 2 - el_min = el_min0 - el_max = el_max0 - el_step = 0 - alternate = False - ra_amplitude = None - ra_period = 10 - dec_amplitude = None - dec_period = 10 - corners = [] - preferred_el = None +class CES(object): def __init__( self, + start_time, + stop_time, + boresight_angle, name, - weight, - corners, - el_min=0, - el_max=np.pi / 2, - el_step=0, - alternate=False, - site_lat=0, - area=None, - ra_period=10, - ra_amplitude=None, - dec_period=10, - dec_amplitude=None, - elevations=None, - ): - self.name = name - self.weight = weight - self.corners = corners - self.el_min0 = el_min - self.el_min = el_min - self.el_max0 = el_max - self.el_step = np.abs(el_step) - self.alternate = alternate - self._area = area - self.site_lat = site_lat - self.ra_period = ra_period - self.ra_amplitude = np.radians(ra_amplitude) - self.dec_period = dec_period - self.dec_amplitude = np.radians(dec_amplitude) - # Use the site latitude to infer the lowest elevation that all - # corners cross. - site_el_max = np.pi / 2 - for corner in corners: - el_max = np.pi / 2 - np.abs(corner._dec - self.site_lat) - if el_max < site_el_max: - site_el_max = el_max - if elevations is None: - if site_el_max < self.el_max0: - self.el_max0 = site_el_max - self.elevations = None - else: - # Parse the allowed elevations - try: - # Try parsing as a string - self.elevations = [ - np.radians(float(el)) for el in elevations.split(",") - ] - except AttributeError: - # Try parsing as an iterable - self.elevations = [np.radians(el) for el in elevations] - self.elevations = np.sort(np.array(self.elevations)) - # Check if any of the allowed elevations is above the highest - # observable elevation - bad = self.elevations > site_el_max - if np.any(bad): - good = np.logical_not(bad) - if np.any(good): - print( - "WARNING: {} of the observing elevations are too high " - "for '{}': {} > {:.2f} deg".format( - np.sum(bad), - self.name, - np.degrees(self.elevations[bad]), - np.degrees(site_el_max), - ), - flush=True, - ) - self.elevations = self.elevations[good] - else: - print( - "ERROR: all of the observing elevations are too high for {}. 
" - "Maximum observing elevation is {} deg".format( - self.name, np.degrees(site_el_max) - ), - flush=True, - ) - sys.exit() - self.el_min0 = np.amin(self.elevations) - self.el_max0 = np.amax(self.elevations) - if el_step != 0: - self.nstep_el = int((self.el_max0 - self.el_min0 + 1e-3) // el_step) + 1 - self.elevations0 = self.elevations - self.el_max = self.el_max0 - self.el_lim = self.el_min0 - self.step_azel() - return - - def oscillate(self): - if self.ra_amplitude: - # Oscillate RA - halfperiod = self.ra_period // 2 - old_phase = np.fmod(self.hits - 1 + halfperiod, self.ra_period) - halfperiod - new_phase = np.fmod(self.hits + halfperiod, self.ra_period) - halfperiod - old_offset = old_phase / halfperiod * self.ra_amplitude - new_offset = new_phase / halfperiod * self.ra_amplitude - offset = new_offset - old_offset - for corner in self.corners: - corner._ra += offset - if self.dec_amplitude: - # Oscillate DEC - halfperiod = self.dec_period // 2 - old_phase = ( - np.fmod(self.hits - 1 + halfperiod, self.dec_period) - halfperiod - ) - new_phase = np.fmod(self.hits + halfperiod, self.dec_period) - halfperiod - old_offset = old_phase / halfperiod * self.dec_amplitude - new_offset = new_phase / halfperiod * self.dec_amplitude - offset = new_offset - old_offset - for corner in self.corners: - corner._dec += offset - return - - @function_timer - def get_area(self, observer, nside=32, equalize=False): - self.update(observer) - if self._area is None: - npix = 12 * nside ** 2 - hitmap = np.zeros(npix) - for corner in self.corners: - corner.compute(observer) - for pix in range(npix): - lon, lat = hp.pix2ang(nside, pix, lonlat=True) - center = ephem.FixedBody() - center._ra = np.radians(lon) - center._dec = np.radians(lat) - center.compute(observer) - hitmap[pix] = self.in_patch(center) - self._area = np.sum(hitmap) / hitmap.size - if self._area == 0: - raise RuntimeError("Patch has zero area!") - if equalize: - self.weight /= self._area - return self._area - - @function_timer - def corner_coordinates(self, observer=None, unwind=False): - """Return the corner coordinates in horizontal frame. - - PyEphem measures the azimuth East (clockwise) from North. - """ - azs = [] - els = [] - az0 = None - for corner in self.corners: - if observer is not None: - corner.compute(observer) - if unwind: - if az0 is None: - az0 = corner.az - azs.append(unwind_angle(az0, corner.az)) - else: - azs.append(corner.az) - els.append(corner.alt) - return np.array(azs), np.array(els) - - @function_timer - def in_patch(self, obj): - """ - Determine if the object (e.g. Sun or Moon) is inside the patch - by using a ray casting algorithm. The ray is cast along a - constant meridian to follow a great circle. 
- """ - az0 = obj.az - # Get corner coordinates, assuming they were already computed - azs, els = self.corner_coordinates() - els_cross = [] - for i in range(len(self.corners)): - az1 = azs[i] - el1 = els[i] - j = (i + 1) % len(self.corners) - az2 = unwind_angle(az1, azs[j]) - el2 = els[j] - azmean = 0.5 * (az1 + az2) - az0 = unwind_angle(azmean, np.float(obj.az), np.pi) - if (az1 - az0) * (az2 - az0) > 0: - # the constant meridian is not between the two corners - continue - el_cross = el1 + (az1 - az0) * (el2 - el1) / (az1 - az2) - if np.abs(obj.az - (az0 % (2 * np.pi))) < 1e-3: - els_cross.append(el_cross) - elif el_cross > 0: - els_cross.append(np.pi - el_cross) - else: - els_cross.append(-np.pi - el_cross) - - els_cross = np.array(els_cross) - if els_cross.size < 2: - return False - - # Unwind the crossing elevations to minimize the scatter - els_cross = np.sort(els_cross) - if els_cross.size > 1: - ptps = [] - for i in range(els_cross.size): - els_cross_alt = els_cross.copy() - els_cross_alt[:i] += 2 * np.pi - ptps.append(np.ptp(els_cross_alt)) - i = np.argmin(ptps) - if i > 0: - els_cross[:i] += 2 * np.pi - els_cross = np.sort(els_cross) - el_mean = np.mean(els_cross) - el0 = unwind_angle(el_mean, np.float(obj.alt)) - - ncross = np.sum(els_cross > el0) - - if ncross % 2 == 0: - # Even number of crossings means that the object is outside - # of the patch - return False - return True - - @function_timer - def step_azel(self): - self.step += 1 - if self.el_step != 0 and self.alternate: - # alternate between rising and setting scans - if self.rising_hits < self.setting_hits: - # Schedule a rising scan - istep = self.rising_hits % self.nstep_el - self.el_min = min(self.el_max0, self.el_min0 + istep * self.el_step) - self.el_max = self.el_max0 - self.az_min = 0 - self.az_max = np.pi - else: - # Schedule a setting scan - istep = self.setting_hits % self.nstep_el - self.el_min = self.el_min0 - self.el_max = max(self.el_min0, self.el_max0 - istep * self.el_step) - self.az_min = np.pi - self.az_max = 2 * np.pi - else: - if self.alternate: - self.az_min = (self.az_min + np.pi) % (2 * np.pi) - self.az_max = self.az_min + np.pi - else: - self.el_min += self.el_step - if self.el_min > self.el_max0: - self.el_min = self.el_min0 - if self.el_step != 0 and self.elevations is not None: - tol = np.radians(0.1) - self.elevations = np.array( - [ - el - for el in self.elevations0 - if (el + tol >= self.el_min and el - tol <= self.el_max) - ] - ) - return - - def reset(self): - self.step += 1 - self.el_min = self.el_min0 - self.el_max = self.el_max0 - self.elevations = self.elevations0 - self.az_min = 0 - if self.alternate: - self.az_max = np.pi - else: - self.az_max = 2 * np.pi - return - - def visible( - self, - el_min, - observer, - sun, - moon, - sun_avoidance_angle, - moon_avoidance_angle, - check_sso, - ): - self.update(observer) - patch_el_max = -1000 - patch_el_min = 1000 - in_view = False - for i, corner in enumerate(self.corners): - corner.compute(observer) - patch_el_min = min(patch_el_min, corner.alt) - patch_el_max = max(patch_el_max, corner.alt) - if corner.alt > el_min: - # At least one corner is visible - in_view = True - if check_sso: - if sun_avoidance_angle > 0: - angle = np.degrees(ephem.separation(sun, corner)) - if angle < sun_avoidance_angle: - # Patch is too close to the Sun - return False, "Too close to Sun {:.2f}".format(angle) - if moon_avoidance_angle > 0: - angle = np.degrees(ephem.separation(moon, corner)) - if angle < moon_avoidance_angle: - # Patch is too close to the Moon - 
return False, "Too close to Moon {:.2f}".format(angle) - if not in_view: - msg = "Below el_min = {:.2f} at el = {:.2f}..{:.2f}.".format( - np.degrees(el_min), np.degrees(patch_el_min), np.degrees(patch_el_max) - ) - else: - msg = "in view" - self.current_el_min = patch_el_min - self.current_el_max = patch_el_max - - return in_view, msg - - def update(self, *args, **kwargs): - """ - A virtual method that is implemented by moving targets - """ - pass - - -class SSOPatch(Patch): - def __init__(self, name, weight, radius): - self.name = name - self.weight = weight - self.radius = radius - try: - self.body = getattr(ephem, name)() - except: - raise RuntimeError("Failed to initialize {} from pyEphem".format(name)) - self.corners = None - return - - def update(self, observer): - """ - Calculate the relative position of the SSO at a given time - """ - self.body.compute(observer) - ra, dec = self.body.ra, self.body.dec - # Synthesize 8 corners around the center - phi = ra - theta = dec - r = self.radius - ncorner = 8 - angstep = 2 * np.pi / ncorner - self.corners = [] - for icorner in range(ncorner): - ang = angstep * icorner - delta_theta = np.cos(ang) * r - delta_phi = np.sin(ang) * r / np.cos(theta + delta_theta) - patch_corner = ephem.FixedBody() - patch_corner._ra = phi + delta_phi - patch_corner._dec = theta + delta_theta - self.corners.append(patch_corner) - return - - -class CoolerCyclePatch(Patch): - def __init__( - self, - weight, - power, - hold_time_min, - hold_time_max, - cycle_time, - az, + mjdstart, + scan, + subscan, + azmin, + azmax, el, - last_cycle_end, + season, + start_date, + rising, + mindist_sun, + mindist_moon, + el_sun, ): - # Standardized name for cooler cycles - self.name = "cooler_cycle" - self.hold_time_min = hold_time_min * 3600 - self.hold_time_max = hold_time_max * 3600 - self.cycle_time = cycle_time * 3600 - self.az = az + self.start_time = start_time + self.stop_time = stop_time + self.boresight_angle = boresight_angle + self.name = name + self.mjdstart = mjdstart + self.scan = scan + self.subscan = subscan + self.azmin = azmin + self.azmax = azmax self.el = el - self.last_cycle_end = last_cycle_end - self.weight0 = weight - self.weight = weight - self.power = power - return - - def get_area(self, *args, **kwargs): - if self._area is None: - self._area = 0 - return self._area - - def corner_coordinates(self, *args, **kwargs): - return None - - def in_patch(self, *args, **kwargs): - return False - - def step_azel(self, *args, **kwargs): - return + self.season = season + self.start_date = start_date + self.rising = rising + self.mindist_sun = mindist_sun + self.mindist_moon = mindist_moon + self.el_sun = el_sun + + +class Schedule(object): + """Class representing an observing schedule. + + A schedule consists of a list of Constant Elevation Scans (CESs) for a telescope at + a particular site. The schedule can be constructed either by loading from a file + or by passing the telescope and CES list directly. + + Args: + file (str): The path to the schedule file. + file_split (tuple): If not None and loading from a file, only use a subset of + the schedule. The arguments are (isplit, nsplit) and only observations + that satisfy 'scan index modulo nsplit == isplit' are included. + telescope (Telescope): If not loading from a file, specify the Telescope + instance. + ceslist (list): If not loading from a file, specify the list of CESs. + sort (bool): If True, sort the CES list by name. 
- def reset(self, *args, **kwargs): - return - - def get_current_hold_time(self, observer): - tlast = to_DJD(self.last_cycle_end) - tnow = float(observer.date) # In Dublin Julian date - hold_time = (tnow - tlast) * 86400 # in seconds - return hold_time + """ - def visible( - self, - el_min, - observer, - sun, - moon, - sun_avoidance_angle, - moon_avoidance_angle, - check_sso, + def __init__( + self, file=None, file_split=None, telescope=None, ceslist=None, sort=False ): - self.update(observer) - hold_time = self.get_current_hold_time(observer) - if hold_time > self.hold_time_min: - visible = True - msg = "minimum hold time exceeded" + if file is not None: + if not os.path.isfile(file): + msg = "No such schedule file '{}'".format(file) + raise RuntimeError(msg) + self.load(file, file_split) else: - visible = False - msg = "minimum hold time not met" - return visible, msg - - def update(self, observer): - hold_time = self.get_current_hold_time(observer) - if hold_time < self.hold_time_min: - self.weight = np.inf - else: - weight = (self.hold_time_max - hold_time) / ( - self.hold_time_max - self.hold_time_min - ) - self.weight = self.weight0 * weight ** self.power + self.telescope = telescope + self.ceslist = ceslist + if sort: + self.sort_ceslist() return - -class HorizontalPatch(Patch): - elevations = None - def __init__(self, name, weight, azmin, azmax, el, scantime): - self.name = name - self.weight = weight - if azmin <= np.pi and azmax <= np.pi: - self.rising = True - elif azmin >= np.pi and azmax >= np.pi: - self.rising = False - else: - raise RuntimeError("Horizontal patch must either be rising or setting.") - self.az_min = azmin - self.az_max = azmax - self.el = el - # scan time is the maximum time spent on this scan before targeting again - self.scantime = scantime # in minutes. 
- self.scandrift = scantime / 60 * 15 * degree - - self.el_min0 = el - self.el_min = el - self.el_max0 = el - self.el_step = 0 - self.alternate = False - self._area = 0 - self.el_max = self.el_max0 - self.el_lim = self.el_min0 + def sort_ceslist(self): + """Sort the list of CES by name.""" + if self.ceslist is None: + return + nces = len(self.ceslist) + for i in range(nces - 1): + for j in range(i + 1, nces): + if self.ceslist[j].name < self.ceslist[j - 1].name: + temp = self.ceslist[j] + self.ceslist[j] = self.ceslist[j - 1] + self.ceslist[j - 1] = temp return - def get_area(self, observer, nside=32, equalize=False): - return 1 - - def corner_coordinates(self, observer=None, unwind=False): - azs = [self.az_min, self.az_max] - els = [self.el_min, self.el_max] - return np.array(azs), np.array(els) - - def in_patch(self, obj, angle=0): - azmin = obj.az - angle - azmax = obj.az + angle - elmin = obj.alt - angle - elmax = obj.alt + angle - if self.rising: - elmax += self.scandrift - else: - elmin -= self.scandrift - if ( - azmin > self.az_min - and azmax < self.az_max - and elmin > self.el_min - and elmax < self.el_max - ): - return True - return False - - def step_azel(self): - return - - def visible( - self, - el_min, - observer, - sun, - moon, - sun_avoidance_angle, - moon_avoidance_angle, - check_sso, - ): - - in_view = True - msg = "" - if check_sso: - for sso, angle, name in [ - (sun, sun_avoidance_angle, "Sun"), - (moon, moon_avoidance_angle, "Moon"), - ]: - if self.in_patch(sso, angle=angle): - in_view = False - msg += "{} too close;".format(name) - - if in_view: - msg = "in view" - self.current_el_min = self.el_min - self.current_el_max = self.el_max - return in_view, msg - - -def to_UTC(t): - # Convert UNIX time stamp to a date string - return datetime.fromtimestamp(t, timezone.utc).strftime("%Y-%m-%d %H:%M:%S") - - -def to_JD(t): - # Unix time stamp to Julian date - # (days since -4712-01-01 12:00:00 UTC) - return t / 86400.0 + 2440587.5 - - -def to_MJD(t): - # Convert Unix time stamp to modified Julian date - # (days since 1858-11-17 00:00:00 UTC) - return to_JD(t) - 2400000.5 - + @function_timer + def min_sso_dist(el, azmin, azmax, sso_el1, sso_az1, sso_el2, sso_az2): + """Return a rough minimum angular distance between the bore sight + and a solar system object""" + sso_vec1 = hp.dir2vec(sso_az1, sso_el1, lonlat=True) + sso_vec2 = hp.dir2vec(sso_az2, sso_el2, lonlat=True) + az1 = azmin + az2 = azmax + if az2 < az1: + az2 += 360 + n = 100 + az = np.linspace(az1, az2, n) + el = np.ones(n) * el + vec = hp.dir2vec(az, el, lonlat=True) + dist1 = np.degrees(np.arccos(np.dot(sso_vec1, vec))) + dist2 = np.degrees(np.arccos(np.dot(sso_vec2, vec))) + return min(np.amin(dist1), np.amin(dist2)) -def to_DJD(t): - # Convert Unix time stamp to Dublin Julian date - # (days since 1899-12-31 12:00:00) - # This is the time format used by PyEphem - return to_JD(t) - 2415020 + @function_timer + def load(self, file, file_split): + """Load the observing schedule from a file. + This method populates the internal telescope and CES list. For parallel use, + simply construct the schedule on one process and broadcast. -def DJDtoUNIX(djd): - # Convert Dublin Julian date to a UNIX time stamp - return ((djd + 2415020) - 2440587.5) * 86400.0 + Args: + file (str): The file to load. + file_split (tuple): If not None and loading from a file, only use a subset + of the schedule. The arguments are (isplit, nsplit) and only + observations that satisfy 'scan index modulo nsplit == isplit' are + included. 
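
The avoidance test in min_sso_dist() samples the constant-elevation azimuth
sweep rather than solving for the true minimum. A restatement of that
geometry for a single SSO position, a sketch that assumes healpy is
importable (the code above calls hp.dir2vec) and uses made-up Sun
coordinates:

    import healpy as hp
    import numpy as np

    el, azmin, azmax = 50.0, 350.0, 20.0  # degrees; sweep wraps through 0
    sun_az, sun_el = 120.0, 15.0  # stand-in SSO position

    if azmax < azmin:
        azmax += 360.0
    az = np.linspace(azmin, azmax, 100)
    bore = hp.dir2vec(az, np.full(az.size, el), lonlat=True)  # (3, n)
    sso = hp.dir2vec(sun_az, sun_el, lonlat=True)  # (3,)
    dist = np.degrees(np.arccos(np.clip(np.dot(sso, bore), -1.0, 1.0)))
    print(dist.min())
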
+ Returns: + None -def patch_is_rising(patch): - try: - # Horizontal patch definition - rising = patch.rising - except: - rising = True - for corner in patch.corners: - if corner.alt > 0 and corner.az > np.pi: - # The patch is setting - rising = False + """ + isplit = None + nsplit = None + if file_split is not None: + isplit, nsplit = file_split + scan_counters = dict() + all_ces = list() + + with open(file, "r") as f: + while True: + line = f.readline() + if line.startswith("#"): + continue + ( + site_name, + telescope_name, + site_lat, + site_lon, + site_alt, + ) = line.split() + site = Site(site_name, site_lat, site_lon, float(site_alt)) + self.telescope = Telescope(telescope_name, site=site, coord="C") break - return rising - - -@function_timer -def prioritize(args, visible): - """Order visible targets by priority and number of scans.""" - log = Logger.get() - for i in range(len(visible)): - for j in range(len(visible) - i - 1): - # If either of the patches is a cooler cycle, we don't modulate - # the priorities with hit counts, observing time or elevation - if isinstance(visible[j], CoolerCyclePatch) or isinstance( - visible[j + 1], CoolerCyclePatch - ): - weight1 = visible[j].weight - weight2 = visible[j + 1].weight - else: - if patch_is_rising(visible[j]): - if args.equalize_time: - hits1 = visible[j].rising_time - else: - hits1 = visible[j].rising_hits - el1 = np.degrees(visible[j].current_el_max) - else: - if args.equalize_time: - hits1 = visible[j].setting_time - else: - hits1 = visible[j].setting_hits - el1 = np.degrees(visible[j].current_el_min) - if patch_is_rising(visible[j + 1]): - if args.equalize_time: - hits2 = visible[j + 1].rising_time - else: - hits2 = visible[j + 1].rising_hits - el2 = np.degrees(visible[j + 1].current_el_max) - else: - if args.equalize_time: - hits2 = visible[j + 1].setting_time - else: - hits2 = visible[j + 1].setting_hits - el2 = np.degrees(visible[j + 1].current_el_min) - # Patch with the lower weight goes first. Having more - # earlier observing time and lower observing elevation - # will increase the weight. - weight1 = (hits1 + 1) * visible[j].weight - weight2 = (hits2 + 1) * visible[j + 1].weight - # Optional elevation penalty - if args.elevation_penalty_limit > 0: - lim = args.elevation_penalty_limit - if el1 < lim: - weight1 *= (lim / el1) ** args.elevation_penalty_power - if el2 < lim: - weight2 *= (lim / el2) ** args.elevation_penalty_power - if weight1 > weight2: - visible[j], visible[j + 1] = visible[j + 1], visible[j] - names = [] - for patch in visible: - names.append(patch.name) - log.debug("Prioritized list of viewable patches: {}".format(names)) - return - - -@function_timer -def attempt_scan( - args, - observer, - visible, - not_visible, - t, - fp_radius, - stop_timestamp, - tstop_cooler, - sun, - moon, - sun_el_max, - fout, - fout_fmt, - ods, - boresight_angle, -): - """Attempt scanning the visible patches in order until success.""" - log = Logger.get() - success = False - # Always begin by attempting full scans. If none can be completed - # and user allowed partials scans, try them next. 
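
The first non-comment line of a schedule file is the site/telescope header,
matching the format written out by the legacy build_schedule() being removed
below. A sketch of the parse performed at the top of load(), with a made-up
two-line header:

    header = [
        "#Site       Telescope   Latitude [deg]  Longitude [deg]  Elevation [m]",
        " atacama    LAT         -22.958         -67.786          5200.0",
    ]
    for line in header:
        if line.startswith("#"):
            continue
        site_name, telescope_name, site_lat, site_lon, site_alt = line.split()
        break
    # site_lat / site_lon stay as strings (PyEphem-style); only the
    # altitude is cast to float, as in Schedule.load() above.
    print(site_name, telescope_name, float(site_alt))
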
- for allow_partial_scans in False, True: - if allow_partial_scans and not args.allow_partial_scans: - break - for patch in visible: - if isinstance(patch, CoolerCyclePatch): - # Cycle the cooler - t = add_cooler_cycle( - args, - t, + last_name = None + for line in f: + if line.startswith("#"): + continue + ( + start_timestamp, + start_date, stop_timestamp, - observer, - sun, - moon, - fout, - fout_fmt, - patch, + season, + mjdstart, + mjdstop, boresight_angle, - ) - success = True - break - # All on-sky targets - for rising in [True, False]: - observer.date = to_DJD(t) - el = get_constant_elevation( - args, - observer, - patch, - rising, - fp_radius, - not_visible, - partial_scan=allow_partial_scans, - ) - if el is None: - continue - success, azmins, azmaxs, aztimes, tstop = scan_patch( - args, + name, + azmin, + azmax, el, - patch, - t, - fp_radius, - observer, - sun, - not_visible, - tstop_cooler, - sun_el_max, + scan, + subscan, + mindist_sun, + mindist_moon, + el_sun, rising, - ) - if success: - try: - t, _ = add_scan( - args, - t, - tstop, - aztimes, - azmins, - azmaxs, - rising, - fp_radius, - observer, - sun, - moon, - fout, - fout_fmt, - patch, - el, - ods, - boresight_angle, - partial_scan=allow_partial_scans, - ) - patch.step_azel() - break - except TooClose: - success = False - break - if success: - break - if success: - break - return success, t - - -def from_angles(az, el): - elquat = qa.rotation(YAXIS, np.radians(90 - el)) - azquat = qa.rotation(ZAXIS, np.radians(az)) - return qa.mult(azquat, elquat) - - -def unwind_quat(quat1, quat2): - if np.sum(np.abs(quat1 - quat2)) > np.sum(np.abs(quat1 + quat2)): - return -quat2 - else: - return quat2 - - -@function_timer -def check_sso(observer, az1, az2, el, sso, angle, tstart, tstop): - """ - Check if a solar system object (SSO) enters within "angle" of - the constant elevation scan. 
- """ - if az2 < az1: - az2 += 360 - naz = max(3, np.int(0.25 * (az2 - az1) * np.cos(np.radians(el)))) - quats = [] - for az in np.linspace(az1, az2, naz): - quats.append(from_angles(az % 360, el)) - vecs = qa.rotate(quats, ZAXIS) - - tstart = to_DJD(tstart) - tstop = to_DJD(tstop) - t1 = tstart - # Test every ten minutes - tstep = 10 / 1440 - while t1 < tstop: - t2 = min(tstop, t1 + tstep) - observer.date = t1 - sso.compute(observer) - sun_az1, sun_el1 = np.degrees(sso.az), np.degrees(sso.alt) - observer.date = t2 - sso.compute(observer) - sun_az2, sun_el2 = np.degrees(sso.az), np.degrees(sso.alt) - sun_quat1 = from_angles(sun_az1, sun_el1) - sun_quat2 = from_angles(sun_az2, sun_el2) - sun_quat2 = unwind_quat(sun_quat1, sun_quat2) - t = np.linspace(0, 1, 10) - sun_quats = qa.slerp(t, [0, 1], [sun_quat1, sun_quat2]) - sun_vecs = qa.rotate(sun_quats, ZAXIS).T - dpmax = np.amax(np.dot(vecs, sun_vecs)) - min_dist = np.degrees(np.arccos(dpmax)) - if min_dist < angle: - return True, DJDtoUNIX(t1) - t1 = t2 - return False, DJDtoUNIX(t2) - - -@function_timer -def attempt_scan_pole( - args, - observer, - visible, - not_visible, - tstart, - fp_radius, - el_max, - el_min, - stop_timestamp, - tstop_cooler, - sun, - moon, - sun_el_max, - fout, - fout_fmt, - ods, - boresight_angle, -): - """Attempt scanning the visible patches in order until success.""" - if args.one_scan_per_day and stop_timestamp > tstop_cooler: - raise RuntimeError("one_scan_per_day is incompatible with cooler cycles") - success = False - for patch in visible: - observer.date = to_DJD(tstart) - if isinstance(patch, CoolerCyclePatch): - # Cycle the cooler - t = add_cooler_cycle( - args, - tstart, - stop_timestamp, - observer, - sun, - moon, - fout, - fout_fmt, - patch, - boresight_angle, - ) - success = True - break - # In pole scheduling, first elevation is just below the patch - el = get_constant_elevation_pole( - args, observer, patch, fp_radius, el_min, el_max, not_visible - ) - if el is None: - continue - pole_success = True - subscan = -1 - t = tstart - while pole_success: - (pole_success, azmins, azmaxs, aztimes, tstop) = scan_patch_pole( - args, - el, - patch, - t, - fp_radius, - observer, - sun, - not_visible, - tstop_cooler, - sun_el_max, - ) - if pole_success: - if success: - # Still the same scan - patch.hits -= 1 - try: - t, subscan = add_scan( - args, - t, - tstop, - aztimes, - azmins, - azmaxs, - False, - fp_radius, - observer, - sun, - moon, - fout, - fout_fmt, - patch, - el, - ods, - boresight_angle, + ) = _parse_line(line) + if nsplit is not None: + # Only accept 1 / `nsplit` of the rising and setting + # scans in patch `name`. Selection is performed + # during the first subscan. 
+ if name != last_name: + if name not in scan_counters: + scan_counters[name] = dict() + counter = scan_counters[name] + # Separate counters for rising and setting scans + if rising not in counter: + counter[rising] = 0 + else: + counter[rising] += 1 + iscan = counter[rising] + last_name = name + if iscan % nsplit != isplit: + continue + all_ces.append( + CES( + start_time=start_timestamp, + stop_time=stop_timestamp, + boresight_angle=boresight_angle, + name=name, + mjdstart=mjdstart, + scan=scan, subscan=subscan, - ) - el += np.radians(args.pole_el_step_deg) - success = True - except TooClose: - success = False - pole_success = False - if success: - break - tstop = t - if args.one_scan_per_day: - day1 = int(to_MJD(tstart)) - while int(to_MJD(tstop)) == day1: - tstop += 60.0 - return success, tstop - - -@function_timer -def get_constant_elevation( - args, observer, patch, rising, fp_radius, not_visible, partial_scan=False -): - """Determine the elevation at which to scan.""" - log = Logger.get() - - azs, els = patch.corner_coordinates(observer) - - ind_rising = azs < np.pi - ind_setting = azs > np.pi - - el = None - if rising: - if np.sum(ind_rising) == 0: - not_visible.append((patch.name, "No rising corners")) - else: - el = np.amax(els[ind_rising]) + fp_radius - else: - if np.sum(ind_setting) == 0: - not_visible.append((patch.name, "No setting corners")) - else: - el = np.amin(els[ind_setting]) - fp_radius - - if el is not None and patch.elevations is not None: - # Fixed elevation mode. Find the first allowed observing elevation. - if rising: - ind = patch.elevations >= el - if np.any(ind): - el = np.amin(patch.elevations[ind]) - elif partial_scan: - # None of the elevations allow a full rising scan, - # Observe at the highest allowed elevation - el = np.amax(patch.elevations) - if el < np.amin(els[ind_rising]) + fp_radius: - not_visible.append( - (patch.name, "Rising patch above maximum elevation") - ) - el = None - else: - not_visible.append((patch.name, "Only partial rising scans available")) - el = None - else: - ind = patch.elevations <= el - if np.any(ind): - el = np.amax(patch.elevations[ind]) - elif partial_scan: - # None of the elevations allow a full setting scan, - # Observe at the lowest allowed elevation - el = np.amin(patch.elevations) - if el > np.amax(els[ind_setting]) + fp_radius: - not_visible.append( - (patch.name, "Setting patch above below elevation") - ) - el = None - else: - not_visible.append((patch.name, "Only partial setting scans available")) - el = None - elif el is not None: - if el < patch.el_min: - if partial_scan and np.any(patch.el_min < els[ind_setting] - fp_radius): - # Partial setting scan - el = patch.el_min - else: - not_visible.append( - ( - patch.name, - "el < el_min ({:.2f} < {:.2f}) rising = {}, partial = {}".format( - el / degree, patch.el_min / degree, rising, partial_scan - ), + azmin=azmin, + azmax=azmax, + el=el, + season=season, + start_date=start_date, + rising=rising, + mindist_sun=mindist_sun, + mindist_moon=mindist_moon, + el_sun=el_sun, ) ) - el = None - elif el > patch.el_max: - if partial_scan and np.any(patch.el_max > els[ind_rising] + fp_radius): - # Partial rising scan - el = patch.el_max - else: - not_visible.append( - ( - patch.name, - "el > el_max ({:.2f} > {:.2f}) rising = {}, partial = {}".format( - el / degree, patch.el_max / degree, rising, partial_scan - ), - ) - ) - el = None - if el is None: - log.debug("NO ELEVATION: {}".format(not_visible[-1])) - else: - log.debug( - "{} : ELEVATION = {}, rising = {}, partial = 
{}".format( - patch.name, el / degree, rising, partial_scan - ) - ) - return el - + self.ceslist = all_ces -@function_timer -def get_constant_elevation_pole( - args, observer, patch, fp_radius, el_min, el_max, not_visible -): - """Determine the elevation at which to scan.""" - log = Logger.get() - _, els = patch.corner_coordinates(observer) - el = np.amin(els) - fp_radius + def _parse_line(line): + """Parse one line of the schedule file""" + if line.startswith("#"): + return None - if el < el_min: - not_visible.append( + fields = line.split() + nfield = len(fields) + if nfield == 22: + # Deprecated prior to 2020-02 schedule format without boresight rotation field ( - patch.name, - "el < el_min ({:.2f} < {:.2f})".format(el / degree, el_min / degree), - ) - ) - el = None - elif el > el_max: - not_visible.append( - ( - patch.name, - "el > el_max ({:.2f} > {:.2f})".format(el / degree, el_max / degree), - ) - ) - el = None - if el is None: - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - return el - - -def check_sun_el(t, observer, sun, sun_el_max, args, not_visible): - log = Logger.get() - observer.date = to_DJD(t) - if sun_el_max < np.pi / 2: - sun.compute(observer) - if sun.alt > sun_el_max: - not_visible.append( - ( - patch.name, - "Sun too high {:.2f} rising = {}" - "".format(np.degrees(sun.alt), rising), - ) - ) - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - return True - return False - - -@function_timer -def scan_patch( - args, - el, - patch, - t, - fp_radius, - observer, - sun, - not_visible, - stop_timestamp, - sun_el_max, - rising, -): - """Attempt scanning the patch specified by corners at elevation el.""" - log = Logger.get() - azmins, azmaxs, aztimes = [], [], [] - if isinstance(patch, HorizontalPatch): - # No corners. Simply scan for the requested time - if rising and not patch.rising: - return False, azmins, azmaxs, aztimes, t - if check_sun_el(t, observer, sun, sun_el_max, args, not_visible): - return False, azmins, azmaxs, aztimes, t - azmins = [patch.az_min] - azmaxs = [patch.az_max] - aztimes = [t] - return True, azmins, azmaxs, aztimes, t + patch.scantime * 60 - # Traditional patch, track each corner - success = False - # and now track when all corners are past the elevation - tstop = t - tstep = 60 - to_cross = np.ones(len(patch.corners), dtype=np.bool) - scan_started = False - while True: - if tstop > stop_timestamp or tstop - t > 86400: - not_visible.append( - (patch.name, "Ran out of time rising = {}".format(rising)) - ) - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - break - if check_sun_el(tstop, observer, sun, sun_el_max, args, not_visible): - break - azs, els = patch.corner_coordinates(observer) - has_extent = current_extent( - azmins, - azmaxs, - aztimes, - patch.corners, - fp_radius, - el, - azs, - els, - rising, - tstop, - ) - if has_extent: - scan_started = True - - if rising: - good = azs <= np.pi - to_cross[np.logical_and(els > el + fp_radius, good)] = False - else: - good = azs >= np.pi - to_cross[np.logical_and(els < el - fp_radius, good)] = False - - # If we are alternating rising and setting scans, reject patches - # that appear on the wrong side of the sky. - if np.any((np.array(azmins) % (2 * np.pi)) < patch.az_min) or np.any( - (np.array(azmaxs) % (2 * np.pi)) > patch.az_max - ): - success = False - break - - if len(aztimes) > 0 and not np.any(to_cross): - # All corners made it across the CES line. 
- success = True - # Begin the scan before the patch is at the CES line - if aztimes[0] > t: - aztimes[0] -= tstep - break - - if scan_started and not has_extent: - # The patch went out of view before all corners - # could cross the elevation line. - success = False - break - tstop += tstep - - return success, azmins, azmaxs, aztimes, tstop - - -def unwind_angle(alpha, beta, multiple=2 * np.pi): - """Minimize absolute difference between alpha and beta. - - Minimize the absolute difference by adding a multiple of - 2*pi to beta to match alpha. - """ - while np.abs(alpha - beta - multiple) < np.abs(alpha - beta): - beta += multiple - while np.abs(alpha - beta + multiple) < np.abs(alpha - beta): - beta -= multiple - return beta - - -@function_timer -def scan_patch_pole( - args, - el, - patch, - t, - fp_radius, - observer, - sun, - not_visible, - stop_timestamp, - sun_el_max, -): - """Attempt scanning the patch specified by corners at elevation el. - - The pole scheduling mode will not wait for the patch to drift across. - It simply attempts to scan for the required time: args.pole_ces_time. - """ - log = Logger.get() - success = False - tstop = t - tstep = 60 - azmins, azmaxs, aztimes = [], [], [] - while True: - if tstop - t > args.pole_ces_time_s - 1: - # Succesfully scanned the maximum time - if len(azmins) > 0: - success = True - else: - not_visible.append( - (patch.name, "No overlap at {:.2f}".format(el / degree)) - ) - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - break - if tstop > stop_timestamp or tstop - t > 86400: - not_visible.append((patch.name, "Ran out of time")) - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - break - observer.date = to_DJD(tstop) - sun.compute(observer) - if sun.alt > sun_el_max: - not_visible.append( - (patch.name, "Sun too high {:.2f}".format(sun.alt / degree)) - ) - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - break - azs, els = patch.corner_coordinates(observer) - if np.amax(els) + fp_radius < el: - not_visible.append((patch.name, "Patch below {:.2f}".format(el / degree))) - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - break - radius = max(np.radians(1), fp_radius) - current_extent_pole( - azmins, azmaxs, aztimes, patch.corners, radius, el, azs, els, tstop - ) - tstop += tstep - return success, azmins, azmaxs, aztimes, tstop - - -@function_timer -def current_extent_pole( - azmins, azmaxs, aztimes, corners, fp_radius, el, azs, els, tstop -): - """Get the azimuthal extent of the patch along elevation el. - - Pole scheduling does not care if the patch is "rising" or "setting". - """ - azs_cross = [] - for i in range(len(corners)): - if np.abs(els[i] - el) < fp_radius: - azs_cross.append(azs[i]) - j = (i + 1) % len(corners) - if np.abs(els[j] - el) < fp_radius: - azs_cross.append(azs[j]) - if np.abs(els[i] - el) < fp_radius or np.abs(els[j] - el) < fp_radius: - continue - elif (els[i] - el) * (els[j] - el) < 0: - # Record the location where a line between the corners - # crosses el. 
- az1 = azs[i] - az2 = azs[j] - el1 = els[i] - el - el2 = els[j] - el - if az2 - az1 > np.pi: - az1 += 2 * np.pi - if az1 - az2 > np.pi: - az2 += 2 * np.pi - az_cross = (az1 + el1 * (az2 - az1) / (el1 - el2)) % (2 * np.pi) - azs_cross.append(az_cross) - - # Translate the azimuths at multiples of 2pi so they are in a - # compact cluster - - for i in range(1, len(azs_cross)): - azs_cross[i] = unwind_angle(azs_cross[0], azs_cross[i]) - - if len(azs_cross) > 0: - azs_cross = np.sort(azs_cross) - azmin = azs_cross[0] - azmax = azs_cross[-1] - azmax = unwind_angle(azmin, azmax) - if azmax - azmin > np.pi: - # Patch crosses the zero meridian - azmin, azmax = azmax, azmin - if len(azmins) > 0: - azmin = unwind_angle(azmins[-1], azmin) - azmax = unwind_angle(azmaxs[-1], azmax) - azmins.append(azmin) - azmaxs.append(azmax) - aztimes.append(tstop) - return - - -@function_timer -def current_extent( - azmins, azmaxs, aztimes, corners, fp_radius, el, azs, els, rising, t -): - """Get the azimuthal extent of the patch along elevation el. - - Find the pairs of corners that are on opposite sides - of the CES line. Record the crossing azimuth of a - line between the corners. - - """ - azs_cross = [] - for i in range(len(corners)): - j = (i + 1) % len(corners) - for el0 in [el - fp_radius, el, el + fp_radius]: - if (els[i] - el0) * (els[j] - el0) < 0: - # The corners are on opposite sides of the elevation line - az1 = azs[i] - az2 = azs[j] - el1 = els[i] - el0 - el2 = els[j] - el0 - az2 = unwind_angle(az1, az2) - az_cross = (az1 + el1 * (az2 - az1) / (el1 - el2)) % (2 * np.pi) - azs_cross.append(az_cross) - if fp_radius == 0: - break - if len(azs_cross) == 0: - return False - - azs_cross = np.array(azs_cross) - if rising: - good = azs_cross < np.pi - else: - good = azs_cross > np.pi - ngood = np.sum(good) - if ngood == 0: - return False - elif ngood > 1: - azs_cross = azs_cross[good] - - # Unwind the crossing azimuths to minimize the scatter - azs_cross = np.sort(azs_cross) - if azs_cross.size > 1: - ptp0 = azs_cross[-1] - azs_cross[0] - ptps = azs_cross[:-1] + 2 * np.pi - azs_cross[1:] - ptps = np.hstack([ptp0, ptps]) - i = np.argmin(ptps) - azs_cross[:i] += 2 * np.pi - np.roll(azs_cross, i) - - if len(azs_cross) > 1: - azmin = azs_cross[0] % (2 * np.pi) - azmax = azs_cross[-1] % (2 * np.pi) - if azmax - azmin > np.pi: - # Patch crosses the zero meridian - azmin, azmax = azmax, azmin - azmins.append(azmin) - azmaxs.append(azmax) - aztimes.append(t) - return True - return False - - -@function_timer -def add_scan( - args, - tstart, - tstop, - aztimes, - azmins, - azmaxs, - rising, - fp_radius, - observer, - sun, - moon, - fout, - fout_fmt, - patch, - el, - ods, - boresight_angle, - subscan=-1, - partial_scan=False, -): - """Make an entry for a CES in the schedule file.""" - log = Logger.get() - ces_time = tstop - tstart - if ces_time > args.ces_max_time_s: # and not args.pole_mode: - nsub = np.int(np.ceil(ces_time / args.ces_max_time_s)) - ces_time /= nsub - aztimes = np.array(aztimes) - azmins = np.array(azmins) - azmaxs = np.array(azmaxs) - azmaxs[0] = unwind_angle(azmins[0], azmaxs[0]) - for i in range(1, azmins.size): - azmins[i] = unwind_angle(azmins[0], azmins[i]) - azmaxs[i] = unwind_angle(azmaxs[0], azmaxs[i]) - azmaxs[i] = unwind_angle(azmins[i], azmaxs[i]) - # for i in range(azmins.size-1): - # if azmins[i+1] - azmins[i] > np.pi: - # azmins[i+1], azmaxs[i+1] = azmins[i+1]-2*np.pi, azmaxs[i+1]-2*np.pi - # if azmins[i+1] - azmins[i] < np.pi: - # azmins[i+1], azmaxs[i+1] = azmins[i+1]+2*np.pi, 
azmaxs[i+1]+2*np.pi - rising_string = "R" if rising else "S" - t1 = np.amin(aztimes) - entries = [] - while t1 < tstop - 1: - subscan += 1 - if args.operational_days: - # See if adding this scan would exceed the number of desired - # operational days - if subscan == 0: - tz = args.timezone / 24 - od = int(to_MJD(tstart) + tz) - ods.add(od) - if len(ods) > args.operational_days: - # Prevent adding further entries to the schedule once - # the number of operational days is full - break - t2 = min(t1 + ces_time, tstop) - if tstop - t2 < ces_time / 10: - # Append leftover scan to the last full subscan - t2 = tstop - ind = np.logical_and(aztimes >= t1, aztimes <= t2) - if np.all(aztimes > t2): - ind[0] = True - if np.all(aztimes < t1): - ind[-1] = True - if azmins[ind][0] < azmaxs[ind][0]: - azmin = np.amin(azmins[ind]) - azmax = np.amax(azmaxs[ind]) - else: - # we are, scan from the maximum to the minimum - azmin = np.amax(azmins[ind]) - azmax = np.amin(azmaxs[ind]) - if args.scan_margin > 0: - # Add a random error to the scan parameters to smooth out - # caustics in the hit map - delta_az = azmax - unwind_angle(azmax, azmin) - sub_az = delta_az * np.abs(np.random.randn()) * args.scan_margin * 0.5 - add_az = delta_az * np.abs(np.random.randn()) * args.scan_margin * 0.5 - azmin = (azmin - sub_az) % (2 * np.pi) - azmax = (azmax + add_az) % (2 * np.pi) - if t2 == tstop: - delta_t = t2 - t1 # tstop - tstart - add_t = delta_t * np.abs(np.random.randn()) * args.scan_margin - t2 += add_t - # Add the focal plane radius to the scan width - fp_radius_eff = fp_radius / np.cos(el) - azmin = (azmin - fp_radius_eff) % (2 * np.pi) / degree - azmax = (azmax + fp_radius_eff) % (2 * np.pi) / degree - # Get the Sun and Moon locations at the beginning and end - observer.date = to_DJD(t1) - sun.compute(observer) - moon.compute(observer) - sun_az1, sun_el1 = sun.az / degree, sun.alt / degree - moon_az1, moon_el1 = moon.az / degree, moon.alt / degree - moon_phase1 = moon.phase - # It is possible that the Sun or the Moon gets too close to the - # scan, even if they are far enough from the actual patch. 
- sun_too_close, sun_time = check_sso( - observer, - azmin, - azmax, - el / degree, - sun, - args.sun_avoidance_angle_deg, - t1, - t2, - ) - moon_too_close, moon_time = check_sso( - observer, - azmin, - azmax, - el / degree, - moon, - args.moon_avoidance_angle_deg, - t1, - t2, - ) - - if ( - (isinstance(patch, HorizontalPatch) or partial_scan) - and sun_time > tstart + 1 - and moon_time > tstart + 1 - ): - # Simply terminate the scan when the Sun or the Moon is too close - t2 = min(sun_time, moon_time) - if sun_too_close or moon_too_close: - tstop = t2 - if t1 == t2: - break - else: - # For regular patches, this is a failure condition - if sun_too_close: - log.debug("Sun too close") - raise SunTooClose - if moon_too_close: - log.debug("Moon too close") - raise MoonTooClose - - observer.date = to_DJD(t2) - sun.compute(observer) - moon.compute(observer) - sun_az2, sun_el2 = sun.az / degree, sun.alt / degree - moon_az2, moon_el2 = moon.az / degree, moon.alt / degree - moon_phase2 = moon.phase - # Create an entry in the schedule - entry = fout_fmt.format( - to_UTC(t1), - to_UTC(t2), - to_MJD(t1), - to_MJD(t2), - boresight_angle, - patch.name, - (azmin + args.boresight_offset_az_deg) % 360, - (azmax + args.boresight_offset_az_deg) % 360, - (el / degree + args.boresight_offset_el_deg), - rising_string, - sun_el1, - sun_az1, - sun_el2, - sun_az2, - moon_el1, - moon_az1, - moon_el2, - moon_az2, - 0.005 * (moon_phase1 + moon_phase2), - -1 - patch.partial_hits if partial_scan else patch.hits, - subscan, - ) - entries.append(entry) - if partial_scan: - # Never append more than one partial scan before - # checking if full scans are again available - tstop = t2 - break - t1 = t2 + args.gap_small_s - - # Write the entries - for entry in entries: - log.debug(entry) - fout.write(entry) - fout.flush() - - if not partial_scan: - # Only update the patch counters when performing full scans - patch.hits += 1 - patch.time += ces_time - if rising or args.pole_mode: - patch.rising_hits += 1 - patch.rising_time += ces_time - if not rising or args.pole_mode: - patch.setting_hits += 1 - patch.setting_time += ces_time - # The oscillate method will slightly shift the patch to - # blur the boundaries - patch.oscillate() - # Advance the time - tstop += args.gap_s - else: - patch.partial_hits += 1 - # Advance the time - tstop += args.gap_small_s - - return tstop, subscan - - -@function_timer -def add_cooler_cycle( - args, tstart, tstop, observer, sun, moon, fout, fout_fmt, patch, boresight_angle -): - """Make an entry for a cooler cycle in the schedule file.""" - log = Logger.get() - az = patch.az - el = patch.el - t1 = tstart - t2 = t1 + patch.cycle_time - - observer.date = to_DJD(t1) - sun.compute(observer) - moon.compute(observer) - sun_az1, sun_el1 = sun.az / degree, sun.alt / degree - moon_az1, moon_el1 = moon.az / degree, moon.alt / degree - moon_phase1 = moon.phase - - observer.date = to_DJD(t2) - sun.compute(observer) - moon.compute(observer) - sun_az2, sun_el2 = sun.az / degree, sun.alt / degree - moon_az2, moon_el2 = moon.az / degree, moon.alt / degree - moon_phase2 = moon.phase - - # Create an entry in the schedule - entry = fout_fmt.format( - to_UTC(t1), - to_UTC(t2), - to_MJD(t1), - to_MJD(t2), - boresight_angle, - patch.name, - az, - az, - el, - "R", - sun_el1, - sun_az1, - sun_el2, - sun_az2, - moon_el1, - moon_az1, - moon_el2, - moon_az2, - 0.005 * (moon_phase1 + moon_phase2), - patch.hits, - 0, - ) - - # Write the entry - log.debug(entry) - fout.write(entry) - fout.flush() - - patch.last_cycle_end = t2 - 
patch.hits += 1 - patch.time += t2 - t1 - patch.rising_hits += 1 - patch.rising_time += t2 - t1 - patch.setting_hits += 1 - patch.setting_time += t2 - t1 - - return t2 - - -@function_timer -def get_visible(args, observer, sun, moon, patches, el_min): - """Determine which patches are visible.""" - log = Logger.get() - visible = [] - not_visible = [] - for patch in patches: - # Reject all patches that have even one corner too close - # to the Sun or the Moon and patches that are completely - # below the horizon - in_view, msg = patch.visible( - el_min, - observer, - sun, - moon, - args.sun_avoidance_angle_deg, - args.moon_avoidance_angle_deg, - not (args.allow_partial_scans or args.delay_sso_check), - ) - if not in_view: - not_visible.append((patch.name, msg)) - - if in_view: - if not (args.allow_partial_scans or args.delay_sso_check): - # Finally, check that the Sun or the Moon are not - # inside the patch - if args.moon_avoidance_angle_deg >= 0 and patch.in_patch(moon): - not_visible.append((patch.name, "Moon in patch")) - in_view = False - if args.sun_avoidance_angle_deg >= 0 and patch.in_patch(sun): - not_visible.append((patch.name, "Sun in patch")) - in_view = False - if in_view: - visible.append(patch) - log.debug( - "In view: {}. el = {:.2f}..{:.2f}".format( - patch.name, np.degrees(patch.el_min), np.degrees(patch.el_max) - ) - ) - else: - log.debug("NOT VISIBLE: {}".format(not_visible[-1])) - return visible, not_visible - - -@function_timer -def get_boresight_angle(args, t, t0=0): - """Return the scheduled boresight angle at time t.""" - if args.boresight_angle_step_deg == 0 or args.boresight_angle_time_min == 0: - return 0 - - istep = int((t - t0) / 60 / args.boresight_angle_time_min) - return (args.boresight_angle_step_deg * istep) % 360 - - -@function_timer -def apply_blockouts(args, t_in): - """Check if `t` is inside a blockout period. - If so, advance it to the next unblocked time. - - Returns: The (new) time and a boolean flag indicating if - the time was blocked and subsequently advanced. 
- """ - if not args.block_out: - return t_in, False - log = Logger.get() - t = t_in - blocked = False - for block_out in args.block_out: - current = datetime.fromtimestamp(t, timezone.utc) - start, stop = block_out.split("-") - try: - # If the block out specifies the year then no extra logic is needed - start_year, start_month, start_day = start.split("/") - start = datetime( - int(start_year), - int(start_month), - int(start_day), - 0, - 0, - 0, - 0, - timezone.utc, - ) - except ValueError: - # No year given so must figure out which year is the right one - start_month, start_day = start.split("/") - start = datetime( - current.year, int(start_month), int(start_day), 0, 0, 0, 0, timezone.utc - ) - if start > current: - # This year's block out is still in the future but the past - # year's blockout may still be active - start = start.replace(year=start.year - 1) - try: - # If the block out specifies the year then no extra logic is needed - stop_year, stop_month, stop_day = stop.split("/") - stop = datetime( - int(stop_year), int(stop_month), int(stop_day), 0, 0, 0, 0, timezone.utc - ) - except ValueError: - # No year given so must figure out which year is the right one - stop_month, stop_day = stop.split("/") - stop = datetime( - start.year, int(stop_month), int(stop_day), 0, 0, 0, 0, timezone.utc - ) - if stop < start: - # The block out ends on a different year than it starts - stop = stop.replace(year=start.year + 1) - # advance the stop time by one day to make the definition inclusive - stop += timedelta(days=1) - if start < current and current < stop: - # `t` is inside the block out. - # Advance to the end of the block out. - log.info( - "{} is inside block out {}, advancing to {}".format( - current, block_out, stop - ) - ) - t = stop.timestamp() - blocked = True - return t, blocked - - -def advance_time(t, time_step, offset=0): - """Advance the time ensuring that the sampling falls - over same discrete times (multiples of time_step) - regardless of the current value of t. 
- """ - return offset + ((t - offset) // time_step + 1) * time_step - - -@function_timer -def build_schedule(args, start_timestamp, stop_timestamp, patches, observer, sun, moon): - log = Logger.get() - - sun_el_max = args.sun_el_max_deg * degree - el_min = args.el_min_deg - el_max = args.el_max_deg - if args.elevations_deg is None: - el_min = args.el_min_deg - el_max = args.el_max_deg - else: - # Override the elevation limits - el_min = 90 - el_max = 0 - for el in args.elevations_deg.split(","): - el = np.float(el) - el_min = min(el * 0.9, el_min) - el_max = max(el * 1.1, el_max) - el_min *= degree - el_max *= degree - fp_radius = args.fp_radius_deg * degree - - fname_out = args.out - dir_out = os.path.dirname(fname_out) - if dir_out: - log.info("Creating '{}'".format(dir_out)) - os.makedirs(dir_out, exist_ok=True) - fout = open(fname_out, "w") - - fout.write( - "#{:15} {:15} {:>15} {:>15} {:>15}\n".format( - "Site", "Telescope", "Latitude [deg]", "Longitude [deg]", "Elevation [m]" - ) - ) - fout.write( - " {:15} {:15} {:15.3f} {:15.3f} {:15.1f}\n".format( - args.site_name, - args.telescope, - np.degrees(observer.lat), - np.degrees(observer.lon), - observer.elevation, - ) - ) - - fout_fmt0 = ( - "#{:>20} {:>20} {:>14} {:>14} {:>8} " - "{:35} {:>8} {:>8} {:>8} {:>5} " - "{:>8} {:>8} {:>8} {:>8} " - "{:>8} {:>8} {:>8} {:>8} {:>5} " - "{:>5} {:>3}\n" - ) - - fout_fmt = ( - " {:20} {:20} {:14.6f} {:14.6f} {:8.2f} " - "{:35} {:8.2f} {:8.2f} {:8.2f} {:5} " - "{:8.2f} {:8.2f} {:8.2f} {:8.2f} " - "{:8.2f} {:8.2f} {:8.2f} {:8.2f} {:5.2f} " - "{:5} {:3}\n" - ) - - fout.write( - fout_fmt0.format( - "Start time UTC", - "Stop time UTC", - "Start MJD", - "Stop MJD", - "Rotation", - "Patch name", - "Az min", - "Az max", - "El", - "R/S", - "Sun el1", - "Sun az1", - "Sun el2", - "Sun az2", - "Moon el1", - "Moon az1", - "Moon el2", - "Moon az2", - "Phase", - "Pass", - "Sub", - ) - ) - - # Operational days - ods = set() - - t = start_timestamp - last_successful = t - while True: - t, blocked = apply_blockouts(args, t) - boresight_angle = get_boresight_angle(args, t) - if t > stop_timestamp: - break - if t - last_successful > 86400 or blocked: - # A long time has passed since the last successfully - # scheduled scan. - # Reset the individual patch az and el limits - for patch in patches: - patch.reset() - if blocked: - last_successful = t - else: - # Only try this once for every day. Swapping - # `t` <-> `last_successful` means that we will not trigger - # this branch again without scheduling a succesful scan - log.debug( - "Resetting patches and returning to the last successful " - "scan: {}".format(to_UTC(last_successful)) - ) - t, last_successful = last_successful, t - - # Determine which patches are observable at time t. - - log.debug("t = {}".format(to_UTC(t))) - # Determine which patches are visible - observer.date = to_DJD(t) - sun.compute(observer) - if sun.alt > sun_el_max: - log.debug( - "Sun elevation is {:.2f} > {:.2f}. 
Moving on.".format( - sun.alt / degree, sun_el_max / degree - ) - ) - t = advance_time(t, args.time_step_s) - continue - moon.compute(observer) - - visible, not_visible = get_visible(args, observer, sun, moon, patches, el_min) - - if len(visible) == 0: - log.debug("No patches visible at {}: {}".format(to_UTC(t), not_visible)) - t = advance_time(t, args.time_step_s) - continue - - # Determine if a cooler cycle sets a limit for observing - tstop_cooler = stop_timestamp - for patch in patches: - if isinstance(patch, CoolerCyclePatch): - ttest = patch.last_cycle_end + patch.hold_time_max - if ttest < tstop_cooler: - tstop_cooler = ttest - - # Order the targets by priority and attempt to observe with both - # a rising and setting scans until we find one that can be - # succesfully scanned. - # If the criteria are not met, advance the time by a step - # and try again - - prioritize(args, visible) - - if args.pole_mode: - success, t = attempt_scan_pole( - args, - observer, - visible, - not_visible, - t, - fp_radius, - el_max, - el_min, - stop_timestamp, - tstop_cooler, - sun, - moon, - sun_el_max, - fout, - fout_fmt, - ods, - boresight_angle, - ) + start_date, + start_time, + stop_date, + stop_time, + mjdstart, + mjdstop, + name, + azmin, + azmax, + el, + rs, + sun_el1, + sun_az1, + sun_el2, + sun_az2, + moon_el1, + moon_az1, + moon_el2, + moon_az2, + moon_phase, + scan, + subscan, + ) = line.split() + boresight_angle = 0 else: - success, t = attempt_scan( - args, - observer, - visible, - not_visible, - t, - fp_radius, - stop_timestamp, - tstop_cooler, - sun, - moon, - sun_el_max, - fout, - fout_fmt, - ods, + # 2020-02 schedule format with boresight rotation field + ( + start_date, + start_time, + stop_date, + stop_time, + mjdstart, + mjdstop, boresight_angle, - ) - - if args.operational_days and len(ods) > args.operational_days: - break - - if not success: - log.debug( - "No patches could be scanned at {}: {}".format(to_UTC(t), not_visible) - ) - t = advance_time(t, args.time_step_s) - else: - last_successful = t - - fout.close() - return - - -def parse_args(opts=None): - parser = argparse.ArgumentParser( - description="Generate ground observation schedule.", fromfile_prefix_chars="@" - ) - - parser.add_argument( - "--site-name", required=False, default="LBL", help="Observing site name" - ) - parser.add_argument( - "--telescope", - required=False, - default="Telescope", - help="Observing telescope name", - ) - parser.add_argument( - "--site-lon", - required=False, - default="-122.247", - help="Observing site longitude [PyEphem string]", - ) - parser.add_argument( - "--site-lat", - required=False, - default="37.876", - help="Observing site latitude [PyEphem string]", - ) - parser.add_argument( - "--site-alt", - required=False, - default=100, - type=np.float, - help="Observing site altitude [meters]", - ) - parser.add_argument( - "--scan-margin", - required=False, - default=0, - type=np.float, - help="Random fractional margin [0..1] added to the " - "scans to smooth out edge effects", - ) - parser.add_argument( - "--ra-period", - required=False, - default=10, - type=np.int, - help="Period of patch position oscillations in RA [visits]", - ) - parser.add_argument( - "--ra-amplitude-deg", - required=False, - default=0, - type=np.float, - help="Amplitude of patch position oscillations in RA [deg]", - ) - parser.add_argument( - "--dec-period", - required=False, - default=10, - type=np.int, - help="Period of patch position oscillations in DEC [visits]", - ) - parser.add_argument( - 
"--dec-amplitude-deg", - required=False, - default=0, - type=np.float, - help="Amplitude of patch position oscillations in DEC [deg]", - ) - parser.add_argument( - "--elevation-penalty-limit", - required=False, - default=0, - type=np.float, - help="Assign a penalty to observing elevations below this limit [degrees]", - ) - parser.add_argument( - "--elevation-penalty-power", - required=False, - default=2, - type=np.float, - help="Power in the elevation penalty function [> 0] ", - ) - parser.add_argument( - "--equalize-area", - required=False, - default=False, - action="store_true", - help="Adjust priorities to account for patch area", - ) - parser.add_argument( - "--equalize-time", - required=False, - action="store_true", - dest="equalize_time", - help="Modulate priority by integration time.", - ) - parser.add_argument( - "--equalize-scans", - required=False, - action="store_false", - dest="equalize_time", - help="Modulate priority by number of scans.", - ) - parser.set_defaults(equalize_time=False) - parser.add_argument( - "--patch", - required=True, - action="append", - help="Patch definition: " - "name,weight,lon1,lat1,lon2,lat2 ... " - "OR name,weight,lon,lat,width", - ) - parser.add_argument( - "--patch-coord", - required=False, - default="C", - help="Sky patch coordinate system [C,E,G]", - ) - parser.add_argument( - "--el-min-deg", - required=False, - default=30, - type=np.float, - help="Minimum elevation for a CES", - ) - parser.add_argument( - "--el-max-deg", - required=False, - default=80, - type=np.float, - help="Maximum elevation for a CES", - ) - parser.add_argument( - "--el-step-deg", - required=False, - default=0, - type=np.float, - help="Optional step to apply to minimum elevation", - ) - parser.add_argument( - "--alternate", - required=False, - default=False, - action="store_true", - help="Alternate between rising and setting scans", - ) - parser.add_argument( - "--fp-radius-deg", - required=False, - default=0, - type=np.float, - help="Focal plane radius [deg]", - ) - parser.add_argument( - "--sun-avoidance-angle-deg", - required=False, - default=30, - type=np.float, - help="Minimum distance between the Sun and the bore sight [deg]", - ) - parser.add_argument( - "--moon-avoidance-angle-deg", - required=False, - default=20, - type=np.float, - help="Minimum distance between the Moon and the bore sight [deg]", - ) - parser.add_argument( - "--sun-el-max-deg", - required=False, - default=90, - type=np.float, - help="Maximum allowed sun elevation [deg]", - ) - parser.add_argument( - "--boresight-angle-step-deg", - required=False, - default=0, - type=np.float, - help="Boresight rotation step size [deg]", - ) - parser.add_argument( - "--boresight-angle-time-min", - required=False, - default=0, - type=np.float, - help="Boresight rotation step interval [minutes]", - ) - parser.add_argument( - "--start", - required=False, - default="2000-01-01 00:00:00", - help="UTC start time of the schedule", - ) - parser.add_argument("--stop", required=False, help="UTC stop time of the schedule") - parser.add_argument( - "--block-out", - required=False, - action="append", - help="Range of UTC calendar days to omit from scheduling in format " - "START_MONTH/START_DAY-END_MONTH/END_DAY or " - "START_YEAR/START_MONTH/START_DAY-END_YEAR/END_MONTH/END_DAY " - "where YEAR, MONTH and DAY are integers. 
END days are inclusive", - ) - parser.add_argument( - "--operational-days", - required=False, - type=np.int, - help="Number of operational days to schedule (empty days do not count)", - ) - parser.add_argument( - "--timezone", - required=False, - type=np.int, - default=0, - help="Offset to apply to MJD to separate operational days [hours]", - ) - parser.add_argument( - "--gap-s", - required=False, - default=100, - type=np.float, - help="Gap between CES:es [seconds]", - ) - parser.add_argument( - "--gap-small-s", - required=False, - default=10, - type=np.float, - help="Gap between split CES:es [seconds]", - ) - parser.add_argument( - "--time-step-s", - required=False, - default=600, - type=np.float, - help="Time step after failed target acquisition [seconds]", - ) - parser.add_argument( - "--one-scan-per-day", - required=False, - default=False, - action="store_true", - help="Pad each operational day to have only one CES", - ) - parser.add_argument( - "--ces-max-time-s", - required=False, - default=900, - type=np.float, - help="Maximum length of a CES [seconds]", - ) - parser.add_argument( - "--debug", - required=False, - default=False, - action="store_true", - help="Write diagnostics, including patch plots.", - ) - parser.add_argument( - "--polmap", - required=False, - help="Include polarization from map in the plotted patches when --debug", - ) - parser.add_argument( - "--pol-min", - required=False, - type=np.float, - help="Lower plotting range for polarization map", - ) - parser.add_argument( - "--pol-max", - required=False, - type=np.float, - help="Upper plotting range for polarization map", - ) - parser.add_argument( - "--delay-sso-check", - required=False, - default=False, - action="store_true", - help="Only apply SSO check during simulated scan.", - ) - parser.add_argument( - "--pole-mode", - required=False, - default=False, - action="store_true", - help="Pole scheduling mode (no drift scan)", - ) - parser.add_argument( - "--pole-el-step-deg", - required=False, - default=0.25, - type=np.float, - help="Elevation step in pole scheduling mode [deg]", - ) - parser.add_argument( - "--pole-ces-time-s", - required=False, - default=3000, - type=np.float, - help="Time to scan at constant elevation in pole mode", - ) - parser.add_argument( - "--out", required=False, default="schedule.txt", help="Output filename" - ) - parser.add_argument( - "--boresight-offset-el-deg", - required=False, - default=0, - type=np.float, - help="Optional offset added to every observing elevation", - ) - parser.add_argument( - "--boresight-offset-az-deg", - required=False, - default=0, - type=np.float, - help="Optional offset added to every observing azimuth", - ) - parser.add_argument( - "--elevations-deg", - required=False, - help="Fixed observing elevations in a comma-separated list.", - ) - parser.add_argument( - "--partial-scans", - required=False, - action="store_true", - dest="allow_partial_scans", - help="Allow partials scans when full scans are not available.", - ) - parser.add_argument( - "--no-partial-scans", - required=False, - action="store_false", - dest="allow_partial_scans", - help="Allow partials scans when full scans are not available.", - ) - parser.set_defaults(allow_partial_scans=False) - - args = None - if opts is None: - try: - args = parser.parse_args() - except SystemExit: - sys.exit(0) - else: + name, + azmin, + azmax, + el, + rs, + sun_el1, + sun_az1, + sun_el2, + sun_az2, + moon_el1, + moon_az1, + moon_el2, + moon_az2, + moon_phase, + scan, + subscan, + ) = line.split() + start_time = 
start_date + " " + start_time + stop_time = stop_date + " " + stop_time + # Define season as a calendar year. This can be + # changed later and could even be in the schedule file. + season = int(start_date.split("-")[0]) try: - args = parser.parse_args(opts) - except SystemExit: - sys.exit(0) - - if args.operational_days is None and args.stop is None: - raise RuntimeError("You must provide --stop or --operational-days") - - stop_time = None - if args.start.endswith("Z"): - start_time = dateutil.parser.parse(args.start) - if args.stop is not None: - if not args.stop.endswith("Z"): - raise RuntimeError("Either both or neither times must be given in UTC") - stop_time = dateutil.parser.parse(args.stop) - else: - if args.timezone < 0: - tz = "-{:02}00".format(-args.timezone) - else: - tz = "+{:02}00".format(args.timezone) - start_time = dateutil.parser.parse(args.start + tz) - if args.stop is not None: - if args.stop.endswith("Z"): - raise RuntimeError("Either both or neither times must be given in UTC") - stop_time = dateutil.parser.parse(args.stop + tz) - - start_timestamp = start_time.timestamp() - if stop_time is None: - # Keep scheduling until the desired number of operational days is full. - stop_timestamp = 2 ** 60 - else: + start_time = dateutil.parser.parse(start_time + " +0000") + stop_time = dateutil.parser.parse(stop_time + " +0000") + except Exception: + start_time = dateutil.parser.parse(start_time) + stop_time = dateutil.parser.parse(stop_time) + start_timestamp = start_time.timestamp() stop_timestamp = stop_time.timestamp() - return args, start_timestamp, stop_timestamp - - -@function_timer -def parse_patch_sso(args, parts): - log = Logger.get() - log.info("SSO format") - name = parts[0] - weight = float(parts[2]) - radius = float(parts[3]) * degree - patch = SSOPatch(name, weight, radius, elevations=args.elevations_deg) - return patch - - -@function_timer -def parse_patch_cooler(args, parts, last_cycle_end): - log = Logger.get() - log.info("Cooler cycle format") - weight = float(parts[2]) - power = float(parts[3]) - hold_time_min = float(parts[4]) # in hours - hold_time_max = float(parts[5]) # in hours - cycle_time = float(parts[6]) # in hours - az = float(parts[7]) - el = float(parts[8]) - patch = CoolerCyclePatch( - weight, power, hold_time_min, hold_time_max, cycle_time, az, el, last_cycle_end - ) - return patch - - -@function_timer -def parse_patch_horizontal(args, parts): - """Parse an explicit patch definition line""" - log = Logger.get() - corners = [] - log.info("Horizontal format") - name = parts[0] - weight = float(parts[2]) - azmin = float(parts[3]) * degree - azmax = float(parts[4]) * degree - el = float(parts[5]) * degree - scantime = float(parts[6]) # minutes - patch = HorizontalPatch(name, weight, azmin, azmax, el, scantime) - return patch - - -@function_timer -def parse_patch_explicit(args, parts): - """Parse an explicit patch definition line""" - log = Logger.get() - corners = [] - log.info("Explicit-corners format: ") - name = parts[0] - i = 2 - definition = "" - while i + 1 < len(parts): - definition += " ({}, {})".format(parts[i], parts[i + 1]) - try: - # Assume coordinates in degrees - lon = float(parts[i]) * degree - lat = float(parts[i + 1]) * degree - except ValueError: - # Failed simple interpreration, assume pyEphem strings - lon = parts[i] - lat = parts[i + 1] - i += 2 - if args.patch_coord == "C": - corner = ephem.Equatorial(lon, lat, epoch="2000") - elif args.patch_coord == "E": - corner = ephem.Ecliptic(lon, lat, epoch="2000") - elif 
args.patch_coord == "G": - corner = ephem.Galactic(lon, lat, epoch="2000") - else: - raise RuntimeError("Unknown coordinate system: {}".format(args.patch_coord)) - corner = ephem.Equatorial(corner) - if corner.dec > 80 * degree or corner.dec < -80 * degree: - raise RuntimeError( - "{} has at least one circumpolar corner. " - "Circumpolar targeting not yet implemented".format(name) - ) - patch_corner = ephem.FixedBody() - patch_corner._ra = corner.ra - patch_corner._dec = corner.dec - corners.append(patch_corner) - log.info(definition) - return corners - - -@function_timer -def parse_patch_rectangular(args, parts): - """Parse a rectangular patch definition line""" - log = Logger.get() - corners = [] - log.info("Rectangular format") - name = parts[0] - try: - # Assume coordinates in degrees - lon_min = float(parts[2]) * degree - lat_max = float(parts[3]) * degree - lon_max = float(parts[4]) * degree - lat_min = float(parts[5]) * degree - except ValueError: - # Failed simple interpreration, assume pyEphem strings - lon_min = parts[2] - lat_max = parts[3] - lon_max = parts[4] - lat_min = parts[5] - if args.patch_coord == "C": - coordconv = ephem.Equatorial - elif args.patch_coord == "E": - coordconv = ephem.Ecliptic - elif args.patch_coord == "G": - coordconv = ephem.Galactic - else: - raise RuntimeError("Unknown coordinate system: {}".format(args.patch_coord)) - - nw_corner = coordconv(lon_min, lat_max, epoch="2000") - ne_corner = coordconv(lon_max, lat_max, epoch="2000") - se_corner = coordconv(lon_max, lat_min, epoch="2000") - sw_corner = coordconv(lon_min, lat_min, epoch="2000") - - lon_max = unwind_angle(lon_min, lon_max) - if lon_min < lon_max: - delta_lon = lon_max - lon_min - else: - delta_lon = lon_min - lon_max - area = (np.cos(np.pi / 2 - lat_max) - np.cos(np.pi / 2 - lat_min)) * delta_lon - - corners_temp = [] - add_side(nw_corner, ne_corner, corners_temp, coordconv) - add_side(ne_corner, se_corner, corners_temp, coordconv) - add_side(se_corner, sw_corner, corners_temp, coordconv) - add_side(sw_corner, nw_corner, corners_temp, coordconv) - - for corner in corners_temp: - if corner.dec > 80 * degree or corner.dec < -80 * degree: - raise RuntimeError( - "{} has at least one circumpolar corner. " - "Circumpolar targeting not yet implemented".format(name) - ) - patch_corner = ephem.FixedBody() - patch_corner._ra = corner.ra - patch_corner._dec = corner.dec - corners.append(patch_corner) - return corners, area - - -@function_timer -def add_side(corner1, corner2, corners_temp, coordconv): - """Add one side of a rectangle. - - Add one side of a rectangle with enough interpolation points. 
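The stepping above is easiest to see in isolation. A minimal standalone sketch of the same logic, using plain (lon, lat) pairs in radians instead of pyEphem bodies (the helper name interp_side is illustrative, not part of this patch):

    import numpy as np

    def interp_side(lon1, lat1, lon2, lat2, step_deg=1.0):
        # Interpolate along one side of a rectangular patch.  Exactly one
        # of longitude or latitude may vary, mirroring the add_side() logic.
        step = np.radians(step_deg)
        points = [(lon1, lat1)]
        if lon1 == lon2:
            # Constant-longitude side: step in latitude
            lat_step = step if lat1 < lat2 else -step
            for lat in np.arange(lat1 + lat_step, lat2, lat_step):
                points.append((lon1, lat))
        elif lat1 == lat2:
            # Constant-latitude side: widen the longitude step by
            # 1 / cos(lat) so the on-sky spacing stays about one degree
            lon_step = step / np.cos(lat1)
            if lon1 > lon2:
                lon_step = -lon_step
            for lon in np.arange(lon1 + lon_step, lon2, lon_step):
                points.append((lon, lat1))
        else:
            raise RuntimeError("Both latitude and longitude change")
        return points

    # A 10 degree side at lat = 60 deg needs only four interpolated
    # points because of the cos(lat) correction (the end corner is
    # appended by the caller, as in add_side).
    pts = interp_side(0.0, np.radians(60), np.radians(10), np.radians(60))
    print(len(pts))  # 5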
- """ - step = np.radians(1) - corners_temp.append(ephem.Equatorial(corner1)) - lon1 = corner1.ra - lon2 = corner2.ra - lat1 = corner1.dec - lat2 = corner2.dec - if lon1 == lon2: - lon = lon1 - if lat1 < lat2: - lat_step = step - else: - lat_step = -step - for lat in np.arange(lat1, lat2, lat_step): - corners_temp.append(ephem.Equatorial(coordconv(lon, lat, epoch="2000"))) - elif lat1 == lat2: - lat = lat1 - if lon1 < lon2: - lon_step = step / np.cos(lat) - else: - lon_step = -step / np.cos(lat) - for lon in np.arange(lon1, lon2, lon_step): - corners_temp.append(ephem.Equatorial(coordconv(lon, lat, epoch="2000"))) - else: - raise RuntimeError("add_side: both latitude and longitude change") - return - - -@function_timer -def parse_patch_center_and_width(args, parts): - """Parse center-and-width patch definition""" - log = Logger.get() - corners = [] - log.info("Center-and-width format") - try: - # Assume coordinates in degrees - lon = float(parts[2]) * degree - lat = float(parts[3]) * degree - except ValueError: - # Failed simple interpreration, assume pyEphem strings - lon = parts[2] - lat = parts[3] - width = float(parts[4]) * degree - if args.patch_coord == "C": - center = ephem.Equatorial(lon, lat, epoch="2000") - elif args.patch_coord == "E": - center = ephem.Ecliptic(lon, lat, epoch="2000") - elif args.patch_coord == "G": - center = ephem.Galactic(lon, lat, epoch="2000") - else: - raise RuntimeError("Unknown coordinate system: {}".format(args.patch_coord)) - center = ephem.Equatorial(center) - # Synthesize 8 corners around the center - phi = center.ra - theta = center.dec - r = width / 2 - ncorner = 8 - angstep = 2 * np.pi / ncorner - for icorner in range(ncorner): - ang = angstep * icorner - delta_theta = np.cos(ang) * r - delta_phi = np.sin(ang) * r / np.cos(theta + delta_theta) - patch_corner = ephem.FixedBody() - patch_corner._ra = phi + delta_phi - patch_corner._dec = theta + delta_theta - corners.append(patch_corner) - return corners - - -@function_timer -def parse_patches(args, observer, sun, moon, start_timestamp, stop_timestamp): - # Parse the patch definitions - log = Logger.get() - patches = [] - total_weight = 0 - for patch_def in args.patch: - parts = patch_def.split(",") - name = parts[0] - log.info('Adding patch "{}"'.format(name)) - if parts[1].upper() == "HORIZONTAL": - patch = parse_patch_horizontal(args, parts) - elif parts[1].upper() == "SSO": - patch = parse_patch_sso(args, parts) - elif parts[1].upper() == "COOLER": - patch = parse_patch_cooler(args, parts, start_timestamp) - else: - weight = float(parts[1]) - if np.isnan(weight): - raise RuntimeError("Patch has NaN priority: {}".format(patch_def)) - if weight == 0: - raise RuntimeError("Patch has zero priority: {}".format(patch_def)) - if len(parts[2:]) == 3: - corners = parse_patch_center_and_width(args, parts) - area = None - elif len(parts[2:]) == 4: - corners, area = parse_patch_rectangular(args, parts) - else: - corners = parse_patch_explicit(args, parts) - area = None - patch = Patch( - name, - weight, - corners, - el_min=args.el_min_deg * degree, - el_max=args.el_max_deg * degree, - el_step=args.el_step_deg * degree, - alternate=args.alternate, - site_lat=observer.lat, - area=area, - ra_period=args.ra_period, - ra_amplitude=args.ra_amplitude_deg, - dec_period=args.dec_period, - dec_amplitude=args.dec_amplitude_deg, - elevations=args.elevations_deg, + # useful metadata + mindist_sun = min_sso_dist( + *np.array([el, azmin, azmax, sun_el1, sun_az1, sun_el2, sun_az2]).astype( + np.float ) - if 
args.equalize_area or args.debug: - area = patch.get_area(observer, nside=32, equalize=args.equalize_area) - total_weight += patch.weight - patches.append(patch) - - log.debug( - "Highest possible observing elevation: {:.2f} degrees." - " Sky fraction = {:.4f}".format(patches[-1].el_max0 / degree, patch._area) ) - - if args.debug: - import matplotlib.pyplot as plt - - polmap = None - if args.polmap: - polmap = hp.read_map(args.polmap, [1, 2]) - bad = polmap[0] == hp.UNSEEN - polmap = np.sqrt(polmap[0] ** 2 + polmap[1] ** 2) * 1e6 - polmap[bad] = hp.UNSEEN - plt.style.use("default") - cmap = cm.inferno - cmap.set_under("w") - plt.figure(figsize=[20, 4]) - plt.subplots_adjust(left=0.1, right=0.9) - patch_color = "black" - sun_color = "black" - sun_lw = 8 - sun_avoidance_color = "gray" - moon_color = "black" - moon_lw = 2 - moon_avoidance_color = "gray" - alpha = 0.5 - avoidance_alpha = 0.01 - sun_step = np.int(86400 * 1) - moon_step = np.int(86400 * 0.1) - for iplot, coord in enumerate("CEG"): - scoord = {"C": "Equatorial", "E": "Ecliptic", "G": "Galactic"}[coord] - title = scoord # + ' patch locations' - if polmap is None: - nside = 256 - avoidance_map = np.zeros(12 * nside ** 2) - # hp.mollview(np.zeros(12) + hp.UNSEEN, coord=coord, cbar=False, - # title='', sub=[1, 3, 1 + iplot], cmap=cmap) - else: - hp.mollview( - polmap, - coord="G" + coord, - cbar=True, - unit="$\mu$K", - min=args.polmin, - max=args.polmax, - norm="log", - cmap=cmap, - title=title, - sub=[1, 3, 1 + iplot], - notext=True, - format="%.1f", - xsize=1600, - ) - # Plot sun and moon avoidance circle - sunlon, sunlat = [], [] - moonlon, moonlat = [], [] - sun_avoidance_angle = args.sun_avoidance_angle_deg * degree - moon_avoidance_angle = args.moon_avoidance_angle_deg * degree - for lon, lat, sso, angle_min, color, step, lw in [ - ( - sunlon, - sunlat, - sun, - sun_avoidance_angle, - sun_avoidance_color, - sun_step, - sun_lw, - ), - ( - moonlon, - moonlat, - moon, - moon_avoidance_angle, - moon_avoidance_color, - moon_step, - moon_lw, - ), - ]: - for t in range(np.int(start_timestamp), np.int(stop_timestamp), step): - observer.date = to_DJD(t) - sso.compute(observer) - lon.append(sso.a_ra / degree) - lat.append(sso.a_dec / degree) - if angle_min <= 0: - continue - if polmap is None: - # accumulate avoidance map - vec = hp.dir2vec(lon[-1], lat[-1], lonlat=True) - pix = hp.query_disc(nside, vec, angle_min) - for p in pix: - avoidance_map[p] += 1 - else: - # plot a circle around the location - clon, clat = [], [] - phi = sso.a_ra - theta = sso.a_dec - r = angle_min - for ang in np.linspace(0, 2 * np.pi, 36): - dtheta = np.cos(ang) * r - dphi = np.sin(ang) * r / np.cos(theta + dtheta) - clon.append((phi + dphi) / degree) - clat.append((theta + dtheta) / degree) - hp.projplot( - clon, - clat, - "-", - color=color, - alpha=avoidance_alpha, - lw=lw, - threshold=1, - lonlat=True, - coord="C", - ) - if polmap is None: - avoidance_map[avoidance_map == 0] = hp.UNSEEN - hp.mollview( - avoidance_map, - coord="C" + coord, - cbar=False, - title="", - sub=[1, 3, 1 + iplot], - cmap=cmap, - ) - hp.graticule(30, verbose=False) - - # Plot patches - for patch in patches: - lon = [corner._ra / degree for corner in patch.corners] - lat = [corner._dec / degree for corner in patch.corners] - if len(lon) == 0: - # Special patch without sky coordinates - continue - lon.append(lon[0]) - lat.append(lat[0]) - log.info( - "{} corners:\n lon = {}\n lat= {}".format(patch.name, lon, lat) - ) - hp.projplot( - lon, - lat, - "-", - threshold=1, - lonlat=True, - 
coord="C", - color=patch_color, - lw=2, - alpha=alpha, - ) - if len(patches) > 10: - continue - # label the patch - it = np.argmax(lat) - area = patch.get_area(observer) - title = "{} {:.2f}%".format(patch.name, 100 * area) - hp.projtext( - lon[it], - lat[it], - title, - lonlat=True, - coord="C", - color=patch_color, - fontsize=14, - alpha=alpha, - ) - if polmap is not None: - # Plot Sun and Moon trajectory - hp.projplot( - sunlon, - sunlat, - "-", - color=sun_color, - alpha=alpha, - threshold=1, - lonlat=True, - coord="C", - lw=sun_lw, - ) - hp.projplot( - moonlon, - moonlat, - "-", - color=moon_color, - alpha=alpha, - threshold=1, - lonlat=True, - coord="C", - lw=moon_lw, - ) - hp.projtext( - sunlon[0], - sunlat[0], - "Sun", - color=sun_color, - lonlat=True, - coord="C", - fontsize=14, - alpha=alpha, - ) - hp.projtext( - moonlon[0], - moonlat[0], - "Moon", - color=moon_color, - lonlat=True, - coord="C", - fontsize=14, - alpha=alpha, - ) - - plt.savefig("patches.png") - plt.close() - - # Normalize the weights - for i in range(len(patches)): - patches[i].weight /= total_weight - return patches - - -def run_scheduler(opts=None): - args, start_timestamp, stop_timestamp = parse_args(opts=opts) - - observer = ephem.Observer() - observer.lon = args.site_lon - observer.lat = args.site_lat - observer.elevation = args.site_alt # In meters - observer.epoch = "2000" - observer.temp = 0 # in Celcius - observer.compute_pressure() - - sun = ephem.Sun() - moon = ephem.Moon() - - patches = parse_patches(args, observer, sun, moon, start_timestamp, stop_timestamp) - - build_schedule(args, start_timestamp, stop_timestamp, patches, observer, sun, moon) - return + mindist_moon = min_sso_dist( + *np.array( + [el, azmin, azmax, moon_el1, moon_az1, moon_el2, moon_az2] + ).astype(np.float) + ) + el_sun = max(float(sun_el1), float(sun_el2)) + return ( + start_timestamp, + start_date, + stop_timestamp, + season, + float(mjdstart), + float(mjdstop), + float(boresight_angle), + name, + float(azmin), + float(azmax), + float(el), + int(scan), + int(subscan), + mindist_sun, + mindist_moon, + el_sun, + rs.upper() == "R", + ) diff --git a/src/toast/schedule_build.py b/src/toast/schedule_build.py new file mode 100644 index 000000000..69cf985f0 --- /dev/null +++ b/src/toast/schedule_build.py @@ -0,0 +1,2882 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +""" +This script creates a CES schedule file that can be used as input +to toast_ground_sim.py +""" + +import argparse +from datetime import datetime, timezone, timedelta +import dateutil.parser +import os +import sys +import traceback + +import numpy as np +from scipy.constants import degree +from matplotlib import cm + +import ephem +import healpy as hp + +from .utils import Logger +from . 
import qarray as qa +from .timing import function_timer + + +XAXIS, YAXIS, ZAXIS = np.eye(3) + + +class TooClose(Exception): + pass + + +class SunTooClose(TooClose): + pass + + +class MoonTooClose(TooClose): + pass + + +class Patch(object): + + hits = 0 + partial_hits = 0 + rising_hits = 0 + setting_hits = 0 + time = 0 + rising_time = 0 + setting_time = 0 + step = -1 + az_min = 0 + az_max = 2 * np.pi + _area = None + current_el_min = 0 + current_el_max = 0 + el_min0 = 0 + el_max0 = np.pi / 2 + el_min = el_min0 + el_max = el_max0 + el_step = 0 + alternate = False + ra_amplitude = None + ra_period = 10 + dec_amplitude = None + dec_period = 10 + corners = [] + preferred_el = None + + def __init__( + self, + name, + weight, + corners, + el_min=0, + el_max=np.pi / 2, + el_step=0, + alternate=False, + site_lat=0, + area=None, + ra_period=10, + ra_amplitude=None, + dec_period=10, + dec_amplitude=None, + elevations=None, + ): + self.name = name + self.weight = weight + self.corners = corners + self.el_min0 = el_min + self.el_min = el_min + self.el_max0 = el_max + self.el_step = np.abs(el_step) + self.alternate = alternate + self._area = area + self.site_lat = site_lat + self.ra_period = ra_period + self.ra_amplitude = np.radians(ra_amplitude) + self.dec_period = dec_period + self.dec_amplitude = np.radians(dec_amplitude) + # Use the site latitude to infer the lowest elevation that all + # corners cross. + site_el_max = np.pi / 2 + for corner in corners: + el_max = np.pi / 2 - np.abs(corner._dec - self.site_lat) + if el_max < site_el_max: + site_el_max = el_max + if elevations is None: + if site_el_max < self.el_max0: + self.el_max0 = site_el_max + self.elevations = None + else: + # Parse the allowed elevations + try: + # Try parsing as a string + self.elevations = [ + np.radians(float(el)) for el in elevations.split(",") + ] + except AttributeError: + # Try parsing as an iterable + self.elevations = [np.radians(el) for el in elevations] + self.elevations = np.sort(np.array(self.elevations)) + # Check if any of the allowed elevations is above the highest + # observable elevation + bad = self.elevations > site_el_max + if np.any(bad): + good = np.logical_not(bad) + if np.any(good): + print( + "WARNING: {} of the observing elevations are too high " + "for '{}': {} > {:.2f} deg".format( + np.sum(bad), + self.name, + np.degrees(self.elevations[bad]), + np.degrees(site_el_max), + ), + flush=True, + ) + self.elevations = self.elevations[good] + else: + print( + "ERROR: all of the observing elevations are too high for {}. 
" + "Maximum observing elevation is {} deg".format( + self.name, np.degrees(site_el_max) + ), + flush=True, + ) + sys.exit() + self.el_min0 = np.amin(self.elevations) + self.el_max0 = np.amax(self.elevations) + if el_step != 0: + self.nstep_el = int((self.el_max0 - self.el_min0 + 1e-3) // el_step) + 1 + self.elevations0 = self.elevations + self.el_max = self.el_max0 + self.el_lim = self.el_min0 + self.step_azel() + return + + def oscillate(self): + if self.ra_amplitude: + # Oscillate RA + halfperiod = self.ra_period // 2 + old_phase = np.fmod(self.hits - 1 + halfperiod, self.ra_period) - halfperiod + new_phase = np.fmod(self.hits + halfperiod, self.ra_period) - halfperiod + old_offset = old_phase / halfperiod * self.ra_amplitude + new_offset = new_phase / halfperiod * self.ra_amplitude + offset = new_offset - old_offset + for corner in self.corners: + corner._ra += offset + if self.dec_amplitude: + # Oscillate DEC + halfperiod = self.dec_period // 2 + old_phase = ( + np.fmod(self.hits - 1 + halfperiod, self.dec_period) - halfperiod + ) + new_phase = np.fmod(self.hits + halfperiod, self.dec_period) - halfperiod + old_offset = old_phase / halfperiod * self.dec_amplitude + new_offset = new_phase / halfperiod * self.dec_amplitude + offset = new_offset - old_offset + for corner in self.corners: + corner._dec += offset + return + + @function_timer + def get_area(self, observer, nside=32, equalize=False): + self.update(observer) + if self._area is None: + npix = 12 * nside ** 2 + hitmap = np.zeros(npix) + for corner in self.corners: + corner.compute(observer) + for pix in range(npix): + lon, lat = hp.pix2ang(nside, pix, lonlat=True) + center = ephem.FixedBody() + center._ra = np.radians(lon) + center._dec = np.radians(lat) + center.compute(observer) + hitmap[pix] = self.in_patch(center) + self._area = np.sum(hitmap) / hitmap.size + if self._area == 0: + raise RuntimeError("Patch has zero area!") + if equalize: + self.weight /= self._area + return self._area + + @function_timer + def corner_coordinates(self, observer=None, unwind=False): + """Return the corner coordinates in horizontal frame. + + PyEphem measures the azimuth East (clockwise) from North. + """ + azs = [] + els = [] + az0 = None + for corner in self.corners: + if observer is not None: + corner.compute(observer) + if unwind: + if az0 is None: + az0 = corner.az + azs.append(unwind_angle(az0, corner.az)) + else: + azs.append(corner.az) + els.append(corner.alt) + return np.array(azs), np.array(els) + + @function_timer + def in_patch(self, obj): + """ + Determine if the object (e.g. Sun or Moon) is inside the patch + by using a ray casting algorithm. The ray is cast along a + constant meridian to follow a great circle. 
+ """ + az0 = obj.az + # Get corner coordinates, assuming they were already computed + azs, els = self.corner_coordinates() + els_cross = [] + for i in range(len(self.corners)): + az1 = azs[i] + el1 = els[i] + j = (i + 1) % len(self.corners) + az2 = unwind_angle(az1, azs[j]) + el2 = els[j] + azmean = 0.5 * (az1 + az2) + az0 = unwind_angle(azmean, np.float(obj.az), np.pi) + if (az1 - az0) * (az2 - az0) > 0: + # the constant meridian is not between the two corners + continue + el_cross = el1 + (az1 - az0) * (el2 - el1) / (az1 - az2) + if np.abs(obj.az - (az0 % (2 * np.pi))) < 1e-3: + els_cross.append(el_cross) + elif el_cross > 0: + els_cross.append(np.pi - el_cross) + else: + els_cross.append(-np.pi - el_cross) + + els_cross = np.array(els_cross) + if els_cross.size < 2: + return False + + # Unwind the crossing elevations to minimize the scatter + els_cross = np.sort(els_cross) + if els_cross.size > 1: + ptps = [] + for i in range(els_cross.size): + els_cross_alt = els_cross.copy() + els_cross_alt[:i] += 2 * np.pi + ptps.append(np.ptp(els_cross_alt)) + i = np.argmin(ptps) + if i > 0: + els_cross[:i] += 2 * np.pi + els_cross = np.sort(els_cross) + el_mean = np.mean(els_cross) + el0 = unwind_angle(el_mean, np.float(obj.alt)) + + ncross = np.sum(els_cross > el0) + + if ncross % 2 == 0: + # Even number of crossings means that the object is outside + # of the patch + return False + return True + + @function_timer + def step_azel(self): + self.step += 1 + if self.el_step != 0 and self.alternate: + # alternate between rising and setting scans + if self.rising_hits < self.setting_hits: + # Schedule a rising scan + istep = self.rising_hits % self.nstep_el + self.el_min = min(self.el_max0, self.el_min0 + istep * self.el_step) + self.el_max = self.el_max0 + self.az_min = 0 + self.az_max = np.pi + else: + # Schedule a setting scan + istep = self.setting_hits % self.nstep_el + self.el_min = self.el_min0 + self.el_max = max(self.el_min0, self.el_max0 - istep * self.el_step) + self.az_min = np.pi + self.az_max = 2 * np.pi + else: + if self.alternate: + self.az_min = (self.az_min + np.pi) % (2 * np.pi) + self.az_max = self.az_min + np.pi + else: + self.el_min += self.el_step + if self.el_min > self.el_max0: + self.el_min = self.el_min0 + if self.el_step != 0 and self.elevations is not None: + tol = np.radians(0.1) + self.elevations = np.array( + [ + el + for el in self.elevations0 + if (el + tol >= self.el_min and el - tol <= self.el_max) + ] + ) + return + + def reset(self): + self.step += 1 + self.el_min = self.el_min0 + self.el_max = self.el_max0 + self.elevations = self.elevations0 + self.az_min = 0 + if self.alternate: + self.az_max = np.pi + else: + self.az_max = 2 * np.pi + return + + def visible( + self, + el_min, + observer, + sun, + moon, + sun_avoidance_angle, + moon_avoidance_angle, + check_sso, + ): + self.update(observer) + patch_el_max = -1000 + patch_el_min = 1000 + in_view = False + for i, corner in enumerate(self.corners): + corner.compute(observer) + patch_el_min = min(patch_el_min, corner.alt) + patch_el_max = max(patch_el_max, corner.alt) + if corner.alt > el_min: + # At least one corner is visible + in_view = True + if check_sso: + if sun_avoidance_angle > 0: + angle = np.degrees(ephem.separation(sun, corner)) + if angle < sun_avoidance_angle: + # Patch is too close to the Sun + return False, "Too close to Sun {:.2f}".format(angle) + if moon_avoidance_angle > 0: + angle = np.degrees(ephem.separation(moon, corner)) + if angle < moon_avoidance_angle: + # Patch is too close to the Moon + 
return False, "Too close to Moon {:.2f}".format(angle) + if not in_view: + msg = "Below el_min = {:.2f} at el = {:.2f}..{:.2f}.".format( + np.degrees(el_min), np.degrees(patch_el_min), np.degrees(patch_el_max) + ) + else: + msg = "in view" + self.current_el_min = patch_el_min + self.current_el_max = patch_el_max + + return in_view, msg + + def update(self, *args, **kwargs): + """ + A virtual method that is implemented by moving targets + """ + pass + + +class SSOPatch(Patch): + def __init__(self, name, weight, radius): + self.name = name + self.weight = weight + self.radius = radius + try: + self.body = getattr(ephem, name)() + except: + raise RuntimeError("Failed to initialize {} from pyEphem".format(name)) + self.corners = None + return + + def update(self, observer): + """ + Calculate the relative position of the SSO at a given time + """ + self.body.compute(observer) + ra, dec = self.body.ra, self.body.dec + # Synthesize 8 corners around the center + phi = ra + theta = dec + r = self.radius + ncorner = 8 + angstep = 2 * np.pi / ncorner + self.corners = [] + for icorner in range(ncorner): + ang = angstep * icorner + delta_theta = np.cos(ang) * r + delta_phi = np.sin(ang) * r / np.cos(theta + delta_theta) + patch_corner = ephem.FixedBody() + patch_corner._ra = phi + delta_phi + patch_corner._dec = theta + delta_theta + self.corners.append(patch_corner) + return + + +class CoolerCyclePatch(Patch): + def __init__( + self, + weight, + power, + hold_time_min, + hold_time_max, + cycle_time, + az, + el, + last_cycle_end, + ): + # Standardized name for cooler cycles + self.name = "cooler_cycle" + self.hold_time_min = hold_time_min * 3600 + self.hold_time_max = hold_time_max * 3600 + self.cycle_time = cycle_time * 3600 + self.az = az + self.el = el + self.last_cycle_end = last_cycle_end + self.weight0 = weight + self.weight = weight + self.power = power + return + + def get_area(self, *args, **kwargs): + if self._area is None: + self._area = 0 + return self._area + + def corner_coordinates(self, *args, **kwargs): + return None + + def in_patch(self, *args, **kwargs): + return False + + def step_azel(self, *args, **kwargs): + return + + def reset(self, *args, **kwargs): + return + + def get_current_hold_time(self, observer): + tlast = to_DJD(self.last_cycle_end) + tnow = float(observer.date) # In Dublin Julian date + hold_time = (tnow - tlast) * 86400 # in seconds + return hold_time + + def visible( + self, + el_min, + observer, + sun, + moon, + sun_avoidance_angle, + moon_avoidance_angle, + check_sso, + ): + self.update(observer) + hold_time = self.get_current_hold_time(observer) + if hold_time > self.hold_time_min: + visible = True + msg = "minimum hold time exceeded" + else: + visible = False + msg = "minimum hold time not met" + return visible, msg + + def update(self, observer): + hold_time = self.get_current_hold_time(observer) + if hold_time < self.hold_time_min: + self.weight = np.inf + else: + weight = (self.hold_time_max - hold_time) / ( + self.hold_time_max - self.hold_time_min + ) + self.weight = self.weight0 * weight ** self.power + return + + +class HorizontalPatch(Patch): + elevations = None + def __init__(self, name, weight, azmin, azmax, el, scantime): + self.name = name + self.weight = weight + if azmin <= np.pi and azmax <= np.pi: + self.rising = True + elif azmin >= np.pi and azmax >= np.pi: + self.rising = False + else: + raise RuntimeError("Horizontal patch must either be rising or setting.") + self.az_min = azmin + self.az_max = azmax + self.el = el + # scan time is the 
maximum time spent on this scan before targeting again + self.scantime = scantime # in minutes. + self.scandrift = scantime / 60 * 15 * degree + + self.el_min0 = el + self.el_min = el + self.el_max0 = el + self.el_step = 0 + self.alternate = False + self._area = 0 + self.el_max = self.el_max0 + self.el_lim = self.el_min0 + return + + def get_area(self, observer, nside=32, equalize=False): + return 1 + + def corner_coordinates(self, observer=None, unwind=False): + azs = [self.az_min, self.az_max] + els = [self.el_min, self.el_max] + return np.array(azs), np.array(els) + + def in_patch(self, obj, angle=0): + azmin = obj.az - angle + azmax = obj.az + angle + elmin = obj.alt - angle + elmax = obj.alt + angle + if self.rising: + elmax += self.scandrift + else: + elmin -= self.scandrift + if ( + azmin > self.az_min + and azmax < self.az_max + and elmin > self.el_min + and elmax < self.el_max + ): + return True + return False + + def step_azel(self): + return + + def visible( + self, + el_min, + observer, + sun, + moon, + sun_avoidance_angle, + moon_avoidance_angle, + check_sso, + ): + + in_view = True + msg = "" + if check_sso: + for sso, angle, name in [ + (sun, sun_avoidance_angle, "Sun"), + (moon, moon_avoidance_angle, "Moon"), + ]: + if self.in_patch(sso, angle=angle): + in_view = False + msg += "{} too close;".format(name) + + if in_view: + msg = "in view" + self.current_el_min = self.el_min + self.current_el_max = self.el_max + return in_view, msg + + +def to_UTC(t): + # Convert UNIX time stamp to a date string + return datetime.fromtimestamp(t, timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + + +def to_JD(t): + # Unix time stamp to Julian date + # (days since -4712-01-01 12:00:00 UTC) + return t / 86400.0 + 2440587.5 + + +def to_MJD(t): + # Convert Unix time stamp to modified Julian date + # (days since 1858-11-17 00:00:00 UTC) + return to_JD(t) - 2400000.5 + + +def to_DJD(t): + # Convert Unix time stamp to Dublin Julian date + # (days since 1899-12-31 12:00:00) + # This is the time format used by PyEphem + return to_JD(t) - 2415020 + + +def DJDtoUNIX(djd): + # Convert Dublin Julian date to a UNIX time stamp + return ((djd + 2415020) - 2440587.5) * 86400.0 + + +def patch_is_rising(patch): + try: + # Horizontal patch definition + rising = patch.rising + except: + rising = True + for corner in patch.corners: + if corner.alt > 0 and corner.az > np.pi: + # The patch is setting + rising = False + break + return rising + + +@function_timer +def prioritize(args, visible): + """Order visible targets by priority and number of scans.""" + log = Logger.get() + for i in range(len(visible)): + for j in range(len(visible) - i - 1): + # If either of the patches is a cooler cycle, we don't modulate + # the priorities with hit counts, observing time or elevation + if isinstance(visible[j], CoolerCyclePatch) or isinstance( + visible[j + 1], CoolerCyclePatch + ): + weight1 = visible[j].weight + weight2 = visible[j + 1].weight + else: + if patch_is_rising(visible[j]): + if args.equalize_time: + hits1 = visible[j].rising_time + else: + hits1 = visible[j].rising_hits + el1 = np.degrees(visible[j].current_el_max) + else: + if args.equalize_time: + hits1 = visible[j].setting_time + else: + hits1 = visible[j].setting_hits + el1 = np.degrees(visible[j].current_el_min) + if patch_is_rising(visible[j + 1]): + if args.equalize_time: + hits2 = visible[j + 1].rising_time + else: + hits2 = visible[j + 1].rising_hits + el2 = np.degrees(visible[j + 1].current_el_max) + else: + if args.equalize_time: + hits2 = visible[j + 
1].setting_time + else: + hits2 = visible[j + 1].setting_hits + el2 = np.degrees(visible[j + 1].current_el_min) + # Patch with the lower weight goes first. Having more + # earlier observing time and lower observing elevation + # will increase the weight. + weight1 = (hits1 + 1) * visible[j].weight + weight2 = (hits2 + 1) * visible[j + 1].weight + # Optional elevation penalty + if args.elevation_penalty_limit > 0: + lim = args.elevation_penalty_limit + if el1 < lim: + weight1 *= (lim / el1) ** args.elevation_penalty_power + if el2 < lim: + weight2 *= (lim / el2) ** args.elevation_penalty_power + if weight1 > weight2: + visible[j], visible[j + 1] = visible[j + 1], visible[j] + names = [] + for patch in visible: + names.append(patch.name) + log.debug("Prioritized list of viewable patches: {}".format(names)) + return + + +@function_timer +def attempt_scan( + args, + observer, + visible, + not_visible, + t, + fp_radius, + stop_timestamp, + tstop_cooler, + sun, + moon, + sun_el_max, + fout, + fout_fmt, + ods, + boresight_angle, +): + """Attempt scanning the visible patches in order until success.""" + log = Logger.get() + success = False + # Always begin by attempting full scans. If none can be completed + # and user allowed partials scans, try them next. + for allow_partial_scans in False, True: + if allow_partial_scans and not args.allow_partial_scans: + break + for patch in visible: + if isinstance(patch, CoolerCyclePatch): + # Cycle the cooler + t = add_cooler_cycle( + args, + t, + stop_timestamp, + observer, + sun, + moon, + fout, + fout_fmt, + patch, + boresight_angle, + ) + success = True + break + # All on-sky targets + for rising in [True, False]: + observer.date = to_DJD(t) + el = get_constant_elevation( + args, + observer, + patch, + rising, + fp_radius, + not_visible, + partial_scan=allow_partial_scans, + ) + if el is None: + continue + success, azmins, azmaxs, aztimes, tstop = scan_patch( + args, + el, + patch, + t, + fp_radius, + observer, + sun, + not_visible, + tstop_cooler, + sun_el_max, + rising, + ) + if success: + try: + t, _ = add_scan( + args, + t, + tstop, + aztimes, + azmins, + azmaxs, + rising, + fp_radius, + observer, + sun, + moon, + fout, + fout_fmt, + patch, + el, + ods, + boresight_angle, + partial_scan=allow_partial_scans, + ) + patch.step_azel() + break + except TooClose: + success = False + break + if success: + break + if success: + break + return success, t + + +def from_angles(az, el): + elquat = qa.rotation(YAXIS, np.radians(90 - el)) + azquat = qa.rotation(ZAXIS, np.radians(az)) + return qa.mult(azquat, elquat) + + +def unwind_quat(quat1, quat2): + if np.sum(np.abs(quat1 - quat2)) > np.sum(np.abs(quat1 + quat2)): + return -quat2 + else: + return quat2 + + +@function_timer +def check_sso(observer, az1, az2, el, sso, angle, tstart, tstop): + """ + Check if a solar system object (SSO) enters within "angle" of + the constant elevation scan. 
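The check reduces to a minimum angular separation between vectors sampled along the scan sweep and vectors sampled along the object's track. A simplified sketch with plain direction vectors in place of the quaternion slerp used below (ang_to_vec and min_separation_deg are illustrative names, not part of this patch):

    import numpy as np

    def ang_to_vec(az_deg, el_deg):
        # Unit vector for an (azimuth, elevation) pair in degrees
        az, el = np.radians(az_deg), np.radians(el_deg)
        return np.array(
            [np.cos(el) * np.cos(az), np.cos(el) * np.sin(az), np.sin(el)]
        )

    def min_separation_deg(az1, az2, el, sso_track):
        # Minimum angle between a constant-elevation sweep from az1 to
        # az2 and a sequence of (az, el) samples of the object's path
        naz = max(3, int(az2 - az1))
        scan = np.vstack(
            [ang_to_vec(az, el) for az in np.linspace(az1, az2, naz)]
        )
        sso = np.vstack([ang_to_vec(a, e) for a, e in sso_track])
        # The largest dot product corresponds to the smallest angle
        dpmax = np.amax(scan @ sso.T)
        return np.degrees(np.arccos(np.clip(dpmax, -1.0, 1.0)))

    # A 60..120 deg sweep at el = 50 deg with the Sun passing through
    # (az = 90, el = 40) approaches within ten degrees of the scan.
    print(min_separation_deg(60, 120, 50, [(80, 35), (90, 40), (100, 38)]))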
+ """ + if az2 < az1: + az2 += 360 + naz = max(3, np.int(0.25 * (az2 - az1) * np.cos(np.radians(el)))) + quats = [] + for az in np.linspace(az1, az2, naz): + quats.append(from_angles(az % 360, el)) + vecs = qa.rotate(quats, ZAXIS) + + tstart = to_DJD(tstart) + tstop = to_DJD(tstop) + t1 = tstart + # Test every ten minutes + tstep = 10 / 1440 + while t1 < tstop: + t2 = min(tstop, t1 + tstep) + observer.date = t1 + sso.compute(observer) + sun_az1, sun_el1 = np.degrees(sso.az), np.degrees(sso.alt) + observer.date = t2 + sso.compute(observer) + sun_az2, sun_el2 = np.degrees(sso.az), np.degrees(sso.alt) + sun_quat1 = from_angles(sun_az1, sun_el1) + sun_quat2 = from_angles(sun_az2, sun_el2) + sun_quat2 = unwind_quat(sun_quat1, sun_quat2) + t = np.linspace(0, 1, 10) + sun_quats = qa.slerp(t, [0, 1], [sun_quat1, sun_quat2]) + sun_vecs = qa.rotate(sun_quats, ZAXIS).T + dpmax = np.amax(np.dot(vecs, sun_vecs)) + min_dist = np.degrees(np.arccos(dpmax)) + if min_dist < angle: + return True, DJDtoUNIX(t1) + t1 = t2 + return False, DJDtoUNIX(t2) + + +@function_timer +def attempt_scan_pole( + args, + observer, + visible, + not_visible, + tstart, + fp_radius, + el_max, + el_min, + stop_timestamp, + tstop_cooler, + sun, + moon, + sun_el_max, + fout, + fout_fmt, + ods, + boresight_angle, +): + """Attempt scanning the visible patches in order until success.""" + if args.one_scan_per_day and stop_timestamp > tstop_cooler: + raise RuntimeError("one_scan_per_day is incompatible with cooler cycles") + success = False + for patch in visible: + observer.date = to_DJD(tstart) + if isinstance(patch, CoolerCyclePatch): + # Cycle the cooler + t = add_cooler_cycle( + args, + tstart, + stop_timestamp, + observer, + sun, + moon, + fout, + fout_fmt, + patch, + boresight_angle, + ) + success = True + break + # In pole scheduling, first elevation is just below the patch + el = get_constant_elevation_pole( + args, observer, patch, fp_radius, el_min, el_max, not_visible + ) + if el is None: + continue + pole_success = True + subscan = -1 + t = tstart + while pole_success: + (pole_success, azmins, azmaxs, aztimes, tstop) = scan_patch_pole( + args, + el, + patch, + t, + fp_radius, + observer, + sun, + not_visible, + tstop_cooler, + sun_el_max, + ) + if pole_success: + if success: + # Still the same scan + patch.hits -= 1 + try: + t, subscan = add_scan( + args, + t, + tstop, + aztimes, + azmins, + azmaxs, + False, + fp_radius, + observer, + sun, + moon, + fout, + fout_fmt, + patch, + el, + ods, + boresight_angle, + subscan=subscan, + ) + el += np.radians(args.pole_el_step_deg) + success = True + except TooClose: + success = False + pole_success = False + if success: + break + tstop = t + if args.one_scan_per_day: + day1 = int(to_MJD(tstart)) + while int(to_MJD(tstop)) == day1: + tstop += 60.0 + return success, tstop + + +@function_timer +def get_constant_elevation( + args, observer, patch, rising, fp_radius, not_visible, partial_scan=False +): + """Determine the elevation at which to scan.""" + log = Logger.get() + + azs, els = patch.corner_coordinates(observer) + + ind_rising = azs < np.pi + ind_setting = azs > np.pi + + el = None + if rising: + if np.sum(ind_rising) == 0: + not_visible.append((patch.name, "No rising corners")) + else: + el = np.amax(els[ind_rising]) + fp_radius + else: + if np.sum(ind_setting) == 0: + not_visible.append((patch.name, "No setting corners")) + else: + el = np.amin(els[ind_setting]) - fp_radius + + if el is not None and patch.elevations is not None: + # Fixed elevation mode. 
Find the first allowed observing elevation.
+        if rising:
+            ind = patch.elevations >= el
+            if np.any(ind):
+                el = np.amin(patch.elevations[ind])
+            elif partial_scan:
+                # None of the elevations allow a full rising scan;
+                # observe at the highest allowed elevation
+                el = np.amax(patch.elevations)
+                if el < np.amin(els[ind_rising]) + fp_radius:
+                    not_visible.append(
+                        (patch.name, "Rising patch above maximum elevation")
+                    )
+                    el = None
+            else:
+                not_visible.append((patch.name, "Only partial rising scans available"))
+                el = None
+        else:
+            ind = patch.elevations <= el
+            if np.any(ind):
+                el = np.amax(patch.elevations[ind])
+            elif partial_scan:
+                # None of the elevations allow a full setting scan;
+                # observe at the lowest allowed elevation
+                el = np.amin(patch.elevations)
+                if el > np.amax(els[ind_setting]) + fp_radius:
+                    not_visible.append(
+                        (patch.name, "Setting patch below minimum elevation")
+                    )
+                    el = None
+            else:
+                not_visible.append((patch.name, "Only partial setting scans available"))
+                el = None
+    elif el is not None:
+        if el < patch.el_min:
+            if partial_scan and np.any(patch.el_min < els[ind_setting] - fp_radius):
+                # Partial setting scan
+                el = patch.el_min
+            else:
+                not_visible.append(
+                    (
+                        patch.name,
+                        "el < el_min ({:.2f} < {:.2f}) rising = {}, partial = {}".format(
+                            el / degree, patch.el_min / degree, rising, partial_scan
+                        ),
+                    )
+                )
+                el = None
+        elif el > patch.el_max:
+            if partial_scan and np.any(patch.el_max > els[ind_rising] + fp_radius):
+                # Partial rising scan
+                el = patch.el_max
+            else:
+                not_visible.append(
+                    (
+                        patch.name,
+                        "el > el_max ({:.2f} > {:.2f}) rising = {}, partial = {}".format(
+                            el / degree, patch.el_max / degree, rising, partial_scan
+                        ),
+                    )
+                )
+                el = None
+    if el is None:
+        log.debug("NO ELEVATION: {}".format(not_visible[-1]))
+    else:
+        log.debug(
+            "{} : ELEVATION = {}, rising = {}, partial = {}".format(
+                patch.name, el / degree, rising, partial_scan
+            )
+        )
+    return el
+
+
+@function_timer
+def get_constant_elevation_pole(
+    args, observer, patch, fp_radius, el_min, el_max, not_visible
+):
+    """Determine the elevation at which to scan."""
+    log = Logger.get()
+    _, els = patch.corner_coordinates(observer)
+    el = np.amin(els) - fp_radius
+
+    if el < el_min:
+        not_visible.append(
+            (
+                patch.name,
+                "el < el_min ({:.2f} < {:.2f})".format(el / degree, el_min / degree),
+            )
+        )
+        el = None
+    elif el > el_max:
+        not_visible.append(
+            (
+                patch.name,
+                "el > el_max ({:.2f} > {:.2f})".format(el / degree, el_max / degree),
+            )
+        )
+        el = None
+    if el is None:
+        log.debug("NOT VISIBLE: {}".format(not_visible[-1]))
+    return el
+
+
+def check_sun_el(
+    t, observer, sun, sun_el_max, args, not_visible, patch=None, rising=None
+):
+    log = Logger.get()
+    observer.date = to_DJD(t)
+    if sun_el_max < np.pi / 2:
+        sun.compute(observer)
+        if sun.alt > sun_el_max:
+            # The optional patch and rising arguments only affect the log
+            # entry; callers that omit them still get a valid record
+            # instead of a NameError on undefined names.
+            name = patch.name if patch is not None else "N/A"
+            not_visible.append(
+                (
+                    name,
+                    "Sun too high {:.2f} rising = {}"
+                    "".format(np.degrees(sun.alt), rising),
+                )
+            )
+            log.debug("NOT VISIBLE: {}".format(not_visible[-1]))
+            return True
+    return False
+
+
+@function_timer
+def scan_patch(
+    args,
+    el,
+    patch,
+    t,
+    fp_radius,
+    observer,
+    sun,
+    not_visible,
+    stop_timestamp,
+    sun_el_max,
+    rising,
+):
+    """Attempt scanning the patch specified by corners at elevation el."""
+    log = Logger.get()
+    azmins, azmaxs, aztimes = [], [], []
+    if isinstance(patch, HorizontalPatch):
+        # No corners. 
Simply scan for the requested time + if rising and not patch.rising: + return False, azmins, azmaxs, aztimes, t + if check_sun_el(t, observer, sun, sun_el_max, args, not_visible): + return False, azmins, azmaxs, aztimes, t + azmins = [patch.az_min] + azmaxs = [patch.az_max] + aztimes = [t] + return True, azmins, azmaxs, aztimes, t + patch.scantime * 60 + # Traditional patch, track each corner + success = False + # and now track when all corners are past the elevation + tstop = t + tstep = 60 + to_cross = np.ones(len(patch.corners), dtype=np.bool) + scan_started = False + while True: + if tstop > stop_timestamp or tstop - t > 86400: + not_visible.append( + (patch.name, "Ran out of time rising = {}".format(rising)) + ) + log.debug("NOT VISIBLE: {}".format(not_visible[-1])) + break + if check_sun_el(tstop, observer, sun, sun_el_max, args, not_visible): + break + azs, els = patch.corner_coordinates(observer) + has_extent = current_extent( + azmins, + azmaxs, + aztimes, + patch.corners, + fp_radius, + el, + azs, + els, + rising, + tstop, + ) + if has_extent: + scan_started = True + + if rising: + good = azs <= np.pi + to_cross[np.logical_and(els > el + fp_radius, good)] = False + else: + good = azs >= np.pi + to_cross[np.logical_and(els < el - fp_radius, good)] = False + + # If we are alternating rising and setting scans, reject patches + # that appear on the wrong side of the sky. + if np.any((np.array(azmins) % (2 * np.pi)) < patch.az_min) or np.any( + (np.array(azmaxs) % (2 * np.pi)) > patch.az_max + ): + success = False + break + + if len(aztimes) > 0 and not np.any(to_cross): + # All corners made it across the CES line. + success = True + # Begin the scan before the patch is at the CES line + if aztimes[0] > t: + aztimes[0] -= tstep + break + + if scan_started and not has_extent: + # The patch went out of view before all corners + # could cross the elevation line. + success = False + break + tstop += tstep + + return success, azmins, azmaxs, aztimes, tstop + + +def unwind_angle(alpha, beta, multiple=2 * np.pi): + """Minimize absolute difference between alpha and beta. + + Minimize the absolute difference by adding a multiple of + 2*pi to beta to match alpha. + """ + while np.abs(alpha - beta - multiple) < np.abs(alpha - beta): + beta += multiple + while np.abs(alpha - beta + multiple) < np.abs(alpha - beta): + beta -= multiple + return beta + + +@function_timer +def scan_patch_pole( + args, + el, + patch, + t, + fp_radius, + observer, + sun, + not_visible, + stop_timestamp, + sun_el_max, +): + """Attempt scanning the patch specified by corners at elevation el. + + The pole scheduling mode will not wait for the patch to drift across. + It simply attempts to scan for the required time: args.pole_ces_time. 
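A quick numeric check of unwind_angle, defined just above: comparing 10 degrees against a reference of 350 degrees should yield 370 degrees, the nearest wrapped equivalent.

    import numpy as np

    def unwind_angle(alpha, beta, multiple=2 * np.pi):
        # Same logic as above: shift beta by whole multiples until it is
        # as close to alpha as possible
        while np.abs(alpha - beta - multiple) < np.abs(alpha - beta):
            beta += multiple
        while np.abs(alpha - beta + multiple) < np.abs(alpha - beta):
            beta -= multiple
        return beta

    alpha = np.radians(350)
    beta = np.radians(10)
    print(np.degrees(unwind_angle(alpha, beta)))  # 370.0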
+    """
+    log = Logger.get()
+    success = False
+    tstop = t
+    tstep = 60
+    azmins, azmaxs, aztimes = [], [], []
+    while True:
+        if tstop - t > args.pole_ces_time_s - 1:
+            # Successfully scanned the maximum time
+            if len(azmins) > 0:
+                success = True
+            else:
+                not_visible.append(
+                    (patch.name, "No overlap at {:.2f}".format(el / degree))
+                )
+                log.debug("NOT VISIBLE: {}".format(not_visible[-1]))
+            break
+        if tstop > stop_timestamp or tstop - t > 86400:
+            not_visible.append((patch.name, "Ran out of time"))
+            log.debug("NOT VISIBLE: {}".format(not_visible[-1]))
+            break
+        observer.date = to_DJD(tstop)
+        sun.compute(observer)
+        if sun.alt > sun_el_max:
+            not_visible.append(
+                (patch.name, "Sun too high {:.2f}".format(sun.alt / degree))
+            )
+            log.debug("NOT VISIBLE: {}".format(not_visible[-1]))
+            break
+        azs, els = patch.corner_coordinates(observer)
+        if np.amax(els) + fp_radius < el:
+            not_visible.append((patch.name, "Patch below {:.2f}".format(el / degree)))
+            log.debug("NOT VISIBLE: {}".format(not_visible[-1]))
+            break
+        radius = max(np.radians(1), fp_radius)
+        current_extent_pole(
+            azmins, azmaxs, aztimes, patch.corners, radius, el, azs, els, tstop
+        )
+        tstop += tstep
+    return success, azmins, azmaxs, aztimes, tstop
+
+
+@function_timer
+def current_extent_pole(
+    azmins, azmaxs, aztimes, corners, fp_radius, el, azs, els, tstop
+):
+    """Get the azimuthal extent of the patch along elevation el.
+
+    Pole scheduling does not care if the patch is "rising" or "setting".
+    """
+    azs_cross = []
+    for i in range(len(corners)):
+        if np.abs(els[i] - el) < fp_radius:
+            azs_cross.append(azs[i])
+        j = (i + 1) % len(corners)
+        if np.abs(els[j] - el) < fp_radius:
+            azs_cross.append(azs[j])
+        if np.abs(els[i] - el) < fp_radius or np.abs(els[j] - el) < fp_radius:
+            continue
+        elif (els[i] - el) * (els[j] - el) < 0:
+            # Record the location where a line between the corners
+            # crosses el.
+            az1 = azs[i]
+            az2 = azs[j]
+            el1 = els[i] - el
+            el2 = els[j] - el
+            if az2 - az1 > np.pi:
+                az1 += 2 * np.pi
+            if az1 - az2 > np.pi:
+                az2 += 2 * np.pi
+            az_cross = (az1 + el1 * (az2 - az1) / (el1 - el2)) % (2 * np.pi)
+            azs_cross.append(az_cross)
+
+    # Translate the azimuths by multiples of 2pi so they are in a
+    # compact cluster
+
+    for i in range(1, len(azs_cross)):
+        azs_cross[i] = unwind_angle(azs_cross[0], azs_cross[i])
+
+    if len(azs_cross) > 0:
+        azs_cross = np.sort(azs_cross)
+        azmin = azs_cross[0]
+        azmax = azs_cross[-1]
+        azmax = unwind_angle(azmin, azmax)
+        if azmax - azmin > np.pi:
+            # Patch crosses the zero meridian
+            azmin, azmax = azmax, azmin
+        if len(azmins) > 0:
+            azmin = unwind_angle(azmins[-1], azmin)
+            azmax = unwind_angle(azmaxs[-1], azmax)
+        azmins.append(azmin)
+        azmaxs.append(azmax)
+        aztimes.append(tstop)
+    return
+
+
+@function_timer
+def current_extent(
+    azmins, azmaxs, aztimes, corners, fp_radius, el, azs, els, rising, t
+):
+    """Get the azimuthal extent of the patch along elevation el.
+
+    Find the pairs of corners that are on opposite sides
+    of the CES line. Record the crossing azimuth of a
+    line between the corners. 
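The crossing azimuth is plain linear interpolation between the two corners in (azimuth, elevation). A small numeric check of the formula shared by current_extent_pole above and current_extent below (crossing_azimuth is an illustrative wrapper, not part of this patch):

    import numpy as np

    def crossing_azimuth(az1, el1, az2, el2, el0):
        # Azimuth where the line from corner 1 to corner 2 crosses the
        # elevation el0; el1 and el2 must straddle el0
        d1 = el1 - el0
        d2 = el2 - el0
        if d1 * d2 >= 0:
            raise ValueError("Corners are on the same side of el0")
        return (az1 + d1 * (az2 - az1) / (d1 - d2)) % (2 * np.pi)

    # Corners at (az 40, el 30) and (az 60, el 50) degrees: the el = 40
    # line is crossed exactly halfway, at az = 50 degrees.
    az = crossing_azimuth(
        np.radians(40),
        np.radians(30),
        np.radians(60),
        np.radians(50),
        np.radians(40),
    )
    print(np.degrees(az))  # 50.0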
+
+    """
+    azs_cross = []
+    for i in range(len(corners)):
+        j = (i + 1) % len(corners)
+        for el0 in [el - fp_radius, el, el + fp_radius]:
+            if (els[i] - el0) * (els[j] - el0) < 0:
+                # The corners are on opposite sides of the elevation line
+                az1 = azs[i]
+                az2 = azs[j]
+                el1 = els[i] - el0
+                el2 = els[j] - el0
+                az2 = unwind_angle(az1, az2)
+                az_cross = (az1 + el1 * (az2 - az1) / (el1 - el2)) % (2 * np.pi)
+                azs_cross.append(az_cross)
+            if fp_radius == 0:
+                break
+    if len(azs_cross) == 0:
+        return False
+
+    azs_cross = np.array(azs_cross)
+    if rising:
+        good = azs_cross < np.pi
+    else:
+        good = azs_cross > np.pi
+    ngood = np.sum(good)
+    if ngood == 0:
+        return False
+    elif ngood > 1:
+        azs_cross = azs_cross[good]
+
+    # Unwind the crossing azimuths to minimize the scatter
+    azs_cross = np.sort(azs_cross)
+    if azs_cross.size > 1:
+        ptp0 = azs_cross[-1] - azs_cross[0]
+        ptps = azs_cross[:-1] + 2 * np.pi - azs_cross[1:]
+        ptps = np.hstack([ptp0, ptps])
+        i = np.argmin(ptps)
+        azs_cross[:i] += 2 * np.pi
+        # Rotate the shifted elements to the end so the array stays sorted
+        azs_cross = np.roll(azs_cross, -i)
+
+    if len(azs_cross) > 1:
+        azmin = azs_cross[0] % (2 * np.pi)
+        azmax = azs_cross[-1] % (2 * np.pi)
+        if azmax - azmin > np.pi:
+            # Patch crosses the zero meridian
+            azmin, azmax = azmax, azmin
+        azmins.append(azmin)
+        azmaxs.append(azmax)
+        aztimes.append(t)
+        return True
+    return False
+
+
+@function_timer
+def add_scan(
+    args,
+    tstart,
+    tstop,
+    aztimes,
+    azmins,
+    azmaxs,
+    rising,
+    fp_radius,
+    observer,
+    sun,
+    moon,
+    fout,
+    fout_fmt,
+    patch,
+    el,
+    ods,
+    boresight_angle,
+    subscan=-1,
+    partial_scan=False,
+):
+    """Make an entry for a CES in the schedule file."""
+    log = Logger.get()
+    ces_time = tstop - tstart
+    if ces_time > args.ces_max_time_s:  # and not args.pole_mode:
+        nsub = np.int(np.ceil(ces_time / args.ces_max_time_s))
+        ces_time /= nsub
+    aztimes = np.array(aztimes)
+    azmins = np.array(azmins)
+    azmaxs = np.array(azmaxs)
+    azmaxs[0] = unwind_angle(azmins[0], azmaxs[0])
+    for i in range(1, azmins.size):
+        azmins[i] = unwind_angle(azmins[0], azmins[i])
+        azmaxs[i] = unwind_angle(azmaxs[0], azmaxs[i])
+        azmaxs[i] = unwind_angle(azmins[i], azmaxs[i])
+    # for i in range(azmins.size-1):
+    #    if azmins[i+1] - azmins[i] > np.pi:
+    #        azmins[i+1], azmaxs[i+1] = azmins[i+1]-2*np.pi, azmaxs[i+1]-2*np.pi
+    #    if azmins[i+1] - azmins[i] < np.pi:
+    #        azmins[i+1], azmaxs[i+1] = azmins[i+1]+2*np.pi, azmaxs[i+1]+2*np.pi
+    rising_string = "R" if rising else "S"
+    t1 = np.amin(aztimes)
+    entries = []
+    while t1 < tstop - 1:
+        subscan += 1
+        if args.operational_days:
+            # See if adding this scan would exceed the number of desired
+            # operational days
+            if subscan == 0:
+                tz = args.timezone / 24
+                od = int(to_MJD(tstart) + tz)
+                ods.add(od)
+            if len(ods) > args.operational_days:
+                # Prevent adding further entries to the schedule once
+                # the number of operational days is full
+                break
+        t2 = min(t1 + ces_time, tstop)
+        if tstop - t2 < ces_time / 10:
+            # Append leftover scan to the last full subscan
+            t2 = tstop
+        ind = np.logical_and(aztimes >= t1, aztimes <= t2)
+        if np.all(aztimes > t2):
+            ind[0] = True
+        if np.all(aztimes < t1):
+            ind[-1] = True
+        if azmins[ind][0] < azmaxs[ind][0]:
+            azmin = np.amin(azmins[ind])
+            azmax = np.amax(azmaxs[ind])
+        else:
+            # Azimuth ordering is reversed; scan from the maximum to the minimum
+            azmin = np.amax(azmins[ind])
+            azmax = np.amin(azmaxs[ind])
+        if args.scan_margin > 0:
+            # Add a random error to the scan parameters to smooth out
+            # caustics in the hit map
+            delta_az = azmax - unwind_angle(azmax, azmin)
+            sub_az = delta_az * 
np.abs(np.random.randn()) * args.scan_margin * 0.5 + add_az = delta_az * np.abs(np.random.randn()) * args.scan_margin * 0.5 + azmin = (azmin - sub_az) % (2 * np.pi) + azmax = (azmax + add_az) % (2 * np.pi) + if t2 == tstop: + delta_t = t2 - t1 # tstop - tstart + add_t = delta_t * np.abs(np.random.randn()) * args.scan_margin + t2 += add_t + # Add the focal plane radius to the scan width + fp_radius_eff = fp_radius / np.cos(el) + azmin = (azmin - fp_radius_eff) % (2 * np.pi) / degree + azmax = (azmax + fp_radius_eff) % (2 * np.pi) / degree + # Get the Sun and Moon locations at the beginning and end + observer.date = to_DJD(t1) + sun.compute(observer) + moon.compute(observer) + sun_az1, sun_el1 = sun.az / degree, sun.alt / degree + moon_az1, moon_el1 = moon.az / degree, moon.alt / degree + moon_phase1 = moon.phase + # It is possible that the Sun or the Moon gets too close to the + # scan, even if they are far enough from the actual patch. + sun_too_close, sun_time = check_sso( + observer, + azmin, + azmax, + el / degree, + sun, + args.sun_avoidance_angle_deg, + t1, + t2, + ) + moon_too_close, moon_time = check_sso( + observer, + azmin, + azmax, + el / degree, + moon, + args.moon_avoidance_angle_deg, + t1, + t2, + ) + + if ( + (isinstance(patch, HorizontalPatch) or partial_scan) + and sun_time > tstart + 1 + and moon_time > tstart + 1 + ): + # Simply terminate the scan when the Sun or the Moon is too close + t2 = min(sun_time, moon_time) + if sun_too_close or moon_too_close: + tstop = t2 + if t1 == t2: + break + else: + # For regular patches, this is a failure condition + if sun_too_close: + log.debug("Sun too close") + raise SunTooClose + if moon_too_close: + log.debug("Moon too close") + raise MoonTooClose + + observer.date = to_DJD(t2) + sun.compute(observer) + moon.compute(observer) + sun_az2, sun_el2 = sun.az / degree, sun.alt / degree + moon_az2, moon_el2 = moon.az / degree, moon.alt / degree + moon_phase2 = moon.phase + # Create an entry in the schedule + entry = fout_fmt.format( + to_UTC(t1), + to_UTC(t2), + to_MJD(t1), + to_MJD(t2), + boresight_angle, + patch.name, + (azmin + args.boresight_offset_az_deg) % 360, + (azmax + args.boresight_offset_az_deg) % 360, + (el / degree + args.boresight_offset_el_deg), + rising_string, + sun_el1, + sun_az1, + sun_el2, + sun_az2, + moon_el1, + moon_az1, + moon_el2, + moon_az2, + 0.005 * (moon_phase1 + moon_phase2), + -1 - patch.partial_hits if partial_scan else patch.hits, + subscan, + ) + entries.append(entry) + if partial_scan: + # Never append more than one partial scan before + # checking if full scans are again available + tstop = t2 + break + t1 = t2 + args.gap_small_s + + # Write the entries + for entry in entries: + log.debug(entry) + fout.write(entry) + fout.flush() + + if not partial_scan: + # Only update the patch counters when performing full scans + patch.hits += 1 + patch.time += ces_time + if rising or args.pole_mode: + patch.rising_hits += 1 + patch.rising_time += ces_time + if not rising or args.pole_mode: + patch.setting_hits += 1 + patch.setting_time += ces_time + # The oscillate method will slightly shift the patch to + # blur the boundaries + patch.oscillate() + # Advance the time + tstop += args.gap_s + else: + patch.partial_hits += 1 + # Advance the time + tstop += args.gap_small_s + + return tstop, subscan + + +@function_timer +def add_cooler_cycle( + args, tstart, tstop, observer, sun, moon, fout, fout_fmt, patch, boresight_angle +): + """Make an entry for a cooler cycle in the schedule file.""" + log = Logger.get() + az = 
patch.az + el = patch.el + t1 = tstart + t2 = t1 + patch.cycle_time + + observer.date = to_DJD(t1) + sun.compute(observer) + moon.compute(observer) + sun_az1, sun_el1 = sun.az / degree, sun.alt / degree + moon_az1, moon_el1 = moon.az / degree, moon.alt / degree + moon_phase1 = moon.phase + + observer.date = to_DJD(t2) + sun.compute(observer) + moon.compute(observer) + sun_az2, sun_el2 = sun.az / degree, sun.alt / degree + moon_az2, moon_el2 = moon.az / degree, moon.alt / degree + moon_phase2 = moon.phase + + # Create an entry in the schedule + entry = fout_fmt.format( + to_UTC(t1), + to_UTC(t2), + to_MJD(t1), + to_MJD(t2), + boresight_angle, + patch.name, + az, + az, + el, + "R", + sun_el1, + sun_az1, + sun_el2, + sun_az2, + moon_el1, + moon_az1, + moon_el2, + moon_az2, + 0.005 * (moon_phase1 + moon_phase2), + patch.hits, + 0, + ) + + # Write the entry + log.debug(entry) + fout.write(entry) + fout.flush() + + patch.last_cycle_end = t2 + patch.hits += 1 + patch.time += t2 - t1 + patch.rising_hits += 1 + patch.rising_time += t2 - t1 + patch.setting_hits += 1 + patch.setting_time += t2 - t1 + + return t2 + + +@function_timer +def get_visible(args, observer, sun, moon, patches, el_min): + """Determine which patches are visible.""" + log = Logger.get() + visible = [] + not_visible = [] + for patch in patches: + # Reject all patches that have even one corner too close + # to the Sun or the Moon and patches that are completely + # below the horizon + in_view, msg = patch.visible( + el_min, + observer, + sun, + moon, + args.sun_avoidance_angle_deg, + args.moon_avoidance_angle_deg, + not (args.allow_partial_scans or args.delay_sso_check), + ) + if not in_view: + not_visible.append((patch.name, msg)) + + if in_view: + if not (args.allow_partial_scans or args.delay_sso_check): + # Finally, check that the Sun or the Moon are not + # inside the patch + if args.moon_avoidance_angle_deg >= 0 and patch.in_patch(moon): + not_visible.append((patch.name, "Moon in patch")) + in_view = False + if args.sun_avoidance_angle_deg >= 0 and patch.in_patch(sun): + not_visible.append((patch.name, "Sun in patch")) + in_view = False + if in_view: + visible.append(patch) + log.debug( + "In view: {}. el = {:.2f}..{:.2f}".format( + patch.name, np.degrees(patch.el_min), np.degrees(patch.el_max) + ) + ) + else: + log.debug("NOT VISIBLE: {}".format(not_visible[-1])) + return visible, not_visible + + +@function_timer +def get_boresight_angle(args, t, t0=0): + """Return the scheduled boresight angle at time t.""" + if args.boresight_angle_step_deg == 0 or args.boresight_angle_time_min == 0: + return 0 + + istep = int((t - t0) / 60 / args.boresight_angle_time_min) + return (args.boresight_angle_step_deg * istep) % 360 + + +@function_timer +def apply_blockouts(args, t_in): + """Check if `t` is inside a blockout period. + If so, advance it to the next unblocked time. + + Returns: The (new) time and a boolean flag indicating if + the time was blocked and subsequently advanced. 
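+
+    For example (hypothetical values): with "--block-out 01/15-02/15", a
+    time stamp falling on January 20 is advanced to 00:00 UTC on
+    February 16, since the end day is inclusive and the stop is padded
+    by one day.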
+ """ + if not args.block_out: + return t_in, False + log = Logger.get() + t = t_in + blocked = False + for block_out in args.block_out: + current = datetime.fromtimestamp(t, timezone.utc) + start, stop = block_out.split("-") + try: + # If the block out specifies the year then no extra logic is needed + start_year, start_month, start_day = start.split("/") + start = datetime( + int(start_year), + int(start_month), + int(start_day), + 0, + 0, + 0, + 0, + timezone.utc, + ) + except ValueError: + # No year given so must figure out which year is the right one + start_month, start_day = start.split("/") + start = datetime( + current.year, int(start_month), int(start_day), 0, 0, 0, 0, timezone.utc + ) + if start > current: + # This year's block out is still in the future but the past + # year's blockout may still be active + start = start.replace(year=start.year - 1) + try: + # If the block out specifies the year then no extra logic is needed + stop_year, stop_month, stop_day = stop.split("/") + stop = datetime( + int(stop_year), int(stop_month), int(stop_day), 0, 0, 0, 0, timezone.utc + ) + except ValueError: + # No year given so must figure out which year is the right one + stop_month, stop_day = stop.split("/") + stop = datetime( + start.year, int(stop_month), int(stop_day), 0, 0, 0, 0, timezone.utc + ) + if stop < start: + # The block out ends on a different year than it starts + stop = stop.replace(year=start.year + 1) + # advance the stop time by one day to make the definition inclusive + stop += timedelta(days=1) + if start < current and current < stop: + # `t` is inside the block out. + # Advance to the end of the block out. + log.info( + "{} is inside block out {}, advancing to {}".format( + current, block_out, stop + ) + ) + t = stop.timestamp() + blocked = True + return t, blocked + + +def advance_time(t, time_step, offset=0): + """Advance the time ensuring that the sampling falls + over same discrete times (multiples of time_step) + regardless of the current value of t. 
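+
+    For example, with offset=0 and time_step=600, t=1234.0 advances to
+    1800.0 and t=1800.0 advances to 2400.0.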
+ """ + return offset + ((t - offset) // time_step + 1) * time_step + + +@function_timer +def build_schedule(args, start_timestamp, stop_timestamp, patches, observer, sun, moon): + log = Logger.get() + + sun_el_max = args.sun_el_max_deg * degree + el_min = args.el_min_deg + el_max = args.el_max_deg + if args.elevations_deg is None: + el_min = args.el_min_deg + el_max = args.el_max_deg + else: + # Override the elevation limits + el_min = 90 + el_max = 0 + for el in args.elevations_deg.split(","): + el = np.float(el) + el_min = min(el * 0.9, el_min) + el_max = max(el * 1.1, el_max) + el_min *= degree + el_max *= degree + fp_radius = args.fp_radius_deg * degree + + fname_out = args.out + dir_out = os.path.dirname(fname_out) + if dir_out: + log.info("Creating '{}'".format(dir_out)) + os.makedirs(dir_out, exist_ok=True) + fout = open(fname_out, "w") + + fout.write( + "#{:15} {:15} {:>15} {:>15} {:>15}\n".format( + "Site", "Telescope", "Latitude [deg]", "Longitude [deg]", "Elevation [m]" + ) + ) + fout.write( + " {:15} {:15} {:15.3f} {:15.3f} {:15.1f}\n".format( + args.site_name, + args.telescope, + np.degrees(observer.lat), + np.degrees(observer.lon), + observer.elevation, + ) + ) + + fout_fmt0 = ( + "#{:>20} {:>20} {:>14} {:>14} {:>8} " + "{:35} {:>8} {:>8} {:>8} {:>5} " + "{:>8} {:>8} {:>8} {:>8} " + "{:>8} {:>8} {:>8} {:>8} {:>5} " + "{:>5} {:>3}\n" + ) + + fout_fmt = ( + " {:20} {:20} {:14.6f} {:14.6f} {:8.2f} " + "{:35} {:8.2f} {:8.2f} {:8.2f} {:5} " + "{:8.2f} {:8.2f} {:8.2f} {:8.2f} " + "{:8.2f} {:8.2f} {:8.2f} {:8.2f} {:5.2f} " + "{:5} {:3}\n" + ) + + fout.write( + fout_fmt0.format( + "Start time UTC", + "Stop time UTC", + "Start MJD", + "Stop MJD", + "Rotation", + "Patch name", + "Az min", + "Az max", + "El", + "R/S", + "Sun el1", + "Sun az1", + "Sun el2", + "Sun az2", + "Moon el1", + "Moon az1", + "Moon el2", + "Moon az2", + "Phase", + "Pass", + "Sub", + ) + ) + + # Operational days + ods = set() + + t = start_timestamp + last_successful = t + while True: + t, blocked = apply_blockouts(args, t) + boresight_angle = get_boresight_angle(args, t) + if t > stop_timestamp: + break + if t - last_successful > 86400 or blocked: + # A long time has passed since the last successfully + # scheduled scan. + # Reset the individual patch az and el limits + for patch in patches: + patch.reset() + if blocked: + last_successful = t + else: + # Only try this once for every day. Swapping + # `t` <-> `last_successful` means that we will not trigger + # this branch again without scheduling a succesful scan + log.debug( + "Resetting patches and returning to the last successful " + "scan: {}".format(to_UTC(last_successful)) + ) + t, last_successful = last_successful, t + + # Determine which patches are observable at time t. + + log.debug("t = {}".format(to_UTC(t))) + # Determine which patches are visible + observer.date = to_DJD(t) + sun.compute(observer) + if sun.alt > sun_el_max: + log.debug( + "Sun elevation is {:.2f} > {:.2f}. 
Moving on.".format( + sun.alt / degree, sun_el_max / degree + ) + ) + t = advance_time(t, args.time_step_s) + continue + moon.compute(observer) + + visible, not_visible = get_visible(args, observer, sun, moon, patches, el_min) + + if len(visible) == 0: + log.debug("No patches visible at {}: {}".format(to_UTC(t), not_visible)) + t = advance_time(t, args.time_step_s) + continue + + # Determine if a cooler cycle sets a limit for observing + tstop_cooler = stop_timestamp + for patch in patches: + if isinstance(patch, CoolerCyclePatch): + ttest = patch.last_cycle_end + patch.hold_time_max + if ttest < tstop_cooler: + tstop_cooler = ttest + + # Order the targets by priority and attempt to observe with both + # a rising and setting scans until we find one that can be + # succesfully scanned. + # If the criteria are not met, advance the time by a step + # and try again + + prioritize(args, visible) + + if args.pole_mode: + success, t = attempt_scan_pole( + args, + observer, + visible, + not_visible, + t, + fp_radius, + el_max, + el_min, + stop_timestamp, + tstop_cooler, + sun, + moon, + sun_el_max, + fout, + fout_fmt, + ods, + boresight_angle, + ) + else: + success, t = attempt_scan( + args, + observer, + visible, + not_visible, + t, + fp_radius, + stop_timestamp, + tstop_cooler, + sun, + moon, + sun_el_max, + fout, + fout_fmt, + ods, + boresight_angle, + ) + + if args.operational_days and len(ods) > args.operational_days: + break + + if not success: + log.debug( + "No patches could be scanned at {}: {}".format(to_UTC(t), not_visible) + ) + t = advance_time(t, args.time_step_s) + else: + last_successful = t + + fout.close() + return + + +def parse_args(opts=None): + parser = argparse.ArgumentParser( + description="Generate ground observation schedule.", fromfile_prefix_chars="@" + ) + + parser.add_argument( + "--site-name", required=False, default="LBL", help="Observing site name" + ) + parser.add_argument( + "--telescope", + required=False, + default="Telescope", + help="Observing telescope name", + ) + parser.add_argument( + "--site-lon", + required=False, + default="-122.247", + help="Observing site longitude [PyEphem string]", + ) + parser.add_argument( + "--site-lat", + required=False, + default="37.876", + help="Observing site latitude [PyEphem string]", + ) + parser.add_argument( + "--site-alt", + required=False, + default=100, + type=np.float, + help="Observing site altitude [meters]", + ) + parser.add_argument( + "--scan-margin", + required=False, + default=0, + type=np.float, + help="Random fractional margin [0..1] added to the " + "scans to smooth out edge effects", + ) + parser.add_argument( + "--ra-period", + required=False, + default=10, + type=np.int, + help="Period of patch position oscillations in RA [visits]", + ) + parser.add_argument( + "--ra-amplitude-deg", + required=False, + default=0, + type=np.float, + help="Amplitude of patch position oscillations in RA [deg]", + ) + parser.add_argument( + "--dec-period", + required=False, + default=10, + type=np.int, + help="Period of patch position oscillations in DEC [visits]", + ) + parser.add_argument( + "--dec-amplitude-deg", + required=False, + default=0, + type=np.float, + help="Amplitude of patch position oscillations in DEC [deg]", + ) + parser.add_argument( + "--elevation-penalty-limit", + required=False, + default=0, + type=np.float, + help="Assign a penalty to observing elevations below this limit [degrees]", + ) + parser.add_argument( + "--elevation-penalty-power", + required=False, + default=2, + type=np.float, + 
help="Power in the elevation penalty function [> 0] ", + ) + parser.add_argument( + "--equalize-area", + required=False, + default=False, + action="store_true", + help="Adjust priorities to account for patch area", + ) + parser.add_argument( + "--equalize-time", + required=False, + action="store_true", + dest="equalize_time", + help="Modulate priority by integration time.", + ) + parser.add_argument( + "--equalize-scans", + required=False, + action="store_false", + dest="equalize_time", + help="Modulate priority by number of scans.", + ) + parser.set_defaults(equalize_time=False) + parser.add_argument( + "--patch", + required=True, + action="append", + help="Patch definition: " + "name,weight,lon1,lat1,lon2,lat2 ... " + "OR name,weight,lon,lat,width", + ) + parser.add_argument( + "--patch-coord", + required=False, + default="C", + help="Sky patch coordinate system [C,E,G]", + ) + parser.add_argument( + "--el-min-deg", + required=False, + default=30, + type=np.float, + help="Minimum elevation for a CES", + ) + parser.add_argument( + "--el-max-deg", + required=False, + default=80, + type=np.float, + help="Maximum elevation for a CES", + ) + parser.add_argument( + "--el-step-deg", + required=False, + default=0, + type=np.float, + help="Optional step to apply to minimum elevation", + ) + parser.add_argument( + "--alternate", + required=False, + default=False, + action="store_true", + help="Alternate between rising and setting scans", + ) + parser.add_argument( + "--fp-radius-deg", + required=False, + default=0, + type=np.float, + help="Focal plane radius [deg]", + ) + parser.add_argument( + "--sun-avoidance-angle-deg", + required=False, + default=30, + type=np.float, + help="Minimum distance between the Sun and the bore sight [deg]", + ) + parser.add_argument( + "--moon-avoidance-angle-deg", + required=False, + default=20, + type=np.float, + help="Minimum distance between the Moon and the bore sight [deg]", + ) + parser.add_argument( + "--sun-el-max-deg", + required=False, + default=90, + type=np.float, + help="Maximum allowed sun elevation [deg]", + ) + parser.add_argument( + "--boresight-angle-step-deg", + required=False, + default=0, + type=np.float, + help="Boresight rotation step size [deg]", + ) + parser.add_argument( + "--boresight-angle-time-min", + required=False, + default=0, + type=np.float, + help="Boresight rotation step interval [minutes]", + ) + parser.add_argument( + "--start", + required=False, + default="2000-01-01 00:00:00", + help="UTC start time of the schedule", + ) + parser.add_argument("--stop", required=False, help="UTC stop time of the schedule") + parser.add_argument( + "--block-out", + required=False, + action="append", + help="Range of UTC calendar days to omit from scheduling in format " + "START_MONTH/START_DAY-END_MONTH/END_DAY or " + "START_YEAR/START_MONTH/START_DAY-END_YEAR/END_MONTH/END_DAY " + "where YEAR, MONTH and DAY are integers. 
END days are inclusive",
+    )
+    parser.add_argument(
+        "--operational-days",
+        required=False,
+        type=np.int,
+        help="Number of operational days to schedule (empty days do not count)",
+    )
+    parser.add_argument(
+        "--timezone",
+        required=False,
+        type=np.int,
+        default=0,
+        help="Offset to apply to MJD to separate operational days [hours]",
+    )
+    parser.add_argument(
+        "--gap-s",
+        required=False,
+        default=100,
+        type=np.float,
+        help="Gap between CES:es [seconds]",
+    )
+    parser.add_argument(
+        "--gap-small-s",
+        required=False,
+        default=10,
+        type=np.float,
+        help="Gap between split CES:es [seconds]",
+    )
+    parser.add_argument(
+        "--time-step-s",
+        required=False,
+        default=600,
+        type=np.float,
+        help="Time step after failed target acquisition [seconds]",
+    )
+    parser.add_argument(
+        "--one-scan-per-day",
+        required=False,
+        default=False,
+        action="store_true",
+        help="Pad each operational day to have only one CES",
+    )
+    parser.add_argument(
+        "--ces-max-time-s",
+        required=False,
+        default=900,
+        type=np.float,
+        help="Maximum length of a CES [seconds]",
+    )
+    parser.add_argument(
+        "--debug",
+        required=False,
+        default=False,
+        action="store_true",
+        help="Write diagnostics, including patch plots.",
+    )
+    parser.add_argument(
+        "--polmap",
+        required=False,
+        help="Include polarization from map in the plotted patches when --debug",
+    )
+    parser.add_argument(
+        "--pol-min",
+        required=False,
+        type=np.float,
+        help="Lower plotting range for polarization map",
+    )
+    parser.add_argument(
+        "--pol-max",
+        required=False,
+        type=np.float,
+        help="Upper plotting range for polarization map",
+    )
+    parser.add_argument(
+        "--delay-sso-check",
+        required=False,
+        default=False,
+        action="store_true",
+        help="Only apply SSO check during simulated scan.",
+    )
+    parser.add_argument(
+        "--pole-mode",
+        required=False,
+        default=False,
+        action="store_true",
+        help="Pole scheduling mode (no drift scan)",
+    )
+    parser.add_argument(
+        "--pole-el-step-deg",
+        required=False,
+        default=0.25,
+        type=np.float,
+        help="Elevation step in pole scheduling mode [deg]",
+    )
+    parser.add_argument(
+        "--pole-ces-time-s",
+        required=False,
+        default=3000,
+        type=np.float,
+        help="Time to scan at constant elevation in pole mode",
+    )
+    parser.add_argument(
+        "--out", required=False, default="schedule.txt", help="Output filename"
+    )
+    parser.add_argument(
+        "--boresight-offset-el-deg",
+        required=False,
+        default=0,
+        type=np.float,
+        help="Optional offset added to every observing elevation",
+    )
+    parser.add_argument(
+        "--boresight-offset-az-deg",
+        required=False,
+        default=0,
+        type=np.float,
+        help="Optional offset added to every observing azimuth",
+    )
+    parser.add_argument(
+        "--elevations-deg",
+        required=False,
+        help="Fixed observing elevations in a comma-separated list.",
+    )
+    parser.add_argument(
+        "--partial-scans",
+        required=False,
+        action="store_true",
+        dest="allow_partial_scans",
+        help="Allow partial scans when full scans are not available.",
+    )
+    parser.add_argument(
+        "--no-partial-scans",
+        required=False,
+        action="store_false",
+        dest="allow_partial_scans",
+        help="Do not allow partial scans when full scans are not available.",
+    )
+    parser.set_defaults(allow_partial_scans=False)
+
+    args = None
+    if opts is None:
+        try:
+            args = parser.parse_args()
+        except SystemExit:
+            sys.exit(0)
+    else:
+        try:
+            args = parser.parse_args(opts)
+        except SystemExit:
+            sys.exit(0)
+
+    if args.operational_days is None and args.stop is None:
+        raise RuntimeError("You must provide --stop or
--operational-days") + + stop_time = None + if args.start.endswith("Z"): + start_time = dateutil.parser.parse(args.start) + if args.stop is not None: + if not args.stop.endswith("Z"): + raise RuntimeError("Either both or neither times must be given in UTC") + stop_time = dateutil.parser.parse(args.stop) + else: + if args.timezone < 0: + tz = "-{:02}00".format(-args.timezone) + else: + tz = "+{:02}00".format(args.timezone) + start_time = dateutil.parser.parse(args.start + tz) + if args.stop is not None: + if args.stop.endswith("Z"): + raise RuntimeError("Either both or neither times must be given in UTC") + stop_time = dateutil.parser.parse(args.stop + tz) + + start_timestamp = start_time.timestamp() + if stop_time is None: + # Keep scheduling until the desired number of operational days is full. + stop_timestamp = 2 ** 60 + else: + stop_timestamp = stop_time.timestamp() + return args, start_timestamp, stop_timestamp + + +@function_timer +def parse_patch_sso(args, parts): + log = Logger.get() + log.info("SSO format") + name = parts[0] + weight = float(parts[2]) + radius = float(parts[3]) * degree + patch = SSOPatch(name, weight, radius, elevations=args.elevations_deg) + return patch + + +@function_timer +def parse_patch_cooler(args, parts, last_cycle_end): + log = Logger.get() + log.info("Cooler cycle format") + weight = float(parts[2]) + power = float(parts[3]) + hold_time_min = float(parts[4]) # in hours + hold_time_max = float(parts[5]) # in hours + cycle_time = float(parts[6]) # in hours + az = float(parts[7]) + el = float(parts[8]) + patch = CoolerCyclePatch( + weight, power, hold_time_min, hold_time_max, cycle_time, az, el, last_cycle_end + ) + return patch + + +@function_timer +def parse_patch_horizontal(args, parts): + """Parse an explicit patch definition line""" + log = Logger.get() + corners = [] + log.info("Horizontal format") + name = parts[0] + weight = float(parts[2]) + azmin = float(parts[3]) * degree + azmax = float(parts[4]) * degree + el = float(parts[5]) * degree + scantime = float(parts[6]) # minutes + patch = HorizontalPatch(name, weight, azmin, azmax, el, scantime) + return patch + + +@function_timer +def parse_patch_explicit(args, parts): + """Parse an explicit patch definition line""" + log = Logger.get() + corners = [] + log.info("Explicit-corners format: ") + name = parts[0] + i = 2 + definition = "" + while i + 1 < len(parts): + definition += " ({}, {})".format(parts[i], parts[i + 1]) + try: + # Assume coordinates in degrees + lon = float(parts[i]) * degree + lat = float(parts[i + 1]) * degree + except ValueError: + # Failed simple interpreration, assume pyEphem strings + lon = parts[i] + lat = parts[i + 1] + i += 2 + if args.patch_coord == "C": + corner = ephem.Equatorial(lon, lat, epoch="2000") + elif args.patch_coord == "E": + corner = ephem.Ecliptic(lon, lat, epoch="2000") + elif args.patch_coord == "G": + corner = ephem.Galactic(lon, lat, epoch="2000") + else: + raise RuntimeError("Unknown coordinate system: {}".format(args.patch_coord)) + corner = ephem.Equatorial(corner) + if corner.dec > 80 * degree or corner.dec < -80 * degree: + raise RuntimeError( + "{} has at least one circumpolar corner. 
" + "Circumpolar targeting not yet implemented".format(name) + ) + patch_corner = ephem.FixedBody() + patch_corner._ra = corner.ra + patch_corner._dec = corner.dec + corners.append(patch_corner) + log.info(definition) + return corners + + +@function_timer +def parse_patch_rectangular(args, parts): + """Parse a rectangular patch definition line""" + log = Logger.get() + corners = [] + log.info("Rectangular format") + name = parts[0] + try: + # Assume coordinates in degrees + lon_min = float(parts[2]) * degree + lat_max = float(parts[3]) * degree + lon_max = float(parts[4]) * degree + lat_min = float(parts[5]) * degree + except ValueError: + # Failed simple interpreration, assume pyEphem strings + lon_min = parts[2] + lat_max = parts[3] + lon_max = parts[4] + lat_min = parts[5] + if args.patch_coord == "C": + coordconv = ephem.Equatorial + elif args.patch_coord == "E": + coordconv = ephem.Ecliptic + elif args.patch_coord == "G": + coordconv = ephem.Galactic + else: + raise RuntimeError("Unknown coordinate system: {}".format(args.patch_coord)) + + nw_corner = coordconv(lon_min, lat_max, epoch="2000") + ne_corner = coordconv(lon_max, lat_max, epoch="2000") + se_corner = coordconv(lon_max, lat_min, epoch="2000") + sw_corner = coordconv(lon_min, lat_min, epoch="2000") + + lon_max = unwind_angle(lon_min, lon_max) + if lon_min < lon_max: + delta_lon = lon_max - lon_min + else: + delta_lon = lon_min - lon_max + area = (np.cos(np.pi / 2 - lat_max) - np.cos(np.pi / 2 - lat_min)) * delta_lon + + corners_temp = [] + add_side(nw_corner, ne_corner, corners_temp, coordconv) + add_side(ne_corner, se_corner, corners_temp, coordconv) + add_side(se_corner, sw_corner, corners_temp, coordconv) + add_side(sw_corner, nw_corner, corners_temp, coordconv) + + for corner in corners_temp: + if corner.dec > 80 * degree or corner.dec < -80 * degree: + raise RuntimeError( + "{} has at least one circumpolar corner. " + "Circumpolar targeting not yet implemented".format(name) + ) + patch_corner = ephem.FixedBody() + patch_corner._ra = corner.ra + patch_corner._dec = corner.dec + corners.append(patch_corner) + return corners, area + + +@function_timer +def add_side(corner1, corner2, corners_temp, coordconv): + """Add one side of a rectangle. + + Add one side of a rectangle with enough interpolation points. 
+ """ + step = np.radians(1) + corners_temp.append(ephem.Equatorial(corner1)) + lon1 = corner1.ra + lon2 = corner2.ra + lat1 = corner1.dec + lat2 = corner2.dec + if lon1 == lon2: + lon = lon1 + if lat1 < lat2: + lat_step = step + else: + lat_step = -step + for lat in np.arange(lat1, lat2, lat_step): + corners_temp.append(ephem.Equatorial(coordconv(lon, lat, epoch="2000"))) + elif lat1 == lat2: + lat = lat1 + if lon1 < lon2: + lon_step = step / np.cos(lat) + else: + lon_step = -step / np.cos(lat) + for lon in np.arange(lon1, lon2, lon_step): + corners_temp.append(ephem.Equatorial(coordconv(lon, lat, epoch="2000"))) + else: + raise RuntimeError("add_side: both latitude and longitude change") + return + + +@function_timer +def parse_patch_center_and_width(args, parts): + """Parse center-and-width patch definition""" + log = Logger.get() + corners = [] + log.info("Center-and-width format") + try: + # Assume coordinates in degrees + lon = float(parts[2]) * degree + lat = float(parts[3]) * degree + except ValueError: + # Failed simple interpreration, assume pyEphem strings + lon = parts[2] + lat = parts[3] + width = float(parts[4]) * degree + if args.patch_coord == "C": + center = ephem.Equatorial(lon, lat, epoch="2000") + elif args.patch_coord == "E": + center = ephem.Ecliptic(lon, lat, epoch="2000") + elif args.patch_coord == "G": + center = ephem.Galactic(lon, lat, epoch="2000") + else: + raise RuntimeError("Unknown coordinate system: {}".format(args.patch_coord)) + center = ephem.Equatorial(center) + # Synthesize 8 corners around the center + phi = center.ra + theta = center.dec + r = width / 2 + ncorner = 8 + angstep = 2 * np.pi / ncorner + for icorner in range(ncorner): + ang = angstep * icorner + delta_theta = np.cos(ang) * r + delta_phi = np.sin(ang) * r / np.cos(theta + delta_theta) + patch_corner = ephem.FixedBody() + patch_corner._ra = phi + delta_phi + patch_corner._dec = theta + delta_theta + corners.append(patch_corner) + return corners + + +@function_timer +def parse_patches(args, observer, sun, moon, start_timestamp, stop_timestamp): + # Parse the patch definitions + log = Logger.get() + patches = [] + total_weight = 0 + for patch_def in args.patch: + parts = patch_def.split(",") + name = parts[0] + log.info('Adding patch "{}"'.format(name)) + if parts[1].upper() == "HORIZONTAL": + patch = parse_patch_horizontal(args, parts) + elif parts[1].upper() == "SSO": + patch = parse_patch_sso(args, parts) + elif parts[1].upper() == "COOLER": + patch = parse_patch_cooler(args, parts, start_timestamp) + else: + weight = float(parts[1]) + if np.isnan(weight): + raise RuntimeError("Patch has NaN priority: {}".format(patch_def)) + if weight == 0: + raise RuntimeError("Patch has zero priority: {}".format(patch_def)) + if len(parts[2:]) == 3: + corners = parse_patch_center_and_width(args, parts) + area = None + elif len(parts[2:]) == 4: + corners, area = parse_patch_rectangular(args, parts) + else: + corners = parse_patch_explicit(args, parts) + area = None + patch = Patch( + name, + weight, + corners, + el_min=args.el_min_deg * degree, + el_max=args.el_max_deg * degree, + el_step=args.el_step_deg * degree, + alternate=args.alternate, + site_lat=observer.lat, + area=area, + ra_period=args.ra_period, + ra_amplitude=args.ra_amplitude_deg, + dec_period=args.dec_period, + dec_amplitude=args.dec_amplitude_deg, + elevations=args.elevations_deg, + ) + if args.equalize_area or args.debug: + area = patch.get_area(observer, nside=32, equalize=args.equalize_area) + total_weight += patch.weight + 
patches.append(patch)
+
+        log.debug(
+            "Highest possible observing elevation: {:.2f} degrees."
+            " Sky fraction = {:.4f}".format(patches[-1].el_max0 / degree, patch._area)
+        )
+
+    if args.debug:
+        import matplotlib.pyplot as plt
+
+        polmap = None
+        if args.polmap:
+            polmap = hp.read_map(args.polmap, [1, 2])
+            bad = polmap[0] == hp.UNSEEN
+            polmap = np.sqrt(polmap[0] ** 2 + polmap[1] ** 2) * 1e6
+            polmap[bad] = hp.UNSEEN
+        plt.style.use("default")
+        cmap = cm.inferno
+        cmap.set_under("w")
+        plt.figure(figsize=[20, 4])
+        plt.subplots_adjust(left=0.1, right=0.9)
+        patch_color = "black"
+        sun_color = "black"
+        sun_lw = 8
+        sun_avoidance_color = "gray"
+        moon_color = "black"
+        moon_lw = 2
+        moon_avoidance_color = "gray"
+        alpha = 0.5
+        avoidance_alpha = 0.01
+        sun_step = np.int(86400 * 1)
+        moon_step = np.int(86400 * 0.1)
+        for iplot, coord in enumerate("CEG"):
+            scoord = {"C": "Equatorial", "E": "Ecliptic", "G": "Galactic"}[coord]
+            title = scoord  # + ' patch locations'
+            if polmap is None:
+                nside = 256
+                avoidance_map = np.zeros(12 * nside ** 2)
+                # hp.mollview(np.zeros(12) + hp.UNSEEN, coord=coord, cbar=False,
+                #             title='', sub=[1, 3, 1 + iplot], cmap=cmap)
+            else:
+                hp.mollview(
+                    polmap,
+                    coord="G" + coord,
+                    cbar=True,
+                    unit=r"$\mu$K",
+                    min=args.pol_min,
+                    max=args.pol_max,
+                    norm="log",
+                    cmap=cmap,
+                    title=title,
+                    sub=[1, 3, 1 + iplot],
+                    notext=True,
+                    format="%.1f",
+                    xsize=1600,
+                )
+            # Plot sun and moon avoidance circle
+            sunlon, sunlat = [], []
+            moonlon, moonlat = [], []
+            sun_avoidance_angle = args.sun_avoidance_angle_deg * degree
+            moon_avoidance_angle = args.moon_avoidance_angle_deg * degree
+            for lon, lat, sso, angle_min, color, step, lw in [
+                (
+                    sunlon,
+                    sunlat,
+                    sun,
+                    sun_avoidance_angle,
+                    sun_avoidance_color,
+                    sun_step,
+                    sun_lw,
+                ),
+                (
+                    moonlon,
+                    moonlat,
+                    moon,
+                    moon_avoidance_angle,
+                    moon_avoidance_color,
+                    moon_step,
+                    moon_lw,
+                ),
+            ]:
+                for t in range(np.int(start_timestamp), np.int(stop_timestamp), step):
+                    observer.date = to_DJD(t)
+                    sso.compute(observer)
+                    lon.append(sso.a_ra / degree)
+                    lat.append(sso.a_dec / degree)
+                    if angle_min <= 0:
+                        continue
+                    if polmap is None:
+                        # accumulate avoidance map
+                        vec = hp.dir2vec(lon[-1], lat[-1], lonlat=True)
+                        pix = hp.query_disc(nside, vec, angle_min)
+                        for p in pix:
+                            avoidance_map[p] += 1
+                    else:
+                        # plot a circle around the location
+                        clon, clat = [], []
+                        phi = sso.a_ra
+                        theta = sso.a_dec
+                        r = angle_min
+                        for ang in np.linspace(0, 2 * np.pi, 36):
+                            dtheta = np.cos(ang) * r
+                            dphi = np.sin(ang) * r / np.cos(theta + dtheta)
+                            clon.append((phi + dphi) / degree)
+                            clat.append((theta + dtheta) / degree)
+                        hp.projplot(
+                            clon,
+                            clat,
+                            "-",
+                            color=color,
+                            alpha=avoidance_alpha,
+                            lw=lw,
+                            threshold=1,
+                            lonlat=True,
+                            coord="C",
+                        )
+            if polmap is None:
+                avoidance_map[avoidance_map == 0] = hp.UNSEEN
+                hp.mollview(
+                    avoidance_map,
+                    coord="C" + coord,
+                    cbar=False,
+                    title="",
+                    sub=[1, 3, 1 + iplot],
+                    cmap=cmap,
+                )
+            hp.graticule(30, verbose=False)
+
+            # Plot patches
+            for patch in patches:
+                lon = [corner._ra / degree for corner in patch.corners]
+                lat = [corner._dec / degree for corner in patch.corners]
+                if len(lon) == 0:
+                    # Special patch without sky coordinates
+                    continue
+                lon.append(lon[0])
+                lat.append(lat[0])
+                log.info(
+                    "{} corners:\n lon = {}\n lat = {}".format(patch.name, lon, lat)
+                )
+                hp.projplot(
+                    lon,
+                    lat,
+                    "-",
+                    threshold=1,
+                    lonlat=True,
+                    coord="C",
+                    color=patch_color,
+                    lw=2,
+                    alpha=alpha,
+                )
+                if len(patches) > 10:
+                    continue
+                # label the patch
+                it = np.argmax(lat)
+
area = patch.get_area(observer) + title = "{} {:.2f}%".format(patch.name, 100 * area) + hp.projtext( + lon[it], + lat[it], + title, + lonlat=True, + coord="C", + color=patch_color, + fontsize=14, + alpha=alpha, + ) + if polmap is not None: + # Plot Sun and Moon trajectory + hp.projplot( + sunlon, + sunlat, + "-", + color=sun_color, + alpha=alpha, + threshold=1, + lonlat=True, + coord="C", + lw=sun_lw, + ) + hp.projplot( + moonlon, + moonlat, + "-", + color=moon_color, + alpha=alpha, + threshold=1, + lonlat=True, + coord="C", + lw=moon_lw, + ) + hp.projtext( + sunlon[0], + sunlat[0], + "Sun", + color=sun_color, + lonlat=True, + coord="C", + fontsize=14, + alpha=alpha, + ) + hp.projtext( + moonlon[0], + moonlat[0], + "Moon", + color=moon_color, + lonlat=True, + coord="C", + fontsize=14, + alpha=alpha, + ) + + plt.savefig("patches.png") + plt.close() + + # Normalize the weights + for i in range(len(patches)): + patches[i].weight /= total_weight + return patches + + +def run_scheduler(opts=None): + args, start_timestamp, stop_timestamp = parse_args(opts=opts) + + observer = ephem.Observer() + observer.lon = args.site_lon + observer.lat = args.site_lat + observer.elevation = args.site_alt # In meters + observer.epoch = "2000" + observer.temp = 0 # in Celcius + observer.compute_pressure() + + sun = ephem.Sun() + moon = ephem.Moon() + + patches = parse_patches(args, observer, sun, moon, start_timestamp, stop_timestamp) + + build_schedule(args, start_timestamp, stop_timestamp, patches, observer, sun, moon) + return From 7789ac13ec321d8403c1d9e96122dbcf2504e53c Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 7 Dec 2020 12:17:20 -0800 Subject: [PATCH 039/690] Fix typos, add unit test for ScanMap. --- src/toast/covariance.py | 2 - src/toast/ops/CMakeLists.txt | 1 + src/toast/ops/mapmaker.py | 2 +- src/toast/ops/pointing_healpix.py | 9 ++-- src/toast/ops/scan_map.py | 21 ++++++++- src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/ops_scan_map.py | 76 +++++++++++++++++++++++++++++++ src/toast/tests/runner.py | 2 + 8 files changed, 106 insertions(+), 8 deletions(-) create mode 100644 src/toast/tests/ops_scan_map.py diff --git a/src/toast/covariance.py b/src/toast/covariance.py index 252111aad..62a91c0ec 100644 --- a/src/toast/covariance.py +++ b/src/toast/covariance.py @@ -6,8 +6,6 @@ from .timing import function_timer -from .operator import Operator - from ._libtoast import ( AlignedF64, cov_mult_diag, diff --git a/src/toast/ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt index 0ba5eb2cb..7142664e3 100644 --- a/src/toast/ops/CMakeLists.txt +++ b/src/toast/ops/CMakeLists.txt @@ -19,6 +19,7 @@ install(FILES mapmaker_binning.py mapmaker_projection.py mapmaker_templates.py + mapmaker.py madam.py DESTINATION ${PYTHON_SITE}/toast/ops ) diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index 6838a43b4..ecf34cfae 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -8,7 +8,7 @@ from ..utils import Logger -from ..traits import trait_docs, Int, Unicode, Bool +from ..traits import trait_docs, Int, Unicode, Bool, Instance from ..timing import function_timer diff --git a/src/toast/ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py index a6a46b2b3..20c2374ba 100644 --- a/src/toast/ops/pointing_healpix.py +++ b/src/toast/ops/pointing_healpix.py @@ -173,7 +173,10 @@ def _reset_hpix(self, change): # Current values: nside = self.nside nside_submap = self.nside_submap - nnz = self._nnz + mode = self.mode + self._nnz = 1 + if mode == "IQU": + self._nnz = 3 # 
Update to the trait that changed if change["name"] == "nside": @@ -182,9 +185,9 @@ def _reset_hpix(self, change): nside_submap = change["new"] if change["name"] == "mode": if change["new"] == "IQU": - nnz = 3 + self._nnz = 3 else: - nnz = 1 + self._nnz = 1 self.hpix = HealpixPixels(nside) self._n_pix = 12 * nside ** 2 self._n_pix_submap = 12 * nside_submap ** 2 diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index 36a3a9eca..f4beea49e 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -6,7 +6,7 @@ import numpy as np -from ..utils import Logger +from ..utils import Logger, AlignedF64 from ..traits import trait_docs, Int, Unicode, Bool @@ -74,6 +74,7 @@ def _exec(self, data, detectors=None, **kwargs): map_data = data[self.map_key] if not isinstance(map_data, PixelData): raise RuntimeError("The map to scan must be a PixelData instance") + map_dist = map_data.distribution for ob in data.obs: # Get the detectors we are using for this observation @@ -82,10 +83,26 @@ def _exec(self, data, detectors=None, **kwargs): # Nothing to do for this observation continue + # Sanity check the number of non-zeros between the map and the + # pointing matrix + check_nnz = 1 + if len(ob.detdata[self.weights].detector_shape) > 1: + check_nnz = ob.detdata[self.weights].detector_shape[-1] + if map_data.n_value != check_nnz: + msg = "Detector data '{}' in observation '{}' has {} nnz instead of {} in the map".format( + self.weights, ob.name, check_nnz, map_data.n_value + ) + log.error(msg) + raise RuntimeError(msg) + # Temporary array, re-used for all detectors maptod_raw = AlignedF64.zeros(ob.n_local_samples) maptod = maptod_raw.array() + # If our output detector data does not yet exist, create it + if self.det_data not in ob: + ob.detdata.create(self.det_data, dtype=np.float64, detectors=dets) + for det in dets: # The pixels, weights, and data. pix = ob.detdata[self.pixels][det] @@ -93,7 +110,7 @@ def _exec(self, data, detectors=None, **kwargs): ddata = ob.detdata[self.det_data][det] # Get local submap and pixels - local_sm, local_pix = dist.global_pixel_to_submap(pix) + local_sm, local_pix = map_dist.global_pixel_to_submap(pix) # We support projecting from either float64 or float32 maps. diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index c6f54a026..a8331ec2d 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -24,5 +24,6 @@ install(FILES ops_pointing_healpix.py sim_focalplane.py ops_memory_counter.py + ops_scan_map.py DESTINATION ${PYTHON_SITE}/toast/tests ) diff --git a/src/toast/tests/ops_scan_map.py b/src/toast/tests/ops_scan_map.py new file mode 100644 index 000000000..1952687a9 --- /dev/null +++ b/src/toast/tests/ops_scan_map.py @@ -0,0 +1,76 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +import numpy.testing as nt + +from astropy import units as u + +from .mpi import MPITestCase + +from .. 
import ops as ops + +from ..pixels import PixelData + +from ._helpers import create_outdir, create_satellite_data + + +class ScanMapTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + np.random.seed(123456) + + def create_fake_sky(self, data, dist_key, map_key): + dist = data[dist_key] + pix_data = PixelData(dist, np.float64, n_value=3) + # Just replicate the fake data across all local submaps + pix_data.data[:, :, 0] = 100.0 * np.random.uniform(size=dist.n_pix_submap) + pix_data.data[:, :, 1] = np.random.uniform(size=dist.n_pix_submap) + pix_data.data[:, :, 2] = np.random.uniform(size=dist.n_pix_submap) + data[map_key] = pix_data + + def test_scan(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + self.create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Manual check of the projection of map values to timestream + + map_data = data["fake_map"] + for ob in data.obs: + for det in ob.local_detectors: + wt = ob.detdata[pointing.weights][det] + local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap( + ob.detdata[pointing.pixels][det] + ) + for i in range(ob.n_local_samples): + if local_pix[i] < 0: + continue + val = 0.0 + for j in range(3): + val += wt[i, j] * map_data.data[local_sm[i], local_pix[i], j] + np.testing.assert_almost_equal(val, ob.detdata["signal"][det, i]) + + del data + return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 54da2d06f..b50950795 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -35,6 +35,7 @@ from . import ops_pointing_healpix as test_ops_pointing_healpix from . import ops_sim_tod_noise as test_ops_sim_tod_noise from . import ops_mapmaker_utils as test_ops_mapmaker_utils +from . import ops_scan_map as test_ops_scan_map from . import covariance as test_covariance @@ -144,6 +145,7 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_pointing_healpix)) suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise)) suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_utils)) + suite.addTest(loader.loadTestsFromModule(test_ops_scan_map)) suite.addTest(loader.loadTestsFromModule(test_covariance)) From 79b1465326744c7db5d1df9a43a4a0e48bca391e Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 7 Dec 2020 12:48:57 -0800 Subject: [PATCH 040/690] More work on madam port. 
[ci skip] --- src/toast/ops/__init__.py | 2 +- src/toast/ops/madam.py | 33 ++++---- src/toast/ops/sim_tod_noise.py | 8 +- src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/ops_madam.py | 139 +++++++++++++++++++++++++++++++++ src/toast/tests/runner.py | 2 + 6 files changed, 163 insertions(+), 22 deletions(-) create mode 100644 src/toast/tests/ops_madam.py diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index e55d42a4d..8c571e035 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -37,4 +37,4 @@ from .mapmaker import MapMaker -# from .madam import Madam +from .madam import Madam diff --git a/src/toast/ops/madam.py b/src/toast/ops/madam.py index 4863bfc90..557aa3758 100644 --- a/src/toast/ops/madam.py +++ b/src/toast/ops/madam.py @@ -4,11 +4,13 @@ from ..mpi import MPI, use_mpi +import os + import traitlets import numpy as np -from ..utils import Logger +from ..utils import Logger, Timer, GlobalTimers, dtype_to_aligned from ..traits import trait_docs, Int, Unicode, Bool, Dict @@ -16,10 +18,6 @@ from .operator import Operator -from .clear import Clear - -from .copy import Copy - from .memory_counter import MemoryCounter @@ -169,7 +167,6 @@ def _exec(self, data, detectors=None): ( all_dets, nsamp, - ndet, nnz, nnz_full, nnz_stride, @@ -179,14 +176,14 @@ def _exec(self, data, detectors=None): ) = self._prepare(data, detectors) psdinfo, signal_type, pixels_dtype, weight_dtype = self._stage_data( + data, + all_dets, nsamp, - ndet, nnz, nnz_full, nnz_stride, - obs_period_ranges, - psdfreqs, - dets, + interval_starts, + psd_freqs, nside, ) @@ -244,8 +241,8 @@ def _accelerators(self): def _prepare(self, data, detectors): """Examine the data and determine quantities needed to set up Madam buffers""" log = Logger.get() - timer = Timer() - timer.start() + # timer = Timer() + # timer.start() if "nside_map" not in self.params: raise RuntimeError( @@ -276,7 +273,7 @@ def _prepare(self, data, detectors): for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) - all_dets.add(dets) + all_dets.update(dets) # Check that the timestamps exist. if self.times not in ob.shared: @@ -397,9 +394,9 @@ def _prepare(self, data, detectors): # determine the number of samples per detector data.comm.comm_world.Barrier() - if self._rank == 0: - log.debug() - timer.report_clear("Collect dataset dimensions") + # if self._rank == 0: + # log.debug() + # timer.report_clear("Collect dataset dimensions") return ( all_dets, @@ -455,7 +452,7 @@ def _stage_data( # Copy timestamps and PSDs all at once, since they are never purged. 
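         # (dtype_to_aligned() returns a (storage_class, itemsize) pair, for
         # example: storage, itemsize = dtype_to_aligned(np.float64).  Only
         # the first element is the aligned allocator, hence the ", _"
         # unpacking below.)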
- timestamp_storage = dtype_to_aligned(madam.TIMESTAMP_TYPE) + timestamp_storage, _ = dtype_to_aligned(madam.TIMESTAMP_TYPE) self._madam_timestamps_raw = timestamp_storage.zeros(nsamp) self._madam_timestamps = self._madam_timestamps_raw.array() psds = dict() @@ -504,7 +501,7 @@ def _stage_data( def copy_local(detdata_name, madam_dtype, dnnz, do_flags=False, do_purge=False): """Helper function to create a madam buffer from a local detdata key.""" - storage = dtype_to_aligned(madam_dtype) + storage, _ = dtype_to_aligned(madam_dtype) n_all_det = len(all_dets) raw = storage.zeros(nsamp * n_all_det) wrapped = raw.array() diff --git a/src/toast/ops/sim_tod_noise.py b/src/toast/ops/sim_tod_noise.py index a9bbcf6f5..7e0bc2b3d 100644 --- a/src/toast/ops/sim_tod_noise.py +++ b/src/toast/ops/sim_tod_noise.py @@ -218,7 +218,9 @@ class SimNoise(Operator): times = Unicode("times", help="Observation shared key for timestamps") - out = Unicode("noise", help="Observation detdata key for output noise timestreams") + out = Unicode( + "noise", help="Observation detdata key for accumulating noise timestreams" + ) @traitlets.validate("realization") def _check_realization(self, proposal): @@ -281,8 +283,8 @@ def _exec(self, data, detectors=None, **kwargs): # detectors within the observation. # Create output if it does not exist - if self.out not in ob: - ob.detdata.create(self.out, detshape=(), dtype=np.float64) + if self.out not in ob.detdata: + ob.detdata.create(self.out, dtype=np.float64) (rate, dt, dt_min, dt_max, dt_std) = rate_from_times( ob.shared[self.times].data diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index a8331ec2d..8dfe5b3fc 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -25,5 +25,6 @@ install(FILES sim_focalplane.py ops_memory_counter.py ops_scan_map.py + ops_madam.py DESTINATION ${PYTHON_SITE}/toast/tests ) diff --git a/src/toast/tests/ops_madam.py b/src/toast/tests/ops_madam.py new file mode 100644 index 000000000..42683869d --- /dev/null +++ b/src/toast/tests/ops_madam.py @@ -0,0 +1,139 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +import numpy.testing as nt + +from astropy import units as u + +from .mpi import MPITestCase + +from ..noise import Noise + +from .. 
import ops as ops + +from ..pixels import PixelDistribution, PixelData + +from ._helpers import create_outdir, create_satellite_data + + +class MadamTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + np.random.seed(123456) + + def create_fake_sky(self, data, dist_key, map_key): + dist = data[dist_key] + pix_data = PixelData(dist, np.float64, n_value=3) + # Just replicate the fake data across all local submaps + pix_data.data[:, :, 0] = 100.0 * np.random.uniform(size=dist.n_pix_submap) + pix_data.data[:, :, 1] = np.random.uniform(size=dist.n_pix_submap) + pix_data.data[:, :, 2] = np.random.uniform(size=dist.n_pix_submap) + data[map_key] = pix_data + + def test_scan(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + self.create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + def test_madam_output(self): + if not ops.Madam.available: + print("libmadam not available, skipping tests") + return + + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + self.create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise from this model + sim_noise = ops.SimNoise(noise_model="noise_model", out="signal") + sim_noise.apply(data) + + # Run madam on this + + # Madam assumes constant sample rate- just get it from the noise model for + # the first detector. + sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + pars = {} + pars["kfirst"] = "T" + pars["iter_max"] = 100 + pars["base_first"] = 5.0 + pars["fsample"] = sample_rate + pars["nside_map"] = pointing.nside + pars["nside_cross"] = pointing.nside + pars["nside_submap"] = min(8, pointing.nside) + pars["write_map"] = "F" + pars["write_binmap"] = "T" + pars["write_matrix"] = "F" + pars["write_wcov"] = "F" + pars["write_hits"] = "T" + pars["kfilter"] = "F" + pars["path_output"] = self.outdir + pars["info"] = 0 + + # FIXME: add a view here once our test data includes it + + madam = ops.Madam( + params=pars, + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + pixels_nested=pointing.nest, + det_out="destriped", + noise_model="noise_model", + copy_groups=2, + purge_det_data=False, + purge_pointing=True, + ) + madam.apply(data) + + for ob in data.obs: + for det in ob.local_detectors: + # Do some check... 
+ pass + + del data + return diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index b50950795..b8cb884fb 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -36,6 +36,7 @@ from . import ops_sim_tod_noise as test_ops_sim_tod_noise from . import ops_mapmaker_utils as test_ops_mapmaker_utils from . import ops_scan_map as test_ops_scan_map +from . import ops_madam as test_ops_madam from . import covariance as test_covariance @@ -146,6 +147,7 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise)) suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_utils)) suite.addTest(loader.loadTestsFromModule(test_ops_scan_map)) + suite.addTest(loader.loadTestsFromModule(test_ops_madam)) suite.addTest(loader.loadTestsFromModule(test_covariance)) From b584000b7c0630685e9501de52b82d56503a05dc Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 8 Dec 2020 10:42:25 -0800 Subject: [PATCH 041/690] Add unit tests for views. Add a new global view (key==None) which provides a default view of the whole local data. --- src/toast/observation.py | 4 + src/toast/observation_data.py | 71 ++++++++++++++---- src/toast/observation_view.py | 53 ++++++------- src/toast/tests/observation.py | 132 +++++++++++++++++++++++++++++++++ 4 files changed, 217 insertions(+), 43 deletions(-) diff --git a/src/toast/observation.py b/src/toast/observation.py index fd6705379..035c4c26a 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -299,6 +299,10 @@ def __init__( self.intervals = IntervalMgr(self._comm, self.dist.comm_row, self.dist.comm_col) + # Create a default global IntervalList that includes a single interval with + # all the local data span. This is useful for code that wants to access the + # whole observation in the same way as a particular view. + # Fully clear the observation def clear(self): diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py index 4b68cc313..f531948a5 100644 --- a/src/toast/observation_data.py +++ b/src/toast/observation_data.py @@ -55,8 +55,8 @@ class DetectorData(object): slicing by index and by a list of detectors is possible:: - view = detdata[0:-1, 2:4] - view = detdata[["d01", "d03"], 3:8] + array_view = detdata[0:-1, 2:4] + array_view = detdata[["d01", "d03"], 3:8] Args: detectors (list): A list of detector names in exactly the order you wish. @@ -67,10 +67,12 @@ class DetectorData(object): data. The only supported types are 1, 2, 4, and 8 byte signed and unsigned integers, 4 and 8 byte floating point numbers, and 4 and 8 byte complex numbers. + view_data (array): (Internal use only) This makes it possible to create + DetectorData instances that act as a view on an existing array. 
""" - def __init__(self, detectors, shape, dtype): + def __init__(self, detectors, shape, dtype, view_data=None): log = Logger.get() self._detectors = detectors @@ -84,7 +86,6 @@ def __init__(self, detectors, shape, dtype): # construct a new dtype in case the parameter given is shortcut string self._dtype = np.dtype(dtype) self._storage_class, self.itemsize = dtype_to_aligned(dtype) - self.itemsize = 0 # Verify that our shape contains only integral values self._flatshape = len(self._detectors) @@ -94,13 +95,28 @@ def __init__(self, detectors, shape, dtype): log.error(msg) raise ValueError(msg) self._flatshape *= d + self._memsize = self.itemsize * self._flatshape shp = [len(self._detectors)] shp.extend(shape) self._shape = tuple(shp) - self._raw = self._storage_class.zeros(self._flatshape) - self._data = self._raw.array().reshape(self._shape) - self._memsize = self.itemsize * self._flatshape + if view_data is None: + # Allocate the data + self._raw = self._storage_class.zeros(self._flatshape) + self._data = self._raw.array().reshape(self._shape) + self._is_view = False + else: + # We are provided the data + if self._shape != view_data.shape: + msg = ( + "view data shape ({}) does not match constructor shape ({})".format( + view_data.shape, self._shape + ) + ) + log.error(msg) + raise RuntimeError(msg) + self._data = view_data + self._is_view = True @property def detectors(self): @@ -138,9 +154,10 @@ def clear(self): """ if hasattr(self, "_data"): del self._data - if hasattr(self, "_raw"): - self._raw.clear() - del self._raw + if not self._is_view: + if hasattr(self, "_raw"): + self._raw.clear() + del self._raw def __del__(self): self.clear() @@ -194,7 +211,7 @@ def _get_view(self, key): def __getitem__(self, key): view = self._get_view(key) - return np.array(self._data[view], dtype=self._dtype, copy=False) + return self._data[view] def __delitem__(self, key): raise NotImplementedError("Cannot delete individual elements") @@ -204,6 +221,26 @@ def __setitem__(self, key, value): view = self._get_view(key) self._data[view] = value + def view(self, key): + """Create a new DetectorData instance that acts as a view of the data. + + Args: + key (tuple/slice): This is an indexing on detector or both detector and + sample, the same as you would use to access data elements. + + Returns: + (DetectorData): A new instance whose data is a view of the current object. 
+ + """ + full_view = self._get_view(key) + view_dets = self.detectors[full_view[0]] + return DetectorData( + view_dets, + self._data[full_view].shape[1:], + self._dtype, + view_data=self._data[full_view], + ) + def __iter__(self): return iter(self._data) @@ -211,7 +248,12 @@ def __len__(self): return len(self._detectors) def __repr__(self): - val = " Date: Tue, 8 Dec 2020 20:31:14 -0800 Subject: [PATCH 042/690] Add starting of unit test for binned map maker --- src/toast/ops/madam.py | 87 +++---------------- src/toast/ops/mapmaker_binning.py | 13 +-- src/toast/ops/mapmaker_utils.py | 68 +++++++++------ src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/ops_mapmaker_binning.py | 108 ++++++++++++++++++++++++ src/toast/tests/ops_mapmaker_utils.py | 15 ++-- src/toast/tests/runner.py | 2 + 7 files changed, 183 insertions(+), 111 deletions(-) create mode 100644 src/toast/tests/ops_mapmaker_binning.py diff --git a/src/toast/ops/madam.py b/src/toast/ops/madam.py index 557aa3758..12342fcd3 100644 --- a/src/toast/ops/madam.py +++ b/src/toast/ops/madam.py @@ -461,20 +461,10 @@ def _stage_data( time_offset = 0.0 for ob in data.obs: - if self.view is not None: - for vw in ob.view[self.view].shared[self.times]: - offset = interval_starts[interval] - slc = slice(offset, offset + len(vw), 1) - self._madam_timestamps[slc] = vw - if self.translate_timestamps: - off = self._madam_timestamps[offset] - time_offset - self._madam_timestamps[slc] -= off - time_offset = self._madam_timestamps[slc][-1] + 1.0 - interval += 1 - else: + for vw in ob.view[self.view].shared[self.times]: offset = interval_starts[interval] - slc = slice(offset, offset + ob.n_local_samples, 1) - self._madam_timestamps[slc] = ob.shared[self.times] + slc = slice(offset, offset + len(vw), 1) + self._madam_timestamps[slc] = vw if self.translate_timestamps: off = self._madam_timestamps[offset] - time_offset self._madam_timestamps[slc] -= off @@ -507,82 +497,31 @@ def copy_local(detdata_name, madam_dtype, dnnz, do_flags=False, do_purge=False): wrapped = raw.array() interval = 0 for ob in data.obs: - if self.view is not None: - for vw in ob.view[self.view].detdata[detdeta_name]: - offset = interval_starts[interval] - flags = None - if do_flags: - if ( - self.shared_flags is not None - or self.det_flags is not None - ): - # Using flags - flags = np.zeros(len(vw), dtype=np.uint8) - if self.shared_flags is not None: - flags |= ( - ob.view[self.view].shared[self.shared_flags] - & self.shared_flag_mask - ) - - for idet, det in enumerate(all_dets): - if det not in ob.local_detectors: - continue - slc = slice( - (idet * nsamp + offset) * dnnz, - (idet * nsamp + offset + len(vw)) * dnnz, - 1, - ) - if dnnz > 1: - wrapped[slc] = vw[idet].flatten()[::nnz_stride] - else: - wrapped[slc] = vw[idet].flatten() - detflags = None - if do_flags: - if self.det_flags is None: - detflags = flags - else: - detflags = np.copy(flags) - detflags |= ( - ob.view[self.view].detdata[self.det_flags][idet] - & self.det_flag_mask - ) - # The do_flags option should only be true if we are - # processing the pixel indices (which is how madam - # effectively implements flagging). So we will set - # all flagged samples to "-1" - if detflags is not None: - # sanity check - if nnz != 1: - raise RuntimeError( - "Internal error on madam copy. Only pixel indices should be flagged." 
- ) - wrapped[slc][detflags != 0] = -1 - interval += 1 - else: + for vw in ob.view[self.view].detdata[detdeta_name]: offset = interval_starts[interval] flags = None if do_flags: if self.shared_flags is not None or self.det_flags is not None: # Using flags - flags = np.zeros(ob.n_local_samples, dtype=np.uint8) + flags = np.zeros(len(vw), dtype=np.uint8) if self.shared_flags is not None: flags |= ( - ob.shared[self.shared_flags] & self.shared_flag_mask + ob.view[self.view].shared[self.shared_flags] + & self.shared_flag_mask ) + for idet, det in enumerate(all_dets): if det not in ob.local_detectors: continue slc = slice( (idet * nsamp + offset) * dnnz, - (idet * nsamp + offset + ob.n_local_samples) * dnnz, + (idet * nsamp + offset + len(vw)) * dnnz, 1, ) if dnnz > 1: - wrapped[slc] = ob.detdata[detdata_name][idet].flatten()[ - ::nnz_stride - ] + wrapped[slc] = vw[idet].flatten()[::nnz_stride] else: - wrapped[slc] = ob.detdata[detdata_name][idet].flatten() + wrapped[slc] = vw[idet].flatten() detflags = None if do_flags: if self.det_flags is None: @@ -590,7 +529,7 @@ def copy_local(detdata_name, madam_dtype, dnnz, do_flags=False, do_purge=False): else: detflags = np.copy(flags) detflags |= ( - ob.detdata[self.det_flags][idet] + ob.view[self.view].detdata[self.det_flags][idet] & self.det_flag_mask ) # The do_flags option should only be true if we are @@ -599,7 +538,7 @@ def copy_local(detdata_name, madam_dtype, dnnz, do_flags=False, do_purge=False): # all flagged samples to "-1" if detflags is not None: # sanity check - if dnnz != 1: + if nnz != 1: raise RuntimeError( "Internal error on madam copy. Only pixel indices should be flagged." ) diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py index 51d20854c..1be1a5821 100644 --- a/src/toast/ops/mapmaker_binning.py +++ b/src/toast/ops/mapmaker_binning.py @@ -14,10 +14,14 @@ from ..pixels import PixelDistribution, PixelData +from ..covariance import covariance_apply + from .operator import Operator from .pipeline import Pipeline +from .clear import Clear + from .mapmaker_utils import BuildHitMap, BuildNoiseWeighted, BuildInverseCovariance @@ -137,10 +141,12 @@ def _exec(self, data, detectors=None, **kwargs): clear_pointing = Clear(detdata=[self.pointing.pixels, self.pointing.weights]) - # Noise weighted map + # Noise weighted map. We output this to the final binned map location, + # since we will multiply by the covariance in-place. 
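To make the in-place step concrete: the pipeline accumulates the noise-weighted map z, and the binned map is then obtained by multiplying each pixel by its covariance. A per-pixel numpy sketch (sizes and values are illustrative):

import numpy as np

# zmap holds z = P^T N^-1 d per pixel; cov holds the nnz x nnz pixel
# covariance. The binned map m = C z overwrites z in place.
n_pix, nnz = 8, 3
zmap = np.random.standard_normal((n_pix, nnz))
cov = np.tile(0.5 * np.eye(nnz), (n_pix, 1, 1))

for p in range(n_pix):
    zmap[p] = cov[p] @ zmap[p]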
build_zmap = BuildNoiseWeighted( pixel_dist=self.pixel_dist, + zmap=self.binned, pixels=self.pointing.pixels, weights=self.pointing.weights, noise_model=self.noise_model, @@ -165,14 +171,11 @@ def _exec(self, data, detectors=None, **kwargs): pipe_out = accum.apply(data, detectors=detectors) # Extract the results - binned_map = pipe_out[1] + binned_map = data[self.binned] # Apply the covariance covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv")) - # Store products - data[self.binned] = binned_map - return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index eb1d355e1..0b6af7844 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -14,6 +14,8 @@ from ..pixels import PixelDistribution, PixelData +from ..covariance import covariance_invert + from .._libtoast import ( cov_accum_zmap, cov_accum_diag_hits, @@ -24,6 +26,8 @@ from .clear import Clear +from .pipeline import Pipeline + @trait_docs class BuildHitMap(Operator): @@ -52,6 +56,8 @@ class BuildHitMap(Operator): help="The Data key containing the submap distribution", ) + hits = Unicode("hits", help="The Data key for the output hit map") + det_flags = Unicode( None, allow_none=True, help="Observation detdata key for flags to use" ) @@ -148,7 +154,8 @@ def _finalize(self, data, **kwargs): self._hits.sync_alltoallv() else: self._hits.sync_allreduce() - return self._hits + data[self.hits] = self._hits + return def _requires(self): req = { @@ -197,6 +204,10 @@ class BuildInverseCovariance(Operator): help="The Data key containing the submap distribution", ) + inverse_covariance = Unicode( + "inv_covariance", help="The Data key for the output inverse covariance" + ) + det_flags = Unicode( None, allow_none=True, help="Observation detdata key for flags to use" ) @@ -232,6 +243,8 @@ def _check_sync_type(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) self._invcov = None + self._weight_nnz = None + self._cov_nnz = None @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -256,9 +269,6 @@ def _exec(self, data, detectors=None, **kwargs): ) ) - weight_nnz = None - cov_nnz = None - for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) @@ -287,20 +297,20 @@ def _exec(self, data, detectors=None, **kwargs): if self._invcov is None: # We will store the lower triangle of the covariance. 
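For reference, the packed lower-triangle layout implied here: nnz pointing weights give nnz * (nnz + 1) // 2 independent covariance elements per pixel. A one-sample accumulation sketch, matching the ordering used by the manual checks in the unit tests:

import numpy as np

nnz = 3
cov_nnz = nnz * (nnz + 1) // 2   # 6 values for IQU

w = np.array([1.0, 0.3, -0.2])   # pointing weights for one sample
detweight = 2.0                  # inverse noise variance for this detector
packed = np.zeros(cov_nnz)

off = 0
for j in range(nnz):
    for k in range(j, nnz):
        packed[off] += detweight * w[j] * w[k]
        off += 1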
if len(wts.detector_shape) == 1: - weight_nnz = 1 + self._weight_nnz = 1 else: - weight_nnz = wts.detector_shape[1] - cov_nnz = weight_nnz * (weight_nnz + 1) // 2 - self._invcov = PixelData(dist, np.float64, n_value=cov_nnz) + self._weight_nnz = wts.detector_shape[1] + self._cov_nnz = self._weight_nnz * (self._weight_nnz + 1) // 2 + self._invcov = PixelData(dist, np.float64, n_value=self._cov_nnz) else: check_nnz = None if len(wts.detector_shape) == 1: check_nnz = 1 else: check_nnz = wts.detector_shape[1] - if check_nnz != weight_nnz: - msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( - ob.name, det, self.weights + if check_nnz != self._weight_nnz: + msg = "observation '{}', detector '{}', pointing weights '{}' has {} nnz, not {}".format( + ob.name, det, self.weights, check_nnz, self._weight_nnz ) raise RuntimeError(msg) @@ -325,7 +335,7 @@ def _exec(self, data, detectors=None, **kwargs): cov_accum_diag_invnpp( dist.n_local_submap, dist.n_pix_submap, - weight_nnz, + self._weight_nnz, local_sm.astype(np.int64), local_pix.astype(np.int64), wts[det].reshape(-1), @@ -340,7 +350,8 @@ def _finalize(self, data, **kwargs): self._invcov.sync_alltoallv() else: self._invcov.sync_allreduce() - return self._invcov + data[self.inverse_covariance] = self._invcov + return def _requires(self): req = { @@ -391,6 +402,8 @@ class BuildNoiseWeighted(Operator): help="The Data key containing the submap distribution", ) + zmap = Unicode("zmap", help="The Data key for the output noise weighted map") + det_data = Unicode( None, allow_none=True, help="Observation detdata key for the timestream data" ) @@ -430,6 +443,7 @@ def _check_sync_type(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) self._zmap = None + self._weight_nnz = None @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -458,8 +472,6 @@ def _exec(self, data, detectors=None, **kwargs): ) ) - weight_nnz = None - for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) @@ -488,17 +500,17 @@ def _exec(self, data, detectors=None, **kwargs): # detector we have worked with we create the PixelData object. if self._zmap is None: if len(wts.detector_shape) == 1: - weight_nnz = 1 + self._weight_nnz = 1 else: - weight_nnz = wts.detector_shape[1] - self._zmap = PixelData(dist, np.float64, n_value=weight_nnz) + self._weight_nnz = wts.detector_shape[1] + self._zmap = PixelData(dist, np.float64, n_value=self._weight_nnz) else: check_nnz = None if len(wts.detector_shape) == 1: check_nnz = 1 else: check_nnz = wts.detector_shape[1] - if check_nnz != weight_nnz: + if check_nnz != self._weight_nnz: msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( ob.name, det, self.weights ) @@ -541,7 +553,8 @@ def _finalize(self, data, **kwargs): self._zmap.sync_alltoallv() else: self._zmap.sync_allreduce() - return self._zmap + data[self.zmap] = self._zmap + return def _requires(self): req = { @@ -732,16 +745,19 @@ def _exec(self, data, detectors=None, **kwargs): build_hits = BuildHitMap( pixel_dist=self.pixel_dist, + hits=self.hits, pixels=self.pointing.pixels, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, sync_type=self.sync_type, ) - # Inverse covariance + # Inverse covariance. Note that we save the output to our specified + # "covariance" key, because we are going to invert it in-place. 
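A per-pixel sketch of what in-place inversion with a reciprocal condition number map looks like (the threshold value and the helper function are hypothetical; covariance_invert is the real entry point):

import numpy as np

def invert_pixel(m, threshold=1.0e-3):
    # m is the symmetric nnz x nnz inverse covariance for one pixel.
    evals = np.linalg.eigvalsh(m)            # ascending eigenvalues
    rcond = evals[0] / evals[-1] if evals[-1] > 0 else 0.0
    if rcond >= threshold:
        return np.linalg.inv(m), rcond
    return np.zeros_like(m), rcond           # ill-conditioned pixel zeroed

inv, rcond = invert_pixel(np.diag([4.0, 1.0, 1.0]))
print(rcond)   # 0.25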
build_invcov = BuildInverseCovariance( pixel_dist=self.pixel_dist, + inverse_covariance=self.covariance, pixels=self.pointing.pixels, weights=self.pointing.weights, noise_model=self.noise_model, @@ -765,10 +781,10 @@ def _exec(self, data, detectors=None, **kwargs): pipe_out = accum.apply(data, detectors=detectors) # Extract the results - hits = pipe_out[1] - cov = pipe_out[2] + hits = data[self.hits] + cov = data[self.covariance] - # Invert the covariance + # Invert the covariance in place rcond = PixelData(cov.distribution, np.float64, n_value=1) covariance_invert( cov, @@ -777,9 +793,7 @@ def _exec(self, data, detectors=None, **kwargs): use_alltoallv=(self.sync_type == "alltoallv"), ) - # Store products - data[self.hits] = hits - data[self.covariance] = cov + # Store rcond data[self.rcond] = rcond return diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 8dfe5b3fc..d7ce0dda8 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -20,6 +20,7 @@ install(FILES ops_sim_satellite.py ops_sim_tod_noise.py ops_mapmaker_utils.py + ops_mapmaker_binning.py covariance.py ops_pointing_healpix.py sim_focalplane.py diff --git a/src/toast/tests/ops_mapmaker_binning.py b/src/toast/tests/ops_mapmaker_binning.py new file mode 100644 index 000000000..7007de4f8 --- /dev/null +++ b/src/toast/tests/ops_mapmaker_binning.py @@ -0,0 +1,108 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +import numpy.testing as nt + +from astropy import units as u + +from .mpi import MPITestCase + +from ..noise import Noise + +from .. import ops as ops + +from ..pixels import PixelDistribution, PixelData + +from ._helpers import create_outdir, create_satellite_data + + +class MapmakerBinningTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + np.random.seed(123456) + + def test_binned(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise + sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + sim_noise.apply(data) + + # Pointing operator + pointing = ops.PointingHealpix(nside=64, mode="IQU", hwp_angle="hwp_angle") + + # Build the covariance and hits + cov_and_hits = ops.CovarianceAndHits( + pixel_dist="pixel_dist", pointing=pointing, noise_model="noise_model" + ) + cov_and_hits.apply(data) + + # Set up binned map + + binner = ops.BinMap( + pixel_dist="pixel_dist", + covariance=cov_and_hits.covariance, + det_data="noise", + pointing=pointing, + noise_model="noise_model", + ) + binner.apply(data) + + binmap = binner.binned + + # # Manual check + # + # check_invnpp = PixelData(data["pixel_dist"], np.float64, n_value=6) + # check_invnpp_corr = PixelData(data["pixel_dist"], np.float64, n_value=6) + # + # for ob in data.obs: + # noise = ob["noise_model"] + # noise_corr = ob["noise_model_corr"] + # + # for det in ob.local_detectors: + # detweight = noise.detector_weight(det) + # detweight_corr = noise_corr.detector_weight(det) + # + # wt = ob.detdata["weights"][det] + # local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap( + # 
ob.detdata["pixels"][det] + # ) + # for i in range(ob.n_local_samples): + # if local_pix[i] < 0: + # continue + # off = 0 + # for j in range(3): + # for k in range(j, 3): + # check_invnpp.data[local_sm[i], local_pix[i], off] += ( + # detweight * wt[i, j] * wt[i, k] + # ) + # check_invnpp_corr.data[local_sm[i], local_pix[i], off] += ( + # detweight_corr * wt[i, j] * wt[i, k] + # ) + # off += 1 + # + # check_invnpp.sync_allreduce() + # check_invnpp_corr.sync_allreduce() + # + # for sm in range(invnpp.distribution.n_local_submap): + # for px in range(invnpp.distribution.n_pix_submap): + # if invnpp.data[sm, px, 0] != 0: + # nt.assert_almost_equal( + # invnpp.data[sm, px], check_invnpp.data[sm, px] + # ) + # if invnpp_corr.data[sm, px, 0] != 0: + # nt.assert_almost_equal( + # invnpp_corr.data[sm, px], check_invnpp_corr.data[sm, px] + # ) + del data + return diff --git a/src/toast/tests/ops_mapmaker_utils.py b/src/toast/tests/ops_mapmaker_utils.py index 74133ed98..9554228ad 100644 --- a/src/toast/tests/ops_mapmaker_utils.py +++ b/src/toast/tests/ops_mapmaker_utils.py @@ -54,7 +54,8 @@ def test_hits(self): pointing.apply(data) build_hits = ops.BuildHitMap(pixel_dist="pixel_dist") - hits = build_hits.apply(data) + build_hits.apply(data) + hits = data[build_hits.hits] # Manual check check_hits = PixelData(data["pixel_dist"], np.int64, n_value=1) @@ -106,12 +107,14 @@ def test_inv_cov(self): build_invnpp = ops.BuildInverseCovariance( pixel_dist="pixel_dist", noise_model="noise_model" ) - invnpp = build_invnpp.apply(data) + build_invnpp.apply(data) + invnpp = data[build_invnpp.inverse_covariance] build_invnpp_corr = ops.BuildInverseCovariance( pixel_dist="pixel_dist", noise_model="noise_model_corr" ) - invnpp_corr = build_invnpp_corr.apply(data) + build_invnpp_corr.apply(data) + invnpp_corr = data[build_invnpp_corr.inverse_covariance] # Manual check @@ -193,14 +196,16 @@ def test_zmap(self): build_zmap = ops.BuildNoiseWeighted( pixel_dist="pixel_dist", noise_model="noise_model", det_data="noise" ) - zmap = build_zmap.apply(data) + build_zmap.apply(data) + zmap = data[build_zmap.zmap] build_zmap_corr = ops.BuildNoiseWeighted( pixel_dist="pixel_dist", noise_model="noise_model_corr", det_data="noise_corr", ) - zmap_corr = build_zmap_corr.apply(data) + build_zmap_corr.apply(data) + zmap_corr = data[build_zmap_corr.zmap] # Manual check diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index b8cb884fb..67bff6ba1 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -35,6 +35,7 @@ from . import ops_pointing_healpix as test_ops_pointing_healpix from . import ops_sim_tod_noise as test_ops_sim_tod_noise from . import ops_mapmaker_utils as test_ops_mapmaker_utils +from . import ops_mapmaker_binning as test_ops_mapmaker_binning from . import ops_scan_map as test_ops_scan_map from . import ops_madam as test_ops_madam @@ -146,6 +147,7 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_ops_pointing_healpix)) suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise)) suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_utils)) + suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_binning)) suite.addTest(loader.loadTestsFromModule(test_ops_scan_map)) suite.addTest(loader.loadTestsFromModule(test_ops_madam)) From b170bbbd781a191a4639823da2319209adfa8998 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 14 Dec 2020 10:22:50 -0800 Subject: [PATCH 043/690] Madam and toast binned map unit tests passing. 
--- platforms/cori-gcc.sh | 2 +- src/toast/ops/CMakeLists.txt | 1 + src/toast/ops/madam.py | 868 +++++++++++++++++------- src/toast/ops/madam_utils.py | 259 +++++++ src/toast/ops/mapmaker_utils.py | 16 +- src/toast/pixels_io.py | 4 +- src/toast/tests/ops_madam.py | 164 ++++- src/toast/tests/ops_mapmaker_binning.py | 145 +++- src/toast/tests/pixels.py | 60 +- src/toast/tests/runner.py | 6 +- 10 files changed, 1228 insertions(+), 297 deletions(-) create mode 100644 src/toast/ops/madam_utils.py diff --git a/platforms/cori-gcc.sh b/platforms/cori-gcc.sh index 5e0a82a4f..ec5e531f0 100755 --- a/platforms/cori-gcc.sh +++ b/platforms/cori-gcc.sh @@ -9,7 +9,7 @@ cmake \ -DCMAKE_CXX_FLAGS="-O3 -g -fPIC -pthread" \ -DPYTHON_EXECUTABLE:FILEPATH=$(which python3) \ -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \ - -DMKL_DISABLED=TRUE \ + -DMKL_DISABLED=TRUE \ -DBLAS_LIBRARIES=$CMBENV_AUX_ROOT/lib/libopenblas.so \ -DLAPACK_LIBRARIES=$CMBENV_AUX_ROOT/lib/libopenblas.so \ -DFFTW_ROOT=$CMBENV_AUX_ROOT \ diff --git a/src/toast/ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt index 7142664e3..38bc89a94 100644 --- a/src/toast/ops/CMakeLists.txt +++ b/src/toast/ops/CMakeLists.txt @@ -21,5 +21,6 @@ install(FILES mapmaker_templates.py mapmaker.py madam.py + madam_utils.py DESTINATION ${PYTHON_SITE}/toast/ops ) diff --git a/src/toast/ops/madam.py b/src/toast/ops/madam.py index 12342fcd3..688c8298a 100644 --- a/src/toast/ops/madam.py +++ b/src/toast/ops/madam.py @@ -20,6 +20,14 @@ from .memory_counter import MemoryCounter +from .madam_utils import ( + log_time_memory, + stage_local, + stage_in_turns, + restore_local, + restore_in_turns, +) + madam = None if use_mpi: @@ -87,6 +95,16 @@ class Madam(Operator): help="If True, clear all observation detector pointing data after copying to madam buffers", ) + restore_det_data = Bool( + False, + help="If True, restore detector data to observations on completion", + ) + + restore_pointing = Bool( + False, + help="If True, restore detector pointing to observations on completion", + ) + mcmode = Bool( False, help="If true, Madam will store auxiliary information such as pixel matrices and noise filter.", @@ -106,9 +124,59 @@ class Madam(Operator): help="Observation key with optional scaling factor for noise PSDs", ) + mem_report = Bool( + False, help="Print system memory use while staging / unstaging data." 
+ ) + + @traitlets.validate("shared_flag_mask") + def _check_shared_flag_mask(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("Shared flag mask should be a positive integer") + return check + + @traitlets.validate("det_flag_mask") + def _check_det_flag_mask(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("Det flag mask should be a positive integer") + return check + + @traitlets.validate("restore_det_data") + def _check_restore_det_data(self, proposal): + check = proposal["value"] + if check and not self.purge_det_data: + raise traitlets.TraitError( + "Cannot set restore_det_data since purge_det_data is False" + ) + if check and self.det_out is not None: + raise traitlets.TraitError( + "Cannot set restore_det_data since det_out is not None" + ) + return check + + @traitlets.validate("restore_pointing") + def _check_restore_pointing(self, proposal): + check = proposal["value"] + if check and not self.purge_pointing: + raise traitlets.TraitError( + "Cannot set restore_pointing since purge_pointing is False" + ) + return check + + @traitlets.validate("det_out") + def _check_det_out(self, proposal): + check = proposal["value"] + if check is not None and self.restore_det_data: + raise traitlets.TraitError( + "If det_out is not None, restore_det_data should be False" + ) + return check + def __init__(self, **kwargs): super().__init__(**kwargs) self._cached = False + self._logprefix = "Madam:" @classmethod def available(cls): @@ -119,8 +187,7 @@ def clear(self): """Delete the underlying memory. This will forcibly delete the C-allocated memory and invalidate all python - references to the buffers. DO NOT CALL THIS unless you are sure all references - are no longer being used. + references to the buffers. 
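The validators above follow the standard traitlets cross-validation pattern, where a proposal for one trait is checked against the current values of others. A minimal self-contained sketch with hypothetical trait names:

import traitlets
from traitlets import Bool, HasTraits

class Example(HasTraits):
    purge = Bool(False)
    restore = Bool(False)

    @traitlets.validate("restore")
    def _check_restore(self, proposal):
        value = proposal["value"]
        if value and not self.purge:
            raise traitlets.TraitError("restore requires purge=True")
        return value

ex = Example(purge=True)
ex.restore = True   # accepted; with purge=False this would raise TraitError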
""" if self._cached: @@ -163,7 +230,21 @@ def _exec(self, data, detectors=None): "You must set the pixels and weights before calling exec()" ) + # Set madam parameters that depend on our traits + if self.mcmode: + self.params["mcmode"] = True + else: + self.params["mcmode"] = False + + if self.det_out is not None: + self.params["write_tod"] = True + else: + self.params["write_tod"] = False + # Check input parameters and compute the sizes of Madam data objects + if data.comm.world_rank == 0: + msg = "{} Computing data sizes".format(self._logprefix) + log.info(msg) ( all_dets, nsamp, @@ -175,7 +256,10 @@ def _exec(self, data, detectors=None): nside, ) = self._prepare(data, detectors) - psdinfo, signal_type, pixels_dtype, weight_dtype = self._stage_data( + if data.comm.world_rank == 0: + msg = "{} Copying toast data to buffers".format(self._logprefix) + log.info(msg) + psdinfo, signal_dtype, pixels_dtype, weight_dtype = self._stage_data( data, all_dets, nsamp, @@ -187,26 +271,28 @@ def _exec(self, data, detectors=None): nside, ) - # self._destripe(pars, dets, periods, psdinfo) - # - # self._unstage_data( - # nsamp, - # nnz, - # nnz_full, - # obs_period_ranges, - # dets, - # signal_type, - # pixels_dtype, - # nside, - # weight_dtype, - # ) + if data.comm.world_rank == 0: + msg = "{} Destriping data".format(self._logprefix) + log.info(msg) + self._destripe(data, all_dets, interval_starts, psdinfo) - return + if data.comm.world_rank == 0: + msg = "{} Copying buffers back to toast data".format(self._logprefix) + log.info(msg) + self._unstage_data( + data, + all_dets, + nsamp, + nnz, + nnz_full, + interval_starts, + signal_dtype, + pixels_dtype, + nside, + weight_dtype, + ) - def __del__(self): - if self._cached: - madam.clear_caches() - self._cached = False + return def _finalize(self, data, **kwargs): return @@ -241,8 +327,8 @@ def _accelerators(self): def _prepare(self, data, detectors): """Examine the data and determine quantities needed to set up Madam buffers""" log = Logger.get() - # timer = Timer() - # timer.start() + timer = Timer() + timer.start() if "nside_map" not in self.params: raise RuntimeError( @@ -356,12 +442,14 @@ def _prepare(self, data, detectors): if data.comm.world_rank == 0: log.info( - "Madam: {:.2f} % of samples are included in valid " - "intervals.".format(nsamp_valid * 100.0 / nsamp) + "{}{:.2f} % of samples are included in valid intervals.".format( + self._logprefix, nsamp_valid * 100.0 / nsamp + ) ) nsamp = nsamp_valid + interval_starts = np.array(interval_starts, dtype=np.int64) all_dets = list(all_dets) ndet = len(all_dets) @@ -393,10 +481,13 @@ def _prepare(self, data, detectors): # Inspect the valid intervals across all observations to # determine the number of samples per detector - data.comm.comm_world.Barrier() - # if self._rank == 0: - # log.debug() - # timer.report_clear("Collect dataset dimensions") + data.comm.comm_world.barrier() + timer.stop() + if data.comm.world_rank == 0: + msg = "{} Compute data dimensions: {:0.1f} s".format( + self._logprefix, timer.seconds() + ) + log.debug(msg) return ( all_dets, @@ -429,9 +520,7 @@ def _stage_data( """ log = Logger.get() - - # Memory counting operator - mem_count = MemoryCounter(silent=True) + timer = Timer() nodecomm = data.comm.comm_world.Split_type( MPI.COMM_TYPE_SHARED, data.comm.world_rank @@ -446,242 +535,523 @@ def _stage_data( if self.copy_groups > 0: n_copy_groups = min(self.copy_groups, nodecomm.size) - # self._comm.Barrier() - # timer_tot = Timer() - # timer_tot.start() + if not self._cached: + # Only 
do this if we have not cached the data yet. + log_time_memory( + data, + prefix=self._logprefix, + mem_msg="Before staging", + full_mem=self.mem_report, + ) # Copy timestamps and PSDs all at once, since they are never purged. - timestamp_storage, _ = dtype_to_aligned(madam.TIMESTAMP_TYPE) - self._madam_timestamps_raw = timestamp_storage.zeros(nsamp) - self._madam_timestamps = self._madam_timestamps_raw.array() psds = dict() - interval = 0 - time_offset = 0.0 + timer.start() + + if not self._cached: + timestamp_storage, _ = dtype_to_aligned(madam.TIMESTAMP_TYPE) + self._madam_timestamps_raw = timestamp_storage.zeros(nsamp) + self._madam_timestamps = self._madam_timestamps_raw.array() - for ob in data.obs: - for vw in ob.view[self.view].shared[self.times]: - offset = interval_starts[interval] - slc = slice(offset, offset + len(vw), 1) - self._madam_timestamps[slc] = vw - if self.translate_timestamps: - off = self._madam_timestamps[offset] - time_offset - self._madam_timestamps[slc] -= off - time_offset = self._madam_timestamps[slc][-1] + 1.0 - interval += 1 - - # Get the noise object for this observation and create new - # entries in the dictionary when the PSD actually changes - nse = ob[self.noise_model] - nse_scale = 1.0 - if self.noise_scale is not None: - if self.noise_scale in ob: - nse_scale = float(ob[self.noise_scale]) - - for det in all_dets: - if det not in ob.local_detectors: - continue - psd = nse.psd(det) * nse_scale ** 2 - if det not in psds: - psds[det] = [(0.0, psd)] - else: - if not np.allclose(psds[det][-1][1], psd): - psds[det] += [(ob.shared[self.times][0], psd)] - - def copy_local(detdata_name, madam_dtype, dnnz, do_flags=False, do_purge=False): - """Helper function to create a madam buffer from a local detdata key.""" - storage, _ = dtype_to_aligned(madam_dtype) - n_all_det = len(all_dets) - raw = storage.zeros(nsamp * n_all_det) - wrapped = raw.array() interval = 0 + time_offset = 0.0 + for ob in data.obs: - for vw in ob.view[self.view].detdata[detdeta_name]: + for vw in ob.view[self.view].shared[self.times]: offset = interval_starts[interval] - flags = None - if do_flags: - if self.shared_flags is not None or self.det_flags is not None: - # Using flags - flags = np.zeros(len(vw), dtype=np.uint8) - if self.shared_flags is not None: - flags |= ( - ob.view[self.view].shared[self.shared_flags] - & self.shared_flag_mask - ) - - for idet, det in enumerate(all_dets): - if det not in ob.local_detectors: - continue - slc = slice( - (idet * nsamp + offset) * dnnz, - (idet * nsamp + offset + len(vw)) * dnnz, - 1, - ) - if dnnz > 1: - wrapped[slc] = vw[idet].flatten()[::nnz_stride] - else: - wrapped[slc] = vw[idet].flatten() - detflags = None - if do_flags: - if self.det_flags is None: - detflags = flags - else: - detflags = np.copy(flags) - detflags |= ( - ob.view[self.view].detdata[self.det_flags][idet] - & self.det_flag_mask - ) - # The do_flags option should only be true if we are - # processing the pixel indices (which is how madam - # effectively implements flagging). So we will set - # all flagged samples to "-1" - if detflags is not None: - # sanity check - if nnz != 1: - raise RuntimeError( - "Internal error on madam copy. Only pixel indices should be flagged." 
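The translate_timestamps logic above shifts every interval so the concatenated timeline is contiguous, with one second between consecutive intervals. A standalone sketch of the same loop:

import numpy as np

intervals = [np.array([100.0, 101.0, 102.0]),
             np.array([500.0, 501.0])]
out = np.zeros(sum(len(v) for v in intervals))

offset = 0
time_offset = 0.0
for vw in intervals:
    slc = slice(offset, offset + len(vw))
    out[slc] = vw
    off = out[offset] - time_offset      # shift so this interval starts
    out[slc] -= off                      # right after the previous one
    time_offset = out[slc][-1] + 1.0
    offset += len(vw)

print(out)   # [0. 1. 2. 3. 4.]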
- ) - wrapped[slc][detflags != 0] = -1 + slc = slice(offset, offset + len(vw), 1) + self._madam_timestamps[slc] = vw + if self.translate_timestamps: + off = self._madam_timestamps[offset] - time_offset + self._madam_timestamps[slc] -= off + time_offset = self._madam_timestamps[slc][-1] + 1.0 interval += 1 - if do_purge: - del ob.detdata[detdata_name] - return raw, wrapped - - def copy_in_turns(detdata_name, madam_dtype, dnnz, do_flags): - """When purging data, take turns copying it.""" - raw = None - wrapped = None - for copying in range(n_copy_groups): - if nodecomm.rank % n_copy_groups == copying: - # Our turn to copy data - raw, wrapped = copy_local( - detdata_name, - madam_dtype, - dnnz, - do_flags=do_flags, - do_purge=True, - ) - nodecomm.barrier() - return raw, wrapped - # Copy the signal + # Get the noise object for this observation and create new + # entries in the dictionary when the PSD actually changes. The detector + # weights are obtained from the noise model. + + nse = ob[self.noise_model] + nse_scale = 1.0 + if self.noise_scale is not None: + if self.noise_scale in ob: + nse_scale = float(ob[self.noise_scale]) + + for det in all_dets: + if det not in ob.local_detectors: + continue + psd = nse.psd(det) * nse_scale ** 2 + detw = nse.detector_weight(det) + if det not in psds: + psds[det] = [(0.0, psd, detw)] + else: + if not np.allclose(psds[det][-1][1], psd): + psds[det] += [(ob.shared[self.times][0], psd, detw)] + + log_time_memory( + data, + timer=timer, + timer_msg="Copy timestamps and PSDs", + prefix=self._logprefix, + mem_msg="After timestamp staging", + full_mem=self.mem_report, + ) + + # Copy the signal. We always need to do this, even if we are running MCs. - if self.purge_det_data: - self._madam_signal_raw, self._madam_signal = copy_in_turns( - self.det_data, madam.SIGNAL_TYPE, 1, do_flags=False + signal_dtype = data.obs[0].detdata[self.det_data].dtype + + if self._cached: + # We have previously created the madam buffers. We just need to fill + # them from the toast data. Since both already exist we just copy the + # contents. + stage_local( + data, + nsamp, + self.view, + all_dets, + self.det_data, + self._madam_signal, + interval_starts, + 1, + 1, + None, + None, + None, + None, + do_purge=False, ) else: - self._madam_signal_raw, self._madam_signal = copy_local( - self.det_data, madam.SIGNAL_TYPE, 1, do_flags=False, do_purge=False - ) + # Signal buffers do not yet exist + if self.purge_det_data: + # Allocate in a staggered way. + self._madam_signal_raw, self._madam_signal = stage_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + self.view, + all_dets, + self.det_data, + madam.SIGNAL_TYPE, + interval_starts, + 1, + 1, + None, + None, + None, + None, + ) + else: + # Allocate and copy all at once. 
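The staggered copy above exists to bound peak memory per node: only one subset of the node's processes allocates and copies at a time. A bare sketch of the pattern with mpi4py:

from mpi4py import MPI

comm = MPI.COMM_WORLD
# Processes sharing a node, as in the staging code above.
nodecomm = comm.Split_type(MPI.COMM_TYPE_SHARED, comm.rank)

n_copy_groups = 2
for copying in range(n_copy_groups):
    if nodecomm.rank % n_copy_groups == copying:
        # Our turn: allocate the destination buffer, copy, purge the source.
        pass
    nodecomm.barrier()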
+ storage, _ = dtype_to_aligned(madam.SIGNAL_TYPE) + self._madam_signal_raw = storage.zeros(nsamp * len(all_dets)) + self._madam_signal = self._madam_signal_raw.array() + + stage_local( + data, + nsamp, + self.view, + all_dets, + self.det_data, + self._madam_signal, + interval_starts, + 1, + 1, + None, + None, + None, + None, + do_purge=False, + ) + + log_time_memory( + data, + timer=timer, + timer_msg="Copy signal", + prefix=self._logprefix, + mem_msg="After signal staging", + full_mem=self.mem_report, + ) # Copy the pointing - if self.purge_pointing: - self._madam_pixels_raw, self._madam_pixels = copy_in_turns( - self.pixels, madam.PIXEL_TYPE, 1, do_flags=True + pixels_dtype = data.obs[0].detdata[self.pixels].dtype + weight_dtype = data.obs[0].detdata[self.weights].dtype + + if not self._cached: + # We do not have the pointing yet. + if self.purge_pointing: + # Allocate in a staggered way. + self._madam_pixels_raw, self._madam_pixels = stage_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + self.view, + all_dets, + self.pixels, + madam.PIXEL_TYPE, + interval_starts, + 1, + 1, + self.shared_flags, + self.shared_flag_mask, + self.det_flags, + self.det_flag_mask, + ) + + self._madam_pixweights_raw, self._madam_pixweights = stage_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + self.view, + all_dets, + self.weights, + madam.WEIGHT_TYPE, + interval_starts, + nnz, + nnz_stride, + None, + None, + None, + None, + ) + else: + # Allocate and copy all at once. + storage, _ = dtype_to_aligned(madam.PIXEL_TYPE) + self._madam_pixels_raw = storage.zeros(nsamp * len(all_dets)) + self._madam_pixels = self._madam_pixels_raw.array() + + stage_local( + data, + nsamp, + self.view, + all_dets, + self.pixels, + self._madam_pixels, + interval_starts, + 1, + 1, + self.shared_flags, + self.shared_flag_mask, + self.det_flags, + self.det_flag_mask, + do_purge=False, + ) + + storage, _ = dtype_to_aligned(madam.WEIGHT_TYPE) + self._madam_pixweights_raw = storage.zeros(nsamp * len(all_dets) * nnz) + self._madam_pixweights = self._madam_pixweights_raw.array() + + stage_local( + data, + nsamp, + self.view, + all_dets, + self.weights, + self._madam_pixweights, + interval_starts, + nnz, + nnz_stride, + None, + None, + None, + None, + do_purge=False, + ) + + log_time_memory( + data, + timer=timer, + timer_msg="Copy pointing", + prefix=self._logprefix, + mem_msg="After pointing staging", + full_mem=self.mem_report, ) - self._madam_weights_raw, self._madam_weights = copy_in_turns( - self.weights, madam.WEIGHT_TYPE, nnz, do_flags=False + + psdinfo = None + + if not self._cached: + # Detectors weights. Madam assumes a single noise weight for each detector + # that is constant. We set this based on the first observation or else use + # uniform weighting. 
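The PSD bookkeeping that follows flattens per-detector lists of (start, psd, weight) tuples into the flat arrays the destriper expects. A sketch with invented detectors and trivial PSDs:

import numpy as np

psds = {
    "d00": [(0.0, np.ones(10), 1.0)],
    "d01": [(0.0, np.ones(10), 2.0), (50.0, 2.0 * np.ones(10), 2.0)],
}
dets = sorted(psds.keys())

npsd = np.array([len(psds[d]) for d in dets], dtype=np.int64)
psdstarts = np.array([s for d in dets for (s, _, _) in psds[d]])
psdvals = np.hstack([p for d in dets for (_, p, _) in psds[d]])
detweights = np.array([psds[d][0][2] for d in dets])  # first-PSD weight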
+ + ndet = len(all_dets) + detweights = np.ones(ndet, dtype=np.float64) + + if len(psds) > 0: + npsdbin = len(psd_freqs) + npsd = np.zeros(ndet, dtype=np.int64) + psdstarts = [] + psdvals = [] + for idet, det in enumerate(all_dets): + if det not in psds: + raise RuntimeError( + "Every detector must have at least " "one PSD" + ) + psdlist = psds[det] + npsd[idet] = len(psdlist) + for psdstart, psd, detw in psdlist: + psdstarts.append(psdstart) + psdvals.append(psd) + detweights[idet] = psdlist[0][2] + npsdtot = np.sum(npsd) + psdstarts = np.array(psdstarts, dtype=np.float64) + psdvals = np.hstack(psdvals).astype(madam.PSD_TYPE) + npsdval = psdvals.size + else: + # Uniform weighting + npsd = np.ones(ndet, dtype=np.int64) + npsdtot = np.sum(npsd) + psdstarts = np.zeros(npsdtot) + npsdbin = 10 + fsample = 10.0 + psd_freqs = np.arange(npsdbin) * fsample / npsdbin + npsdval = npsdbin * npsdtot + psdvals = np.ones(npsdval) + + psdinfo = (detweights, npsd, psdstarts, psd_freqs, psdvals) + + log_time_memory( + data, + timer=timer, + timer_msg="Collect PSD info", + prefix=self._logprefix, ) - else: - self._madam_pixels_raw, self._madam_pixels = copy_local( - self.pixels, madam.PIXEL_TYPE, 1, do_flags=True, do_purge=False + timer.stop() + del nodecomm + + return psdinfo, signal_dtype, pixels_dtype, weight_dtype + + @function_timer + def _unstage_data( + self, + data, + all_dets, + nsamp, + nnz, + nnz_full, + interval_starts, + signal_dtype, + pixels_dtype, + nside, + weight_dtype, + ): + """ + Restore data to TOAST observations. + + Optionally copy the signal and pointing back to TOAST if we previously + purged it to save memory. Also copy the destriped timestreams if desired. + + """ + log = Logger.get() + timer = Timer() + + nodecomm = data.comm.comm_world.Split_type( + MPI.COMM_TYPE_SHARED, data.comm.world_rank + ) + + # Determine how many processes per node should copy at once. + n_copy_groups = 1 + if self.purge_det_data or self.purge_pointing: + # We MAY be restoring some data- see if we should reduce the number of + # processes copying in parallel (if we are not purging data, there + # is no benefit to staggering the copy). + if self.copy_groups > 0: + n_copy_groups = min(self.copy_groups, nodecomm.size) + + log_time_memory( + data, + prefix=self._logprefix, + mem_msg="Before un-staging", + full_mem=self.mem_report, + ) + + # Copy the signal + + timer.start() + + out_name = self.det_data + if self.det_out is not None: + out_name = self.det_out + + if self.det_out is not None or (self.purge_det_data and self.restore_det_data): + # We are copying some kind of signal back + if not self.mcmode: + # We are not running multiple realizations, so delete as we copy. + restore_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + self.view, + all_dets, + out_name, + signal_dtype, + self._madam_signal, + self._madam_signal_raw, + interval_starts, + 1, + 0, + True, + ) + del self._madam_signal + del self._madam_signal_raw + else: + # We want to re-use the signal buffer, just copy. 
+ restore_local( + data, + nsamp, + self.view, + all_dets, + out_name, + signal_dtype, + self._madam_signal, + interval_starts, + 1, + 0, + True, + ) + + log_time_memory( + data, + timer=timer, + timer_msg="Copy signal", + prefix=self._logprefix, + mem_msg="After restoring signal", + full_mem=self.mem_report, ) - self._madam_weights_raw, self._madam_weights = copy_local( - self.weights, madam.WEIGHT_TYPE, nnz, do_flags=False, do_purge=False + + # Copy the pointing + + if self.purge_pointing and self.restore_pointing: + # We previously purged it AND we want it back. + if not self.mcmode: + # We are not running multiple realizations, so delete as we copy. + restore_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + self.view, + all_dets, + self.pixels, + pixels_dtype, + self._madam_pixels, + self._madam_pixels_raw, + interval_starts, + 1, + nside, + self.pixels_nested, + ) + del self._madam_pixels + del self._madam_pixels_raw + restore_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + self.view, + all_dets, + self.weights, + weigths_dtype, + self._madam_pixweights, + self._madam_pixweights_raw, + interval_starts, + nnz, + 0, + True, + ) + del self._madam_pixweights + del self._madam_pixweights_raw + else: + # We want to re-use the pointing, just copy. + restore_local( + data, + nsamp, + self.view, + all_dets, + self.pixels, + pixels_dtype, + self._madam_pixels, + interval_starts, + 1, + nside, + self.pixels_nested, + ) + restore_local( + data, + nsamp, + self.view, + all_dets, + self.weights, + weight_dtype, + self._madam_pixweights, + interval_starts, + nnz, + 0, + True, + ) + + log_time_memory( + data, + timer=timer, + timer_msg="Copy pointing", + prefix=self._logprefix, + mem_msg="After restoring pointing", + full_mem=self.mem_report, ) - # Madam uses constant detector weights? - - # # detweights is either a dictionary of weights specified at - # # construction time, or else we use uniform weighting. 
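When the buffered pixels came from a RING-ordered pointing expansion, restoring them requires converting back from the NESTED indices used internally, skipping flagged (-1) entries. A sketch of that conversion with healpy:

import numpy as np
import healpy as hp

nside = 16
npix = 12 * nside ** 2
buf = np.array([-1, 0, 5, npix - 1], dtype=np.int64)  # madam-style pixels

restored = -1 * np.ones(len(buf), dtype=np.int64)
good = np.logical_and(buf >= 0, buf < npix)
restored[good] = hp.nest2ring(nside, buf[good])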
- # detw = {} - # if self._detw is None: - # for idet, det in enumerate(detectors): - # detw[det] = 1.0 - # else: - # detw = self._detw - # - # detweights = np.zeros(ndet, dtype=np.float64) - # for idet, det in enumerate(detectors): - # detweights[idet] = detw[det] - # - # if len(psds) > 0: - # npsdbin = len(psdfreqs) - # - # npsd = np.zeros(ndet, dtype=np.int64) - # psdstarts = [] - # psdvals = [] - # for idet, det in enumerate(detectors): - # if det not in psds: - # raise RuntimeError("Every detector must have at least " "one PSD") - # psdlist = psds[det] - # npsd[idet] = len(psdlist) - # for psdstart, psd in psdlist: - # psdstarts.append(psdstart) - # psdvals.append(psd) - # npsdtot = np.sum(npsd) - # psdstarts = np.array(psdstarts, dtype=np.float64) - # psdvals = np.hstack(psdvals).astype(madam.PSD_TYPE) - # npsdval = psdvals.size - # else: - # npsd = np.ones(ndet, dtype=np.int64) - # npsdtot = np.sum(npsd) - # psdstarts = np.zeros(npsdtot) - # npsdbin = 10 - # fsample = 10.0 - # psdfreqs = np.arange(npsdbin) * fsample / npsdbin - # npsdval = npsdbin * npsdtot - # psdvals = np.ones(npsdval) - # psdinfo = (detweights, npsd, psdstarts, psdfreqs, psdvals) - # if self._rank == 0 and self._verbose: - # timer_tot.report_clear("Collect PSD info") - # return psdinfo, signal_dtype, pixels_dtype, weight_dtype - - # def _unstage_data(self): - # pass - # - # @function_timer - # def _destripe(self, pars, dets, periods, psdinfo): - # """Destripe the buffered data""" - # if self._verbose: - # memreport("just before calling libmadam.destripe", self._comm) - # if self._cached: - # # destripe - # outpath = "" - # if "path_output" in self.params: - # outpath = self.params["path_output"] - # outpath = outpath.encode("ascii") - # madam.destripe_with_cache( - # self._comm, - # self._madam_timestamps, - # self._madam_pixels, - # self._madam_pixweights, - # self._madam_signal, - # outpath, - # ) - # else: - # (detweights, npsd, psdstarts, psdfreqs, psdvals) = psdinfo - # - # # destripe - # madam.destripe( - # self._comm, - # pars, - # dets, - # detweights, - # self._madam_timestamps, - # self._madam_pixels, - # self._madam_pixweights, - # self._madam_signal, - # periods, - # npsd, - # psdstarts, - # psdfreqs, - # psdvals, - # ) - # - # if self._mcmode: - # self._cached = True - # return + del nodecomm + return + + @function_timer + def _destripe(self, data, dets, interval_starts, psdinfo): + """Destripe the buffered data""" + log_time_memory( + data, + prefix=self._logprefix, + mem_msg="Just before libmadam.destripe", + full_mem=self.mem_report, + ) + + if self._cached: + # destripe + outpath = "" + if "path_output" in self.params: + outpath = self.params["path_output"] + outpath = outpath.encode("ascii") + madam.destripe_with_cache( + data.comm.comm_world, + self._madam_timestamps, + self._madam_pixels, + self._madam_pixweights, + self._madam_signal, + outpath, + ) + else: + (detweights, npsd, psdstarts, psd_freqs, psdvals) = psdinfo + + # destripe + madam.destripe( + data.comm.comm_world, + self.params, + dets, + detweights, + self._madam_timestamps, + self._madam_pixels, + self._madam_pixweights, + self._madam_signal, + interval_starts, + npsd, + psdstarts, + psd_freqs, + psdvals, + ) + if self.mcmode: + self._cached = True + return diff --git a/src/toast/ops/madam_utils.py b/src/toast/ops/madam_utils.py new file mode 100644 index 000000000..d361cd475 --- /dev/null +++ b/src/toast/ops/madam_utils.py @@ -0,0 +1,259 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. 
+# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np + +from ..utils import Logger, Timer, GlobalTimers, dtype_to_aligned + +from ..timing import function_timer + +from .memory_counter import MemoryCounter + + +def log_time_memory( + data, timer=None, timer_msg=None, mem_msg=None, full_mem=False, prefix="" +): + log = Logger.get() + data.comm.comm_world.barrier() + restart = False + + if timer is not None: + if timer.is_running(): + timer.stop() + restart = True + + if data.comm.world_rank == 0: + msg = "{} {}: {:0.1f} s".format(prefix, timer_msg, timer.seconds()) + log.debug(msg) + + if mem_msg is not None: + # Dump toast memory use + mem_count = MemoryCounter(silent=True) + mem_count.total_bytes = 0 + toast_bytes = mem_count.apply(data) + + if data.comm.group_rank == 0: + msg = "{} {} Group {} memory = {:0.2f} GB".format( + prefix, mem_msg, data.comm.group, toast_bytes / 1024 ** 2 + ) + log.debug(msg) + if full_mem: + memreport(msg="{} {}".format(prefix, mem_msg), comm=comm) + if restart: + timer.start() + + +def stage_local( + data, + nsamp, + view, + dets, + detdata_name, + madam_buffer, + interval_starts, + nnz, + nnz_stride, + shared_flags, + shared_mask, + det_flags, + det_mask, + do_purge=False, +): + """Helper function to fill a madam buffer from a local detdata key.""" + n_det = len(dets) + interval = 0 + do_flags = False + if shared_flags is not None or det_flags is not None: + do_flags = True + # Flagging should only be enabled when we are processing the pixel indices + # (which is how madam effectively implements flagging). So we will set + # all flagged samples to "-1" below. + if nnz != 1: + raise RuntimeError( + "Internal error on madam copy. Only pixel indices should be flagged." 
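stage_local combines shared and per-detector flags under their respective masks and marks flagged samples by setting the pixel index to -1, as described above. A compact sketch of that flag logic in isolation:

import numpy as np

shared_flags = np.array([0, 1, 0, 2], dtype=np.uint8)
det_flags = np.array([0, 0, 4, 0], dtype=np.uint8)
shared_mask, det_mask = 1, 255

pixels = np.array([10, 11, 12, 13], dtype=np.int64)
flags = (shared_flags & shared_mask) | (det_flags & det_mask)
pixels[flags != 0] = -1
print(pixels)   # [10 -1 -1 13]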
+ ) + for ob in data.obs: + for vw in ob.view[view].detdata[detdata_name]: + offset = interval_starts[interval] + flags = None + if do_flags: + # Using flags + flags = np.zeros(len(vw), dtype=np.uint8) + if shared_flags is not None: + flags |= ob.view[view].shared[shared_flags] & shared_mask + + for idet, det in enumerate(dets): + if det not in ob.local_detectors: + continue + slc = slice( + (idet * nsamp + offset) * nnz, + (idet * nsamp + offset + len(vw[idet])) * nnz, + 1, + ) + if nnz > 1: + madam_buffer[slc] = vw[idet].flatten()[::nnz_stride] + else: + madam_buffer[slc] = vw[idet].flatten() + detflags = None + if do_flags: + if det_flags is None: + detflags = flags + else: + detflags = np.copy(flags) + detflags |= ob.view[view].detdata[det_flags][idet] & det_mask + madam_buffer[slc][detflags != 0] = -1 + interval += 1 + if do_purge: + del ob.detdata[detdata_name] + return + + +def stage_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + view, + dets, + detdata_name, + madam_dtype, + interval_starts, + nnz, + nnz_stride, + shared_flags, + shared_mask, + det_flags, + det_mask, +): + """When purging data, take turns staging it.""" + raw = None + wrapped = None + for copying in range(n_copy_groups): + if nodecomm.rank % n_copy_groups == copying: + # Our turn to copy data + storage, _ = dtype_to_aligned(madam_dtype) + print("Allocate local of len ", nsamp * len(dets) * nnz) + raw = storage.zeros(nsamp * len(dets) * nnz) + wrapped = raw.array() + stage_local( + data, + nsamp, + view, + dets, + detdata_name, + wrapped, + interval_starts, + nnz, + nnz_stride, + shared_flags, + shared_mask, + det_flags, + det_mask, + do_purge=True, + ) + nodecomm.barrier() + return raw, wrapped + + +def restore_local( + data, + nsamp, + view, + dets, + detdata_name, + detdata_dtype, + madam_buffer, + interval_starts, + nnz, + nside, + nest, +): + """Helper function to create a detdata buffer from madam data.""" + n_det = len(dets) + interval = 0 + for ob in data.obs: + # Create the detector data + if nnz == 1: + ob.detdata.create(detdata_name, dtype=detdata_dtype) + else: + ob.detdata.create(detdata_name, dtype=detdata_dtype, detshape=(nnz,)) + print("Created detdata {} = {}".format(detdata_name, ob.detdata[detdata_name])) + print("madam buffer has shape = ", madam_buffer.shape) + for vw in ob.view[view].detdata[detdata_name]: + print("copying view {}".format(vw)) + offset = interval_starts[interval] + for idet, det in enumerate(dets): + if det not in ob.local_detectors: + continue + slc = slice( + (idet * nsamp + offset) * nnz, + (idet * nsamp + offset + len(vw[idet])) * nnz, + 1, + ) + print("vw[idet].shape = ", vw[idet].shape) + print( + "idet = {}, nsamp = {}, offset = {} len = {}, nnz = {}".format( + idet, nsamp, offset, len(vw[idet]), nnz + ) + ) + print(slc) + print("madam[slc].shape = ", madam_buffer[slc].shape) + if nnz > 1: + vw[idet] = madam_buffer[slc].reshape((-1, nnz)) + else: + # If this is the pointing pixel indices, AND if the original was + # in RING ordering, then make a temporary array to do the conversion + if nside > 0 and not nest: + temp_pixels = -1 * np.ones(len(vw[idet]), dtype=detdata_dtype) + npix = 12 * nside ** 2 + good = np.logical_and( + madam_buffer[slc] >= 0, madam_buffer[slc] < npix + ) + temp_pixels[good] = madam_buffer[slc][good] + temp_pixels[good] = hp.nest2ring(nside, temp_pixels[good]) + vw[idet] = temp_pixels + else: + vw[idet] = madam_buffer[slc] + interval += 1 + return + + +def restore_in_turns( + data, + nodecomm, + n_copy_groups, + nsamp, + view, + dets, + 
detdata_name, + detdata_dtype, + madam_buffer, + madam_buffer_raw, + interval_starts, + nnz, + nside, + nest, +): + """When restoring data, take turns copying it.""" + for copying in range(n_copy_groups): + if nodecomm.rank % n_copy_groups == copying: + # Our turn to copy data + restore_local( + data, + nsamp, + view, + dets, + detdata_name, + detdata_dtype, + madam_buffer, + interval_starts, + nnz, + nside, + nest, + ) + madam_buffer_raw.clear() + nodecomm.barrier() + return diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index 0b6af7844..89308749f 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -285,11 +285,11 @@ def _exec(self, data, detectors=None, **kwargs): noise = ob[self.noise_model] - for det in dets: - # The pixels and weights for this detector. - pix = ob.detdata[self.pixels] - wts = ob.detdata[self.weights] + # The pixels and weights for this detector. + pix = ob.detdata[self.pixels] + wts = ob.detdata[self.weights] + for det in dets: # We require that the pointing matrix has the same number of # non-zero elements for every detector and every observation. # We check that here, and if this is the first observation and @@ -488,10 +488,12 @@ def _exec(self, data, detectors=None, **kwargs): noise = ob[self.noise_model] + # The pixels and weights. + pix = ob.detdata[self.pixels] + wts = ob.detdata[self.weights] + for det in dets: - # The pixels and weights for this detector. - pix = ob.detdata[self.pixels] - wts = ob.detdata[self.weights] + # Data for this detector ddata = ob.detdata[self.det_data][det] # We require that the pointing matrix has the same number of diff --git a/src/toast/pixels_io.py b/src/toast/pixels_io.py index e27a55446..6517ecd8e 100644 --- a/src/toast/pixels_io.py +++ b/src/toast/pixels_io.py @@ -215,7 +215,7 @@ def write_healpix_fits(pix, path, nest=True, comm_bytes=10000000): if global_offset + n_copy > dist.n_pix: n_copy = dist.n_pix - global_offset for col in range(pix.n_value): - fdata[col][global_offset : global_offset + n_copy] = pix.data[ + fview[col][global_offset : global_offset + n_copy] = pix.data[ lc, 0:n_copy, col ] else: @@ -253,7 +253,7 @@ def write_healpix_fits(pix, path, nest=True, comm_bytes=10000000): if global_offset + n_copy > dist.n_pix: n_copy = dist.n_pix - global_offset for col in range(pix.n_value): - fdata[col][ + fview[col][ global_offset : global_offset + n_copy ] = recvview[c, 0:n_copy, col] sendbuf.fill(0) diff --git a/src/toast/tests/ops_madam.py b/src/toast/tests/ops_madam.py index 42683869d..0e28d1933 100644 --- a/src/toast/tests/ops_madam.py +++ b/src/toast/tests/ops_madam.py @@ -15,6 +15,8 @@ from .. 
import ops as ops +from ..vis import set_matplotlib_backend + from ..pixels import PixelDistribution, PixelData from ._helpers import create_outdir, create_satellite_data @@ -30,34 +32,12 @@ def create_fake_sky(self, data, dist_key, map_key): dist = data[dist_key] pix_data = PixelData(dist, np.float64, n_value=3) # Just replicate the fake data across all local submaps - pix_data.data[:, :, 0] = 100.0 * np.random.uniform(size=dist.n_pix_submap) - pix_data.data[:, :, 1] = np.random.uniform(size=dist.n_pix_submap) - pix_data.data[:, :, 2] = np.random.uniform(size=dist.n_pix_submap) + pix_data.data[:, :, 0] = 100.0 + pix_data.data[:, :, 1] = 0.1 + pix_data.data[:, :, 2] = 0.1 data[map_key] = pix_data - def test_scan(self): - # Create a fake satellite data set for testing - data = create_satellite_data(self.comm) - - # Create some detector pointing matrices - pointing = ops.PointingHealpix( - nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" - ) - pointing.apply(data) - - # Create fake polarized sky pixel values locally - self.create_fake_sky(data, "pixel_dist", "fake_map") - - # Scan map into timestreams - scanner = ops.ScanMap( - det_data="signal", - pixels=pointing.pixels, - weights=pointing.weights, - map_key="fake_map", - ) - scanner.apply(data) - - def test_madam_output(self): + def test_madam_det_out(self): if not ops.Madam.available: print("libmadam not available, skipping tests") return @@ -83,6 +63,34 @@ def test_madam_output(self): ) scanner.apply(data) + # if data.comm.world_rank == 0: + # set_matplotlib_backend() + # import matplotlib.pyplot as plt + # + # ob = data.obs[0] + # det = ob.local_detectors[0] + # xdata = ob.shared["times"].data + # ydata = ob.detdata["signal"][det] + # + # fig = plt.figure(figsize=(12, 8), dpi=72) + # ax = fig.add_subplot(1, 1, 1, aspect="auto") + # ax.plot( + # xdata, + # ydata, + # marker="o", + # c="red", + # label="{}, {}".format(ob.name, det), + # ) + # # cur_ylim = ax.get_ylim() + # # ax.set_ylim([0.001 * (nse.NET(det) ** 2), 10.0 * cur_ylim[1]]) + # ax.legend(loc=1) + # plt.title("Sky Signal") + # savefile = os.path.join( + # self.outdir, "signal_sky_{}_{}.pdf".format(ob.name, det) + # ) + # plt.savefig(savefile) + # plt.close() + # Create an uncorrelated noise model from focalplane detector properties default_model = ops.DefaultNoiseModel(noise_model="noise_model") default_model.apply(data) @@ -91,6 +99,72 @@ def test_madam_output(self): sim_noise = ops.SimNoise(noise_model="noise_model", out="signal") sim_noise.apply(data) + # if data.comm.world_rank == 0: + # set_matplotlib_backend() + # import matplotlib.pyplot as plt + # + # ob = data.obs[0] + # det = ob.local_detectors[0] + # xdata = ob.shared["times"].data + # ydata = ob.detdata["signal"][det] + # + # fig = plt.figure(figsize=(12, 8), dpi=72) + # ax = fig.add_subplot(1, 1, 1, aspect="auto") + # ax.plot( + # xdata, + # ydata, + # marker="o", + # c="red", + # label="{}, {}".format(ob.name, det), + # ) + # # cur_ylim = ax.get_ylim() + # # ax.set_ylim([0.001 * (nse.NET(det) ** 2), 10.0 * cur_ylim[1]]) + # ax.legend(loc=1) + # plt.title("Sky + Noise Signal") + # savefile = os.path.join( + # self.outdir, "signal_sky-noise_{}_{}.pdf".format(ob.name, det) + # ) + # plt.savefig(savefile) + # plt.close() + + # Compute timestream rms + + rms = dict() + for ob in data.obs: + rms[ob.name] = dict() + for det in ob.local_detectors: + # Add an offset to the data + ob.detdata["signal"][det] += 500.0 + rms[ob.name][det] = np.std(ob.detdata["signal"][det]) + + # if data.comm.world_rank == 
0: + # set_matplotlib_backend() + # import matplotlib.pyplot as plt + # + # ob = data.obs[0] + # det = ob.local_detectors[0] + # xdata = ob.shared["times"].data + # ydata = ob.detdata["signal"][det] + # + # fig = plt.figure(figsize=(12, 8), dpi=72) + # ax = fig.add_subplot(1, 1, 1, aspect="auto") + # ax.plot( + # xdata, + # ydata, + # marker="o", + # c="red", + # label="{}, {}".format(ob.name, det), + # ) + # # cur_ylim = ax.get_ylim() + # # ax.set_ylim([0.001 * (nse.NET(det) ** 2), 10.0 * cur_ylim[1]]) + # ax.legend(loc=1) + # plt.title("Sky + Noise + Offset Signal") + # savefile = os.path.join( + # self.outdir, "signal_sky-noise-offset_{}_{}.pdf".format(ob.name, det) + # ) + # plt.savefig(savefile) + # plt.close() + # Run madam on this # Madam assumes constant sample rate- just get it from the noise model for @@ -100,19 +174,19 @@ def test_madam_output(self): pars = {} pars["kfirst"] = "T" pars["iter_max"] = 100 - pars["base_first"] = 5.0 + pars["base_first"] = 300.0 pars["fsample"] = sample_rate pars["nside_map"] = pointing.nside pars["nside_cross"] = pointing.nside pars["nside_submap"] = min(8, pointing.nside) - pars["write_map"] = "F" + pars["write_map"] = "T" pars["write_binmap"] = "T" pars["write_matrix"] = "F" pars["write_wcov"] = "F" pars["write_hits"] = "T" pars["kfilter"] = "F" pars["path_output"] = self.outdir - pars["info"] = 0 + pars["info"] = 2 # FIXME: add a view here once our test data includes it @@ -130,10 +204,38 @@ def test_madam_output(self): ) madam.apply(data) + # if data.comm.world_rank == 0: + # set_matplotlib_backend() + # import matplotlib.pyplot as plt + # + # ob = data.obs[0] + # det = ob.local_detectors[0] + # xdata = ob.shared["times"].data + # ydata = ob.detdata["destriped"][det] + # + # fig = plt.figure(figsize=(12, 8), dpi=72) + # ax = fig.add_subplot(1, 1, 1, aspect="auto") + # ax.plot( + # xdata, + # ydata, + # marker="o", + # c="red", + # label="{}, {}".format(ob.name, det), + # ) + # # cur_ylim = ax.get_ylim() + # # ax.set_ylim([0.001 * (nse.NET(det) ** 2), 10.0 * cur_ylim[1]]) + # ax.legend(loc=1) + # plt.title("Destriped Signal") + # savefile = os.path.join( + # self.outdir, "signal_destriped_{}_{}.pdf".format(ob.name, det) + # ) + # plt.savefig(savefile) + # plt.close() + for ob in data.obs: for det in ob.local_detectors: - # Do some check... - pass + check_rms = np.std(ob.detdata["destriped"][det]) + self.assertTrue(0.9 * check_rms < rms[ob.name][det]) del data return diff --git a/src/toast/tests/ops_mapmaker_binning.py b/src/toast/tests/ops_mapmaker_binning.py index 7007de4f8..4e8434008 100644 --- a/src/toast/tests/ops_mapmaker_binning.py +++ b/src/toast/tests/ops_mapmaker_binning.py @@ -9,14 +9,20 @@ from astropy import units as u +import healpy as hp + from .mpi import MPITestCase from ..noise import Noise +from ..vis import set_matplotlib_backend + from .. 
import ops as ops from ..pixels import PixelDistribution, PixelData +from ..pixels_io import write_healpix_fits + from ._helpers import create_outdir, create_satellite_data @@ -58,7 +64,7 @@ def test_binned(self): ) binner.apply(data) - binmap = binner.binned + binmap = data[binner.binned] # # Manual check # @@ -106,3 +112,140 @@ def test_binned(self): # ) del data return + + def test_compare_madam(self): + if not ops.Madam.available: + print("libmadam not available, skipping binned map comparison") + return + + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise + sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + sim_noise.apply(data) + + # Pointing operator + pointing = ops.PointingHealpix( + nside=64, mode="IQU", nest=True, hwp_angle="hwp_angle" + ) + + # Build the covariance and hits + cov_and_hits = ops.CovarianceAndHits( + pixel_dist="pixel_dist", pointing=pointing, noise_model="noise_model" + ) + cov_and_hits.apply(data) + + # Set up binned map + + binner = ops.BinMap( + pixel_dist="pixel_dist", + covariance=cov_and_hits.covariance, + det_data="noise", + pointing=pointing, + noise_model="noise_model", + ) + binner.apply(data) + + # Write binned map to disk so we can load the whole thing on one process. + + toast_hit_path = os.path.join(self.outdir, "toast_hits.fits") + toast_bin_path = os.path.join(self.outdir, "toast_bin.fits") + toast_cov_path = os.path.join(self.outdir, "toast_cov.fits") + write_healpix_fits(data[binner.binned], toast_bin_path, nest=True) + write_healpix_fits(data[cov_and_hits.hits], toast_hit_path, nest=True) + write_healpix_fits(data[cov_and_hits.covariance], toast_cov_path, nest=True) + + # Now run Madam on the same data and compare + + sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + pars = {} + pars["kfirst"] = "F" + pars["iter_max"] = 10 + pars["base_first"] = 1.0 + pars["fsample"] = sample_rate + pars["nside_map"] = pointing.nside + pars["nside_cross"] = pointing.nside + pars["nside_submap"] = min(8, pointing.nside) + pars["write_map"] = "F" + pars["write_binmap"] = "T" + pars["write_matrix"] = "F" + pars["write_wcov"] = "F" + pars["write_hits"] = "T" + pars["kfilter"] = "F" + pars["path_output"] = self.outdir + pars["info"] = 0 + + madam = ops.Madam( + params=pars, + det_data="noise", + pixels=pointing.pixels, + weights=pointing.weights, + pixels_nested=pointing.nest, + noise_model="noise_model", + ) + + # Generate persistent pointing + pointing.apply(data) + + # Run Madam + madam.apply(data) + + madam_hit_path = os.path.join(self.outdir, "madam_hmap.fits") + madam_bin_path = os.path.join(self.outdir, "madam_bmap.fits") + + if data.comm.world_rank == 0: + set_matplotlib_backend() + import matplotlib.pyplot as plt + + # Compare hit maps + + toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) + madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) + diff_hits = toast_hits - madam_hits + + outfile = os.path.join(self.outdir, "madam_hits.png") + hp.mollview(madam_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(self.outdir, "toast_hits.png") + hp.mollview(toast_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(self.outdir, "diff_hits.png") + hp.mollview(diff_hits, xsize=1600, 
nest=True) + plt.savefig(outfile) + plt.close() + + # Compare binned maps + + toast_bin = hp.read_map(toast_bin_path, field=None, nest=True) + madam_bin = hp.read_map(madam_bin_path, field=None, nest=True) + # Set madam unhit pixels to zero + for stokes, ststr in zip(range(3), ["I", "Q", "U"]): + mask = hp.mask_bad(madam_bin[stokes]) + madam_bin[stokes][mask] = 0.0 + diff_map = toast_bin[stokes] - madam_bin[stokes] + print("diff map {} has rms {}".format(ststr, np.std(diff_map))) + outfile = os.path.join(self.outdir, "madam_bin_{}.png".format(ststr)) + hp.mollview(madam_bin[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(self.outdir, "toast_bin_{}.png".format(ststr)) + hp.mollview(toast_bin[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(self.outdir, "diff_bin_{}.png".format(ststr)) + hp.mollview(diff_map, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + nt.assert_almost_equal(toast_bin[stokes], madam_bin[stokes], decimal=6) + + del data + return diff --git a/src/toast/tests/pixels.py b/src/toast/tests/pixels.py index 5b00df973..88903eeb0 100644 --- a/src/toast/tests/pixels.py +++ b/src/toast/tests/pixels.py @@ -10,6 +10,8 @@ import numpy.testing as nt +import healpy as hp + from ..pixels import PixelDistribution, PixelData from .. import pixels_io as io @@ -152,14 +154,64 @@ def test_io(self): dist = self._make_pixdist(nside, nsb, self.comm) for tp in self.fitstypes: pdata = self._make_pixdata(dist, tp, 2) - pdata = PixelData(dist, tp, n_value=2) + pdata = PixelData(dist, tp, n_value=6) fitsfile = os.path.join( self.outdir, "data_N{}_sub{}_type-{}.fits".format( nside, nsb, np.dtype(tp).char ), ) - io.write_healpix_fits(pdata, fitsfile) - check = PixelData(dist, tp, n_value=2) - io.read_healpix_fits(check, fitsfile) + io.write_healpix_fits(pdata, fitsfile, nest=True) + check = PixelData(dist, tp, n_value=6) + io.read_healpix_fits(check, fitsfile, nest=True) nt.assert_equal(pdata.data, check.data) + if self.comm is None or self.comm.size == 1: + # Write out the data serially and compare + fdata = list() + for col in range(pdata.n_value): + fdata.append(np.zeros(pdata.distribution.n_pix)) + for lc, sm in enumerate(pdata.distribution.local_submaps): + global_offset = sm * pdata.distribution.n_pix_submap + n_copy = pdata.distribution.n_pix_submap + if global_offset + n_copy > pdata.distribution.n_pix: + n_copy = pdata.distribution.n_pix - global_offset + for col in range(pdata.n_value): + fdata[col][ + global_offset : global_offset + n_copy + ] = pdata.data[lc, 0:n_copy, col] + serialfile = os.path.join( + self.outdir, + "serial_N{}_sub{}_type-{}.fits".format( + nside, nsb, np.dtype(tp).char + ), + ) + hp.write_map( + serialfile, fdata, fits_IDL=False, nest=True, overwrite=True + ) + loaded = hp.read_map(serialfile, nest=True, field=None) + for lc, sm in enumerate(pdata.distribution.local_submaps): + global_offset = sm * pdata.distribution.n_pix_submap + n_check = pdata.distribution.n_pix_submap + if global_offset + n_check > pdata.distribution.n_pix: + n_check = pdata.distribution.n_pix - global_offset + for col in range(pdata.n_value): + nt.assert_equal( + loaded[col][ + global_offset : global_offset + n_check + ], + pdata.data[lc, 0:n_check, col], + ) + # Compare to file written with our own function + loaded = hp.read_map(fitsfile, nest=True, field=None) + for lc, sm in enumerate(pdata.distribution.local_submaps): + global_offset = sm * pdata.distribution.n_pix_submap + n_check = 
pdata.distribution.n_pix_submap + if global_offset + n_check > pdata.distribution.n_pix: + n_check = pdata.distribution.n_pix - global_offset + for col in range(pdata.n_value): + nt.assert_equal( + loaded[col][ + global_offset : global_offset + n_check + ], + pdata.data[lc, 0:n_check, col], + ) diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 67bff6ba1..475fff64f 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -6,6 +6,7 @@ import os import sys +import shutil import unittest @@ -108,8 +109,9 @@ def test(name=None, verbosity=2): if rank == 0: outdir = os.path.abspath(outdir) - if not os.path.isdir(outdir): - os.makedirs(outdir) + if os.path.isdir(outdir): + shutil.rmtree(outdir) + os.makedirs(outdir) if comm is not None: outdir = comm.bcast(outdir, root=0) From eb10924dd3ac4665364bb08e194d9805376a82a9 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 14 Dec 2020 12:47:51 -0800 Subject: [PATCH 044/690] Pointing and mapmaker utils now support data views. --- src/toast/ops/mapmaker.py | 3 +- src/toast/ops/mapmaker_binning.py | 3 +- src/toast/ops/mapmaker_templates.py | 51 ++++- src/toast/ops/mapmaker_utils.py | 340 +++++++++++++++------------- src/toast/ops/pointing_healpix.py | 7 + src/toast/templates/template.py | 47 ++-- 6 files changed, 266 insertions(+), 185 deletions(-) diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index ecf34cfae..2aadfb96f 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -90,7 +90,7 @@ def _check_projection(self, proposal): if not isinstance(bin, Operator): raise traitlets.TraitError("binning should be an Operator instance") # Check that this operator has the traits we expect - for trt in ["templated_matrix", "det_data", "binning"]: + for trt in ["template_matrix", "det_data", "binning"]: if not bin.has_trait(trt): msg = "binning operator should have a '{}' trait".format(trt) raise traitlets.TraitError(msg) @@ -128,6 +128,7 @@ def _exec(self, data, detectors=None, **kwargs): # Check map binning if self.map_binning is None: + # Use the same binning as the projection operator used in the solver. 
self.map_binning = self.projection.binning # Get the template matrix used in the projection diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py index 1be1a5821..5848da41c 100644 --- a/src/toast/ops/mapmaker_binning.py +++ b/src/toast/ops/mapmaker_binning.py @@ -102,7 +102,7 @@ def _check_pointing(self, proposal): if not isinstance(pntg, Operator): raise traitlets.TraitError("pointing should be an Operator instance") # Check that this operator has the traits we expect - for trt in ["pixels", "weights", "create_dist"]: + for trt in ["pixels", "weights", "create_dist", "view"]: if not pntg.has_trait(trt): msg = "pointing operator should have a '{}' trait".format(trt) raise traitlets.TraitError(msg) @@ -147,6 +147,7 @@ def _exec(self, data, detectors=None, **kwargs): build_zmap = BuildNoiseWeighted( pixel_dist=self.pixel_dist, zmap=self.binned, + view=self.pointing.view, pixels=self.pointing.pixels, weights=self.pointing.weights, noise_model=self.noise_model, diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index eb877c269..cd7244bc7 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -21,10 +21,6 @@ class TemplateMatrix(Operator): API = Int(0, help="Internal interface version for this operator") - det_data = Unicode( - None, allow_none=True, help="Observation detdata key for the timestream data" - ) - templates = List( None, allow_none=True, help="This should be a list of Template instances" ) @@ -33,6 +29,26 @@ class TemplateMatrix(Operator): transpose = Bool(False, help="If True, apply the transpose.") + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + det_flags = Unicode( + None, allow_none=True, help="Observation detdata key for flags to use" + ) + + det_flag_mask = Int(0, help="Bit mask value for optional flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional shared flagging") + @traitlets.validate("templates") def _check_templates(self, proposal): temps = proposal["value"] @@ -63,13 +79,21 @@ def _exec(self, data, detectors=None, **kwargs): "You must set the amplitudes trait before calling exec()" ) - # On the first call, we initialize all templates using the Data instance. + # On the first call, we initialize all templates using the Data instance and + # the fixed options for view, flagging, etc. if not self._initialized: for tmpl in self.templates: + tmpl.view = self.view + tmpl.det_flags = self.det_flags + tmpl.det_flag_mask = self.det_flag_mask + tmpl.shared_flags = self.shared_flags + tmpl.shared_flag_mask = self.shared_flag_mask + # This next line will trigger calculation of the number + # of amplitudes within each template. 
tmpl.data = data self._initialized = True - # Set the data we are using + # Set the data we are using for this execution for tmpl in self.templates: tmpl.det_data = self.det_data @@ -111,9 +135,20 @@ def _finalize(self, data, **kwargs): return def _requires(self): - req = dict() + req = { + "meta": list(), + "shared": list(), + "detdata": list(), + "intervals": list(), + } + if self.view is not None: + req["intervals"].append(self.view) if self.transpose: - req["detdata"] = [self.det_data] + req["detdata"].append(self.det_data) + if self.shared_flags is not None: + req["shared"].append(self.shared_flags) + if self.det_flags is not None: + req["detdata"].append(self.det_flags) return req def _provides(self): diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index 89308749f..626ee52e2 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -58,6 +58,10 @@ class BuildHitMap(Operator): hits = Unicode("hits", help="The Data key for the output hit map") + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + det_flags = Unicode( None, allow_none=True, help="Observation detdata key for flags to use" ) @@ -120,32 +124,35 @@ def _exec(self, data, detectors=None, **kwargs): # Nothing to do for this observation continue - for det in dets: - # Get local submap and pixels - local_sm, local_pix = dist.global_pixel_to_submap( - ob.detdata[self.pixels][det] - ) - - # Samples with telescope pointing problems are already flagged in the - # the pointing operators by setting the pixel numbers to a negative - # value. Here we optionally apply detector flags to the local - # pixel numbers to flag more samples. - - # Apply the flags if needed - if self.det_flags is not None: - flags = np.array(ob.detdata[self.det_flags]) - flags &= self.det_flag_mask - local_pix[flags != 0] = -1 - - cov_accum_diag_hits( - dist.n_local_submap, - dist.n_pix_submap, - 1, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - self._hits.raw, - ) - + # The pixels and weights view for this observation + pix = ob.view[self.view].detdata[self.pixels] + flgs = [None for x in pix] + if self.det_flags is not None: + flgs = ob.view[self.view].detdata[self.det_flags] + + # Process every data view + for pview, fview in zip(pix, flgs): + for det in dets: + # Get local submap and pixels + local_sm, local_pix = dist.global_pixel_to_submap(pview[det]) + + # Samples with telescope pointing problems are already flagged in + # the pointing operators by setting the pixel numbers to a negative + # value. Here we optionally apply detector flags to the local + # pixel numbers to flag more samples. 
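# Note: the flag test just below relies on Python giving bitwise "&" higher
# precedence than "!=", so it is equivalent to the more explicit form:
#
#     bad = (fview[det] & self.det_flag_mask) != 0
#     local_pix[bad] = -1
#
# Any sample whose local pixel index is negative is then skipped when the
# hits are accumulated.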
+ + # Apply the flags if needed + if self.det_flags is not None: + local_pix[fview[det] & self.det_flag_mask != 0] = -1 + + cov_accum_diag_hits( + dist.n_local_submap, + dist.n_pix_submap, + 1, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + self._hits.raw, + ) return def _finalize(self, data, **kwargs): @@ -162,13 +169,16 @@ def _requires(self): "meta": [self.pixel_dist], "shared": list(), "detdata": [self.pixels, self.weights], + "intervals": list(), } if self.det_flags is not None: req["detdata"].append(self.det_flags) + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): - prov = {"meta": list(), "shared": list(), "detdata": list()} + prov = {"meta": [self.hits]} return prov def _accelerators(self): @@ -208,6 +218,10 @@ class BuildInverseCovariance(Operator): "inv_covariance", help="The Data key for the output inverse covariance" ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + det_flags = Unicode( None, allow_none=True, help="Observation detdata key for flags to use" ) @@ -285,63 +299,68 @@ def _exec(self, data, detectors=None, **kwargs): noise = ob[self.noise_model] - # The pixels and weights for this detector. - pix = ob.detdata[self.pixels] - wts = ob.detdata[self.weights] - - for det in dets: - # We require that the pointing matrix has the same number of - # non-zero elements for every detector and every observation. - # We check that here, and if this is the first observation and - # detector we have worked with we create the PixelData object. - if self._invcov is None: - # We will store the lower triangle of the covariance. - if len(wts.detector_shape) == 1: - self._weight_nnz = 1 - else: - self._weight_nnz = wts.detector_shape[1] - self._cov_nnz = self._weight_nnz * (self._weight_nnz + 1) // 2 - self._invcov = PixelData(dist, np.float64, n_value=self._cov_nnz) - else: - check_nnz = None - if len(wts.detector_shape) == 1: - check_nnz = 1 - else: - check_nnz = wts.detector_shape[1] - if check_nnz != self._weight_nnz: - msg = "observation '{}', detector '{}', pointing weights '{}' has {} nnz, not {}".format( - ob.name, det, self.weights, check_nnz, self._weight_nnz + # The pixels and weights view for this observation + pix = ob.view[self.view].detdata[self.pixels] + wts = ob.view[self.view].detdata[self.weights] + flgs = [None for x in wts] + if self.det_flags is not None: + flgs = ob.view[self.view].detdata[self.det_flags] + + # Process every data view + for pview, wview, fview in zip(pix, wts, flgs): + for det in dets: + # We require that the pointing matrix has the same number of + # non-zero elements for every detector and every observation. + # We check that here, and if this is the first observation and + # detector we have worked with we create the PixelData object. + if self._invcov is None: + # We will store the lower triangle of the covariance. + if len(wview.detector_shape) == 1: + self._weight_nnz = 1 + else: + self._weight_nnz = wview.detector_shape[1] + self._cov_nnz = self._weight_nnz * (self._weight_nnz + 1) // 2 + self._invcov = PixelData( + dist, np.float64, n_value=self._cov_nnz ) - raise RuntimeError(msg) - - # Get local submap and pixels - local_sm, local_pix = dist.global_pixel_to_submap(pix[det]) - - # Get the detector weight from the noise model. - detweight = noise.detector_weight(det) - - # Samples with telescope pointing problems are already flagged in the - # the pointing operators by setting the pixel numbers to a negative - # value. 
Here we optionally apply detector flags to the local - # pixel numbers to flag more samples. - - # Apply the flags if needed - if self.det_flags is not None: - flags = np.array(ob.detdata[self.det_flags]) - flags &= self.det_flag_mask - local_pix[flags != 0] = -1 - - # Accumulate - cov_accum_diag_invnpp( - dist.n_local_submap, - dist.n_pix_submap, - self._weight_nnz, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - wts[det].reshape(-1), - detweight, - self._invcov.raw, - ) + else: + check_nnz = None + if len(wview.detector_shape) == 1: + check_nnz = 1 + else: + check_nnz = wview.detector_shape[1] + if check_nnz != self._weight_nnz: + msg = "observation '{}', detector '{}', pointing weights '{}' has {} nnz, not {}".format( + ob.name, det, self.weights, check_nnz, self._weight_nnz + ) + raise RuntimeError(msg) + + # Get local submap and pixels + local_sm, local_pix = dist.global_pixel_to_submap(pview[det]) + + # Get the detector weight from the noise model. + detweight = noise.detector_weight(det) + + # Samples with telescope pointing problems are already flagged in + # the pointing operators by setting the pixel numbers to a negative + # value. Here we optionally apply detector flags to the local + # pixel numbers to flag more samples. + + # Apply the flags if needed + if self.det_flags is not None: + local_pix[fview[det] & self.det_flag_mask != 0] = -1 + + # Accumulate + cov_accum_diag_invnpp( + dist.n_local_submap, + dist.n_pix_submap, + self._weight_nnz, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + wview[det].reshape(-1), + detweight, + self._invcov.raw, + ) return def _finalize(self, data, **kwargs): @@ -358,13 +377,16 @@ def _requires(self): "meta": [self.pixel_dist, self.noise_model], "shared": list(), "detdata": [self.pixels, self.weights], + "intervals": list(), } if self.det_flags is not None: req["detdata"].append(self.det_flags) + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): - prov = {"meta": list(), "shared": list(), "detdata": list()} + prov = {"meta": [self.inverse_covariance]} return prov def _accelerators(self): @@ -404,6 +426,10 @@ class BuildNoiseWeighted(Operator): zmap = Unicode("zmap", help="The Data key for the output noise weighted map") + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + det_data = Unicode( None, allow_none=True, help="Observation detdata key for the timestream data" ) @@ -488,65 +514,71 @@ def _exec(self, data, detectors=None, **kwargs): noise = ob[self.noise_model] - # The pixels and weights. - pix = ob.detdata[self.pixels] - wts = ob.detdata[self.weights] - - for det in dets: - # Data for this detector - ddata = ob.detdata[self.det_data][det] - - # We require that the pointing matrix has the same number of - # non-zero elements for every detector and every observation. - # We check that here, and if this is the first observation and - # detector we have worked with we create the PixelData object. 
- if self._zmap is None: - if len(wts.detector_shape) == 1: - self._weight_nnz = 1 - else: - self._weight_nnz = wts.detector_shape[1] - self._zmap = PixelData(dist, np.float64, n_value=self._weight_nnz) - else: - check_nnz = None - if len(wts.detector_shape) == 1: - check_nnz = 1 - else: - check_nnz = wts.detector_shape[1] - if check_nnz != self._weight_nnz: - msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( - ob.name, det, self.weights + # The pixels and weights view for this observation + pix = ob.view[self.view].detdata[self.pixels] + wts = ob.view[self.view].detdata[self.weights] + ddat = ob.view[self.view].detdata[self.det_data] + flgs = [None for x in wts] + if self.det_flags is not None: + flgs = ob.view[self.view].detdata[self.det_flags] + + # Process every data view + for pview, wview, dview, fview in zip(pix, wts, ddat, flgs): + for det in dets: + # Data for this detector + ddata = dview[det] + + # We require that the pointing matrix has the same number of + # non-zero elements for every detector and every observation. + # We check that here, and if this is the first observation and + # detector we have worked with we create the PixelData object. + if self._zmap is None: + if len(wview.detector_shape) == 1: + self._weight_nnz = 1 + else: + self._weight_nnz = wview.detector_shape[1] + self._zmap = PixelData( + dist, np.float64, n_value=self._weight_nnz ) - raise RuntimeError(msg) - - # Get local submap and pixels - local_sm, local_pix = dist.global_pixel_to_submap(pix[det]) - - # Get the detector weight from the noise model. - detweight = noise.detector_weight(det) - - # Samples with telescope pointing problems are already flagged in the - # the pointing operators by setting the pixel numbers to a negative - # value. Here we optionally apply detector flags to the local - # pixel numbers to flag more samples. - - # Apply the flags if needed - if self.det_flags is not None: - flags = np.array(ob.detdata[self.det_flags]) - flags &= self.det_flag_mask - local_pix[flags != 0] = -1 - - # Accumulate - cov_accum_zmap( - dist.n_local_submap, - dist.n_pix_submap, - self._zmap.n_value, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - wts[det].reshape(-1), - detweight, - ddata, - self._zmap.raw, - ) + else: + check_nnz = None + if len(wview.detector_shape) == 1: + check_nnz = 1 + else: + check_nnz = wview.detector_shape[1] + if check_nnz != self._weight_nnz: + msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( + ob.name, det, self.weights + ) + raise RuntimeError(msg) + + # Get local submap and pixels + local_sm, local_pix = dist.global_pixel_to_submap(pview[det]) + + # Get the detector weight from the noise model. + detweight = noise.detector_weight(det) + + # Samples with telescope pointing problems are already flagged in the + # the pointing operators by setting the pixel numbers to a negative + # value. Here we optionally apply detector flags to the local + # pixel numbers to flag more samples. 
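# Note: a rough sketch of the per-sample operation that cov_accum_zmap
# performs below, assuming nnz pointing weights per sample and writing zmap
# for the (n_local_submap, n_pix_submap, nnz) array behind self._zmap:
#
#     for i in range(n_samp):
#         if local_pix[i] >= 0:
#             zmap[local_sm[i], local_pix[i], :] += (
#                 detweight * ddata[i] * weights[i, :]
#             )
#
# i.e. this operator accumulates P^T N^{-1} d one detector at a time.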
+ + # Apply the flags if needed + if self.det_flags is not None: + local_pix[fview[det] & self.det_flag_mask != 0] = -1 + + # Accumulate + cov_accum_zmap( + dist.n_local_submap, + dist.n_pix_submap, + self._zmap.n_value, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + wview[det].reshape(-1), + detweight, + ddata, + self._zmap.raw, + ) return def _finalize(self, data, **kwargs): @@ -563,13 +595,16 @@ def _requires(self): "meta": [self.pixel_dist, self.noise_model, self.det_data], "shared": list(), "detdata": [self.pixels, self.weights], + "intervals": list(), } if self.det_flags is not None: req["detdata"].append(self.det_flags) + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): - prov = {"meta": list(), "shared": list(), "detdata": list()} + prov = {"meta": [self.zmap]} return prov def _accelerators(self): @@ -668,18 +703,11 @@ def _check_pointing(self, proposal): if pntg is not None: if not isinstance(pntg, Operator): raise traitlets.TraitError("pointing should be an Operator instance") - if not pntg.has_trait("pixels"): - raise traitlets.TraitError( - "pointing operator should have a 'pixels' trait" - ) - if not pntg.has_trait("weights"): - raise traitlets.TraitError( - "pointing operator should have a 'weights' trait" - ) - if not pntg.has_trait("create_dist"): - raise traitlets.TraitError( - "pointing operator should have a 'create_dist' trait" - ) + # Check that this operator has the traits we expect + for trt in ["pixels", "weights", "create_dist", "view"]: + if not pntg.has_trait(trt): + msg = "pointing operator should have a '{}' trait".format(trt) + raise traitlets.TraitError(msg) return pntg def __init__(self, **kwargs): @@ -748,6 +776,7 @@ def _exec(self, data, detectors=None, **kwargs): build_hits = BuildHitMap( pixel_dist=self.pixel_dist, hits=self.hits, + view=self.pointing.view, pixels=self.pointing.pixels, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, @@ -760,6 +789,7 @@ def _exec(self, data, detectors=None, **kwargs): build_invcov = BuildInverseCovariance( pixel_dist=self.pixel_dist, inverse_covariance=self.covariance, + view=self.pointing.view, pixels=self.pointing.pixels, weights=self.pointing.weights, noise_model=self.noise_model, diff --git a/src/toast/ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py index 20c2374ba..f07d33bca 100644 --- a/src/toast/ops/pointing_healpix.py +++ b/src/toast/ops/pointing_healpix.py @@ -61,6 +61,10 @@ class PointingHealpix(Operator): mode = Unicode("I", help="The Stokes weights to generate (I or IQU)") + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + boresight = Unicode("boresight_radec", help="Observation shared key for boresight") hwp_angle = Unicode( @@ -401,6 +405,7 @@ def _requires(self): self.boresight, ], "detdata": list(), + "intervals": list(), } if self.cal is not None: req["meta"].append(self.cal) @@ -408,6 +413,8 @@ def _requires(self): req["shared"].append(self.shared_flags) if self.hwp_angle is not None: req["shared"].append(self.hwp_angle) + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index a83fff4a9..b4686d93c 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -33,10 +33,6 @@ class Template(TraitConfig): # Note: The TraitConfig base class defines a "name" attribute. 
- det_data = Unicode( - None, allow_none=True, help="Observation detdata key for the timestream data" - ) - data = Instance( None, klass=Data, @@ -44,6 +40,26 @@ class Template(TraitConfig): help="This must be an instance of a Data class (or None)", ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + det_flags = Unicode( + None, allow_none=True, help="Observation detdata key for flags to use" + ) + + det_flag_mask = Int(0, help="Bit mask value for optional flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional shared flagging") + @traitlets.validate("data") def _check_data(self, proposal): dat = proposal["value"] @@ -54,24 +70,15 @@ def _check_data(self, proposal): self.initialize(dat) return dat - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def _initialize(self, newdata): + @traitlets.observe("data") + def _initialize(self, change): + # Derived classes should implement this method to do any set up (like + # computing the number of amplitudes) whenever the data changes. + newdata = change["data"] raise NotImplementedError("Derived class must implement _initialize()") - def initialize(self, newdata): - """Initialize instance after the data trait has been set. - - Templates use traits to set their properties, which allows them to be - configured easily with the constructor or afterwards and enables them to be - built from config files. However, the `data` trait may not be set at - construction time and this trait is likely used to compute the number of - template amplitudes that will be used and other parameters. This explicit - initialize method is called whenever the `data` trait is set. - - """ - self._initialize(newdata) + def __init__(self, **kwargs): + super().__init__(**kwargs) def _zeros(self): raise NotImplementedError("Derived class must implement _zeros()") From f9c7d63add178da5b8568207d72235b6bdb3ff8f Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 14 Dec 2020 12:49:29 -0800 Subject: [PATCH 045/690] Begin work on specific templates. --- src/toast/templates/fourier2d.py | 57 ++++++++++++++++++++++++++++ src/toast/templates/offset.py | 54 +++++++++++++++++++++++++++ src/toast/templates/subharmonic.py | 60 ++++++++++++++++++++++++++++++ 3 files changed, 171 insertions(+) create mode 100644 src/toast/templates/fourier2d.py create mode 100644 src/toast/templates/offset.py create mode 100644 src/toast/templates/subharmonic.py diff --git a/src/toast/templates/fourier2d.py b/src/toast/templates/fourier2d.py new file mode 100644 index 000000000..d7bbe08d5 --- /dev/null +++ b/src/toast/templates/fourier2d.py @@ -0,0 +1,57 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float + +from ..data import Data + +from .template import Template + + +@trait_docs +class Fourier2D(Template): + """This class represents atmospheric fluctuations in front of the focalplane + as 2D Fourier modes. + + """ + + # Notes: The TraitConfig base class defines a "name" attribute. 
The Template + # class (derived from TraitConfig) defines the following traits already: + # data : The Data instance we are working with + # view : The timestream view we are using + # det_data : The detector data key with the timestreams + # det_flags : Optional detector flags + # det_flag_mask : Bit mask for detector flags + # shared_flags : Optional detector flags + # shared_flag_mask : Bit mask for detector flags + # + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @traitlets.observe("data") + def _initialize(self, change): + # Derived classes should implement this method to do any set up (like + # computing the number of amplitudes) whenever the data changes. + newdata = change["data"] + return + + def _zeros(self): + raise NotImplementedError("Derived class must implement _zeros()") + + def _add_to_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _add_to_signal()") + + def _project_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _project_signal()") + + def _add_prior(self, amplitudes_in, amplitudes_out): + # Not all Templates implement the prior + return + + def _apply_precond(self, amplitudes_in, amplitudes_out): + raise NotImplementedError("Derived class must implement _apply_precond()") diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py new file mode 100644 index 000000000..30e0d4700 --- /dev/null +++ b/src/toast/templates/offset.py @@ -0,0 +1,54 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float + +from ..data import Data + +from .template import Template + + +@trait_docs +class Offset(Template): + """This class represents noise fluctuations as a step function.""" + + # Notes: The TraitConfig base class defines a "name" attribute. The Template + # class (derived from TraitConfig) defines the following traits already: + # data : The Data instance we are working with + # view : The timestream view we are using + # det_data : The detector data key with the timestreams + # det_flags : Optional detector flags + # det_flag_mask : Bit mask for detector flags + # shared_flags : Optional detector flags + # shared_flag_mask : Bit mask for detector flags + # + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @traitlets.observe("data") + def _initialize(self, change): + # Derived classes should implement this method to do any set up (like + # computing the number of amplitudes) whenever the data changes. 
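# Note: traitlets passes observe handlers a change object whose new value is
# stored under the "new" key (alongside "old", "name", "owner" and "type"),
# so the assignment below presumably wants
#
#     newdata = change["new"]
#
# since change["data"] would raise a KeyError. The same pattern appears in
# the other template stubs added in this commit and in Template._initialize.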
+ newdata = change["data"] + return + + def _zeros(self): + raise NotImplementedError("Derived class must implement _zeros()") + + def _add_to_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _add_to_signal()") + + def _project_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _project_signal()") + + def _add_prior(self, amplitudes_in, amplitudes_out): + # Not all Templates implement the prior + return + + def _apply_precond(self, amplitudes_in, amplitudes_out): + raise NotImplementedError("Derived class must implement _apply_precond()") diff --git a/src/toast/templates/subharmonic.py b/src/toast/templates/subharmonic.py new file mode 100644 index 000000000..76df7954b --- /dev/null +++ b/src/toast/templates/subharmonic.py @@ -0,0 +1,60 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + + +from ..utils import Logger + +from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float + +from ..data import Data + +from .template import Template + + +@trait_docs +class SubHarmonic(Template): + """This class represents sub-harmonic noise fluctuations. + + Sub-harmonic means that the characteristic frequency of the noise + modes is lower than 1/T where T is the length of the interval + being fitted. + + """ + + # Notes: The TraitConfig base class defines a "name" attribute. The Template + # class (derived from TraitConfig) defines the following traits already: + # data : The Data instance we are working with + # view : The timestream view we are using + # det_data : The detector data key with the timestreams + # det_flags : Optional detector flags + # det_flag_mask : Bit mask for detector flags + # shared_flags : Optional detector flags + # shared_flag_mask : Bit mask for detector flags + # + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @traitlets.observe("data") + def _initialize(self, change): + # Derived classes should implement this method to do any set up (like + # computing the number of amplitudes) whenever the data changes. 
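# Note: for this template, initialization will presumably amount to counting
# one amplitude per baseline step in each observation. A minimal sketch of
# that count, assuming each observation exposes an n_local_samples property
# and using the step_length trait introduced in the next commit of this
# series:
#
#     self._n_amp = 0
#     for ob in change["new"].obs:
#         self._n_amp += 1 + ob.n_local_samples // self.step_length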
+ newdata = change["data"] + return + + def _zeros(self): + raise NotImplementedError("Derived class must implement _zeros()") + + def _add_to_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _add_to_signal()") + + def _project_signal(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _project_signal()") + + def _add_prior(self, amplitudes_in, amplitudes_out): + # Not all Templates implement the prior + return + + def _apply_precond(self, amplitudes_in, amplitudes_out): + raise NotImplementedError("Derived class must implement _apply_precond()") From 7c457a5caf059e9b6af40c9f188c20e7b6ab849c Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 15 Dec 2020 07:34:44 -0800 Subject: [PATCH 046/690] Rename file --- ...pmaker_projection.py => mapmaker_solve.py} | 0 src/toast/templates/offset.py | 11 +++++++ src/toast/templates/template.py | 31 ++++++++++--------- 3 files changed, 27 insertions(+), 15 deletions(-) rename src/toast/ops/{mapmaker_projection.py => mapmaker_solve.py} (100%) diff --git a/src/toast/ops/mapmaker_projection.py b/src/toast/ops/mapmaker_solve.py similarity index 100% rename from src/toast/ops/mapmaker_projection.py rename to src/toast/ops/mapmaker_solve.py diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index 30e0d4700..9aec7e023 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -27,6 +27,16 @@ class Offset(Template): # shared_flag_mask : Bit mask for detector flags # + step_length = Int(1000000, help="Number of samples per baseline step") + + noise_model = Unicode( + None, + allow_none=True, + help="Observation key containing the optional noise model", + ) + + precond_width = Int(20, help="Preconditioner width in terms of offsets / baselines") + def __init__(self, **kwargs): super().__init__(**kwargs) @@ -35,6 +45,7 @@ def _initialize(self, change): # Derived classes should implement this method to do any set up (like # computing the number of amplitudes) whenever the data changes. newdata = change["data"] + self return def _zeros(self): diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index b4686d93c..a3c0732a3 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -261,7 +261,7 @@ class Amplitudes(object): of amplitudes and for doing global reductions. If n_global == n_local, then every process has a full copy of the amplitude - values. The the two arguments are different, then each process has a subset of + values. If the two arguments are different, then each process has a subset of values. If local_indices is None, then each process has a unique set of values and the total number across all processes must sum to n_global. If local_indices is given, then it is the explicit locations of the local values within the global @@ -425,7 +425,9 @@ def sync(self, comm_bytes=10000000): None """ - if self._comm is None: + if self._comm is None or self._local_indices is None: + # We have either one process or every process has a disjoint set of + # amplitudes. Nothing to sync. return log = Logger.get() @@ -461,7 +463,9 @@ def sync(self, comm_bytes=10000000): def dot(self, other): """Perform a dot product with another Amplitudes object. - The other instance must have the same data distribution. + The other instance must have the same data distribution. 
The two objects are + assumed to have already been synchronized, so that any amplitudes that exist + on multiple processes have the same values. Args: other (Amplitudes): The other instance. @@ -476,19 +480,16 @@ def dot(self, other): raise RuntimeError("Amplitudes must have the same number of local values") local_result = np.dot(self.local, other.local) result = None - if self._full: - # Every process has a copy of all amplitudes, so we are done + if self._comm is None or self._full: + # Only one process, or every process has the full set of values. result = local_result else: - if self._comm is None: - # Only one process - result = local_result + if self._local_indices is None: + # Every process has a unique set of amplitudes. Reduce the local + # dot products. + result = MPI.allreduce(local_result, op=MPI.SUM) else: - if self._local_indices is None: - # Every process has a unique set of amplitudes. Reduce. - result = MPI.allreduce(local_result, op=MPI.SUM) - else: - # More complicated, since we need to reduce each amplitude only - # once. Implement techniques from other existing code when needed. - raise NotImplementedError("dot of explicitly indexed amplitudes") + # More complicated, since we need to reduce each amplitude only + # once. Implement techniques from other existing code when needed. + raise NotImplementedError("dot of explicitly indexed amplitudes") return result From 3781898479f406940c6703a31584700d35419a87 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 31 Dec 2020 09:47:26 -0800 Subject: [PATCH 047/690] More work on template amplitudes, flagging, mapmaker solver. --- src/toast/ops/CMakeLists.txt | 2 +- src/toast/ops/__init__.py | 2 +- src/toast/ops/mapmaker.py | 210 ++++++------ src/toast/ops/mapmaker_solve.py | 489 +++++++++++++++++++++++++--- src/toast/ops/mapmaker_templates.py | 14 + src/toast/ops/operator.py | 4 +- src/toast/ops/scan_map.py | 103 ++++++ src/toast/templates/fourier2d.py | 10 +- src/toast/templates/offset.py | 327 ++++++++++++++++++- src/toast/templates/subharmonic.py | 10 +- src/toast/templates/template.py | 163 +++++++++- src/toast/tests/ops_scan_map.py | 49 +++ 12 files changed, 1182 insertions(+), 201 deletions(-) diff --git a/src/toast/ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt index 38bc89a94..0e937edae 100644 --- a/src/toast/ops/CMakeLists.txt +++ b/src/toast/ops/CMakeLists.txt @@ -17,7 +17,7 @@ install(FILES scan_map.py mapmaker_utils.py mapmaker_binning.py - mapmaker_projection.py + mapmaker_solve.py mapmaker_templates.py mapmaker.py madam.py diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index 8c571e035..aa81c0978 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -24,7 +24,7 @@ from .pointing_healpix import PointingHealpix -from .scan_map import ScanMap +from .scan_map import ScanMap, ScanMask from .mapmaker_utils import ( BuildHitMap, diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index 2aadfb96f..f2a45230d 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -8,7 +8,7 @@ from ..utils import Logger -from ..traits import trait_docs, Int, Unicode, Bool, Instance +from ..traits import trait_docs, Int, Unicode, Bool, Float, Instance from ..timing import function_timer @@ -43,14 +43,14 @@ class MapMaker(Operator): .. math:: Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} - Where `P` is the pointing matrix. This operator takes a "Projection" instance - as one of its traits, and that operator performs: + or in terms of the binning operation: .. 
math:: - PROJ = M^T N^{-1} Z + Z = I - P B - This projection operator is then used to compute the right hand side of the - solver and for each calculation of the left hand side. + Where `P` is the pointing matrix. This operator takes one operator for the + template matrix `M` and one operator for the binning, `B`. It then + uses a conjugate gradient solver to solve for the amplitudes. After solving for the template amplitudes, a final map of the signal estimate is computed using a simple binning: @@ -61,6 +61,10 @@ class MapMaker(Operator): Where the "prime" indicates that this final map might be computed using a different pointing matrix than the one used to solve for the template amplitudes. + The template-subtracted detector timestreams are saved either in the input + `det_data` key of each observation, or (if overwrite == False) in an obs.detdata + key that matches the name of this class instance. + """ # Class traits @@ -71,30 +75,31 @@ class MapMaker(Operator): None, allow_none=True, help="Observation detdata key for the timestream data" ) - projection = Instance( + convergence = Float(1.0e-12, help="Relative convergence limit") + + iter_max = Int(100, help="Maximum number of iterations") + + overwrite = Bool( + False, help="Overwrite the input detector data for use as scratch space" + ) + + binning = Instance( klass=Operator, allow_none=True, - help="This must be an instance of a projection operator", + help="Binning operator used for solving template amplitudes", ) - map_binning = Instance( + template_matrix = Instance( klass=Operator, allow_none=True, - help="Binning operator for final map making. Default uses same operator as projection.", + help="This must be an instance of a template matrix operator", ) - @traitlets.validate("projection") - def _check_projection(self, proposal): - proj = proposal["value"] - if proj is not None: - if not isinstance(bin, Operator): - raise traitlets.TraitError("binning should be an Operator instance") - # Check that this operator has the traits we expect - for trt in ["template_matrix", "det_data", "binning"]: - if not bin.has_trait(trt): - msg = "binning operator should have a '{}' trait".format(trt) - raise traitlets.TraitError(msg) - return bin + map_binning = Instance( + klass=Operator, + allow_none=True, + help="Binning operator for final map making. Default uses same operator as solver.", + ) @traitlets.validate("map_binning") def _check_binning(self, proposal): @@ -120,111 +125,86 @@ def _exec(self, data, detectors=None, **kwargs): if self.det_data is None: raise RuntimeError("You must set the det_data trait before calling exec()") - # Check projection - if self.projection is None: - raise RuntimeError( - "You must set the projection trait before calling exec()" - ) - # Check map binning if self.map_binning is None: # Use the same binning as the projection operator used in the solver. self.map_binning = self.projection.binning - # Get the template matrix used in the projection - template_matrix = self.projection.template_matrix - - # Compute the RHS - - if template_matrix.amplitudes in data: - # Clear any existing amplitudes, so that it will be created. - del data[template_matrix.amplitudes] - - self.projection.det_data = self.det_data - self.projection.apply(data) - - # Copy structure of RHS and zero as starting point for the solver - - # Solve for amplitudes + # For computing the RHS and also for each iteration of the LHS we will need + # a full detector-data sized buffer for use as scratch space. 
We can either + # destroy the input data to save memory (useful if this is the last operator + # processing the data) or we can create a temporary set of timestreams. + + copy_det = None + clear_temp = None + detdata_name = self.det_data + + if not self.overwrite: + # Use a temporary detdata named after this operator + detdata_name = self.name + # Copy the original data into place, and then use this copy destructively. + copy_det = Copy( + detdata=[ + (self.det_data, detdata_name), + ] + ) + copy_det.apply(data, detectors=detectors) + + # Compute the RHS. Overwrite inputs, either the original or the copy. + + self.template_matrix.amplitudes = "amplitudes_rhs" + rhs_calc = SolverRHS( + det_data=detdata_name, + overwrite=True, + binning=self.binning, + template_matrix=self.template_matrix, + ) + rhs.apply(data, detectors=detectors) + + # Set up the LHS operator. Use either the original timestreams or the copy + # as temp space. + + self.template_matrix.amplitudes = "amplitudes" + lhs_calc = SolverLHS( + det_temp=detdata_name, + binning=self.binning, + template_matrix=self.template_matrix, + ) + + # Solve for amplitudes. + solve( + data, + detectors, + lhs_calc, + data["amplitudes_rhs"], + convergence=self.convergence, + n_iter_max=self.iter_max, + ) + + # Reset our timestreams to zero + for ob in data.obs: + ob.detdata[detdata_name][:] = 0.0 + + # Project our solved amplitudes into timestreams. We output to either the + # input det_data or our temp space. + + self.template_matrix.transpose = False + self.template_matrix.apply(data, detectors=detectors) + + # Make a binned map of these template-subtracted timestreams + + self.map_binning.det_data = detdata_name + self.map_binning.apply(data, detectors=detectors) return - @function_timer - def _solve(self): - """Standard issue PCG solution of A.x = b - - Returns: - x : the least squares solution - """ - log = Logger.get() - timer0 = Timer() - timer0.start() - timer = Timer() - timer.start() - # Initial guess is zero amplitudes - guess = self.templates.zero_amplitudes() - # print("guess:", guess) # DEBUG - # print("RHS:", self.rhs) # DEBUG - residual = self.rhs.copy() - # print("residual(1):", residual) # DEBUG - residual -= self.apply_lhs(guess) - # print("residual(2):", residual) # DEBUG - precond_residual = self.templates.apply_precond(residual) - proposal = precond_residual.copy() - sqsum = precond_residual.dot(residual) - init_sqsum, best_sqsum, last_best = sqsum, sqsum, sqsum - if self.rank == 0: - log.info("Initial residual: {}".format(init_sqsum)) - # Iterate to convergence - for iiter in range(self.niter_max): - if not np.isfinite(sqsum): - raise RuntimeError("Residual is not finite") - alpha = sqsum - alpha /= proposal.dot(self.apply_lhs(proposal)) - alpha_proposal = proposal.copy() - alpha_proposal *= alpha - guess += alpha_proposal - residual -= self.apply_lhs(alpha_proposal) - del alpha_proposal - # Prepare for next iteration - precond_residual = self.templates.apply_precond(residual) - beta = 1 / sqsum - # Check for convergence - sqsum = precond_residual.dot(residual) - if self.rank == 0: - timer.report_clear( - "Iter = {:4} relative residual: {:12.4e}".format( - iiter, sqsum / init_sqsum - ) - ) - if sqsum < init_sqsum * self.convergence_limit or sqsum < 1e-30: - if self.rank == 0: - timer0.report_clear( - "PCG converged after {} iterations".format(iiter) - ) - break - best_sqsum = min(sqsum, best_sqsum) - if iiter % 10 == 0 and iiter >= self.niter_min: - if last_best < best_sqsum * 2: - if self.rank == 0: - timer0.report_clear( 
- "PCG stalled after {} iterations".format(iiter) - ) - break - last_best = best_sqsum - # Select the next direction - beta *= sqsum - proposal *= beta - proposal += precond_residual - # log.info("{} : Solution: {}".format(self.rank, guess)) # DEBUG - return guess - def _finalize(self, data, **kwargs): return def _requires(self): # This operator require everything that its sub-operators needs. - req = self.projection.requires() + req = self.binning.requires() + req.update(self.template_matrix.requires()) if self.map_binning is not None: req.update(self.map_binning.requires()) req["detdata"].append(self.det_data) @@ -235,7 +215,7 @@ def _provides(self): if self.map_binning is not None: prov["meta"] = [self.map_binning.binned] else: - prov["meta"] = [self.projection.binning.binned] + prov["meta"] = [self.binning.binned] return prov def _accelerators(self): diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index 5134c8076..25b5fba9b 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -26,8 +26,8 @@ @trait_docs -class Projection(Operator): - """Operator for map-making projection to template amplitudes. +class SolverRHS(Operator): + """Operator for computing the Right Hand Side of the conjugate gradient solver. This operator performs: @@ -56,6 +56,10 @@ class Projection(Operator): None, allow_none=True, help="Observation detdata key for the timestream data" ) + overwrite = Bool( + False, help="Overwrite the input detector data for use as scratch space" + ) + binning = Instance( klass=Operator, allow_none=True, @@ -105,47 +109,76 @@ def __init__(self, **kwargs): def _exec(self, data, detectors=None, **kwargs): log = Logger.get() - # Check that the detector data is set + # Check that the inputs are set if self.det_data is None: raise RuntimeError("You must set the det_data trait before calling exec()") + if self.binning is None: + raise RuntimeError("You must set the binning trait before calling exec()") + if self.template_matrix is None: + raise RuntimeError( + "You must set the template_matrix trait before calling exec()" + ) + + # Build a pipeline to make the binned map, optionally one detector at a time. - # Set data input for binning self.binning.det_data = self.det_data + bin_pipe = None + if self.binning.save_pointing: + # Process all detectors at once + bin_pipe = Pipeline(detector_sets=["ALL"]) + else: + # Process one detector at a time and clear pointing after each one. + bin_pipe = Pipeline(detector_sets=["SINGLE"]) + bin_pipe.operators = [self.binning] + bin_pipe.apply(data, detectors=detectors) + + # Build a pipeline for the projection and template matrix application. + # First create the operators that we will use. + + # Name of the temporary detdata created if we are not overwriting inputs + det_temp = "temp_RHS" + # Use the same pointing operator as the binning pointing = self.binning.pointing # Set up operator for optional clearing of the pointing matrices clear_pointing = Clear(detdata=[pointing.pixels, pointing.weights]) - # Name of the temporary detdata created - det_temp = "temp_projection" - - # Copy data operator - copy_det = Copy( - detdata=[ - (self.det_data, det_temp), - ] - ) - - # Set up map-scanning operator + # Optionally Copy data to a temporary location to avoid overwriting the input. 
+ copy_det = None + clear_temp = None + if not self.overwrite: + copy_det = Copy( + detdata=[ + (self.det_data, det_temp), + ] + ) + clear_temp = Clear(detdata=[det_temp]) + + # The detdata name we will use (either the original or the temp one) + detdata_name = self.det_data + if not self.overwrite: + detdata_name = det_temp + + # Set up map-scanning operator to project the binned map. scan_map = ScanMap( pixels=pointing.pixels, weights=pointing.weights, map_key=self.binning.binned, - det_data=det_temp, + det_data=detdata_name, subtract=True, ) # Set up noise weighting operator noise_weight = NoiseWeight( - noise_model=self.binning.noise_model, det_data=det_temp + noise_model=self.binning.noise_model, det_data=detdata_name ) - # Set up template matrix operator + # Set up template matrix operator. self.template_matrix.transpose = True - self.template_matrix.det_data = det_temp + self.template_matrix.det_data = detdata_name # Create a pipeline that projects the binned map and applies noise # weights and templates. @@ -154,30 +187,244 @@ def _exec(self, data, detectors=None, **kwargs): if self.binning.save_pointing: # Process all detectors at once proj_pipe = Pipeline(detector_sets=["ALL"]) - proj_pipe.operators = [ - copy_det, - pointing, - scan_map, - noise_weight, - self.template_matrix, - ] + oplist = list() + if not self.overwrite: + oplist.append(copy_det) + oplist.extend( + [ + pointing, + scan_map, + noise_weight, + self.template_matrix, + ] + ) + if not self.overwrite: + oplist.append(clear_temp) + proj_pipe.operators = oplist else: # Process one detector at a time and clear pointing after each one. proj_pipe = Pipeline(detector_sets=["SINGLE"]) - proj_pipe.operators = [ - copy_det, - pointing, - scan_map, - clear_pointing, - noise_weight, - self.template_matrix, - ] + if not self.overwrite: + oplist.append(copy_det) + oplist.extend( + [ + pointing, + scan_map, + clear_pointing, + noise_weight, + self.template_matrix, + ] + ) + if not self.overwrite: + oplist.append(clear_temp) + proj_pipe.operators = oplist + + # Run this projection pipeline. - # Compute the binned map. + proj_pipe.apply(data, detectors=detectors) - self.binning.apply(data, detectors=detectors) + return - # Project and apply template matrix. + def _finalize(self, data, **kwargs): + return + + def _requires(self): + # This operator require everything that its sub-operators needs. + req = self.binning.requires() + req.update(self.template_matrix.requires()) + req["detdata"].append(self.det_data) + return req + + def _provides(self): + prov = self.binning.provides() + prov["meta"].append(self.template_matrix.amplitudes) + return prov + + def _accelerators(self): + return list() + + +@trait_docs +class SolverLHS(Operator): + """Operator for computing the Left Hand Side of the conjugate gradient solver. + + This operator performs: + + .. math:: + a' = M^T N^{-1} Z M a + M_p a + + Where `a` and `a'` are the input and output template amplitudes. The template + amplitudes are stored in the Data object and are updated in place. `N` is + the time domain diagonal noise covariance and `M` is a set of templates. The `Z` + matrix is given by: + + .. math:: + Z = I - P (P^T N^{-1} P)^{-1} P^T N^{-1} + + Where `P` is the pointing matrix. In terms of the binning operation this is: + + .. 
math:: + Z = I - P B + + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_temp = Unicode( + "temp_LHS", help="Observation detdata key for temporary timestream data" + ) + + binning = Instance( + klass=Operator, + allow_none=True, + help="This must be an instance of a binning operator", + ) + + template_matrix = Instance( + klass=Operator, + allow_none=True, + help="This must be an instance of a template matrix operator", + ) + + @traitlets.validate("binning") + def _check_binning(self, proposal): + bin = proposal["value"] + if bin is not None: + if not isinstance(bin, Operator): + raise traitlets.TraitError("binning should be an Operator instance") + # Check that this operator has the traits we expect + for trt in ["pointing", "det_data", "binned"]: + if not bin.has_trait(trt): + msg = "binning operator should have a '{}' trait".format(trt) + raise traitlets.TraitError(msg) + return bin + + @traitlets.validate("template_matrix") + def _check_matrix(self, proposal): + mat = proposal["value"] + if mat is not None: + if not isinstance(mat, Operator): + raise traitlets.TraitError( + "template_matrix should be an Operator instance" + ) + # Check that this operator has the traits we expect + for trt in ["templates", "amplitudes", "det_data", "transpose"]: + if not mat.has_trait(trt): + msg = "template_matrix operator should have a '{}' trait".format( + trt + ) + raise traitlets.TraitError(msg) + return mat + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + # Check that input traits are set + if self.binning is None: + raise RuntimeError("You must set the binning trait before calling exec()") + if self.template_matrix is None: + raise RuntimeError( + "You must set the template_matrix trait before calling exec()" + ) + + # Build a pipeline to project amplitudes into timestreams and make a binned + # map. + + self.template_matrix.transpose = False + self.template_matrix.det_data = self.det_temp + self.binning.det_data = self.det_temp + + bin_pipe = None + if self.binning.save_pointing: + # Process all detectors at once + bin_pipe = Pipeline(detector_sets=["ALL"]) + else: + # Process one detector at a time and clear pointing after each one. + bin_pipe = Pipeline(detector_sets=["SINGLE"]) + + bin_pipe.operators = [self.template_matrix, self.binning] + + bin_pipe.apply(data, detectors=detectors) + + # Build a pipeline for the projection and template matrix application. + # First create the operators that we will use. + + # Use the same pointing operator as the binning + pointing = self.binning.pointing + + # Set up operator for optional clearing of the pointing matrices + clear_pointing = Clear(detdata=[pointing.pixels, pointing.weights]) + + # Set up map-scanning operator to project the binned map. + scan_map = ScanMap( + pixels=pointing.pixels, + weights=pointing.weights, + map_key=self.binning.binned, + det_data=self.det_temp, + subtract=True, + ) + + # Set up noise weighting operator + noise_weight = NoiseWeight( + noise_model=self.binning.noise_model, det_data=self.det_temp + ) + + # Same operator, but now we are applying the transpose. + self.template_matrix.transpose = True + + # Create a pipeline that projects the binned map and applies noise + # weights and templates. 
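+        # (In terms of the docstring above, this second pipeline applies
+        # M^T N^-1 Z to the timestreams just produced from the current
+        # amplitudes, accumulating the result back into the amplitude vector.)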
+
+        proj_pipe = None
+        if self.binning.save_pointing:
+            # Process all detectors at once
+            proj_pipe = Pipeline(detector_sets=["ALL"])
+            proj_pipe.operators = [
+                pointing,
+                scan_map,
+                noise_weight,
+                self.template_matrix,
+            ]
+        else:
+            # Process one detector at a time and clear pointing after each one.
+            proj_pipe = Pipeline(detector_sets=["SINGLE"])
+            proj_pipe.operators = [
+                pointing,
+                scan_map,
+                clear_pointing,
+                noise_weight,
+                self.template_matrix,
+            ]
+
+        # Zero out the amplitudes before accumulating the updated values.  The
+        # amplitudes are stored in the Data object under the key used by the
+        # template matrix operator.
+
+        data[self.template_matrix.amplitudes].reset()
+
+        # Run the projection pipeline.
 
         proj_pipe.apply(data, detectors=detectors)
 
@@ -190,14 +437,174 @@ def _requires(self):
         # This operator require everything that its sub-operators needs.
         req = self.binning.requires()
         req.update(self.template_matrix.requires())
-        req["detdata"].append(self.det_data)
+        req["meta"].append(self.template_matrix.amplitudes)
         return req
 
     def _provides(self):
-        prov = {"meta": list(), "shared": list(), "detdata": list()}
-        if self.save_pointing:
-            prov["detdata"].extend([self.pixels, self.weights])
+        prov = self.binning.provides()
         return prov
 
     def _accelerators(self):
         return list()
+
+
+def solve(
+    data,
+    detectors,
+    lhs,
+    rhs_amps,
+    guess=None,
+    convergence=1.0e-12,
+    n_iter_max=100,
+    n_iter_min=3,
+):
+    """Solve for template amplitudes.
+
+    This uses a standard preconditioned conjugate gradient technique (e.g. Shewchuk,
+    1994) to solve for the template amplitudes.  The Right Hand Side amplitude values
+    are precomputed and passed to this function.  The starting guess of the solver
+    can be passed in or else zeros are used.
+
+    Args:
+        data (Data):  The distributed data object.
+        detectors (list):  The subset of detectors used for the mapmaking.
+        lhs (Operator):  The LHS operator.
+        rhs_amps (Amplitudes):  The RHS value.
+        guess (Amplitudes):  The starting guess.  If None, use all zeros.
+        convergence (float):  The convergence limit.
+        n_iter_max (int):  The maximum number of iterations.
+        n_iter_min (int):  The minimum number of iterations before the solver
+            may be declared stalled.
+
+    Returns:
+        None
+
+    """
+    log = Logger.get()
+    timer_full = Timer()
+    timer_full.start()
+    timer = Timer()
+    timer.start()
+
+    # The global communicator we are using (or None)
+    comm = data.comm.comm_world
+
+    # Solving A * x = b ...
+
+    # The name of the amplitudes which are updated in place by the LHS operator
+    lhs_amps = lhs.template_matrix.amplitudes
+
+    # The starting guess
+    if guess is None:
+        # Copy structure of the RHS and set to zero
+        data[lhs_amps] = rhs_amps.duplicate()
+        data[lhs_amps].reset()
+    else:
+        # FIXME: add a check that the structure of the guess matches the RHS.
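+        # A sketch of one possible form of that check (using the Amplitudes
+        # properties defined in this patch):
+        #
+        #   if (guess.n_global != rhs_amps.n_global
+        #           or guess.n_local != rhs_amps.n_local):
+        #       raise RuntimeError("starting guess does not match RHS structure")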
+ data[lhs_amps] = guess + + # Compute q = A * x (in place) + lhs.apply(data, detectors=detectors) + + # The initial residual + # r = b - q + residual = rhs_amps.duplicate() + residual -= data[lhs_amps] + + # The preconditioned residual + # s = M^-1 * r + precond_residual = residual.duplicate() + precond_residual.reset() + lhs.template_matrix.apply_precond(residual, precond_residual) + + # The proposal + # d = s + proposal = precond_residual.duplicate() + + # delta_new = r^T * d + sqsum = precond_residual.dot(residual) + + init_sqsum = sqsum + best_sqsum = sqsum + last_best = sqsum + + sqsum_last = None + + if comm is not None: + comm.barrier() + timer.stop() + if data.comm.world_rank == 0: + msg = "MapMaker initial residual = {}, {:0.2f} s".format(sqsum, timer.seconds()) + log.info(msg) + timer.clear() + timer.start() + + for iter in range(n_iter_max): + if not np.isfinite(sqsum): + raise RuntimeError("Residual is not finite") + + # Update LHS amplitude inputs + data[lhs_amps].local[:] = proposal.local + + # q = A * d (in place) + lhs.apply(data, detectors=detectors) + + # alpha = delta_new / (d^T * q) + alpha = sqsum + alpha /= proposal.dot(data[lhs_amps]) + + # r -= alpha * q + data[lhs_amps] *= alpha + residual -= data[lhs_amps] + + # The preconditioned residual + # s = M^-1 * r + lhs.template_matrix.apply_precond(residual, precond_residual) + + # delta_old = delta_new + sqsum_last = sqsum + + # delta_new = r^T * s + sqsum = precond_residual.dot(residual) + + if comm is not None: + comm.barrier() + timer.stop() + if data.comm.world_rank == 0: + msg = "MapMaker iteration {:4d}, relative residual = {}, {:0.2f} s".format( + iter, sqsum, timer.seconds() + ) + log.info(msg) + timer.clear() + timer.start() + + # beta = delta_new / delta_old + beta = sqsum / sqsum_last + + # New proposal + # d = s + beta * d + proposal *= beta + proposal += precond_residual + + # Check for convergence + if sqsum < init_sqsum * convergence or sqsum < 1e-30: + timer.stop() + timer_full.stop() + if data.comm.world_rank == 0: + msg = "MapMaker PCG converged after {:4d} iterations and {:0.2f} seconds".format( + iter, timer_full.seconds() + ) + log.info(msg) + break + + best_sqsum = min(sqsum, best_sqsum) + + if iter % 10 == 0 and iter >= n_iter_min: + if last_best < best_sqsum * 2: + timer.stop() + timer_full.stop() + if data.comm.world_rank == 0: + msg = "MapMaker PCG stalled after {:4d} iterations and {:0.2f} seconds".format( + iter, timer_full.seconds() + ) + log.info(msg) + break + last_best = best_sqsum diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index cd7244bc7..2917973f2 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -65,6 +65,20 @@ def __init__(self, **kwargs): super().__init__(**kwargs) self._initialized = False + def apply_precond(self, amps_in, amps_out): + """Apply the preconditioner from all templates to the amplitudes. + + This can only be called after the operator has been used at least once so that + the templates are initialized. 
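+
+        Args:
+            amps_in (Amplitudes):  The input amplitude values.
+            amps_out (Amplitudes):  The preconditioned output values, updated
+                in place.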
+ + """ + if not self._initialized: + raise RuntimeError( + "You must call exec() once before applying preconditioners" + ) + for tmpl in self.templates: + tmpl.apply_precond(amps_in, amps_out) + @function_timer def _exec(self, data, detectors=None, **kwargs): log = Logger.get() diff --git a/src/toast/ops/operator.py b/src/toast/ops/operator.py index 7f8b97893..a4b02725b 100644 --- a/src/toast/ops/operator.py +++ b/src/toast/ops/operator.py @@ -121,7 +121,9 @@ def provides(self): return self._provides() def _accelerators(self): - raise NotImplementedError("Fell through to Operator base class") + # Do not force descendent classes to implement this. If it is not + # implemented, then it is clear that the class does not support any + # accelerators return list() def accelerators(self): diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index f4beea49e..a3f534e4e 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -177,3 +177,106 @@ def _provides(self): def _accelerators(self): return list() + + +@trait_docs +class ScanMask(Operator): + """Operator which uses the pointing matrix to set timestream flags from a mask. + + The mask must be a PixelData instance with an integer data type. The data for each + pixel is bitwise-and combined with the mask_bits to form a result. for each + detector sample crossing a pixel with a non-zero result, the detector flag is + bitwise-or'd with the specified value. + + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_flags = Unicode( + None, allow_none=True, help="Observation detdata key for flags to set" + ) + + det_flags_value = Int( + 1, help="The detector flag value to set where the mask result is non-zero" + ) + + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") + + mask_key = Unicode( + None, + allow_none=True, + help="The Data key where the mask is located", + ) + + mask_bits = Int( + 255, help="The number to bitwise-and with each mask value to form the result" + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + # Check that the detector data is set + if self.det_flags is None: + raise RuntimeError("You must set the det_flags trait before calling exec()") + + # Check that the mask is set + if self.mask_key is None: + raise RuntimeError("You must set the mask_key trait before calling exec()") + if self.mask_key not in data: + msg = "The mask_key '{}' does not exist in the data".format(self.mask_key) + raise RuntimeError(msg) + + mask_data = data[self.mask_key] + if not isinstance(mask_data, PixelData): + raise RuntimeError("The mask to scan must be a PixelData instance") + mask_dist = mask_data.distribution + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + + # If our output detector data does not yet exist, create it with a default + # width of one byte per sample. + if self.det_flags not in ob: + ob.detdata.create(self.det_flags, dtype=np.uint8, detectors=dets) + + for det in dets: + # The pixels and flags. + pix = ob.detdata[self.pixels][det] + dflags = ob.detdata[self.det_flags][det] + + # Get local submap and pixels + local_sm, local_pix = mask_dist.global_pixel_to_submap(pix) + + # We could move this to compiled code if it is too slow... 
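+                # For example, with mask_bits == 0b01 a pixel whose mask value
+                # is 0b11 yields a non-zero result, so that sample gets
+                # det_flags_value OR'd into its flags.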
+                masked = mask_data.data[local_sm, local_pix, 0] & self.mask_bits
+                dflags[masked != 0] |= self.det_flags_value
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        return
+
+    def _requires(self):
+        req = {
+            "meta": [self.mask_key],
+            "shared": list(),
+            "detdata": [self.pixels, self.det_flags],
+        }
+        return req
+
+    def _provides(self):
+        prov = {"meta": list(), "shared": list(), "detdata": list()}
+        return prov
+
+    def _accelerators(self):
+        return list()
diff --git a/src/toast/templates/fourier2d.py b/src/toast/templates/fourier2d.py
index d7bbe08d5..b52f6c307 100644
--- a/src/toast/templates/fourier2d.py
+++ b/src/toast/templates/fourier2d.py
@@ -26,18 +26,14 @@ class Fourier2D(Template):
     #    det_data : The detector data key with the timestreams
     #    det_flags : Optional detector flags
     #    det_flag_mask : Bit mask for detector flags
-    #    shared_flags : Optional detector flags
-    #    shared_flag_mask : Bit mask for detector flags
+    #    shared_flags : Optional shared flags
+    #    shared_flag_mask : Bit mask for shared flags
     #
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-    @traitlets.observe("data")
-    def _initialize(self, change):
-        # Derived classes should implement this method to do any set up (like
-        # computing the number of amplitudes) whenever the data changes.
-        newdata = change["data"]
+    def _initialize(self, new_data):
         return
 
     def _zeros(self):
diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py
index 9aec7e023..51cb0aa58 100644
--- a/src/toast/templates/offset.py
+++ b/src/toast/templates/offset.py
@@ -2,19 +2,34 @@
 # All rights reserved.  Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
 
+from collections import OrderedDict
+
+import numpy as np
 
-from ..utils import Logger
+import scipy
+
+from ..utils import Logger, rate_from_times
+
+from ..mpi import MPI
 
 from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float
 
 from ..data import Data
 
-from .template import Template
+from .template import Template, Amplitudes
+
+from .._libtoast import template_offset_add_to_signal, template_offset_project_signal
 
 
 @trait_docs
 class Offset(Template):
-    """This class represents noise fluctuations as a step function."""
+    """This class represents noise fluctuations as a step function.
+
+    Every process stores the offsets for its local data.  Although our data is arranged
+    in observations and then in terms of detectors, we will often be projecting our
+    template for a single detector at a time.  Because of this, we will arrange our
+    template amplitudes in "detector major" order and store offsets into this for each
+    observation.
+
+    """
 
     # Notes:  The TraitConfig base class defines a "name" attribute.  The Template
    # class (derived from TraitConfig) defines the following traits already:
@@ -23,11 +38,13 @@ class Offset(Template):
     #    det_data : The detector data key with the timestreams
     #    det_flags : Optional detector flags
     #    det_flag_mask : Bit mask for detector flags
-    #    shared_flags : Optional detector flags
-    #    shared_flag_mask : Bit mask for detector flags
+    #    shared_flags : Optional shared flags
+    #    shared_flag_mask : Bit mask for shared flags
     #
 
-    step_length = Int(1000000, help="Number of samples per baseline step")
+    step_time = Float(10000.0, help="Seconds per baseline step")
+
+    times = Unicode("times", help="Observation shared key for timestamps")
+
+    precond_width = Int(20, help="Preconditioner width in terms of offsets / baselines")
 
     noise_model = Unicode(
         None,
@@ -40,26 +57,300 @@ class Offset(Template):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-    @traitlets.observe("data")
-    def _initialize(self, change):
-        # Derived classes should implement this method to do any set up (like
-        # computing the number of amplitudes) whenever the data changes.
-        newdata = change["data"]
-        self
+    def _initialize(self, new_data):
+        log = Logger.get()
+
+        # Compute the step boundaries for every observation and the number of
+        # amplitude values on this process.  Every process only stores amplitudes
+        # for its locally assigned data.
+
+        # Use this as an "Ordered Set".  We want the unique detectors on this process,
+        # but sorted in order of occurrence.
+        all_dets = OrderedDict()
+
+        # Amplitude lengths of all views for each obs
+        self._obs_views = dict()
+
+        # Sample rate for each obs.
+        self._obs_rate = dict()
+
+        # Offset covariance and preconditioner for each obs and detector.
+        self._freq = dict()
+        self._filters = dict()
+        self._precond = dict()
+
+        for iob, ob in enumerate(new_data.obs):
+            # Compute sample rate from timestamps.  rate_from_times() returns the
+            # rate as its first element.
+            self._obs_rate[iob] = rate_from_times(ob.shared[self.times])[0]
+
+            # The step length (in samples) for this observation
+            step_length = int(self.step_time * self._obs_rate[iob])
+
+            # Track number of offset amplitudes per view.
+            self._obs_views[iob] = list()
+            for view_slice in ob.view[self.view]:
+                slice_len = view_slice.stop - view_slice.start
+                view_n_amp = slice_len // step_length
+                self._obs_views[iob].append(view_n_amp)
+
+            # The noise model.
+            if self.noise_model is not None:
+                if self.noise_model not in ob:
+                    msg = "Observation {}: noise model {} does not exist".format(
+                        ob.name, self.noise_model
+                    )
+                    log.error(msg)
+                    raise RuntimeError(msg)
+                self._filters[iob] = dict()
+                self._precond[iob] = dict()
+
+                # Determine the binning for the noise prior.  The baseline length
+                # in seconds sets the base frequency.
+                obstime = ob.shared[self.times][-1] - ob.shared[self.times][0]
+                tbase = self.step_time
+                fbase = 1.0 / tbase
+                powmin = np.floor(np.log10(1 / obstime)) - 1
+                powmax = min(np.ceil(np.log10(1 / tbase)) + 2, self._obs_rate[iob])
+                self._freq[iob] = np.logspace(powmin, powmax, 1000)
+
+            # Build up detector list
+            for d in ob.local_detectors:
+                if d not in all_dets:
+                    all_dets[d] = None
+
+        self._all_dets = list(all_dets.keys())
+
+        # Go through the data one local detector at a time and compute the offsets into
+        # the amplitudes.  Also compute the amplitude noise filter and preconditioner
+        # for each detector and each interval / view.
+
+        self._det_start = dict()
+
+        offset = 0
+        for det in self._all_dets:
+            self._det_start[det] = offset
+            for iob, ob in enumerate(new_data.obs):
+                if det not in ob.local_detectors:
+                    continue
+                if self.noise_model is not None:
+                    offset_psd = self._get_offset_psd(
+                        ob[self.noise_model], self._freq[iob], det
+                    )
+                    (
+                        self._filters[iob][det],
+                        self._precond[iob][det],
+                    ) = self._get_filter_and_precond(
+                        self._freq[iob], offset_psd, ob.view[self.view]
+                    )
+                offset += np.sum(self._obs_views[iob])
+
+        self._n_local = offset
+        if new_data.comm.comm_world is None:
+            self._n_global = self._n_local
+        else:
+            self._n_global = new_data.comm.comm_world.allreduce(
+                self._n_local, op=MPI.SUM
+            )
+        return
+
+    @function_timer
+    def _get_offset_psd(self, noise, freq, det):
+        psdfreq = noise.freq(det)
+        psd = noise.psd(det)
+        rate = noise.rate(det)
+        # Remove the white noise component from the PSD
+        psd = psd.copy() * np.sqrt(rate)
+        psd -= np.amin(psd[psdfreq > 1.0])
+        psd[psd < 1e-30] = 1e-30
+
+        # The calculation of `offset_psd` is from Keihänen, E. et al:
+        # "Making CMB temperature and polarization maps with Madam",
+        # A&A 510:A57, 2010
+        logfreq = np.log(psdfreq)
+        logpsd = np.log(psd)
+
+        def interpolate_psd(x):
+            result = np.zeros(x.size)
+            good = np.abs(x) > 1e-10
+            logx = np.log(np.abs(x[good]))
+            logresult = np.interp(logx, logfreq, logpsd)
+            result[good] = np.exp(logresult)
+            return result
+
+        def g(x):
+            bad = np.abs(x) < 1e-10
+            good = np.logical_not(bad)
+            arg = np.pi * x[good]
+            result = bad.astype(np.float64)
+            result[good] = (np.sin(arg) / arg) ** 2
+            return result
+
+        tbase = self.step_time
+        fbase = 1 / tbase
+        offset_psd = interpolate_psd(freq) * g(freq * tbase)
+        for m in range(1, 2):
+            offset_psd += interpolate_psd(freq + m * fbase) * g(freq * tbase + m)
+            offset_psd += interpolate_psd(freq - m * fbase) * g(freq * tbase - m)
+        offset_psd *= fbase
+        return offset_psd
+
+    @function_timer
+    def _get_filter_and_precond(self, freq, offset_psd, view_slices):
+        logfreq = np.log(freq)
+        logpsd = np.log(offset_psd)
+        logfilter = np.log(1 / offset_psd)
+
+        def interpolate(x, psd):
+            result = np.zeros(x.size)
+            good = np.abs(x) > 1e-10
+            logx = np.log(np.abs(x[good]))
+            logresult = np.interp(logx, logfreq, psd)
+            result[good] = np.exp(logresult)
+            return result
+
+        def truncate(noisefilter, lim=1e-4):
+            icenter = noisefilter.size // 2
+            ind = np.abs(noisefilter[:icenter]) > np.abs(noisefilter[0]) * lim
+            icut = np.argwhere(ind)[-1][0]
+            if icut % 2 == 0:
+                icut += 1
+            noisefilter = np.roll(noisefilter, icenter)
+            noisefilter = noisefilter[icenter - icut : icenter + icut + 1]
+            return noisefilter
+
+        vw_filters = list()
+        vw_precond = list()
+        for offset_slice, sigmasqs in view_slices:
+            nstep = offset_slice.stop - offset_slice.start
+            filterlen = nstep * 2 + 1
+            filterfreq = np.fft.rfftfreq(filterlen, self.step_time)
+            noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter)))
+            vw_filters.append(noisefilter)
+            # Build the band-diagonal preconditioner.  The "lower" flag is only
+            # meaningful for the banded Cholesky form.
+            lower = True
+            if self.precond_width <= 1:
+                # Compute C_a prior
+                preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd)))
+            else:
+                # Compute Cholesky decomposition prior
+                wband = min(self.precond_width, noisefilter.size // 2)
+                precond_width = max(wband, min(self.precond_width, nstep))
+                icenter = noisefilter.size // 2
+                preconditioner = np.zeros([precond_width, nstep], dtype=np.float64)
+                preconditioner[0] = sigmasqs
+                preconditioner[:wband, :] += np.repeat(
+                    noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1
+                )
+                scipy.linalg.cholesky_banded(
+                    preconditioner, overwrite_ab=True, lower=lower, check_finite=True
+                )
+            vw_precond.append((preconditioner, lower))
+        return vw_filters, vw_precond
+
     def _zeros(self):
-        raise NotImplementedError("Derived class must implement _zeros()")
+        return Amplitudes(self.data.comm.comm_world, self._n_global, self._n_local)
 
+    @function_timer
     def _add_to_signal(self, detector, amplitudes):
-        raise NotImplementedError("Derived class must implement _add_to_signal()")
+        offset = self._det_start[detector]
+        for iob, ob in enumerate(self.data.obs):
+            if detector not in ob.local_detectors:
+                continue
+            # The step length for this observation
+            step_length = int(self.step_time * self._obs_rate[iob])
+            for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
+                n_amp_view = self._obs_views[iob][ivw]
+                template_offset_add_to_signal(
+                    step_length, amplitudes.local[offset : offset + n_amp_view], vw
+                )
+                offset += n_amp_view
 
+    @function_timer
     def _project_signal(self, detector, amplitudes):
-        raise NotImplementedError("Derived class must implement _project_signal()")
+        offset = self._det_start[detector]
+        for iob, ob in enumerate(self.data.obs):
+            if detector not in ob.local_detectors:
+                continue
+            # The step length for this observation
+            step_length = int(self.step_time * self._obs_rate[iob])
+            for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
+                n_amp_view = self._obs_views[iob][ivw]
+                template_offset_project_signal(
+                    step_length, vw, amplitudes.local[offset : offset + n_amp_view]
+                )
+                offset += n_amp_view
+
+    @function_timer
+    def _project_flags(self, detector, amplitudes):
+        offset = self._det_start[detector]
+        for iob, ob in enumerate(self.data.obs):
+            if detector not in ob.local_detectors:
+                continue
+            # The step length for this observation
+            step_length = int(self.step_time * self._obs_rate[iob])
+            for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
+                n_amp_view = self._obs_views[iob][ivw]
+                # FIXME: extract the actual detector / shared flags for this view.
+                flags = np.zeros(len(vw), dtype=np.uint8)
+                template_offset_project_flags(
+                    step_length,
+                    flags,
+                    amplitudes.local_flags[offset : offset + n_amp_view],
+                )
+                offset += n_amp_view
 
+    @function_timer
     def _add_prior(self, amplitudes_in, amplitudes_out):
-        # Not all Templates implement the prior
-        return
+        if self.noise_model is None:
+            # No noise model is specified, so no prior is used.
+            return
+        for det in self._all_dets:
+            offset = self._det_start[det]
+            for iob, ob in enumerate(self.data.obs):
+                if det not in ob.local_detectors:
+                    continue
+                for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
+                    n_amp_view = self._obs_views[iob][ivw]
+                    amps_in = amplitudes_in.local[offset : offset + n_amp_view]
+                    amps_out = amplitudes_out.local[offset : offset + n_amp_view]
+                    amps_out[:] += scipy.signal.convolve(
+                        amps_in, self._filters[iob][det], mode="same"
+                    )
+                    offset += n_amp_view
 
     @function_timer
     def _apply_precond(self, amplitudes_in, amplitudes_out):
-        raise NotImplementedError("Derived class must implement _apply_precond()")
+        # offset_amplitudes_in = amplitudes_in[self.name]
+        # offset_amplitudes_out = amplitudes_out[self.name]
+        # if self.use_noise_prior:
+        #     # C_a preconditioner
+        #     for iobs, obs in enumerate(self.data.obs):
+        #         tod = obs["tod"]
+        #         for det in tod.local_dets:
+        #             slices = self.offset_slices[iobs][det]
+        #             preconditioners = self.preconditioners[iobs][det]
+        #             for (offsetslice, sigmasqs), preconditioner in zip(
+        #                 slices, preconditioners
+        #             ):
+        #                 amps_in = offset_amplitudes_in[offsetslice]
+        #                 if self.precond_width <= 1:
+        #                     # Use C_a prior
+        #                     # scipy.signal.convolve will use either `convolve` or
+        #                     # `fftconvolve` depending on the size of the inputs
+        #                     amps_out = scipy.signal.convolve(
+        #                         amps_in, preconditioner, mode="same"
+        #                     )
+        #                 else:
+        #                     # Use pre-computed Cholesky decomposition
+        #                     amps_out = scipy.linalg.cho_solve_banded(
+        #                         preconditioner,
+        #                         amps_in,
+        #                         overwrite_b=False,
+        #                         check_finite=True,
+        #                     )
+        #                 offset_amplitudes_out[offsetslice] = amps_out
+        # else:
+        #     # Diagonal preconditioner
+        #     offset_amplitudes_out[:] = offset_amplitudes_in
+        #     for itemplate, iobs, det, todslice, sigmasq in self.offset_templates:
+        #         offset_amplitudes_out[itemplate] *= sigmasq
+        #
+        return
diff --git a/src/toast/templates/subharmonic.py b/src/toast/templates/subharmonic.py
index 76df7954b..d32ca1746 100644
--- a/src/toast/templates/subharmonic.py
+++ b/src/toast/templates/subharmonic.py
@@ -29,18 +29,14 @@ class SubHarmonic(Template):
     #    det_data : The detector data key with the timestreams
     #    det_flags : Optional detector flags
     #    det_flag_mask : Bit mask for detector flags
-    #    shared_flags : Optional detector flags
-    #    shared_flag_mask : Bit mask for detector flags
+    #    shared_flags : Optional shared flags
+    #    shared_flag_mask : Bit mask for shared flags
     #
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
-    @traitlets.observe("data")
-    def _initialize(self, change):
-        # Derived classes should implement this method to do any set up (like
-        # computing the number of amplitudes) whenever the data changes.
-        newdata = change["data"]
+    def _initialize(self, new_data):
         return
 
     def _zeros(self):
diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py
index a3c0732a3..f2ce31fd7 100644
--- a/src/toast/templates/template.py
+++ b/src/toast/templates/template.py
@@ -5,6 +5,7 @@
 
 from ..utils import (
     Logger,
+    AlignedU8,
     AlignedF32,
     AlignedF64,
 )
@@ -71,15 +72,20 @@ def _check_data(self, proposal):
         return dat
 
     @traitlets.observe("data")
-    def _initialize(self, change):
+    def initialize(self, change):
         # Derived classes should implement this method to do any set up (like
         # computing the number of amplitudes) whenever the data changes.
newdata = change["data"] - raise NotImplementedError("Derived class must implement _initialize()") + self._initialize(newdata) def __init__(self, **kwargs): super().__init__(**kwargs) + def _initialize(self, new_data): + # Derived classes should implement this method to do any set up (like + # computing the number of amplitudes) whenever the data changes. + raise NotImplementedError("Derived class must implement _initialize()") + def _zeros(self): raise NotImplementedError("Derived class must implement _zeros()") @@ -148,6 +154,35 @@ def project_signal(self, detector, amplitudes): raise RuntimeError("You must set the data trait before using a template") self._project_signal(detector, amplitudes) + def _project_flags(self, detector, amplitudes): + raise NotImplementedError("Derived class must implement _project_flags()") + + def project_flags(self, detector, amplitudes): + """Project timestream flags into template amplitude flags. + + For some types of templates, excessive timestream flagging can corrupt some of + the template amplitudes (for example using the offset template with short + step lengths). It is up to each template class to determine the impact of the + timestream flags on template amplitudes. + + The flags of the input amplitudes are updated in place. + + Args: + detector (str): The detector name. + amplitudes (Amplitudes): The Amplitude values for this template. + + Returns: + None + + """ + if self.data is None: + raise RuntimeError("You must set the data trait before using a template") + # Short circuit if there are no shared or detector flags specified. + if self.det_flags is None and self.shared_flags is None: + return + else: + self._project_flags(detector, amplitudes) + def _add_prior(self, amplitudes_in, amplitudes_out): # Not all Templates implement the prior return @@ -195,6 +230,22 @@ def apply_precond(self, amplitudes_in, amplitudes_out): raise RuntimeError("You must set the data trait before using a template") self._apply_precond(amplitudes_in, amplitudes_out) + def _accelerators(self): + # Do not force descendent classes to implement this. If it is not + # implemented, then it is clear that the class does not support any + # accelerators + return list() + + def accelerators(self): + """List of accelerators supported by this Template. + + Returns: + (list): List of pre-defined accelerator names supported by this + operator (and by TOAST). + + """ + return self._accelerators() + @classmethod def get_class_config_path(cls): return "/templates/{}".format(cls.__qualname__) @@ -314,6 +365,15 @@ def __init__(self, comm, n_global, n_local, local_indices=None, dtype=np.float64 self._raw = self._storage_class.zeros(self._n_local) self.local = self._raw.array() + # Support flagging of template amplitudes. This can be used to flag some + # amplitudes if too many timestream samples contributing to the amplitude value + # are bad. It can also be used when iteratively masking template amplitudes + # and sky pixels. We will be passing these flags to compiled code, and there + # is no way easy way to do this using numpy bool and C++ bool. So we waste + # a bit of memory and use a whole byte per amplitude. + self._raw_flags = AlignedU8.zeros(self._n_local) + self.local_flags = self._raw_flags.array() + def clear(self): """Delete the underlying memory. 
@@ -327,10 +387,90 @@ def clear(self):
         if hasattr(self, "_raw"):
             self._raw.clear()
         del self._raw
+        if hasattr(self, "local_flags"):
+            del self.local_flags
+        if hasattr(self, "_raw_flags"):
+            self._raw_flags.clear()
+        del self._raw_flags
 
     def __del__(self):
         self.clear()
 
+    def __eq__(self, value):
+        if isinstance(value, Amplitudes):
+            return self.local == value.local
+        else:
+            return self.local == value
+
+    # Arithmetic.  These assume that flagging is consistent between the pairs of
+    # Amplitudes (always true when used in the mapmaking) or that the flagged values
+    # have been zeroed out.  In-place operators return self so that augmented
+    # assignment does not rebind the name to None.
+
+    def __iadd__(self, other):
+        if isinstance(other, Amplitudes):
+            self.local += other.local
+        else:
+            self.local += other
+        return self
+
+    def __isub__(self, other):
+        if isinstance(other, Amplitudes):
+            self.local -= other.local
+        else:
+            self.local -= other
+        return self
+
+    def __imul__(self, other):
+        if isinstance(other, Amplitudes):
+            self.local *= other.local
+        else:
+            self.local *= other
+        return self
+
+    def __itruediv__(self, other):
+        if isinstance(other, Amplitudes):
+            self.local /= other.local
+        else:
+            self.local /= other
+        return self
+
+    def __add__(self, other):
+        result = self.duplicate()
+        result += other
+        return result
+
+    def __sub__(self, other):
+        result = self.duplicate()
+        result -= other
+        return result
+
+    def __mul__(self, other):
+        result = self.duplicate()
+        result *= other
+        return result
+
+    def __truediv__(self, other):
+        result = self.duplicate()
+        result /= other
+        return result
+
+    def reset(self):
+        """Set all amplitude values to zero."""
+        self.local[:] = 0
+
+    def reset_flags(self):
+        """Set all flag values to zero."""
+        self.local_flags[:] = 0
+
+    def duplicate(self):
+        """Return a copy of the data."""
+        ret = Amplitudes(
+            self._comm,
+            self._n_global,
+            self._n_local,
+            local_indices=self._local_indices,
+            dtype=self._dtype,
+        )
+        ret.local[:] = self.local
+        ret.local_flags[:] = self.local_flags
+        return ret
+
     @property
     def comm(self):
-        return _comm
+        return self._comm
@@ -345,11 +485,17 @@ def n_local(self):
         """The number of locally stored amplitudes."""
         return self._n_local
 
+    @property
+    def n_local_flagged(self):
+        """The number of locally amplitudes that are flagged."""
+        return np.count_nonzero(self.local_flags)
+
     def _get_global_values(comm_offset, send_buffer):
         n_buf = len(send_buffer)
         if self._full:
             # Shortcut if we have all global amplitudes locally
             send_buffer[:] = self.local[comm_offset : comm_offset + n_buf]
+            send_buffer[self.local_flags[comm_offset : comm_offset + n_buf] != 0] = 0
         else:
             # Need to compute our overlap with the global range.
             send_buffer[:] = 0
@@ -370,9 +516,9 @@ def _get_global_values(comm_offset, send_buffer):
                     n_copy = self._global_last + 1 - local_off
                 else:
                     n_copy = n_buf - buf_off
-                send_buffer[buf_off : buf_off + n_copy] = self.local[
-                    local_off : local_off + n_copy
-                ]
+                send_view = send_buffer[buf_off : buf_off + n_copy]
+                send_view[:] = self.local[local_off : local_off + n_copy]
+                send_view[self.local_flags[local_off : local_off + n_copy] != 0] = 0
             else:
                 # Need to efficiently do the lookup.  Pull existing techniques from
                 # old code when we need this.
@@ -413,10 +559,6 @@ def _set_global_values(comm_offset, recv_buffer):
     def sync(self, comm_bytes=10000000):
         """Perform an Allreduce across all processes.
 
-        If a derived class has only locally unique amplitudes on each process (for
-        example, destriping baseline offsets), then they should override this method
-        and make it a no-op.
-
         Args:
             comm_bytes (int):  The maximum number of bytes to communicate in each
                 call to Allreduce.
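As a usage sketch (an illustration, not part of this patch), the in-place and
copying arithmetic defined above is what the PCG loop in solve() relies on;
this assumes the Amplitudes constructor shown earlier in this file:

    amps = Amplitudes(None, 4, 4)   # serial case: no communicator
    amps.local[:] = 1.0
    prop = amps.duplicate()         # same distribution, copied values and flags
    prop *= 2.0                     # in-place ops return self
    amps -= prop                    # amps.local is now all -1.0
    sqsum = amps.dot(amps)          # global dot product across processes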
@@ -465,7 +607,8 @@ def dot(self, other): The other instance must have the same data distribution. The two objects are assumed to have already been synchronized, so that any amplitudes that exist - on multiple processes have the same values. + on multiple processes have the same values. This further assumes that any + flagged amplitudes have been set to zero. Args: other (Amplitudes): The other instance. diff --git a/src/toast/tests/ops_scan_map.py b/src/toast/tests/ops_scan_map.py index 1952687a9..6524f972d 100644 --- a/src/toast/tests/ops_scan_map.py +++ b/src/toast/tests/ops_scan_map.py @@ -74,3 +74,52 @@ def test_scan(self): del data return + + def test_mask(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + self.create_fake_sky(data, "pixel_dist", "fake_map") + + # Generate a mask + data["fake_mask"] = PixelData(data["pixel_dist"], np.uint8, n_value=1) + small_vals = data["fake_map"].data[:, :, 0] < 10.0 + # print("{} map vals masked".format(np.sum(small_vals))) + data["fake_mask"].data[small_vals] = 1 + + # Scan mask into flags + scanner = ops.ScanMask( + det_flags="mask_flags", + det_flags_value=1, + pixels=pointing.pixels, + mask_key="fake_mask", + mask_bits=1, + ) + scanner.apply(data) + + # Manual check of the values + + mask_data = data["fake_mask"] + for ob in data.obs: + for det in ob.local_detectors: + local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap( + ob.detdata[pointing.pixels][det] + ) + for i in range(ob.n_local_samples): + if local_pix[i] < 0: + continue + mask_val = mask_data.data[local_sm[i], local_pix[i], 0] + if mask_val > 0: + self.assertTrue(ob.detdata["mask_flags"][det, i] == 1) + else: + self.assertTrue(ob.detdata["mask_flags"][det, i] == 0) + + del data + return From ef92d9193cfbf20b5328994fba12ce8ce13c3a4e Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Sat, 2 Jan 2021 12:13:49 -0800 Subject: [PATCH 048/690] Make a separate set of mapmaker flags for use during the amplitude solving. --- src/toast/ops/mapmaker.py | 276 +++++++++++++++++++++++++++++- src/toast/ops/mapmaker_binning.py | 8 +- src/toast/ops/mapmaker_solve.py | 47 ++++- src/toast/ops/mapmaker_utils.py | 40 ++++- src/toast/templates/offset.py | 6 +- src/toast/templates/template.py | 17 +- 6 files changed, 360 insertions(+), 34 deletions(-) diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index f2a45230d..bf3e3b63b 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -63,7 +63,7 @@ class MapMaker(Operator): The template-subtracted detector timestreams are saved either in the input `det_data` key of each observation, or (if overwrite == False) in an obs.detdata - key that matches the name of this class instance. + key based on the name of this class instance. 
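+    (For example, an instance named "mapmaker" writes its template-subtracted
+    timestreams to the "mapmaker_signal" detdata key.)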
""" @@ -79,10 +79,26 @@ class MapMaker(Operator): iter_max = Int(100, help="Maximum number of iterations") + solve_rcond_threshold = Float( + 1.0e-8, + help="When solving, minimum value for inverse pixel condition number cut.", + ) + + map_rcond_threshold = Float( + 1.0e-8, + help="For final map, minimum value for inverse pixel condition number cut.", + ) + overwrite = Bool( False, help="Overwrite the input detector data for use as scratch space" ) + mask = Unicode( + None, + allow_none=True, + help="Data key for pixel mask to use in solving. First bit of pixel values is tested", + ) + binning = Instance( klass=Operator, allow_none=True, @@ -98,17 +114,50 @@ class MapMaker(Operator): map_binning = Instance( klass=Operator, allow_none=True, - help="Binning operator for final map making. Default uses same operator as solver.", + help="Binning operator for final map making. Default is same as solver", ) - @traitlets.validate("map_binning") + @traitlets.validate("binning") def _check_binning(self, proposal): + bin = proposal["value"] + if bin is not None: + if not isinstance(bin, Operator): + raise traitlets.TraitError("binning should be an Operator instance") + # Check that this operator has the traits we require + for trt in [ + "det_data", + "pixel_dist", + "pointing", + "binned", + "covariance", + "det_flags", + "det_flag_mask", + "shared_flags", + "shared_flag_mask", + "noise_model", + "save_pointing", + "sync_type", + ]: + if not bin.has_trait(trt): + msg = "binning operator should have a '{}' trait".format(trt) + raise traitlets.TraitError(msg) + return bin + + @traitlets.validate("map_binning") + def _check_map_binning(self, proposal): bin = proposal["value"] if bin is not None: if not isinstance(bin, Operator): raise traitlets.TraitError("map_binning should be an Operator instance") # Check that this operator has the traits we expect - for trt in ["det_data", "binned"]: + for trt in [ + "det_data", + "pointing", + "covariance", + "noise_model", + "save_pointing", + "sync_type", + ]: if not bin.has_trait(trt): msg = "map_binning operator should have a '{}' trait".format(trt) raise traitlets.TraitError(msg) @@ -117,9 +166,33 @@ def _check_binning(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) + def _log_info(comm, rank, msg, timer=None): + """Helper function to log an INFO level message from rank zero""" + log = Logger.get() + if comm is not None: + comm.barrier() + if timer is not None: + timer.stop() + if rank == 0: + if timer is None: + msg = "MapMaker {}".format(msg) + else: + msg = "MapMaker {} {:0.2f} s".format(msg, timer.seconds()) + log.info(msg) + if timer is not None: + timer.clear() + timer.start() + @function_timer def _exec(self, data, detectors=None, **kwargs): log = Logger.get() + timer = Timer() + + # The global communicator we are using (or None) + comm = data.comm.comm_world + rank = 0 + if comm is not None: + rank = comm.rank # Check that the detector data is set if self.det_data is None: @@ -127,21 +200,25 @@ def _exec(self, data, detectors=None, **kwargs): # Check map binning if self.map_binning is None: - # Use the same binning as the projection operator used in the solver. - self.map_binning = self.projection.binning + # Use the same binning used in the solver. + self.map_binning = self.binning # For computing the RHS and also for each iteration of the LHS we will need # a full detector-data sized buffer for use as scratch space. 
        # destroy the input data to save memory (useful if this is the last operator
        # processing the data) or we can create a temporary set of timestreams.
 
+        timer.start()
+
         copy_det = None
         clear_temp = None
         detdata_name = self.det_data
         if not self.overwrite:
+            self._log_info(comm, rank, "overwrite is False, making data copy")
+
             # Use a temporary detdata named after this operator
-            detdata_name = self.name
+            detdata_name = "{}_signal".format(self.name)
 
             # Copy the original data into place, and then use this copy destructively.
             copy_det = Copy(
                 detdata=[
@@ -149,9 +226,139 @@ def _exec(self, data, detectors=None, **kwargs):
                 ]
             )
             copy_det.apply(data, detectors=detectors)
+            self._log_info(comm, rank, "data copy finished in", timer=timer)
+
+        # Flagging.  We create a new set of data flags for the solver that includes:
+        #   - one bit for a bitwise OR of all detector / shared flags
+        #   - one bit for any pixel mask, projected to TOD
+        #   - one bit for any poorly conditioned pixels, projected to TOD
+
+        # We use the input binning operator to define the flags that the user has
+        # specified.  We will save the name / bit mask for these and restore them later.
+        # Then we will use the binning operator with our mapmaking flags.  These input
+        # flags are combined to the first bit (== 1) of the solver flags.
+
+        self._log_info(comm, rank, "begin building flags for solver")
+
+        flagname = "{}_flags".format(self.name)
+
+        save_det_flags = self.binning.det_flags
+        save_det_flag_mask = self.binning.det_flag_mask
+        save_shared_flags = self.binning.shared_flags
+        save_shared_flag_mask = self.binning.shared_flag_mask
+
+        for ob in data.obs:
+            # Get the detectors we are using for this observation
+            dets = ob.select_local_detectors(detectors)
+            if len(dets) == 0:
+                # Nothing to do for this observation
+                continue
+
+            #
+            #
+            #  USE A VIEW
+            #
+            #
+
+            starting_flags = np.zeros(ob.n_local_samples, dtype=np.uint8)
+            if save_shared_flags is not None:
+                starting_flags[:] = np.where(
+                    ob.shared[save_shared_flags] & save_shared_flag_mask > 0, 1, 0
+                )
+            ob.detdata.create(flagname, dtype=np.uint8, detectors=dets)
+            for d in dets:
+                ob.detdata[flagname][d, :] = starting_flags
+                if save_det_flags is not None:
+                    ob.detdata[flagname][d, :] |= np.where(
+                        ob.detdata[save_det_flags][d] & save_det_flag_mask > 0, 1, 0
+                    )
+
+        # Now scan any input mask to this same flag field.  We use the second bit (== 2)
+        # for these mask flags.  For the input mask bit we check the first bit of the
+        # pixel values.  This is noted in the help string for the mask trait.
+
+        # Use the same pointing operator as the binning
+        scan_pointing = self.binning.pointing
+
+        # Set up operator for optional clearing of the pointing matrices
+        clear_pointing = Clear(detdata=[scan_pointing.pixels, scan_pointing.weights])
+
+        scanner = ScanMask(
+            det_flags=flagname,
+            det_flags_value=2,
+            pixels=scan_pointing.pixels,
+            mask_key=self.mask,
+            mask_bits=1,
+        )
+
+        scan_pipe = None
+        if self.binning.save_pointing:
+            # Process all detectors at once
+            scan_pipe = Pipeline(
+                detector_sets=["ALL"], operators=[scan_pointing, scanner]
+            )
+        else:
+            # Process one detector at a time and clear pointing after each one.
+            scan_pipe = Pipeline(
+                detector_sets=["SINGLE"],
+                operators=[scan_pointing, scanner, clear_pointing],
+            )
+
+        scan_pipe.apply(data, detectors=detectors)
+
+        self._log_info(comm, rank, "finished flag building in", timer=timer)
+
+        # Now construct the noise covariance, hits, and condition number mask
+
+        self._log_info(comm, rank, "begin build of solver covariance")
+
+        solver_hits_name = "{}_solve_hits".format(self.name)
+        solver_rcond_name = "{}_solve_rcond".format(self.name)
+        solver_rcond_mask_name = "{}_solve_rcond_mask".format(self.name)
+
+        solver_cov = CovarianceAndHits(
+            pixel_dist=self.binning.pixel_dist,
+            covariance=self.binning.covariance,
+            hits=solver_hits_name,
+            rcond=solver_rcond_name,
+            view=self.binning.pointing.view,
+            det_flags=flagname,
+            det_flag_mask=255,
+            pointing=self.binning.pointing,
+            noise_model=self.binning.noise_model,
+            rcond_threshold=self.solve_rcond_threshold,
+            sync_type=self.binning.sync_type,
+            save_pointing=self.binning.save_pointing,
+        )
+
+        solver_cov.apply(data, detectors=detectors)
+
+        data[solver_rcond_mask_name] = PixelData(
+            self.binning.pixel_dist, dtype=np.uint8, n_value=1
+        )
+        data[solver_rcond_mask_name].raw[:] = np.where(
+            data[solver_rcond_name].raw.array() < self.solve_rcond_threshold, 1, 0
+        )
+
+        # Re-use our mask scanning pipeline, setting third bit (== 4)
+        scanner.det_flags_value = 4
+        scanner.mask_key = solver_rcond_mask_name
+        scan_pipe.apply(data, detectors=detectors)
+
+        self._log_info(
+            comm, rank, "finished build of solver covariance in", timer=timer
+        )
 
         # Compute the RHS.  Overwrite inputs, either the original or the copy.
 
+        self._log_info(comm, rank, "begin RHS calculation")
+
+        # Set our binning operator to use only our new solver flags
+        self.binning.shared_flags = None
+        self.binning.shared_flag_mask = 0
+        self.binning.det_flags = flagname
+        self.binning.det_flag_mask = 255
+
         self.template_matrix.amplitudes = "amplitudes_rhs"
         rhs_calc = SolverRHS(
             det_data=detdata_name,
@@ -161,10 +368,14 @@ def _exec(self, data, detectors=None, **kwargs):
         )
         rhs_calc.apply(data, detectors=detectors)
+
+        self._log_info(comm, rank, "finished RHS calculation in", timer=timer)
 
         # Set up the LHS operator.  Use either the original timestreams or the copy
         # as temp space.
 
-        self.template_matrix.amplitudes = "amplitudes"
+        self._log_info(comm, rank, "begin PCG solver")
+
+        self.template_matrix.amplitudes = "{}_amplitudes".format(self.name)
         lhs_calc = SolverLHS(
             det_temp=detdata_name,
             binning=self.binning,
@@ -181,6 +392,20 @@ def _exec(self, data, detectors=None, **kwargs):
             n_iter_max=self.iter_max,
         )
 
+        self._log_info(comm, rank, "finished solver in", timer=timer)
+
+        # Restore flag names and masks to binning operator, in case it is being used
+        # for the final map making or for other external operations.
+
+        self.binning.det_flags = save_det_flags
+        self.binning.det_flag_mask = save_det_flag_mask
+        self.binning.shared_flags = save_shared_flags
+        self.binning.shared_flag_mask = save_shared_flag_mask
+
+        self._log_info(
+            comm, rank, "begin projection of final amplitudes to timestreams"
+        )
+
         # Reset our timestreams to zero
         for ob in data.obs:
             ob.detdata[detdata_name][:] = 0.0
@@ -191,11 +416,46 @@ def _exec(self, data, detectors=None, **kwargs):
         self.template_matrix.transpose = False
         self.template_matrix.apply(data, detectors=detectors)
 
+        self._log_info(comm, rank, "finished amplitude projection in", timer=timer)
+
+        # Now construct the noise covariance, hits, and condition number mask for the
+        # final binned map.
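+        # (Pixels whose inverse condition number falls below
+        # map_rcond_threshold, 1.0e-8 by default, are cut from the final map.)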
+
+        self._log_info(comm, rank, "begin build of final binning covariance")
+
+        hits_name = "{}_hits".format(self.name)
+        rcond_name = "{}_rcond".format(self.name)
+
+        final_cov = CovarianceAndHits(
+            pixel_dist=self.map_binning.pixel_dist,
+            covariance=self.map_binning.covariance,
+            hits=hits_name,
+            rcond=rcond_name,
+            view=self.map_binning.pointing.view,
+            det_flags=self.map_binning.det_flags,
+            det_flag_mask=self.map_binning.det_flag_mask,
+            shared_flags=self.map_binning.shared_flags,
+            shared_flag_mask=self.map_binning.shared_flag_mask,
+            pointing=self.map_binning.pointing,
+            noise_model=self.map_binning.noise_model,
+            rcond_threshold=self.map_rcond_threshold,
+            sync_type=self.map_binning.sync_type,
+            save_pointing=self.map_binning.save_pointing,
+        )
+
+        final_cov.apply(data, detectors=detectors)
+
+        self._log_info(comm, rank, "finished build of final covariance in", timer=timer)
+
         # Make a binned map of these template-subtracted timestreams
 
+        self._log_info(comm, rank, "begin final map binning")
+
         self.map_binning.det_data = detdata_name
         self.map_binning.apply(data, detectors=detectors)
 
+        self._log_info(comm, rank, "finished final binning in", timer=timer)
+
         return
 
     def _finalize(self, data, **kwargs):
diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py
index 5848da41c..7de8cca37 100644
--- a/src/toast/ops/mapmaker_binning.py
+++ b/src/toast/ops/mapmaker_binning.py
@@ -61,7 +61,13 @@ class BinMap(Operator):
         None, allow_none=True, help="Observation detdata key for flags to use"
     )
 
-    det_flag_mask = Int(0, help="Bit mask value for optional flagging")
+    det_flag_mask = Int(0, help="Bit mask value for optional detector flagging")
+
+    shared_flags = Unicode(
+        None, allow_none=True, help="Observation shared key for telescope flags to use"
+    )
+
+    shared_flag_mask = Int(0, help="Bit mask value for optional telescope flagging")
 
     pointing = Instance(
         klass=Operator,
diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py
index 25b5fba9b..1724c2039 100644
--- a/src/toast/ops/mapmaker_solve.py
+++ b/src/toast/ops/mapmaker_solve.py
@@ -321,9 +321,33 @@ def _check_matrix(self, proposal):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
+    def _log_debug(self, comm, rank, msg, timer=None):
+        """Helper function to log a DEBUG level message from rank zero"""
+        log = Logger.get()
+        if comm is not None:
+            comm.barrier()
+        if timer is not None:
+            timer.stop()
+        if rank == 0:
+            if timer is None:
+                msg = "MapMaker LHS {}".format(msg)
+            else:
+                msg = "MapMaker LHS {} {:0.2f} s".format(msg, timer.seconds())
+            log.debug(msg)
+        if timer is not None:
+            timer.clear()
+            timer.start()
+
     @function_timer
     def _exec(self, data, detectors=None, **kwargs):
         log = Logger.get()
+        timer = Timer()
+
+        # The global communicator we are using (or None)
+        comm = data.comm.comm_world
+        rank = 0
+        if comm is not None:
+            rank = comm.rank
 
         # Check that input traits are set
         if self.binning is None:
@@ -335,6 +359,8 @@ def _exec(self, data, detectors=None, **kwargs):
 
         # Build a pipeline to project amplitudes into timestreams and make a binned
         # map.
+        timer.start()
+        self._log_debug(comm, rank, "begin project amplitudes and binning")
 
         self.template_matrix.transpose = False
         self.template_matrix.det_data = self.det_temp
@@ -352,9 +378,13 @@ def _exec(self, data, detectors=None, **kwargs):
 
         bin_pipe.apply(data, detectors=detectors)
 
-        # Build a pipeline for the projection and template matrix application.
+ self._log_debug(comm, rank, "projection and binning finished in", timer=timer) + + # Build a pipeline for the map scanning and template matrix application. # First create the operators that we will use. + self._log_debug(comm, rank, "begin scan map and accumulate amplitudes") + # Use the same pointing operator as the binning pointing = self.binning.pointing @@ -428,6 +458,10 @@ def _exec(self, data, detectors=None, **kwargs): proj_pipe.apply(data, detectors=detectors) + self._log_debug( + comm, rank, "map scan and amplitude accumulate finished in", timer=timer + ) + return def _finalize(self, data, **kwargs): @@ -486,6 +520,9 @@ def solve( # The global communicator we are using (or None) comm = data.comm.comm_world + rank = 0 + if comm is not None: + rank = comm.rank # Solving A * x = b ... @@ -531,7 +568,7 @@ def solve( if comm is not None: comm.barrier() timer.stop() - if data.comm.world_rank == 0: + if rank == 0: msg = "MapMaker initial residual = {}, {:0.2f} s".format(sqsum, timer.seconds()) log.info(msg) timer.clear() @@ -568,7 +605,7 @@ def solve( if comm is not None: comm.barrier() timer.stop() - if data.comm.world_rank == 0: + if rank == 0: msg = "MapMaker iteration {:4d}, relative residual = {}, {:0.2f} s".format( iter, sqsum, timer.seconds() ) @@ -588,7 +625,7 @@ def solve( if sqsum < init_sqsum * convergence or sqsum < 1e-30: timer.stop() timer_full.stop() - if data.comm.world_rank == 0: + if rank == 0: msg = "MapMaker PCG converged after {:4d} iterations and {:0.2f} seconds".format( iter, timer_full.seconds() ) @@ -601,7 +638,7 @@ def solve( if last_best < best_sqsum * 2: timer.stop() timer_full.stop() - if data.comm.world_rank == 0: + if rank == 0: msg = "MapMaker PCG stalled after {:4d} iterations and {:0.2f} seconds".format( iter, timer_full.seconds() ) diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index 626ee52e2..19854bf03 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -66,7 +66,13 @@ class BuildHitMap(Operator): None, allow_none=True, help="Observation detdata key for flags to use" ) - det_flag_mask = Int(0, help="Bit mask value for optional flagging") + det_flag_mask = Int(0, help="Bit mask value for optional detector flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional telescope flagging") pixels = Unicode("pixels", help="Observation detdata key for pixel indices") @@ -226,7 +232,13 @@ class BuildInverseCovariance(Operator): None, allow_none=True, help="Observation detdata key for flags to use" ) - det_flag_mask = Int(0, help="Bit mask value for optional flagging") + det_flag_mask = Int(0, help="Bit mask value for optional detector flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional telescope flagging") pixels = Unicode("pixels", help="Observation detdata key for pixel indices") @@ -438,7 +450,13 @@ class BuildNoiseWeighted(Operator): None, allow_none=True, help="Observation detdata key for flags to use" ) - det_flag_mask = Int(0, help="Bit mask value for optional flagging") + det_flag_mask = Int(0, help="Bit mask value for optional detector flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional 
telescope flagging") pixels = Unicode("pixels", help="Observation detdata key for pixel indices") @@ -655,11 +673,21 @@ class CovarianceAndHits(Operator): help="The Data key where the inverse condition number should be stored", ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + det_flags = Unicode( None, allow_none=True, help="Observation detdata key for flags to use" ) - det_flag_mask = Int(0, help="Bit mask value for optional flagging") + det_flag_mask = Int(0, help="Bit mask value for optional detector flagging") + + shared_flags = Unicode( + None, allow_none=True, help="Observation shared key for telescope flags to use" + ) + + shared_flag_mask = Int(0, help="Bit mask value for optional telescope flagging") pointing = Instance( klass=Operator, @@ -780,6 +808,8 @@ def _exec(self, data, detectors=None, **kwargs): pixels=self.pointing.pixels, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, + shared_flags=self.shared_flags, + shared_flag_mask=self.shared_flag_mask, sync_type=self.sync_type, ) @@ -795,6 +825,8 @@ def _exec(self, data, detectors=None, **kwargs): noise_model=self.noise_model, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, + shared_flags=self.shared_flags, + shared_flag_mask=self.shared_flag_mask, sync_type=self.sync_type, ) diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index 51cb0aa58..ee6c666ad 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -36,10 +36,8 @@ class Offset(Template): # data : The Data instance we are working with # view : The timestream view we are using # det_data : The detector data key with the timestreams - # det_flags : Optional detector flags - # det_flag_mask : Bit mask for detector flags - # shared_flags : Optional shared flags - # shared_flag_mask : Bit mask for shared flags + # flags : Optional detector solver flags + # flag_mask : Bit mask for detector solver flags # step_time = Float(10000.0, help="Seconds per baseline step") diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index f2ce31fd7..4e876690e 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -49,17 +49,11 @@ class Template(TraitConfig): None, allow_none=True, help="Observation detdata key for the timestream data" ) - det_flags = Unicode( - None, allow_none=True, help="Observation detdata key for flags to use" + flags = Unicode( + None, allow_none=True, help="Observation detdata key for solver flags to use" ) - det_flag_mask = Int(0, help="Bit mask value for optional flagging") - - shared_flags = Unicode( - None, allow_none=True, help="Observation shared key for telescope flags to use" - ) - - shared_flag_mask = Int(0, help="Bit mask value for optional shared flagging") + flag_mask = Int(0, help="Bit mask value for solver flags") @traitlets.validate("data") def _check_data(self, proposal): @@ -367,8 +361,7 @@ def __init__(self, comm, n_global, n_local, local_indices=None, dtype=np.float64 # Support flagging of template amplitudes. This can be used to flag some # amplitudes if too many timestream samples contributing to the amplitude value - # are bad. It can also be used when iteratively masking template amplitudes - # and sky pixels. We will be passing these flags to compiled code, and there + # are bad. We will be passing these flags to compiled code, and there # is no way easy way to do this using numpy bool and C++ bool. So we waste # a bit of memory and use a whole byte per amplitude. 
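        # (For scale: one byte per amplitude means a million baseline offsets
        # cost about 1 MB of flag storage per process.)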
self._raw_flags = AlignedU8.zeros(self._n_local) @@ -487,7 +480,7 @@ def n_local(self): @property def n_local_flagged(self): - """The number of locally amplitudes that are flagged.""" + """The number of local amplitudes that are flagged.""" return np.count_nonzero(self.local_flags) def _get_global_values(comm_offset, send_buffer): From 1e5fec9ac004b872b21b1e0dc48a7a76c83cdf1e Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Mon, 4 Jan 2021 12:57:15 -0800 Subject: [PATCH 049/690] Add compiled code for offset template. Begin unit tests. --- src/libtoast/CMakeLists.txt | 1 + src/libtoast/include/toast.hpp | 1 + .../include/toast/template_offset.hpp | 20 ++ src/libtoast/src/toast_template_offset.cpp | 72 ++++++ src/toast/CMakeLists.txt | 1 + src/toast/_libtoast/module.cpp | 1 + src/toast/_libtoast/module.hpp | 1 + src/toast/_libtoast/template_offset.cpp | 75 ++++++ src/toast/observation_view.py | 4 +- src/toast/ops/mapmaker.py | 57 +++-- src/toast/ops/mapmaker_templates.py | 18 +- src/toast/ops/noise_weight.py | 2 +- src/toast/ops/pointing_healpix.py | 213 +++++++++-------- src/toast/templates/CMakeLists.txt | 1 + src/toast/templates/__init__.py | 2 + src/toast/templates/offset.py | 219 +++++++++++------- src/toast/templates/template.py | 66 ++---- src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/runner.py | 4 + src/toast/tests/template_offset.py | 93 ++++++++ 20 files changed, 591 insertions(+), 261 deletions(-) create mode 100644 src/libtoast/include/toast/template_offset.hpp create mode 100644 src/libtoast/src/toast_template_offset.cpp create mode 100644 src/toast/_libtoast/template_offset.cpp create mode 100644 src/toast/tests/template_offset.py diff --git a/src/libtoast/CMakeLists.txt b/src/libtoast/CMakeLists.txt index f708bb3fa..b91adb646 100644 --- a/src/libtoast/CMakeLists.txt +++ b/src/libtoast/CMakeLists.txt @@ -37,6 +37,7 @@ set(toast_SOURCES src/toast_atm.cpp src/toast_atm_sim.cpp src/toast_atm_observe.cpp + src/toast_template_offset.cpp tests/toast_test_runner.cpp tests/toast_test_env.cpp tests/toast_test_utils.cpp diff --git a/src/libtoast/include/toast.hpp b/src/libtoast/include/toast.hpp index 327839d10..d1c4bf0d6 100644 --- a/src/libtoast/include/toast.hpp +++ b/src/libtoast/include/toast.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #endif // ifndef TOAST_HPP diff --git a/src/libtoast/include/toast/template_offset.hpp b/src/libtoast/include/toast/template_offset.hpp new file mode 100644 index 000000000..cd538b89d --- /dev/null +++ b/src/libtoast/include/toast/template_offset.hpp @@ -0,0 +1,20 @@ + +// Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +// All rights reserved. Use of this source code is governed by +// a BSD-style license that can be found in the LICENSE file. + +#ifndef TOAST_TEMPLATE_OFFSET_HPP +#define TOAST_TEMPLATE_OFFSET_HPP + +#include + + +namespace toast { +void template_offset_add_to_signal(int64_t step_length, int64_t n_amp, + double * amplitudes, int64_t n_data, double * data); + +void template_offset_project_signal(int64_t step_length, int64_t n_data, double * data, + int64_t n_amp, double * amplitudes); +} + +#endif // ifndef TOAST_TEMPLATE_OFFSET_HPP diff --git a/src/libtoast/src/toast_template_offset.cpp b/src/libtoast/src/toast_template_offset.cpp new file mode 100644 index 000000000..9014abc97 --- /dev/null +++ b/src/libtoast/src/toast_template_offset.cpp @@ -0,0 +1,72 @@ + +// Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +// All rights reserved.
Use of this source code is governed by +// a BSD-style license that can be found in the LICENSE file. + +#include +#include +#include + +#include + +void toast::template_offset_add_to_signal(int64_t step_length, int64_t n_amp, + double * amplitudes, + int64_t n_data, double * data) { + std::cerr << "DBG add_to " << step_length << " " << n_amp << " " << amplitudes + << " " << n_data << " " << data << std::endl; + + // All but the last amplitude have the same number of samples. + if (toast::is_aligned(amplitudes) && toast::is_aligned(data)) { + #pragma omp simd + for (int64_t i = 0; i < n_amp - 1; ++i) { + int64_t doff = i * step_length; + for (int64_t j = 0; j < step_length; ++j) { + data[doff + j] += amplitudes[i]; + } + } + } else { + for (int64_t i = 0; i < n_amp - 1; ++i) { + int64_t doff = i * step_length; + for (int64_t j = 0; j < step_length; ++j) { + data[doff + j] += amplitudes[i]; + } + } + } + + // Now handle the final amplitude. + for (int64_t j = (n_amp - 1) * step_length; j < n_data; ++j) { + data[j] += amplitudes[n_amp - 1]; + } + return; +} + +void toast::template_offset_project_signal(int64_t step_length, int64_t n_data, + double * data, int64_t n_amp, + double * amplitudes) { + std::cerr << "DBG project " << step_length << " " << n_data << " " << data << " " << + n_amp << " " << amplitudes << std::endl; + + // All but the last amplitude have the same number of samples. + if (toast::is_aligned(amplitudes) && toast::is_aligned(data)) { + #pragma omp simd + for (int64_t i = 0; i < n_amp - 1; ++i) { + int64_t doff = i * step_length; + for (int64_t j = 0; j < step_length; ++j) { + amplitudes[i] += data[doff + j]; + } + } + } else { + for (int64_t i = 0; i < n_amp - 1; ++i) { + int64_t doff = i * step_length; + for (int64_t j = 0; j < step_length; ++j) { + amplitudes[i] += data[doff + j]; + } + } + } + + // Now handle the final amplitude. + for (int64_t j = (n_amp - 1) * step_length; j < n_data; ++j) { + amplitudes[n_amp - 1] += data[j]; + } + return; +} diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 39328070e..4fcb0ab53 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -45,6 +45,7 @@ pybind11_add_module(_libtoast MODULE _libtoast/pixels.cpp _libtoast/todmap_mapmaker.cpp _libtoast/atm.cpp + _libtoast/template_offset.cpp ) if(OpenMP_CXX_FOUND) diff --git a/src/toast/_libtoast/module.cpp b/src/toast/_libtoast/module.cpp index 60389c4bc..2cfc6a9eb 100644 --- a/src/toast/_libtoast/module.cpp +++ b/src/toast/_libtoast/module.cpp @@ -41,6 +41,7 @@ PYBIND11_MODULE(_libtoast, m) { init_pixels(m); init_todmap_mapmaker(m); init_atm(m); + init_template_offset(m); // Internal unit test runner m.def( diff --git a/src/toast/_libtoast/module.hpp b/src/toast/_libtoast/module.hpp index 5a35267b5..ff22640fa 100644 --- a/src/toast/_libtoast/module.hpp +++ b/src/toast/_libtoast/module.hpp @@ -414,5 +414,6 @@ void init_map_cov(py::module & m); void init_pixels(py::module & m); void init_todmap_mapmaker(py::module & m); void init_atm(py::module & m); +void init_template_offset(py::module & m); #endif // ifndef LIBTOAST_HPP diff --git a/src/toast/_libtoast/template_offset.cpp b/src/toast/_libtoast/template_offset.cpp new file mode 100644 index 000000000..d04f459fc --- /dev/null +++ b/src/toast/_libtoast/template_offset.cpp @@ -0,0 +1,75 @@ + +// Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +// All rights reserved. Use of this source code is governed by +// a BSD-style license that can be found in the LICENSE file. 
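
For reference, the semantics of these two kernels can be written compactly in NumPy. This is an illustrative re-implementation, not code from the patch; it mirrors the C++ above, including the final amplitude absorbing the remainder when step_length does not evenly divide the number of samples:

    import numpy as np

    def offset_add_to_signal(step_length, amplitudes, data):
        # data[k] += amplitudes[k // step_length], with the final amplitude
        # covering any remainder samples at the end of the data.
        n_amp = len(amplitudes)
        for i in range(n_amp - 1):
            data[i * step_length : (i + 1) * step_length] += amplitudes[i]
        data[(n_amp - 1) * step_length :] += amplitudes[-1]

    def offset_project_signal(step_length, data, amplitudes):
        # The transpose operation: each amplitude accumulates the sum of
        # the samples in its step, the last one including the remainder.
        n_amp = len(amplitudes)
        for i in range(n_amp - 1):
            amplitudes[i] += np.sum(data[i * step_length : (i + 1) * step_length])
        amplitudes[-1] += np.sum(data[(n_amp - 1) * step_length :])

In the patch these loops are the compiled hot path, with an OpenMP simd variant taken when both buffers are aligned.
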
+ +#include + + +void init_template_offset(py::module & m) { + m.def( + "template_offset_add_to_signal", [](int64_t step_length, py::buffer amplitudes, + py::buffer data) { + pybuffer_check_1D (amplitudes); + pybuffer_check_1D (data); + py::buffer_info info_amplitudes = amplitudes.request(); + py::buffer_info info_data = data.request(); + int64_t n_amp = info_amplitudes.size; + int64_t n_data = info_data.size; + double * raw_amplitudes = reinterpret_cast (info_amplitudes.ptr); + double * raw_data = reinterpret_cast (info_data.ptr); + toast::template_offset_add_to_signal(step_length, n_amp, raw_amplitudes, + n_data, raw_data); + return; + }, py::arg("step_length"), py::arg("amplitudes"), py::arg( + "data"), R"( + Accumulate offset amplitudes to timestream data. + + Each amplitude value is accumulated to `step_length` number of samples. The + final offset will be at least this many samples, but may be more if the step + size does not evenly divide into the number of samples. + + Args: + step_length (int64): The minimum number of samples for each offset. + amplitudes (array): The float64 amplitude values. + data (array): The float64 timestream values to accumulate. + + Returns: + None. + + )"); + + m.def( + "template_offset_project_signal", [](int64_t step_length, py::buffer data, + py::buffer amplitudes) { + pybuffer_check_1D (amplitudes); + pybuffer_check_1D (data); + py::buffer_info info_amplitudes = amplitudes.request(); + py::buffer_info info_data = data.request(); + int64_t n_amp = info_amplitudes.size; + int64_t n_data = info_data.size; + double * raw_amplitudes = reinterpret_cast (info_amplitudes.ptr); + double * raw_data = reinterpret_cast (info_data.ptr); + toast::template_offset_add_to_signal(step_length, n_data, raw_data, + n_amp, raw_amplitudes); + return; + }, py::arg("step_length"), py::arg("data"), py::arg( + "amplitudes"), R"( + Accumulate timestream data into offset amplitudes. + + Chunks of `step_length` number of samples are accumulated into the offset + amplitudes. If step_length does not evenly divide into the total number of + samples, the final amplitude will be extended to include the remainder. + + Args: + step_length (int64): The minimum number of samples for each offset. + data (array): The float64 timestream values. + amplitudes (array): The float64 amplitude values. + + Returns: + None. + + )"); + + return; +} diff --git a/src/toast/observation_view.py b/src/toast/observation_view.py index 9fab0cd5c..0b1b6456e 100644 --- a/src/toast/observation_view.py +++ b/src/toast/observation_view.py @@ -167,10 +167,10 @@ def __setitem__(self, key, value): raise RuntimeError("Cannot set views directly- simply access them.") def __iter__(self): - return iter(self._internal) + return iter(self.obj) def __len__(self): - return len(self._internal) + return len(self.obj) def clear(self): self.obj._views.clear() diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index bf3e3b63b..659439f12 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -152,8 +152,14 @@ def _check_map_binning(self, proposal): # Check that this operator has the traits we expect for trt in [ "det_data", + "pixel_dist", "pointing", + "binned", "covariance", + "det_flags", + "det_flag_mask", + "shared_flags", + "shared_flag_mask", "noise_model", "save_pointing", "sync_type", @@ -235,7 +241,7 @@ def _exec(self, data, detectors=None, **kwargs): # We use the input binning operator to define the flags that the user has # specified. 
We will save the name / bit mask for these and restore them later. - # Then we will use the binning operator with our mapmaking flags. These input + # Then we will use the binning operator with our solver flags. These input # flags are combined to the first bit (== 1) of the solver flags. self._log_info(comm, rank, "begin building flags for solver") @@ -247,31 +253,38 @@ def _exec(self, data, detectors=None, **kwargs): save_shared_flags = self.binning.shared_flags save_shared_flag_mask = self.binning.shared_flag_mask + # Use the same data view as the pointing operator in binning + solve_view = self.binning.pointing.view + for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) if len(dets) == 0: # Nothing to do for this observation continue - - # - # - # USE A VIEW - # - # - - starting_flags = np.zeros(ob.n_local_samples, dtype=np.uint8) - if save_shared_flags is not None: - starting_flags[:] = np.where( - ob.shared[save_shared_flags] & save_shared_flag_mask > 0, 1, 0 - ) + # Create the new solver flags ob.detdata.create(flagname, dtype=np.uint8, detectors=detectors) - for d in dets: - ob.datadata[flagname][d, :] = starting_flags - if save_det_flags is not None: - ob.datadata[flagname][d, :] |= np.where( - ob.detdata[save_det_flags][d] & save_det_flag_mask > 0, 1, 0 + # The data views + views = ob.view[solve_view] + # For each view... + for vw in range(len(views)): + view_samples = views[vw].stop - views[vw].start + starting_flags = np.zeros(view_samples, dtype=np.uint8) + if save_shared_flags is not None: + starting_flags[:] = np.where( + views.shared[save_shared_flags][vw] & save_shared_flag_mask > 0, + 1, + 0, ) + for d in dets: + views.detdata[flagname][vw][d, :] = starting_flags + if save_det_flags is not None: + views.detdata[flagname][vw][d, :] |= np.where( + views.detdata[save_det_flags][vw][d] & save_det_flag_mask + > 0, + 1, + 0, + ) # Now scan any input mask to this same flag field. We use the second bit (== 2) # for these mask flags. 
For the input mask bit we check the first bit of the @@ -359,7 +372,9 @@ def _exec(self, data, detectors=None, **kwargs): self.binning.det_flags = flagname self.binning.det_flag_mask = 255 - self.template_matrix.amplitudes = "amplitudes_rhs" + rhs_amplitude_key = "{}_amplitudes_rhs".format(self.name) + + self.template_matrix.amplitudes = rhs_amplitude_key rhs_calc = SolverRHS( det_data=detdata_name, overwrite=True, @@ -375,7 +390,9 @@ def _exec(self, data, detectors=None, **kwargs): self._log_info(comm, rank, "begin PCG solver") - self.template_matrix.amplitudes = "{}_amplitudes".format(self.name) + amplitude_key = "{}_amplitudes".format(self.name) + self.template_matrix.amplitudes = amplitude_key + lhs_calc = SolverLHS( det_temp=detdata_name, binning=self.binning, diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 2917973f2..353b91b18 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -37,17 +37,11 @@ class TemplateMatrix(Operator): None, allow_none=True, help="Observation detdata key for the timestream data" ) - det_flags = Unicode( - None, allow_none=True, help="Observation detdata key for flags to use" + flags = Unicode( + None, allow_none=True, help="Observation detdata key for solver flags to use" ) - det_flag_mask = Int(0, help="Bit mask value for optional flagging") - - shared_flags = Unicode( - None, allow_none=True, help="Observation shared key for telescope flags to use" - ) - - shared_flag_mask = Int(0, help="Bit mask value for optional shared flagging") + flag_mask = Int(0, help="Bit mask value for solver flags") @traitlets.validate("templates") def _check_templates(self, proposal): @@ -98,10 +92,8 @@ def _exec(self, data, detectors=None, **kwargs): if not self._initialized: for tmpl in self.templates: tmpl.view = self.view - tmpl.det_flags = self.det_flags - tmpl.det_flag_mask = self.det_flag_mask - tmpl.shared_flags = self.shared_flags - tmpl.shared_flag_mask = self.shared_flag_mask + tmpl.flags = self.flags + tmpl.flag_mask = self.flag_mask # This next line will trigger calculation of the number # of amplitudes within each template. 
tmpl.data = data diff --git a/src/toast/ops/noise_weight.py b/src/toast/ops/noise_weight.py index 77807a23a..5c9162bde 100644 --- a/src/toast/ops/noise_weight.py +++ b/src/toast/ops/noise_weight.py @@ -31,7 +31,7 @@ class NoiseWeight(Operator): API = traitlets.Int(0, help="Internal interface version for this operator") noise_model = traitlets.Unicode( - "noise_model", help="The observation key for storing the noise model" + "noise_model", help="The observation key containing the noise model" ) det_data = Unicode( diff --git a/src/toast/ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py index f07d33bca..cd390dd1a 100644 --- a/src/toast/ops/pointing_healpix.py +++ b/src/toast/ops/pointing_healpix.py @@ -247,33 +247,6 @@ def _exec(self, data, detectors=None, **kwargs): if not self.overwrite: continue - # Get the flags if needed - flags = None - if self.shared_flags is not None: - flags = np.array(ob.shared[self.shared_flags]) - flags &= self.shared_flag_mask - - # HWP angle if needed - hwp_angle = None - if self.hwp_angle is not None: - hwp_angle = ob.shared[self.hwp_angle] - - # Boresight pointing quaternions - in_boresight = ob.shared[self.boresight] - - # Coordinate transform if needed - boresight = in_boresight - if coord_rot is not None: - boresight = qa.mult(coord_rot, in_boresight) - - # Focalplane for this observation - focalplane = ob.telescope.focalplane - - # Optional calibration - cal = None - if self.cal is not None: - cal = ob[self.cal] - # Create output data for the pixels, weights and optionally the # detector quaternions. @@ -306,81 +279,117 @@ def _exec(self, data, detectors=None, **kwargs): detectors=dets, ) - for det in dets: - props = focalplane[det] - - # Get the cross polar response from the focalplane - epsilon = 0.0 - if "pol_leakage" in props: - epsilon = props["pol_leakage"] - - # Detector quaternion offset from the boresight - detquat = props["quat"] - - # Timestream of detector quaternions - quats = qa.mult(boresight, detquat) - if self.quats is not None: - ob.detdata[self.quats][det, :] = quats - - # Cal for this detector - dcal = 1.0 - if cal is not None: - dcal = cal[det] - - # Buffered pointing calculation - buf_off = 0 - buf_n = tod_buffer_length - while buf_off < ob.n_local_samples: - if buf_off + buf_n > ob.n_local_samples: - buf_n = ob.n_local_samples - buf_off - bslice = slice(buf_off, buf_off + buf_n) - - # This buffer of detector quaternions - detp = quats[bslice, :].reshape(-1) - - # Buffer of HWP angle - hslice = None - if hwp_angle is not None: - hslice = hwp_angle[bslice].reshape(-1) - - # Buffer of flags - fslice = None - if flags is not None: - fslice = flags[bslice].reshape(-1) - - # Pixel and weight buffers - pxslice = ob.detdata[self.pixels][det, bslice].reshape(-1) - wtslice = ob.detdata[self.weights][det, bslice].reshape(-1) - - pbuf = pxslice - wbuf = wtslice - if self.single_precision: - pbuf = np.zeros(len(pxslice), dtype=np.int64) - wbuf = np.zeros(len(wtslice), dtype=np.float64) - - pointing_matrix_healpix( - self.hpix, - self.nest, - epsilon, - dcal, - self.mode, - detp, - hslice, - fslice, - pxslice, - wtslice, - ) - - if self.single_precision: - pxslice[:] = pbuf.astype(np.int32) - wtslice[:] = wbuf.astype(np.float32) - - buf_off += buf_n - - if self.create_dist is not None: - self._local_submaps[ - ob.detdata["pixels"][det] // self._n_pix_submap - ] = True + # Loop over views + views = ob.view[self.view] + for vw in range(len(views)): + # Get the flags if needed + flags = None + if self.shared_flags is not None: + flags = 
np.array(views.shared[self.shared_flags][vw]) + flags &= self.shared_flag_mask + + # HWP angle if needed + hwp_angle = None + if self.hwp_angle is not None: + hwp_angle = views.shared[self.hwp_angle][vw] + + # Boresight pointing quaternions + in_boresight = views.shared[self.boresight][vw] + + # Coordinate transform if needed + boresight = in_boresight + if coord_rot is not None: + boresight = qa.mult(coord_rot, in_boresight) + + # Focalplane for this observation + focalplane = ob.telescope.focalplane + + # Optional calibration + cal = None + if self.cal is not None: + cal = ob[self.cal] + + view_samples = len(boresight) + + for det in dets: + props = focalplane[det] + + # Get the cross polar response from the focalplane + epsilon = 0.0 + if "pol_leakage" in props: + epsilon = props["pol_leakage"] + + # Detector quaternion offset from the boresight + detquat = props["quat"] + + # Timestream of detector quaternions + quats = qa.mult(boresight, detquat) + if self.quats is not None: + views.detdata[self.quats][vw][det, :] = quats + + # Cal for this detector + dcal = 1.0 + if cal is not None: + dcal = cal[det] + + # Buffered pointing calculation + buf_off = 0 + buf_n = tod_buffer_length + while buf_off < view_samples: + if buf_off + buf_n > view_samples: + buf_n = view_samples - buf_off + bslice = slice(buf_off, buf_off + buf_n) + + # This buffer of detector quaternions + detp = quats[bslice, :].reshape(-1) + + # Buffer of HWP angle + hslice = None + if hwp_angle is not None: + hslice = hwp_angle[bslice].reshape(-1) + + # Buffer of flags + fslice = None + if flags is not None: + fslice = flags[bslice].reshape(-1) + + # Pixel and weight buffers + pxslice = views.detdata[self.pixels][vw][det, bslice].reshape( + -1 + ) + wtslice = views.detdata[self.weights][vw][det, bslice].reshape( + -1 + ) + + pbuf = pxslice + wbuf = wtslice + if self.single_precision: + pbuf = np.zeros(len(pxslice), dtype=np.int64) + wbuf = np.zeros(len(wtslice), dtype=np.float64) + + pointing_matrix_healpix( + self.hpix, + self.nest, + epsilon, + dcal, + self.mode, + detp, + hslice, + fslice, + pxslice, + wtslice, + ) + + if self.single_precision: + pxslice[:] = pbuf.astype(np.int32) + wtslice[:] = wbuf.astype(np.float32) + + buf_off += buf_n + + if self.create_dist is not None: + self._local_submaps[ + views.detdata[self.pixels][vw][det] // self._n_pix_submap + ] = True return def _finalize(self, data, **kwargs): diff --git a/src/toast/templates/CMakeLists.txt b/src/toast/templates/CMakeLists.txt index 86fd06b68..0ad0fb829 100644 --- a/src/toast/templates/CMakeLists.txt +++ b/src/toast/templates/CMakeLists.txt @@ -4,5 +4,6 @@ install(FILES __init__.py template.py + offset.py DESTINATION ${PYTHON_SITE}/toast/templates ) diff --git a/src/toast/templates/__init__.py b/src/toast/templates/__init__.py index 4dc58a800..967d00554 100644 --- a/src/toast/templates/__init__.py +++ b/src/toast/templates/__init__.py @@ -5,3 +5,5 @@ # Import Templates into our public API from .template import Template, Amplitudes + +from .offset import Offset diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index ee6c666ad..e427e79ae 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -2,12 +2,16 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. 
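
The restructuring above converts a whole-observation loop into the per-view pattern that the new interfaces use throughout. A schematic of that access pattern follows; the data names ("scanning", "boresight", "signal") are placeholders, not keys from the patch. When the view name is None, the list contains a single slice covering all local samples:

    for ob in data.obs:
        # Each entry of ob.view[name] is a slice into the observation.
        views = ob.view["scanning"]
        for vw in range(len(views)):
            # Shared data restricted to this view
            bore = views.shared["boresight"][vw]
            # Detector data restricted to this view
            for det in ob.local_detectors:
                sig = views.detdata["signal"][vw][det, :]
                # ... process only the samples of this view ...
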
+from collections import OrderedDict + import numpy as np import scipy from ..utils import Logger, rate_from_times +from ..timing import function_timer + from ..mpi import MPI from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float @@ -50,11 +54,17 @@ class Offset(Template): help="Observation key containing the optional noise model", ) + good_fraction = Float( + 0.5, + help="Fraction of unflagged samples needed to keep a given offset amplitude", + ) + precond_width = Int(20, help="Preconditioner width in terms of offsets / baselines") def __init__(self, **kwargs): super().__init__(**kwargs) + @function_timer def _initialize(self, new_data): # Compute the step boundaries for every observation and the number of # amplitude values on this process. Every process only stores amplitudes @@ -77,15 +87,21 @@ def _initialize(self, new_data): for iob, ob in enumerate(new_data.obs): # Compute sample rate from timestamps - self._obs_rate[iob] = rate_from_times(ob.shared[self.times]) + (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(ob.shared[self.times]) + self._obs_rate[iob] = rate # The step length for this observation - step_length = self.step_time * self._obs_rate[iob] + step_length = int(self.step_time * self._obs_rate[iob]) # Track number of offset amplitudes per view. self._obs_views[iob] = list() for view_slice in ob.view[self.view]: - slice_len = view_slice.stop - view_slice.start + slice_len = None + if view_slice.start is None: + # This is a view of the whole obs + slice_len = ob.n_local_samples + else: + slice_len = view_slice.stop - view_slice.start view_n_amp = slice_len // step_length self._obs_views[iob].append(view_n_amp) @@ -137,7 +153,7 @@ def _initialize(self, new_data): ) = self._get_filter_and_precond( self._freq[iob], offset_psd, ob.view[self.view] ) - offset += np.sum(self.obs_views[iob]) + offset += np.sum(self._obs_views[iob]) self._n_local = offset if new_data.comm.comm_world is None: @@ -147,6 +163,42 @@ def _initialize(self, new_data): self._n_local, op=MPI.SUM ) + # Now that we know the number of amplitudes, we go through the solver flags + # and determine what amplitudes, if any, are poorly constrained. These are + # stored internally as a bool array, and used when constructing a new + # Amplitudes object. 
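
The rule described in this comment (and implemented in the hunk that follows) is: an amplitude is flagged when the flagged fraction of the samples contributing to it exceeds 1 - good_fraction. A standalone sketch of the same rule, with illustrative argument names:

    import numpy as np

    def flag_amplitudes(det_flags, flag_mask, step_length, n_amp, good_fraction):
        # det_flags: uint8 flags for every sample of one detector in one view.
        # Returns one bool per amplitude: True if too few good samples remain.
        amp_flags = np.zeros(n_amp, dtype=bool)
        bad_frac = 1.0 - good_fraction
        for amp in range(n_amp):
            start = amp * step_length
            # The final amplitude extends to the end of the view.
            stop = len(det_flags) if amp == n_amp - 1 else start + step_length
            n_bad = np.count_nonzero(det_flags[start:stop] & flag_mask)
            amp_flags[amp] = n_bad / (stop - start) > bad_frac
        return amp_flags
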
+ + self._amp_flags = np.zeros(self._n_local, dtype=np.bool) + if self.flags is not None: + offset = 0 + bad_frac = 1.0 - self.good_fraction + for det in self._all_dets: + for iob, ob in enumerate(new_data.obs): + if det not in ob.local_detectors: + continue + + # The step length for this observation + step_length = int(self.step_time * self._obs_rate[iob]) + + # Loop over views of flags + views = ob.view[self.view] + for ivw, vw in enumerate(views.detdata[self.flags]): + n_amp_view = self._obs_views[iob][ivw] + view_samples = len(vw[det]) + voff = 0 + for amp in range(n_amp_view): + amplen = step_length + if amp == n_amp_view - 1: + amplen = view_samples - voff + frac = ( + np.count_nonzero( + vw[det][voff : voff + amplen] & self.flag_mask + ) + / amplen + ) + self._amp_flags[offset + amp] = frac > bad_frac + voff += step_length + offset += n_amp_view return @function_timer @@ -181,8 +233,8 @@ def g(x): result[good] = (np.sin(arg) / arg) ** 2 return result - tbase = self.step_length - fbase = 1 / tbase + tbase = self.step_time + fbase = 1.0 / tbase offset_psd = interpolate_psd(freq) * g(freq * tbase) for m in range(1, 2): offset_psd += interpolate_psd(freq + m * fbase) * g(freq * tbase + m) @@ -192,72 +244,87 @@ def g(x): @function_timer def _get_filter_and_precond(self, freq, offset_psd, view_slices): - logfreq = np.log(freq) - logpsd = np.log(offset_psd) - logfilter = np.log(1 / offset_psd) - - def interpolate(x, psd): - result = np.zeros(x.size) - good = np.abs(x) > 1e-10 - logx = np.log(np.abs(x[good])) - logresult = np.interp(logx, logfreq, psd) - result[good] = np.exp(logresult) - return result - - def truncate(noisefilter, lim=1e-4): - icenter = noisefilter.size // 2 - ind = np.abs(noisefilter[:icenter]) > np.abs(noisefilter[0]) * lim - icut = np.argwhere(ind)[-1][0] - if icut % 2 == 0: - icut += 1 - noisefilter = np.roll(noisefilter, icenter) - noisefilter = noisefilter[icenter - icut : icenter + icut + 1] - return noisefilter - - vw_filters = list() - vw_precond = list() - for offset_slice, sigmasqs in offset_slices: - nstep = offset_slice.stop - offset_slice.start - filterlen = nstep * 2 + 1 - filterfreq = np.fft.rfftfreq(filterlen, self.step_length) - noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter))) - noisefilters.append(noisefilter) - # Build the band-diagonal preconditioner - if self.precond_width <= 1: - # Compute C_a prior - preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd))) - else: - # Compute Cholesky decomposition prior - wband = min(self.precond_width, noisefilter.size // 2) - precond_width = max(wband, min(self.precond_width, nstep)) - icenter = noisefilter.size // 2 - preconditioner = np.zeros([precond_width, nstep], dtype=np.float64) - preconditioner[0] = sigmasqs - preconditioner[:wband, :] += np.repeat( - noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1 - ) - lower = True - scipy.linalg.cholesky_banded( - preconditioner, overwrite_ab=True, lower=lower, check_finite=True - ) - preconditioners.append((preconditioner, lower)) - return noisefilters, preconditioners + # logfreq = np.log(freq) + # logpsd = np.log(offset_psd) + # logfilter = np.log(1 / offset_psd) + # + # def interpolate(x, psd): + # result = np.zeros(x.size) + # good = np.abs(x) > 1e-10 + # logx = np.log(np.abs(x[good])) + # logresult = np.interp(logx, logfreq, psd) + # result[good] = np.exp(logresult) + # return result + # + # def truncate(noisefilter, lim=1e-4): + # icenter = noisefilter.size // 2 + # ind = np.abs(noisefilter[:icenter]) > 
np.abs(noisefilter[0]) * lim + # icut = np.argwhere(ind)[-1][0] + # if icut % 2 == 0: + # icut += 1 + # noisefilter = np.roll(noisefilter, icenter) + # noisefilter = noisefilter[icenter - icut : icenter + icut + 1] + # return noisefilter + # + # vw_filters = list() + # vw_precond = list() + # for offset_slice, sigmasqs in offset_slices: + # nstep = offset_slice.stop - offset_slice.start + # filterlen = nstep * 2 + 1 + # filterfreq = np.fft.rfftfreq(filterlen, self.step_length) + # noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter))) + # noisefilters.append(noisefilter) + # # Build the band-diagonal preconditioner + # if self.precond_width <= 1: + # # Compute C_a prior + # preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd))) + # else: + # # Compute Cholesky decomposition prior + # wband = min(self.precond_width, noisefilter.size // 2) + # precond_width = max(wband, min(self.precond_width, nstep)) + # icenter = noisefilter.size // 2 + # preconditioner = np.zeros([precond_width, nstep], dtype=np.float64) + # preconditioner[0] = sigmasqs + # preconditioner[:wband, :] += np.repeat( + # noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1 + # ) + # lower = True + # scipy.linalg.cholesky_banded( + # preconditioner, overwrite_ab=True, lower=lower, check_finite=True + # ) + # preconditioners.append((preconditioner, lower)) + # return noisefilters, preconditioners + return list(), list() + + def _detectors(self): + return self._all_dets def _zeros(self): - return Amplitudes(self.data.comm.comm_world, self._n_global, self._n_local) + z = Amplitudes(self.data.comm.comm_world, self._n_global, self._n_local) + z.local_flags[:] = np.where(self._amp_flags, 1, 0) + return z @function_timer def _add_to_signal(self, detector, amplitudes): offset = self._det_start[detector] for iob, ob in enumerate(self.data.obs): - if det not in ob.local_detectors: + if detector not in ob.local_detectors: continue # The step length for this observation - step_length = self.step_time * self._obs_rate[iob] + step_length = int(self.step_time * self._obs_rate[iob]) for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] + print( + "calling add_to_signal: ", + step_length, + amplitudes.local[offset : offset + n_amp_view], + vw[detector], + flush=True, + ) template_offset_add_to_signal( - step_length, amplitudes.local[offset : offset + n_amp_view], vw + step_length, + amplitudes.local[offset : offset + n_amp_view], + vw[detector], ) offset += n_amp_view @@ -265,33 +332,23 @@ def _add_to_signal(self, detector, amplitudes): def _project_signal(self, detector, amplitudes): offset = self._det_start[detector] for iob, ob in enumerate(self.data.obs): - if det not in ob.local_detectors: + if detector not in ob.local_detectors: continue # The step length for this observation - step_length = self.step_time * self._obs_rate[iob] + step_length = int(self.step_time * self._obs_rate[iob]) for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] - template_offset_project_signal( - step_length, vw, amplitudes.local[offset : offset + n_amp_view] + print( + "calling project_signal: ", + step_length, + vw[detector], + amplitudes.local[offset : offset + n_amp_view], + flush=True, ) - offset += n_amp_view - - @function_timer - def _project_flags(self, detector, amplitudes): - offset = self._det_start[detector] - for iob, ob in enumerate(self.data.obs): - if det not in ob.local_detectors: - continue - # 
The step length for this observation - step_length = self.step_time * self._obs_rate[iob] - obview = ob.view[self.view] - for ivw, vw_ in enumerate(ob.view[self.view].detdata[self.det_data]): - n_amp_view = self._obs_views[iob][ivw] - flags = np.array() - template_offset_project_flags( + template_offset_project_signal( step_length, - flags, - amplitudes.local_flags[offset : offset + n_amp_view], + vw[detector], + amplitudes.local[offset : offset + n_amp_view], ) offset += n_amp_view diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index 4e876690e..d5556f041 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -2,15 +2,13 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +import numpy as np -from ..utils import ( - Logger, - AlignedU8, - AlignedF32, - AlignedF64, -) +import traitlets -from ..traits import TraitConfig +from ..utils import Logger, AlignedU8, AlignedF32, AlignedF64, dtype_to_aligned + +from ..traits import TraitConfig, Instance, Unicode, Int from ..data import Data @@ -35,7 +33,6 @@ class Template(TraitConfig): # Note: The TraitConfig base class defines a "name" attribute. data = Instance( - None, klass=Data, allow_none=True, help="This must be an instance of a Data class (or None)", @@ -61,15 +58,11 @@ def _check_data(self, proposal): if dat is not None: if not isinstance(dat, Data): raise traitlets.TraitError("data should be a Data instance") - # Call the instance initialization. - self.initialize(dat) return dat @traitlets.observe("data") def initialize(self, change): - # Derived classes should implement this method to do any set up (like - # computing the number of amplitudes) whenever the data changes. - newdata = change["data"] + newdata = change["new"] self._initialize(newdata) def __init__(self, **kwargs): @@ -80,6 +73,24 @@ def _initialize(self, new_data): # computing the number of amplitudes) whenever the data changes. raise NotImplementedError("Derived class must implement _initialize()") + def _detectors(self): + # Derived classes should return the list of detectors they support. + raise NotImplementedError("Derived class must implement _detectors()") + + def detectors(self): + """Return a list of detectors supported by the template. + + This list will change whenever the `data` trait is set, which initializes + the template. + + Returns: + (list): The detectors with local amplitudes across all observations. + + """ + if self.data is None: + raise RuntimeError("You must set the data trait before calling detectors()") + return self._detectors() + def _zeros(self): raise NotImplementedError("Derived class must implement _zeros()") @@ -148,35 +159,6 @@ def project_signal(self, detector, amplitudes): raise RuntimeError("You must set the data trait before using a template") self._project_signal(detector, amplitudes) - def _project_flags(self, detector, amplitudes): - raise NotImplementedError("Derived class must implement _project_flags()") - - def project_flags(self, detector, amplitudes): - """Project timestream flags into template amplitude flags. - - For some types of templates, excessive timestream flagging can corrupt some of - the template amplitudes (for example using the offset template with short - step lengths). It is up to each template class to determine the impact of the - timestream flags on template amplitudes. - - The flags of the input amplitudes are updated in place. - - Args: - detector (str): The detector name. 
- amplitudes (Amplitudes): The Amplitude values for this template. - - Returns: - None - - """ - if self.data is None: - raise RuntimeError("You must set the data trait before using a template") - # Short circuit if there are no shared or detector flags specified. - if self.det_flags is None and self.shared_flags is None: - return - else: - self._project_flags(detector, amplitudes) - def _add_prior(self, amplitudes_in, amplitudes_out): # Not all Templates implement the prior return diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index d7ce0dda8..1f6b897ce 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -27,5 +27,6 @@ install(FILES ops_memory_counter.py ops_scan_map.py ops_madam.py + template_offset.py DESTINATION ${PYTHON_SITE}/toast/tests ) diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 475fff64f..84fabda7d 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -43,6 +43,8 @@ from . import covariance as test_covariance +from . import template_offset as test_template_offset + # # from . import cache as testcache # @@ -155,6 +157,8 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_covariance)) + suite.addTest(loader.loadTestsFromModule(test_template_offset)) + # # suite.addTest(loader.loadTestsFromModule(testtod)) # diff --git a/src/toast/tests/template_offset.py b/src/toast/tests/template_offset.py new file mode 100644 index 000000000..71a08f8e3 --- /dev/null +++ b/src/toast/tests/template_offset.py @@ -0,0 +1,93 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +import numpy.testing as nt + +from astropy import units as u + +from .mpi import MPITestCase + +from ..utils import rate_from_times + +from .. import ops + +from ..templates import Offset + +from ._helpers import create_outdir, create_satellite_data + + +class TemplateOffsetTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + np.random.seed(123456) + + def test_projection(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create a default noise model + noise_model = ops.DefaultNoiseModel() + noise_model.apply(data) + + # Create some empty detector data + for ob in data.obs: + ob.detdata.create("signal", dtype=np.float64) + + # Use 1/10 of an observation as the baseline length. Make it not evenly + # divisible in order to test handling of the final amplitude. + ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + step_seconds = float(int(ob_time / 10.0)) + + tmpl = Offset( + det_data="signal", + times="times", + noise_model=noise_model.noise_model, + step_time=step_seconds, + ) + # Set the data + tmpl.data = data + + # Get some amplitudes and set to one + amps = tmpl.zeros() + amps.local[:] = 1.0 + + # Project. 
+ for det in tmpl.detectors(): + for ob in data.obs: + tmpl.add_to_signal(det, amps) + + # Verify + for ob in data.obs: + for det in ob.local_detectors: + np.testing.assert_equal(ob.detdata["signal"][det], 1.0) + + # Accumulate amplitudes + for det in tmpl.detectors(): + for ob in data.obs: + tmpl.project_signal(det, amps) + + # Verify + for ob in data.obs: + # Get the step boundaries + rate = rate_from_times(ob.shared["times"]) + step_samples = int(step_seconds * rate) + n_step = ob.n_local_samples // step_samples + slices = [ + slice(x * step_samples, (x + 1) * step_samples, 1) + for x in range(n_step - 1) + ] + sizes = [step_samples for x in range(n_step - 1)] + slices.append(slice((n_step - 1) * step_samples, ob.n_local_samples, 1)) + sizes.append(ob.n_local_samples - (n_step - 1) * step_samples) + + for det in ob.local_detectors: + for slc, sz in zip(slices, sizes): + np.testing.assert_equal(ob.detdata["signal"][det, slc], 1.0 * sz) + + del data + return From acd41bcd6431e7e3dcfeb17fba995f20a747bef6 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Tue, 5 Jan 2021 16:47:30 -0800 Subject: [PATCH 050/690] Many small typo fixes. Change detdata create key from detshape to sample_shape. Begin work on mapmaker unit tests. Finish porting offset template, but still debugging. --- src/libtoast/src/toast_template_offset.cpp | 6 - src/toast/_libtoast/template_offset.cpp | 4 +- src/toast/observation_data.py | 48 +-- src/toast/ops/__init__.py | 2 + src/toast/ops/copy.py | 6 +- src/toast/ops/madam_utils.py | 2 +- src/toast/ops/mapmaker.py | 60 ++-- src/toast/ops/mapmaker_solve.py | 54 ++- src/toast/ops/mapmaker_templates.py | 17 +- src/toast/ops/noise_weight.py | 2 +- src/toast/ops/pointing_healpix.py | 10 +- src/toast/ops/scan_map.py | 4 +- src/toast/templates/__init__.py | 2 +- src/toast/templates/offset.py | 387 +++++++++++++-------- src/toast/templates/template.py | 199 ++++++++++- src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/observation.py | 12 +- src/toast/tests/ops_mapmaker.py | 224 ++++++++++++ src/toast/tests/runner.py | 2 + src/toast/tests/template_offset.py | 6 +- 20 files changed, 782 insertions(+), 266 deletions(-) create mode 100644 src/toast/tests/ops_mapmaker.py diff --git a/src/libtoast/src/toast_template_offset.cpp b/src/libtoast/src/toast_template_offset.cpp index 9014abc97..d7cac81da 100644 --- a/src/libtoast/src/toast_template_offset.cpp +++ b/src/libtoast/src/toast_template_offset.cpp @@ -12,9 +12,6 @@ void toast::template_offset_add_to_signal(int64_t step_length, int64_t n_amp, double * amplitudes, int64_t n_data, double * data) { - std::cerr << "DBG add_to " << step_length << " " << n_amp << " " << amplitudes - << " " << n_data << " " << data << std::endl; - // All but the last amplitude have the same number of samples. if (toast::is_aligned(amplitudes) && toast::is_aligned(data)) { #pragma omp simd @@ -43,9 +40,6 @@ void toast::template_offset_add_to_signal(int64_t step_length, int64_t n_amp, void toast::template_offset_project_signal(int64_t step_length, int64_t n_data, double * data, int64_t n_amp, double * amplitudes) { - std::cerr << "DBG project " << step_length << " " << n_data << " " << data << " " << - n_amp << " " << amplitudes << std::endl; - // All but the last amplitude have the same number of samples. 
if (toast::is_aligned(amplitudes) && toast::is_aligned(data)) { #pragma omp simd diff --git a/src/toast/_libtoast/template_offset.cpp b/src/toast/_libtoast/template_offset.cpp index d04f459fc..a168eb12a 100644 --- a/src/toast/_libtoast/template_offset.cpp +++ b/src/toast/_libtoast/template_offset.cpp @@ -50,8 +50,8 @@ void init_template_offset(py::module & m) { int64_t n_data = info_data.size; double * raw_amplitudes = reinterpret_cast (info_amplitudes.ptr); double * raw_data = reinterpret_cast (info_data.ptr); - toast::template_offset_add_to_signal(step_length, n_data, raw_data, - n_amp, raw_amplitudes); + toast::template_offset_project_signal(step_length, n_data, raw_data, + n_amp, raw_amplitudes); return; }, py::arg("step_length"), py::arg("data"), py::arg( "amplitudes"), R"( diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py index f531948a5..7abf85a4b 100644 --- a/src/toast/observation_data.py +++ b/src/toast/observation_data.py @@ -278,7 +278,7 @@ class DetDataMgr(MutableMapping): New objects can be created several ways. The "create()" method: - ob.detdata.create(name, detshape=None, dtype=None, detectors=None) + ob.detdata.create(name, sample_shape=None, dtype=None, detectors=None) gives full control over creating the named object and specifying the shape of each detector sample. The detectors argument can be used to restrict the object @@ -300,7 +300,7 @@ class DetDataMgr(MutableMapping): It is also possible to create a new object by assigning an array. In that case the array must either have the full size of the DetectorData object - (n_det x n_sample x detshape) or must have dimensions (n_sample x detshape), in + (n_det x n_sample x sample_shape) or must have dimensions (n_sample x sample_shape), in which case the array is copied to all detectors. For example: ob.detdata[name] = np.ones( @@ -327,7 +327,7 @@ def __init__(self, detectors, samples): self.detectors = detectors self._internal = dict() - def create(self, name, detshape=None, dtype=np.float64, detectors=None): + def create(self, name, sample_shape=None, dtype=np.float64, detectors=None): """Create a local DetectorData buffer on this process. This method can be used to create arrays of detector data for storing signal, @@ -335,7 +335,7 @@ def create(self, name, detshape=None, dtype=np.float64, detectors=None): Args: name (str): The name of the detector data (signal, flags, etc) - detshape (tuple): Use this shape for the data of each detector sample. + sample_shape (tuple): Use this shape for the data of each detector sample. Use None or an empty tuple if you want one element per sample. dtype (np.dtype): Use this dtype for each element. detectors (list): Only construct a data object for this set of detectors. 
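
A brief usage sketch of the renamed argument may help; it assumes ob is an existing Observation and uses hypothetical detector names:

    import numpy as np

    # One float64 value per sample, for all local detectors:
    ob.detdata.create("signal", dtype=np.float64)

    # Four values per sample (e.g. quaternions), restricted to two detectors:
    ob.detdata.create(
        "quats", sample_shape=(4,), dtype=np.float64, detectors=["d00", "d01"]
    )

    # Assigning an (n_sample, 4) array creates the object if needed and
    # copies the same values to every detector:
    ob.detdata["pointing"] = np.zeros((ob.n_local_samples, 4), dtype=np.float64)
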
@@ -361,12 +361,12 @@ def create(self, name, detshape=None, dtype=np.float64, detectors=None): raise ValueError(msg) data_shape = None - if detshape is None or len(detshape) == 0: + if sample_shape is None or len(sample_shape) == 0: data_shape = (self.samples,) - elif len(detshape) == 1 and detshape[0] == 1: + elif len(sample_shape) == 1 and sample_shape[0] == 1: data_shape = (self.samples,) else: - data_shape = (self.samples,) + detshape + data_shape = (self.samples,) + sample_shape # Create the data object self._internal[name] = DetectorData(detectors, data_shape, dtype) @@ -399,7 +399,7 @@ def __setitem__(self, key, value): # Create it first self.create( key, - detshape=value.detector_shape, + sample_shape=value.detector_shape[1:], dtype=value.dtype, detectors=value.detectors, ) @@ -411,7 +411,7 @@ def __setitem__(self, key, value): self._internal[key][d] = value[d] elif isinstance(value, Mapping): # This is a dictionary of detector arrays - detshape = None + sample_shape = None dtype = None for d, ddata in value.items(): if d not in self.detectors: @@ -422,11 +422,11 @@ def __setitem__(self, key, value): d, ddata.shape[0], self.samples ) raise ValueError(msg) - if detshape is None: - detshape = ddata.shape[1:] + if sample_shape is None: + sample_shape = ddata.shape[1:] dtype = ddata.dtype else: - if detshape != ddata.shape[1:]: + if sample_shape != ddata.shape[1:]: msg = "All detector arrays must have the same shape" raise ValueError(msg) if dtype != ddata.dtype: @@ -435,12 +435,12 @@ def __setitem__(self, key, value): if key not in self._internal: self.create( key, - detshape=detshape, + sample_shape=sample_shape, dtype=dtype, detectors=sorted(value.keys()), ) else: - if (self.samples,) + detshape != self._internal[key].detector_shape: + if (self.samples,) + sample_shape != self._internal[key].detector_shape: msg = "Assignment value has wrong detector shape" raise ValueError(msg) for d, ddata in value.items(): @@ -450,20 +450,20 @@ def __setitem__(self, key, value): shp = value.shape if shp[0] == self.samples: # This is a single detector array, being assigned to all detectors - detshape = None + sample_shape = None if len(shp) > 1: - detshape = shp[1:] + sample_shape = shp[1:] if key not in self._internal: self.create( key, - detshape=detshape, + sample_shape=sample_shape, dtype=value.dtype, detectors=self.detectors, ) else: fullshape = (self.samples,) - if detshape is not None: - fullshape += detshape + if sample_shape is not None: + fullshape += sample_shape if fullshape != self._internal[key].detector_shape: msg = "Assignment value has wrong detector shape" raise ValueError(msg) @@ -474,20 +474,20 @@ def __setitem__(self, key, value): if shp[1] != self.samples: msg = "Assignment value has wrong number of samples" raise ValueError(msg) - detshape = None + sample_shape = None if len(shp) > 2: - detshape = shp[2:] + sample_shape = shp[2:] if key not in self._internal: self.create( key, - detshape=detshape, + sample_shape=sample_shape, dtype=value.dtype, detectors=self.detectors, ) else: fullshape = (self.samples,) - if detshape is not None: - fullshape += detshape + if sample_shape is not None: + fullshape += sample_shape if fullshape != self._internal[key].detector_shape: msg = "Assignment value has wrong detector shape" raise ValueError(msg) diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index aa81c0978..41da5f208 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -35,6 +35,8 @@ from .mapmaker_binning import BinMap +from .mapmaker_templates 
import TemplateMatrix + from .mapmaker import MapMaker from .madam import Madam diff --git a/src/toast/ops/copy.py b/src/toast/ops/copy.py index 6d294480f..3f2dacd45 100644 --- a/src/toast/ops/copy.py +++ b/src/toast/ops/copy.py @@ -163,9 +163,13 @@ def _exec(self, data, detectors=None, **kwargs): log.error(msg) raise RuntimeError(msg) else: + sample_shape = None + shp = ob.detdata[in_key].detector_shape + if len(shp) > 1: + sample_shape = shp[1:] ob.detdata.create( out_key, - detshape=ob.detdata[in_key].detector_shape, + sample_shape=sample_shape, dtype=ob.detdata[in_key].dtype, detectors=ob.detdata[in_key].detectors, ) diff --git a/src/toast/ops/madam_utils.py b/src/toast/ops/madam_utils.py index d361cd475..27de9d847 100644 --- a/src/toast/ops/madam_utils.py +++ b/src/toast/ops/madam_utils.py @@ -179,7 +179,7 @@ def restore_local( if nnz == 1: ob.detdata.create(detdata_name, dtype=detdata_dtype) else: - ob.detdata.create(detdata_name, dtype=detdata_dtype, detshape=(nnz,)) + ob.detdata.create(detdata_name, dtype=detdata_dtype, sample_shape=(nnz,)) print("Created detdata {} = {}".format(detdata_name, ob.detdata[detdata_name])) print("madam buffer has shape = ", madam_buffer.shape) for vw in ob.view[view].detdata[detdata_name]: diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index 659439f12..9e74af576 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -10,7 +10,7 @@ from ..traits import trait_docs, Int, Unicode, Bool, Float, Instance -from ..timing import function_timer +from ..timing import function_timer, Timer from ..pixels import PixelDistribution, PixelData @@ -22,7 +22,11 @@ from .copy import Copy -from .scan_map import ScanMap +from .scan_map import ScanMap, ScanMask + +from .mapmaker_utils import CovarianceAndHits + +from .mapmaker_solve import solve, SolverRHS, SolverLHS @trait_docs @@ -172,7 +176,7 @@ def _check_map_binning(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) - def _log_info(comm, rank, msg, timer=None): + def _log_info(self, comm, rank, msg, timer=None): """Helper function to log an INFO level message from rank zero""" log = Logger.get() if comm is not None: @@ -232,7 +236,7 @@ def _exec(self, data, detectors=None, **kwargs): ] ) copy_det.apply(data, detectors=detectors) - self._log_info(comm, rank, "data copy finished in", timer=timer) + self._log_info(comm, rank, " data copy finished in", timer=timer) # Flagging. We create a new set of data flags for the solver that includes: # - one bit for a bitwise OR of all detector / shared flags @@ -268,7 +272,12 @@ def _exec(self, data, detectors=None, **kwargs): views = ob.view[solve_view] # For each view... 
for vw in range(len(views)): - view_samples = views[vw].stop - views[vw].start + view_samples = None + if views[vw].start is None: + # There is one view of the whole obs + view_samples = ob.n_local_samples + else: + view_samples = views[vw].stop - views[vw].start starting_flags = np.zeros(view_samples, dtype=np.uint8) if save_shared_flags is not None: starting_flags[:] = np.where( @@ -296,14 +305,15 @@ def _exec(self, data, detectors=None, **kwargs): # Set up operator for optional clearing of the pointing matrices clear_pointing = Clear(detdata=[scan_pointing.pixels, scan_pointing.weights]) - scanner = ops.ScanMask( + scanner = ScanMask( det_flags=flagname, - det_flags_value=2, - pixels=pointing.pixels, - mask_key=self.mask, + pixels=scan_pointing.pixels, mask_bits=1, ) + scanner.det_flags_value = 2 + scanner.mask_key = self.mask + scan_pipe = None if self.binning.save_pointing: # Process all detectors at once @@ -317,9 +327,11 @@ def _exec(self, data, detectors=None, **kwargs): operators=[scan_pointing, scanner, clear_pointing], ) - scan_pipe.apply(data, detectors=detectors) + if self.mask is not None: + # We actually have an input mask. Scan it. + scan_pipe.apply(data, detectors=detectors) - self._log_info(comm, rank, "finished flag building in", timer=timer) + self._log_info(comm, rank, " finished flag building in", timer=timer) # Now construct the noise covariance, hits, and condition number mask @@ -347,11 +359,11 @@ def _exec(self, data, detectors=None, **kwargs): solver_cov.apply(data, detectors=detectors) data[solver_rcond_mask_name] = PixelData( - self.binning.pixel_dist, dtype=np.uint8, n_value=1 - ) - data[solver_rcond_mask_name].raw[:] = np.where( - data[solver_rcond_name].raw.array() < self.solve_rcond_threshold, 1, 0 + data[self.binning.pixel_dist], dtype=np.uint8, n_value=1 ) + data[solver_rcond_mask_name].raw[ + data[solver_rcond_name].raw.array() < self.solve_rcond_threshold + ] = 1 # Re-use our mask scanning pipeline, setting third bit (== 4) scanner.det_flags_value = 4 @@ -359,7 +371,7 @@ def _exec(self, data, detectors=None, **kwargs): scan_pipe.apply(data, detectors=detectors) self._log_info( - comm, rank, "finished build of solver covariance in", timer=timer + comm, rank, " finished build of solver covariance in", timer=timer ) # Compute the RHS. Overwrite inputs, either the original or the copy. @@ -381,9 +393,9 @@ def _exec(self, data, detectors=None, **kwargs): binning=self.binning, template_matrix=self.template_matrix, ) - rhs.apply(data, detectors=detectors) + rhs_calc.apply(data, detectors=detectors) - self._log_info(comm, rank, "finished RHS calculation in", timer=timer) + self._log_info(comm, rank, " finished RHS calculation in", timer=timer) # Set up the LHS operator. Use either the original timestreams or the copy # as temp space. @@ -404,12 +416,12 @@ def _exec(self, data, detectors=None, **kwargs): data, detectors, lhs_calc, - data["amplitudes_rhs"], + data[rhs_amplitude_key], convergence=self.convergence, n_iter_max=self.iter_max, ) - self._log_info(comm, rank, "finished solver in", timer=timer) + self._log_info(comm, rank, " finished solver in", timer=timer) # Restore flag names and masks to binning operator, in case it is being used # for the final map making or for other external operations. 
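
For orientation, the RHS and LHS operators above implement the standard destriping normal equations. In conventional notation (not symbols from the patch), with pointing matrix P, template matrix F, timestream noise covariance N, and data vector d, the template amplitudes a solve

    (F^T N^-1 Z F) a = F^T N^-1 Z d,  where  Z = I - P (P^T N^-1 P)^-1 P^T N^-1

subtracts the binned and re-scanned map from the timestream. SolverRHS builds the right hand side once, SolverLHS applies the left hand side on each iteration, and the system is solved with preconditioned conjugate gradients. A textbook PCG sketch on plain numpy vectors, shown only to make the structure of the solve clear (illustrative, not the patch's implementation):

    import numpy as np

    def pcg(apply_lhs, apply_precond, rhs, guess, n_iter_max=100, tol=1.0e-12):
        x = guess.copy()
        r = rhs - apply_lhs(x)   # initial residual
        s = apply_precond(r)     # preconditioned residual
        d = s.copy()             # search direction
        delta = r @ s
        delta0 = delta
        for _ in range(n_iter_max):
            q = apply_lhs(d)
            alpha = delta / (d @ q)
            x += alpha * d
            r -= alpha * q
            s = apply_precond(r)
            delta_new = r @ s
            if delta_new / delta0 < tol:
                break
            d = s + (delta_new / delta) * d
            delta = delta_new
        return x

    # Example with a trivial diagonal system:
    A = np.diag([4.0, 2.0, 1.0])
    x = pcg(lambda v: A @ v, lambda v: v / np.diag(A),
            np.array([1.0, 1.0, 1.0]), np.zeros(3))
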
@@ -433,7 +445,7 @@ def _exec(self, data, detectors=None, **kwargs): self.template_matrix.transpose = False self.template_matrix.apply(data, detectors=detectors) - self._log_info(comm, rank, "finished amplitude projection in", timer=timer) + self._log_info(comm, rank, " finished amplitude projection in", timer=timer) # Now construct the noise covariance, hits, and condition number mask for the # final binned map. @@ -462,7 +474,9 @@ def _exec(self, data, detectors=None, **kwargs): final_cov.apply(data, detectors=detectors) - self._log_info(comm, rank, "finished build of final covariance in", timer=timer) + self._log_info( + comm, rank, " finished build of final covariance in", timer=timer + ) # Make a binned map of these template-subtracted timestreams @@ -471,7 +485,7 @@ def _exec(self, data, detectors=None, **kwargs): self.map_binning.det_data = detdata_name self.map_binning.apply(data, detectors=detectors) - self._log_info(comm, rank, "finished final binning in", timer=timer) + self._log_info(comm, rank, " finished final binning in", timer=timer) return diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index 1724c2039..40711d9da 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -8,9 +8,9 @@ from ..utils import Logger -from ..traits import trait_docs, Int, Unicode, Bool +from ..traits import trait_docs, Int, Unicode, Bool, Instance -from ..timing import function_timer +from ..timing import function_timer, Timer from ..pixels import PixelDistribution, PixelData @@ -24,6 +24,8 @@ from .scan_map import ScanMap +from .noise_weight import NoiseWeight + @trait_docs class SolverRHS(Operator): @@ -204,6 +206,7 @@ def _exec(self, data, detectors=None, **kwargs): else: # Process one detector at a time and clear pointing after each one. proj_pipe = Pipeline(detector_sets=["SINGLE"]) + oplist = list() if not self.overwrite: oplist.append(copy_det) oplist.extend( @@ -321,7 +324,7 @@ def _check_matrix(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) - def _log_debug(comm, rank, msg, timer=None): + def _log_debug(self, comm, rank, msg, timer=None): """Helper function to log a DEBUG level message from rank zero""" log = Logger.get() if comm is not None: @@ -405,7 +408,7 @@ def _exec(self, data, detectors=None, **kwargs): noise_model=self.binning.noise_model, det_data=self.det_temp ) - # Same operator, but now we are applying the transpose. + # Same template matrix operator, but now we are applying the transpose. self.template_matrix.transpose = True # Create a pipeline that projects the binned map and applies noise @@ -414,45 +417,31 @@ def _exec(self, data, detectors=None, **kwargs): proj_pipe = None if self.binning.save_pointing: # Process all detectors at once - proj_pipe = Pipeline(detector_sets=["ALL"]) - oplist = list() - if not self.overwrite: - oplist.append(copy_det) - oplist.extend( - [ + proj_pipe = Pipeline( + detector_sets=["ALL"], + operators=[ pointing, scan_map, noise_weight, self.template_matrix, - ] + ], ) - if not self.overwrite: - oplist.append(clear_temp) - proj_pipe.operators = oplist else: # Process one detector at a time and clear pointing after each one. 
- proj_pipe = Pipeline(detector_sets=["SINGLE"]) - if not self.overwrite: - oplist.append(copy_det) - oplist.extend( - [ + proj_pipe = Pipeline( + detector_sets=["SINGLE"], + operators=[ pointing, scan_map, clear_pointing, noise_weight, self.template_matrix, - ] + ], ) - if not self.overwrite: - oplist.append(clear_temp) - proj_pipe.operators = oplist # Zero out the amplitudes before accumulating the updated values - for ob in data.obs: - amplitudes = ob[self.amplitudes] - for ampname, ampvals in amplitudes.items(): - ampvals.reset() + data[self.template_matrix.amplitudes].reset() # Run the projection pipeline. @@ -532,6 +521,10 @@ def solve( # The starting guess if guess is None: # Copy structure of the RHS and set to zero + if lhs_amps in data: + msg = "LHS amplitudes '{}' already exists in data".format(lhs_amps) + log.error(msg) + raise RuntimeError(msg) data[lhs_amps] = rhs_amps.duplicate() data[lhs_amps].reset() else: @@ -546,6 +539,10 @@ def solve( residual = rhs_amps.duplicate() residual -= data[lhs_amps] + print("RHS ", rhs_amps) + print("Guess", data[lhs_amps]) + print(residual) + # The preconditioned residual # s = M^-1 * r precond_residual = residual.duplicate() @@ -579,7 +576,8 @@ def solve( raise RuntimeError("Residual is not finite") # Update LHS amplitude inputs - data[lhs_amps].local[:] = proposal.local + for k, v in data[lhs_amps].items(): + v.local[:] = proposal[k].local # q = A * d (in place) lhs.apply(data, detectors=detectors) diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 353b91b18..157e86991 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -10,6 +10,8 @@ from ..timing import function_timer +from ..templates import Template, AmplitudesMap + from .operator import Operator @@ -65,13 +67,20 @@ def apply_precond(self, amps_in, amps_out): This can only be called after the operator has been used at least once so that the templates are initialized. + Args: + amps_in (AmplitudesMap): The input amplitudes. + amps_out (AmplitudesMap): The output amplitudes, modified in place. + + Returns: + None + """ if not self._initialized: raise RuntimeError( "You must call exec() once before applying preconditioners" ) for tmpl in self.templates: - tmpl.apply_precond(amps_in, amps_out) + tmpl.apply_precond(amps_in[tmpl.name], amps_out[tmpl.name]) @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -107,7 +116,7 @@ def _exec(self, data, detectors=None, **kwargs): if self.amplitudes not in data: # The output template amplitudes do not yet exist. Create these with # all zero values. 
- data[self.amplitudes] = dict() + data[self.amplitudes] = AmplitudesMap() for tmpl in self.templates: data[self.amplitudes][tmpl.name] = tmpl.zeros() for ob in data.obs: @@ -118,7 +127,7 @@ def _exec(self, data, detectors=None, **kwargs): continue for d in dets: for tmpl in self.templates: - tmpl.project_signal(d, data[self.amplitudes[tmpl.name]]) + tmpl.project_signal(d, data[self.amplitudes][tmpl.name]) else: if self.amplitudes not in data: msg = "Template amplitudes '{}' do not exist in data".format( @@ -134,7 +143,7 @@ def _exec(self, data, detectors=None, **kwargs): continue for d in dets: for tmpl in self.templates: - tmpl.add_to_signal(d, data[self.amplitudes[tmpl.name]]) + tmpl.add_to_signal(d, data[self.amplitudes][tmpl.name]) return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/noise_weight.py b/src/toast/ops/noise_weight.py index 5c9162bde..3fe090427 100644 --- a/src/toast/ops/noise_weight.py +++ b/src/toast/ops/noise_weight.py @@ -62,7 +62,7 @@ def _exec(self, data, detectors=None, **kwargs): for d in dets: # Get the detector weight from the noise model. - detweight = noise.detector_weight(det) + detweight = noise.detector_weight(d) # Apply ob.detdata[self.det_data][d] *= detweight diff --git a/src/toast/ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py index cd390dd1a..406254bc8 100644 --- a/src/toast/ops/pointing_healpix.py +++ b/src/toast/ops/pointing_healpix.py @@ -252,21 +252,21 @@ def _exec(self, data, detectors=None, **kwargs): if self.single_precision: ob.detdata.create( - self.pixels, detshape=(), dtype=np.int32, detectors=dets + self.pixels, sample_shape=(), dtype=np.int32, detectors=dets ) ob.detdata.create( self.weights, - detshape=(self._nnz,), + sample_shape=(self._nnz,), dtype=np.float32, detectors=dets, ) else: ob.detdata.create( - self.pixels, detshape=(), dtype=np.int64, detectors=dets + self.pixels, sample_shape=(), dtype=np.int64, detectors=dets ) ob.detdata.create( self.weights, - detshape=(self._nnz,), + sample_shape=(self._nnz,), dtype=np.float64, detectors=dets, ) @@ -274,7 +274,7 @@ def _exec(self, data, detectors=None, **kwargs): if self.quats is not None: ob.detdata.create( self.quats, - detshape=(4,), + sample_shape=(4,), dtype=np.float64, detectors=dets, ) diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index a3f534e4e..eaf228608 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -100,7 +100,7 @@ def _exec(self, data, detectors=None, **kwargs): maptod = maptod_raw.array() # If our output detector data does not yet exist, create it - if self.det_data not in ob: + if self.det_data not in ob.detdata: ob.detdata.create(self.det_data, dtype=np.float64, detectors=dets) for det in dets: @@ -246,7 +246,7 @@ def _exec(self, data, detectors=None, **kwargs): # If our output detector data does not yet exist, create it with a default # width of one byte per sample. 
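The AmplitudesMap container used above (defined later in this series, in src/toast/templates/template.py) behaves like a dict whose values must be Amplitudes objects, and the solver code relies on collection-wide reset() and duplicate() methods. A rough usage sketch, assuming those methods and using a hypothetical data key and template list:

    from toast.templates import AmplitudesMap  # import path assumed from this tree

    amps = AmplitudesMap()
    for tmpl in templates:
        # Only Amplitudes instances may be stored; anything else raises.
        amps[tmpl.name] = tmpl.zeros()
    data["my_amplitudes"] = amps       # hypothetical key name

    # Collection-wide operations used by the solver:
    data["my_amplitudes"].reset()      # zero every template's amplitude values
    rhs = data["my_amplitudes"].duplicate()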
- if self.det_flags not in ob: + if self.det_flags not in ob.detdata: ob.detdata.create(self.det_flags, dtype=np.uint8, detectors=dets) for det in dets: diff --git a/src/toast/templates/__init__.py b/src/toast/templates/__init__.py index 967d00554..40b197511 100644 --- a/src/toast/templates/__init__.py +++ b/src/toast/templates/__init__.py @@ -4,6 +4,6 @@ # Import Templates into our public API -from .template import Template, Amplitudes +from .template import Template, Amplitudes, AmplitudesMap from .offset import Offset diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index e427e79ae..5556276e4 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -8,7 +8,7 @@ import scipy -from ..utils import Logger, rate_from_times +from ..utils import Logger, rate_from_times, AlignedF32 from ..timing import function_timer @@ -59,6 +59,11 @@ class Offset(Template): help="Fraction of unflagged samples needed to keep a given offset amplitude", ) + use_noise_prior = Bool( + False, + help="Construct the offset noise covariance and use it for a noise prior and as a preconditioner", + ) + precond_width = Int(20, help="Preconditioner width in terms of offsets / baselines") def __init__(self, **kwargs): @@ -80,10 +85,8 @@ def _initialize(self, new_data): # Sample rate for each obs. self._obs_rate = dict() - # Offset covariance and preconditioner for each obs and detector. + # Frequency bins for the noise prior for each obs. self._freq = dict() - self._filters = dict() - self._precond = dict() for iob, ob in enumerate(new_data.obs): # Compute sample rate from timestamps @@ -113,16 +116,15 @@ def _initialize(self, new_data): ) log.error(msg) raise RuntimeError(msg) - self._filters[iob] = dict() - self._precond[iob] = dict() # Determine the binning for the noise prior - obstime = ob.shared[self.times][-1] - ob.shared[self.times][0] - tbase = step_length - fbase = 1.0 / tbase - powmin = np.floor(np.log10(1 / obstime)) - 1 - powmax = min(np.ceil(np.log10(1 / tbase)) + 2, self._obs_rate[iob]) - self._freq[iob] = np.logspace(powmin, powmax, 1000) + if self.use_noise_prior: + obstime = ob.shared[self.times][-1] - ob.shared[self.times][0] + tbase = step_length + fbase = 1.0 / tbase + powmin = np.floor(np.log10(1 / obstime)) - 1 + powmax = min(np.ceil(np.log10(1 / tbase)) + 2, self._obs_rate[iob]) + self._freq[iob] = np.logspace(powmin, powmax, 1000) # Build up detector list for d in ob.local_detectors: @@ -132,8 +134,7 @@ def _initialize(self, new_data): self._all_dets = list(all_dets.keys()) # Go through the data one local detector at a time and compute the offsets into - # the amplitudes. Also compute the amplitude noise filter and preconditioner - # for each detector and each interval / view. + # the amplitudes. self._det_start = dict() @@ -143,18 +144,10 @@ def _initialize(self, new_data): for iob, ob in enumerate(new_data.obs): if det not in ob.local_detectors: continue - if self.noise_model is not None: - offset_psd = self._get_offset_psd( - ob[self.noise_model], self._freq[iob], det - ) - ( - self._filters[iob][det], - self._precond[iob][det], - ) = self._get_filter_and_precond( - self._freq[iob], offset_psd, ob.view[self.view] - ) offset += np.sum(self._obs_views[iob]) + # Now we know the total number of amplitudes. 
+        self._n_local = offset

         if new_data.comm.comm_world is None:
             self._n_global = self._n_local
@@ -166,43 +159,192 @@ def _initialize(self, new_data):
         # Now that we know the number of amplitudes, we go through the solver flags
         # and determine what amplitudes, if any, are poorly constrained.  These are
         # stored internally as a bool array, and used when constructing a new
-        # Amplitudes object.
+        # Amplitudes object.  We also compute and store the variance of each
+        # amplitude, based on the noise weight of the detector and the number of
+        # flagged samples.

+        # Boolean flags
         self._amp_flags = np.zeros(self._n_local, dtype=np.bool)
-        if self.flags is not None:
-            offset = 0
-            bad_frac = 1.0 - self.good_fraction
-            for det in self._all_dets:
-                for iob, ob in enumerate(new_data.obs):
-                    if det not in ob.local_detectors:
-                        continue
-                    # The step length for this observation
-                    step_length = int(self.step_time * self._obs_rate[iob])
-
-                    # Loop over views of flags
-                    views = ob.view[self.view]
-                    for ivw, vw in enumerate(views.detdata[self.flags]):
-                        n_amp_view = self._obs_views[iob][ivw]
-                        view_samples = len(vw[det])
-                        voff = 0
-                        for amp in range(n_amp_view):
-                            amplen = step_length
-                            if amp == n_amp_view - 1:
-                                amplen = view_samples - voff
-                            frac = (
-                                np.count_nonzero(
-                                    vw[det][voff : voff + amplen] & self.flag_mask
-                                )
-                                / amplen
-                            )
-                            self._amp_flags[offset + amp] = frac > bad_frac
-                            voff += step_length
-                        offset += n_amp_view
+        # For the sigmasq values (offset / baseline variance), we have one per
+        # amplitude, which can approach the size of the time ordered data.  Store
+        # these in C-allocated memory as 32bit float.
+        self._sigmasq_raw = AlignedF32.zeros(self._n_local)
+        self._sigmasq = self._sigmasq_raw.array()
+
+        offset = 0
+        for det in self._all_dets:
+            for iob, ob in enumerate(new_data.obs):
+                if det not in ob.local_detectors:
+                    continue
+
+                # Noise weight
+                detnoise = 1.0
+                if self.noise_model is not None:
+                    detnoise = ob[self.noise_model].detector_weight(det)
+
+                # The step length for this observation
+                step_length = int(self.step_time * self._obs_rate[iob])
+
+                # Loop over views
+                views = ob.view[self.view]
+                for ivw, vw in enumerate(views):
+                    view_samples = None
+                    if vw.start is None:
+                        # This is a view of the whole obs
+                        view_samples = ob.n_local_samples
+                    else:
+                        view_samples = vw.stop - vw.start
+                    n_amp_view = self._obs_views[iob][ivw]
+
+                    # Move this loop to compiled code if it is slow
+                    if self.flags is None:
+                        voff = 0
+                        for amp in range(n_amp_view):
+                            amplen = step_length
+                            if amp == n_amp_view - 1:
+                                amplen = view_samples - voff
+                            self._sigmasq[offset + amp] = 1.0 / (detnoise * amplen)
+                            voff += step_length
+                    else:
+                        flags = views.detdata[self.flags][ivw]
+                        voff = 0
+                        for amp in range(n_amp_view):
+                            amplen = step_length
+                            if amp == n_amp_view - 1:
+                                amplen = view_samples - voff
+                            n_good = amplen - np.count_nonzero(
+                                flags[det][voff : voff + amplen] & self.flag_mask
+                            )
+                            if (n_good / amplen) > self.good_fraction:
+                                # Keep this
+                                self._sigmasq[offset + amp] = 1.0 / (detnoise * n_good)
+                            else:
+                                # Flag it
+                                self._sigmasq[offset + amp] = 0.0
+                                self._amp_flags[offset + amp] = True
+                            voff += step_length
+                    offset += n_amp_view
+
+        # Compute the amplitude noise filter and preconditioner for each detector
+        # and each view.
+
+        self._filters = dict()
+        self._precond = dict()
+
+        if self.use_noise_prior:
+            offset = 0
+            for det in self._all_dets:
+                for iob, ob in enumerate(new_data.obs):
+                    if det not in ob.local_detectors:
+                        continue
+                    if iob not in self._filters:
+                        self._filters[iob] = dict()
+                        self._precond[iob] = dict()
+                    if self.noise_model is not None:
+                        # We have noise information.  Get the PSD describing noise
+                        # correlations between offset amplitudes for this observation.
+                        offset_psd = self._get_offset_psd(
+                            ob[self.noise_model], self._freq[iob], self.step_time, det
+                        )
+
+                        # Log version of offset PSD for interpolation
+                        logfreq = np.log(self._freq[iob])
+                        logpsd = np.log(offset_psd)
+                        logfilter = np.log(1 / offset_psd)
+
+                        # Helper functions
+                        def _interpolate(x, psd):
+                            result = np.zeros(x.size)
+                            good = np.abs(x) > 1e-10
+                            logx = np.log(np.abs(x[good]))
+                            logresult = np.interp(logx, logfreq, psd)
+                            result[good] = np.exp(logresult)
+                            return result
+
+                        def _truncate(noisefilter, lim=1e-4):
+                            icenter = noisefilter.size // 2
+                            ind = (
+                                np.abs(noisefilter[:icenter])
+                                > np.abs(noisefilter[0]) * lim
+                            )
+                            icut = np.argwhere(ind)[-1][0]
+                            if icut % 2 == 0:
+                                icut += 1
+                            noisefilter = np.roll(noisefilter, icenter)
+                            noisefilter = noisefilter[
+                                icenter - icut : icenter + icut + 1
+                            ]
+                            return noisefilter
+
+                        # Compute the list of filters and preconditioners (one per
+                        # view) for this detector.
+                        self._filters[iob][det] = list()
+                        self._precond[iob][det] = list()
+
+                        # Loop over views
+                        views = ob.view[self.view]
+                        for ivw, vw in enumerate(views):
+                            view_samples = None
+                            if vw.start is None:
+                                # This is a view of the whole obs
+                                view_samples = ob.n_local_samples
+                            else:
+                                view_samples = vw.stop - vw.start
+                            n_amp_view = self._obs_views[iob][ivw]
+                            sigmasq_slice = self._sigmasq[
+                                offset : offset + n_amp_view
+                            ]
+
+                            filterlen = n_amp_view * 2 + 1
+                            filterfreq = np.fft.rfftfreq(filterlen, self.step_time)
+                            noisefilter = _truncate(
+                                np.fft.irfft(_interpolate(filterfreq, logfilter))
+                            )
+                            self._filters[iob][det].append(noisefilter)
+
+                            # Build the band-diagonal preconditioner
+                            lower = None
+                            if self.precond_width <= 1:
+                                # Compute C_a prior
+                                preconditioner = _truncate(
+                                    np.fft.irfft(_interpolate(filterfreq, logpsd))
+                                )
+                            else:
+                                # Compute Cholesky decomposition prior
+                                wband = min(
+                                    self.precond_width, noisefilter.size // 2
+                                )
+                                precond_width = max(
+                                    wband, min(self.precond_width, n_amp_view)
+                                )
+                                icenter = noisefilter.size // 2
+                                preconditioner = np.zeros(
+                                    [precond_width, n_amp_view], dtype=np.float64
+                                )
+                                preconditioner[0] = sigmasq_slice
+                                preconditioner[:wband, :] += np.repeat(
+                                    noisefilter[
+                                        icenter : icenter + wband, np.newaxis
+                                    ],
+                                    n_amp_view,
+                                    1,
+                                )
+                                lower = True
+                                scipy.linalg.cholesky_banded(
+                                    preconditioner,
+                                    overwrite_ab=True,
+                                    lower=lower,
+                                    check_finite=True,
+                                )
+                            self._precond[iob][det].append((preconditioner, lower))
+                            offset += n_amp_view
         return

-    @function_timer
-    def _get_offset_psd(self, noise, freq, det):
+    def __del__(self):
+        if hasattr(self, "_sigmasq"):
+            del self._sigmasq
+        if hasattr(self, "_sigmasq_raw"):
+            self._sigmasq_raw.clear()
+            del self._sigmasq_raw
+
+    @staticmethod
+    def _get_offset_psd(noise, freq, step_time, det):
+        """Compute the PSD of the baseline offsets."""
         psdfreq = noise.freq(det)
         psd = noise.psd(det)
         rate = noise.rate(det)
@@ -233,7 +375,7 @@ def g(x):
             result[good] = (np.sin(arg) / arg) ** 2
             return result

-        tbase = self.step_time
+        tbase = step_time
         fbase = 1.0 / tbase
         offset_psd = interpolate_psd(freq) * g(freq * tbase)
         for m in range(1, 2):
@@ -242,60 +384,6 @@ def g(x):
         offset_psd *= fbase
         return offset_psd

-    @function_timer
-    def _get_filter_and_precond(self, freq, offset_psd, view_slices):
-        # logfreq = np.log(freq)
-        # logpsd = np.log(offset_psd)
-        # logfilter = np.log(1 / offset_psd)
-        #
-        # def interpolate(x, psd):
-        #     result = np.zeros(x.size)
-        #     good = np.abs(x) > 1e-10
-        #     logx = np.log(np.abs(x[good]))
-        #     logresult = np.interp(logx, logfreq, psd)
-        #     result[good] = np.exp(logresult)
-        #     return result
-        #
-        # def truncate(noisefilter, lim=1e-4):
-        #     icenter = noisefilter.size // 2
-        #     ind = np.abs(noisefilter[:icenter]) > np.abs(noisefilter[0]) * lim
-        #     icut = np.argwhere(ind)[-1][0]
-        #     if icut % 2 == 0:
-        #         icut += 1
-        #     noisefilter = np.roll(noisefilter, icenter)
-        #     noisefilter = noisefilter[icenter - icut : icenter + icut + 1]
-        #     return noisefilter
-        #
-        # vw_filters = list()
-        # vw_precond = list()
-        # for offset_slice, sigmasqs in offset_slices:
-        #     nstep = offset_slice.stop - offset_slice.start
-        #     filterlen = nstep * 2 + 1
-        #     filterfreq = np.fft.rfftfreq(filterlen, self.step_length)
-        #     noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter)))
-        #     noisefilters.append(noisefilter)
-        #     # Build the band-diagonal preconditioner
-        #     if self.precond_width <= 1:
-        #         # Compute C_a prior
-        #         preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd)))
-        #     else:
-        #         # Compute Cholesky decomposition prior
-        #         wband = min(self.precond_width, noisefilter.size // 2)
-        #         precond_width = max(wband, min(self.precond_width, nstep))
-        #         icenter = noisefilter.size // 2
-        #         preconditioner = np.zeros([precond_width, nstep], dtype=np.float64)
-        #         preconditioner[0] = sigmasqs
-        #         preconditioner[:wband, :] += np.repeat(
-        #             noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1
-        #         )
-        #         lower = True
-        #         scipy.linalg.cholesky_banded(
-        #             preconditioner, overwrite_ab=True, lower=lower, check_finite=True
-        #         )
-        #         preconditioners.append((preconditioner, lower))
-        #     return noisefilters, preconditioners
-        return list(), list()

     def _detectors(self):
         return self._all_dets

@@ -314,13 +402,6 @@ def _add_to_signal(self, detector, amplitudes):
             step_length = int(self.step_time * self._obs_rate[iob])
             for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
                 n_amp_view = self._obs_views[iob][ivw]
-                print(
-                    "calling add_to_signal: ",
-                    step_length,
-                    amplitudes.local[offset : offset + n_amp_view],
-                    vw[detector],
-                    flush=True,
-                )
                 template_offset_add_to_signal(
                     step_length,
                     amplitudes.local[offset : offset + n_amp_view],
@@ -338,13 +419,6 @@ def _project_signal(self, detector, amplitudes):
             step_length = int(self.step_time * self._obs_rate[iob])
             for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
                 n_amp_view = self._obs_views[iob][ivw]
-                print(
-                    "calling project_signal: ",
-                    step_length,
-                    vw[detector],
-                    amplitudes.local[offset : offset + n_amp_view],
-                    flush=True,
-                )
                 template_offset_project_signal(
                     step_length,
                     vw[detector],
@@ -367,45 +441,52 @@ def _add_prior(self, amplitudes_in, amplitudes_out):
                 amps_in = amplitudes_in[offset : offset + n_amp_view]
                 amps_out = amplitudes_out[offset : offset + n_amp_view]
                 amps_out[:] += scipy.signal.convolve(
-                    amps_in, self._filters[iob][det], mode="same"
+                    amps_in, self._filters[iob][det][ivw], mode="same"
                 )
                 offset += n_amp_view

     @function_timer
     def _apply_precond(self, amplitudes_in, amplitudes_out):
-        # offset_amplitudes_in = amplitudes_in[self.name]
-        # offset_amplitudes_out = amplitudes_out[self.name]
-        # if self.use_noise_prior:
-        #     # C_a preconditioner
-        #     for iobs, obs in enumerate(self.data.obs):
-        #         tod = obs["tod"]
-        #         for det in tod.local_dets:
-        #             slices = self.offset_slices[iobs][det]
-        #             preconditioners = self.preconditioners[iobs][det]
-        #             for (offsetslice, sigmasqs), preconditioner in zip(
-        #                 slices, preconditioners
-        #             ):
-        #                 amps_in = offset_amplitudes_in[offsetslice]
-        #                 if self.precond_width <= 1:
-        #                     # Use C_a prior
-        #                     # scipy.signal.convolve will use either `convolve` or `fftconvolve`
-        #                     # depending on the size of the inputs
-        #                     amps_out = scipy.signal.convolve(
-        #                         amps_in, preconditioner, mode="same"
-        #                     )
-        #                 else:
-        #                     # Use pre-computed Cholesky decomposition
-        #                     amps_out = scipy.linalg.cho_solve_banded(
-        #                         preconditioner,
-        #                         amps_in,
-        #                         overwrite_b=False,
-        #                         check_finite=True,
-        #                     )
-        #                 offset_amplitudes_out[offsetslice] = amps_out
-        #     else:
-        #         # Diagonal preconditioner
-        #         offset_amplitudes_out[:] = offset_amplitudes_in
-        #         for itemplate, iobs, det, todslice, sigmasq in self.offset_templates:
-        #             offset_amplitudes_out[itemplate] *= sigmasq
-        #
+        if self.use_noise_prior:
+            # C_a preconditioner
+            for det in self._all_dets:
+                offset = self._det_start[det]
+                for iob, ob in enumerate(self.data.obs):
+                    if det not in ob.local_detectors:
+                        continue
+                    # Loop over views
+                    views = ob.view[self.view]
+                    for ivw, vw in enumerate(views):
+                        view_samples = None
+                        if vw.start is None:
+                            # This is a view of the whole obs
+                            view_samples = ob.n_local_samples
+                        else:
+                            view_samples = vw.stop - vw.start
+
+                        n_amp_view = self._obs_views[iob][ivw]
+                        amp_slice = slice(offset, offset + n_amp_view, 1)
+
+                        amps_in = amplitudes_in[amp_slice]
+                        amps_out = None
+                        if self.precond_width <= 1:
+                            # Use C_a prior
+                            # scipy.signal.convolve will use either `convolve` or
+                            # `fftconvolve` depending on the size of the inputs
+                            amps_out = scipy.signal.convolve(
+                                amps_in, self._precond[iob][det][ivw][0], mode="same"
+                            )
+                        else:
+                            # Use pre-computed Cholesky decomposition
+                            amps_out = scipy.linalg.cho_solve_banded(
+                                self._precond[iob][det][ivw],
+                                amps_in,
+                                overwrite_b=False,
+                                check_finite=True,
+                            )
+                        amplitudes_out[amp_slice] = amps_out
+                        offset += n_amp_view
+        else:
+            # Diagonal preconditioner
+            amplitudes_out.local[:] = amplitudes_in.local
+            amplitudes_out.local *= self._sigmasq
         return
diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py
index d5556f041..dd95da455 100644
--- a/src/toast/templates/template.py
+++ b/src/toast/templates/template.py
@@ -2,6 +2,8 @@
 # Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
 # All rights reserved. Use of this source code is governed by
 # a BSD-style license that can be found in the LICENSE file.
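For context on the two preconditioner branches in _apply_precond above: with precond_width <= 1 the stored C_a prior kernel is applied by convolution, otherwise the banded Cholesky factor produced earlier by scipy.linalg.cholesky_banded is reused via cho_solve_banded. A self-contained sketch of that scipy pattern, with a toy banded matrix (not the actual noise prior):

    import numpy as np
    import scipy.linalg
    import scipy.signal

    n = 100
    amps_in = np.random.default_rng(0).normal(size=n)

    # Convolution branch: apply a short prior kernel directly.
    kernel = np.array([0.1, 0.8, 0.1])
    amps_out = scipy.signal.convolve(amps_in, kernel, mode="same")

    # Banded Cholesky branch: factor once, then solve many times.
    # 'ab' holds the lower banded form: row 0 is the diagonal, row 1 the
    # first sub-diagonal (diagonally dominant, so positive definite).
    ab = np.zeros((2, n))
    ab[0] = 2.0
    ab[1] = -0.5
    cb = scipy.linalg.cholesky_banded(ab, lower=True)
    amps_out = scipy.linalg.cho_solve_banded((cb, True), amps_in)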
+from collections.abc import MutableMapping
+
 import numpy as np

 import traitlets

@@ -371,6 +373,12 @@ def clear(self):
     def __del__(self):
         self.clear()

+    def __repr__(self):
+        val = "<Amplitudes n_global={} n_local={} comm={} local={} flags={}>".format(
+            self.n_global, self.n_local, self.comm, self.local, self.local_flags
+        )
+        return val
+
     def __eq__(self, value):
         if isinstance(value, Amplitudes):
             return self.local == value.local
@@ -383,27 +391,31 @@ def __eq__(self, value):

     def __iadd__(self, other):
         if isinstance(other, Amplitudes):
-            self.local += other.local
+            self.local[:] += other.local
         else:
-            self.local += other
+            self.local[:] += other
+        return self

     def __isub__(self, other):
         if isinstance(other, Amplitudes):
-            self.local -= other.local
+            self.local[:] -= other.local
         else:
-            self.local -= other
+            self.local[:] -= other
+        return self

     def __imul__(self, other):
         if isinstance(other, Amplitudes):
-            self.local *= other.local
+            self.local[:] *= other.local
         else:
-            self.local *= other
+            self.local[:] *= other
+        return self

     def __itruediv__(self, other):
         if isinstance(other, Amplitudes):
-            self.local /= other.local
+            self.local[:] /= other.local
         else:
-            self.local /= other
+            self.local[:] /= other
+        return self

     def __add__(self, other):
         result = self.duplicate()
@@ -448,7 +460,7 @@ def duplicate(self):

     @property
     def comm(self):
-        return _comm
+        return self._comm

     @property
     def n_global(self):
@@ -611,3 +623,172 @@ def dot(self, other):
         # once.  Implement techniques from other existing code when needed.
         raise NotImplementedError("dot of explicitly indexed amplitudes")
         return result
+
+
+class AmplitudesMap(MutableMapping):
+    """Helper class to provide arithmetic operations on a collection of Amplitudes.
+
+    This simply provides syntactic sugar to reduce duplicated code when working with
+    a collection of Amplitudes in the map making.
+
+    """
+
+    def __init__(self):
+        self._internal = dict()
+
+    # Mapping methods
+
+    def __getitem__(self, key):
+        return self._internal[key]
+
+    def __delitem__(self, key):
+        del self._internal[key]
+
+    def __setitem__(self, key, value):
+        if not isinstance(value, Amplitudes):
+            raise RuntimeError(
+                "Only Amplitudes objects may be assigned to an AmplitudesMap"
+            )
+        self._internal[key] = value
+
+    def __iter__(self):
+        return iter(self._internal)
+
+    def __len__(self):
+        return len(self._internal)
+
+    def __repr__(self):
+        val = "<AmplitudesMap {} templates".format(len(self._internal))
+        for k, v in self._internal.items():
+            val += "\n  {}: {}".format(k, v)
+        val += ">"
+        return val

--

From: Theodore Kisner
Date: Tue, 12 Jan 2021 10:59:22 -0800
Subject: [PATCH 051/690] [WIP] Many fixes, new features in DetectorData class, improvements to unit tests.  Still chasing one issue before I can remove debugging statements.
[ci skip] --- src/toast/observation.py | 4 - src/toast/observation_data.py | 232 ++++++++++-- src/toast/ops/CMakeLists.txt | 4 +- src/toast/ops/__init__.py | 6 +- src/toast/ops/arithmetic.py | 151 ++++++++ src/toast/ops/copy.py | 61 ++- src/toast/ops/{clear.py => delete.py} | 16 +- src/toast/ops/mapmaker.py | 485 +++++++++++++++--------- src/toast/ops/mapmaker_binning.py | 43 ++- src/toast/ops/mapmaker_solve.py | 95 +++-- src/toast/ops/mapmaker_templates.py | 38 +- src/toast/ops/mapmaker_utils.py | 188 ++++++--- src/toast/ops/pipeline.py | 26 +- src/toast/ops/pointing_healpix.py | 39 +- src/toast/ops/reset.py | 127 +++++++ src/toast/ops/scan_map.py | 16 +- src/toast/ops/sim_tod_noise.py | 4 +- src/toast/templates/offset.py | 24 ++ src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/_helpers.py | 27 ++ src/toast/tests/ops_madam.py | 13 +- src/toast/tests/ops_mapmaker.py | 42 +- src/toast/tests/ops_mapmaker_binning.py | 66 ++-- src/toast/tests/ops_mapmaker_solve.py | 256 +++++++++++++ src/toast/tests/ops_scan_map.py | 69 +++- src/toast/tests/runner.py | 2 + 26 files changed, 1550 insertions(+), 485 deletions(-) create mode 100644 src/toast/ops/arithmetic.py rename src/toast/ops/{clear.py => delete.py} (81%) create mode 100644 src/toast/ops/reset.py create mode 100644 src/toast/tests/ops_mapmaker_solve.py diff --git a/src/toast/observation.py b/src/toast/observation.py index 035c4c26a..fd6705379 100644 --- a/src/toast/observation.py +++ b/src/toast/observation.py @@ -299,10 +299,6 @@ def __init__( self.intervals = IntervalMgr(self._comm, self.dist.comm_row, self.dist.comm_col) - # Create a default global IntervalList that includes a single interval with - # all the local data span. This is useful for code that wants to access the - # whole observation in the same way as a particular view. 
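The observation_data.py rework in this patch centers on one idea: a DetectorData object owns a single flat, aligned buffer that is reshaped to (n_detectors, n_samples, sample_shape...), so the detector list can change without necessarily reallocating. A minimal model of that storage scheme, with plain numpy standing in for the aligned storage classes used by toast:

    import numpy as np

    detectors = ["d00", "d01"]          # hypothetical detector names
    shape = (len(detectors), 1000, 4)   # (n_det, n_samples, sample_shape...)
    flatsize = int(np.prod(shape))

    raw = np.zeros(flatsize, dtype=np.float64)   # one flat allocation
    data = raw[:flatsize].reshape(shape)         # detector-indexed view

    # Shrinking the detector list can reuse the same buffer: just take a
    # shorter slice of the flat memory and reshape it.
    new_shape = (1, 1000, 4)
    view = raw[: int(np.prod(new_shape))].reshape(new_shape)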
-
     # Fully clear the observation

     def clear(self):
diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py
index 7abf85a4b..726619420 100644
--- a/src/toast/observation_data.py
+++ b/src/toast/observation_data.py
@@ -75,35 +75,22 @@ class DetectorData(object):

     def __init__(self, detectors, shape, dtype, view_data=None):
         log = Logger.get()
-        self._detectors = detectors
-        if len(self._detectors) == 0:
-            msg = "You must specify a list of at least one detector name"
-            log.error(msg)
-            raise ValueError(msg)
-
-        self._name2idx = {y: x for x, y in enumerate(self._detectors)}
+        self._set_detectors(detectors)

-        # construct a new dtype in case the parameter given is shortcut string
-        self._dtype = np.dtype(dtype)
-        self._storage_class, self.itemsize = dtype_to_aligned(dtype)
+        (
+            self._storage_class,
+            self.itemsize,
+            self._dtype,
+            self._shape,
+            self._flatshape,
+        ) = self._data_props(detectors, shape, dtype)

-        # Verify that our shape contains only integral values
-        self._flatshape = len(self._detectors)
-        for d in shape:
-            if not isinstance(d, (int, np.integer)):
-                msg = "input shape contains non-integer values"
-                log.error(msg)
-                raise ValueError(msg)
-            self._flatshape *= d
-        self._memsize = self.itemsize * self._flatshape
+        self._fullsize = 0
+        self._memsize = 0

-        shp = [len(self._detectors)]
-        shp.extend(shape)
-        self._shape = tuple(shp)
         if view_data is None:
             # Allocate the data
-            self._raw = self._storage_class.zeros(self._flatshape)
-            self._data = self._raw.array().reshape(self._shape)
+            self._allocate()
             self._is_view = False
         else:
             # We are provided the data
@@ -118,6 +105,39 @@ def __init__(self, detectors, shape, dtype, view_data=None):
             self._data = view_data
             self._is_view = True

+    def _set_detectors(self, detectors):
+        log = Logger.get()
+        self._detectors = detectors
+        if len(self._detectors) == 0:
+            msg = "You must specify a list of at least one detector name"
+            log.error(msg)
+            raise ValueError(msg)
+        self._name2idx = {y: x for x, y in enumerate(self._detectors)}
+
+    def _data_props(self, detectors, detshape, dtype):
+        log = Logger.get()
+        dt = np.dtype(dtype)
+        storage_class, itemsize = dtype_to_aligned(dtype)
+
+        # Verify that our shape contains only integral values
+        flatshape = len(detectors)
+        for d in detshape:
+            if not isinstance(d, (int, np.integer)):
+                msg = "input shape contains non-integer values"
+                log.error(msg)
+                raise ValueError(msg)
+            flatshape *= d
+
+        shp = [len(detectors)]
+        shp.extend(detshape)
+        shp = tuple(shp)
+        return (storage_class, itemsize, dt, shp, flatshape)
+
+    def _allocate(self):
+        self._fullsize = self._flatshape
+        self._memsize = self.itemsize * self._fullsize
+        self._raw = self._storage_class.zeros(self._fullsize)
+        self._flatdata = self._raw.array()[: self._flatshape]
+        self._data = self._flatdata.reshape(self._shape)
+
     @property
     def detectors(self):
         return list(self._detectors)
@@ -144,6 +164,57 @@ def memory_use(self):
     def data(self):
         return self._data

+    @property
+    def flatdata(self):
+        return self._flatdata
+
+    def change_detectors(self, detectors):
+        """Modify the list of detectors.
+
+        This attempts to re-use the underlying memory and just change the detector
+        mapping to that memory.  This is useful if memory allocation is expensive.
+        If the new list of detectors is longer than the original, a new memory buffer
+        is allocated.  If the new list of detectors is shorter than the original, the
+        buffer is kept and only a subset is used.
+
+        Args:
+            detectors (list):  A list of detector names in exactly the order you wish.
+ + Returns: + None + + """ + log = Logger.get() + if self._is_view: + msg = "Cannot resize a DetectorData view" + log.error(msg) + raise RuntimeError(msg) + + if detectors == self._detectors: + # No-op + return + + # Get the new data properties + (storage_class, itemsize, dt, shp, flatshape) = self._data_props( + detectors, self._shape[1:], self._dtype + ) + + self._set_detectors(detectors) + + if flatshape > self._fullsize: + # We have to reallocate... + self.clear() + self._shape = shp + self._flatshape = flatshape + self._allocate() + else: + # We can re-use the existing memory + self._shape = shp + self._flatshape = flatshape + self._flatdata = self._raw.array()[: self._flatshape] + self._flatdata[:] = 0 + self._data = self._flatdata.reshape(self._shape) + def clear(self): """Delete the underlying memory. @@ -155,6 +226,8 @@ def clear(self): if hasattr(self, "_data"): del self._data if not self._is_view: + if hasattr(self, "_flatdata"): + del self._flatdata if hasattr(self, "_raw"): self._raw.clear() del self._raw @@ -300,8 +373,9 @@ class DetDataMgr(MutableMapping): It is also possible to create a new object by assigning an array. In that case the array must either have the full size of the DetectorData object - (n_det x n_sample x sample_shape) or must have dimensions (n_sample x sample_shape), in - which case the array is copied to all detectors. For example: + (n_det x n_sample x sample_shape) or must have dimensions + (n_sample x sample_shape), in which case the array is copied to all detectors. + For example: ob.detdata[name] = np.ones( (len(ob.local_detectors), ob.n_local_samples, 4), dtype=np.float32 @@ -327,12 +401,27 @@ def __init__(self, detectors, samples): self.detectors = detectors self._internal = dict() + def _data_shape(self, sample_shape): + dshape = None + if sample_shape is None or len(sample_shape) == 0: + dshape = (self.samples,) + elif len(sample_shape) == 1 and sample_shape[0] == 1: + dshape = (self.samples,) + else: + dshape = (self.samples,) + sample_shape + return dshape + def create(self, name, sample_shape=None, dtype=np.float64, detectors=None): """Create a local DetectorData buffer on this process. This method can be used to create arrays of detector data for storing signal, flags, or other timestream products on each process. + If the named detector data already exists in an observation, then additional + checks are done that the sample_shape and dtype match the existing object. + If so, then the DetectorData.change_detectors() method is called to re-use + this existing memory buffer if possible. + Args: name (str): The name of the detector data (signal, flags, etc) sample_shape (tuple): Use this shape for the data of each detector sample. 
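A usage sketch of the buffer-reuse behavior described above (detector names hypothetical; the import path is assumed from this source tree):

    import numpy as np
    from toast.observation_data import DetectorData

    dd = DetectorData(["d00", "d01", "d02"], (1000,), np.float64)

    # Shrinking re-uses the existing allocation and zeros the active slice.
    dd.change_detectors(["d01", "d02"])

    # Growing beyond the original allocation frees and reallocates.
    dd.change_detectors(["d00", "d01", "d02", "d03"])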
@@ -347,10 +436,6 @@ def create(self, name, sample_shape=None, dtype=np.float64, detectors=None):

         """
         log = Logger.get()
-        if name in self._internal:
-            msg = "Detector data with name '{}' already exists.".format(name)
-            log.error(msg)
-            raise RuntimeError(msg)

         if detectors is None:
             detectors = self.detectors
@@ -360,19 +445,88 @@ def create(self, name, sample_shape=None, dtype=np.float64, detectors=None):
                     msg = "detector '{}' not in this observation".format(d)
                     raise ValueError(msg)

-        data_shape = None
-        if sample_shape is None or len(sample_shape) == 0:
-            data_shape = (self.samples,)
-        elif len(sample_shape) == 1 and sample_shape[0] == 1:
-            data_shape = (self.samples,)
-        else:
-            data_shape = (self.samples,) + sample_shape
+        data_shape = self._data_shape(sample_shape)
+
+        if name in self._internal:
+            msg = "detdata '{}' already exists".format(name)
+            log.error(msg)
+            raise RuntimeError(msg)

         # Create the data object
+        print("DetDataMgr[{}] allocate for {}".format(name, detectors), flush=True)
         self._internal[name] = DetectorData(detectors, data_shape, dtype)

         return

+    def ensure(self, name, sample_shape=None, dtype=np.float64, detectors=None):
+        """Ensure that the observation has the named detector data.
+
+        If the named detdata object does not exist, it is created.  If it does exist
+        and the sample shape and dtype are compatible, then it is checked whether the
+        specified detectors are already included.  If not, it calls the
+        DetectorData.change_detectors() method to re-use this existing memory buffer
+        if possible.
+
+        Args:
+            name (str): The name of the detector data (signal, flags, etc)
+            sample_shape (tuple): Use this shape for the data of each detector sample.
+                Use None or an empty tuple if you want one element per sample.
+            dtype (np.dtype): Use this dtype for each element.
+            detectors (list): Ensure that these detectors exist in the object.
+
+        Returns:
+            None
+
+        """
+        log = Logger.get()
+        if detectors is None:
+            detectors = self.detectors
+        else:
+            for d in detectors:
+                if d not in self.detectors:
+                    msg = "detector '{}' not in this observation".format(d)
+                    raise ValueError(msg)
+
+        data_shape = self._data_shape(sample_shape)
+
+        if name in self._internal:
+            # The object already exists.  Check properties.
+            dt = np.dtype(dtype)
+            if dt != self._internal[name].dtype:
+                msg = "Detector data '{}' already exists with dtype {}.".format(
+                    name, self._internal[name].dtype
+                )
+                log.error(msg)
+                raise RuntimeError(msg)
+            if data_shape != self._internal[name].detector_shape:
+                msg = "Detector data '{}' already exists with det shape {}.".format(
+                    name, self._internal[name].detector_shape
+                )
+                log.error(msg)
+                raise RuntimeError(msg)
+            # Ok, we can re-use this.  Are the detectors already included in the data?
+ change = False + for d in detectors: + if d not in self._internal[name].detectors: + change = True + if change: + print( + "DetDataMgr[{}] change detectors to {}".format(name, detectors), + flush=True, + ) + self._internal[name].change_detectors(detectors) + else: + print( + "DetDataMgr[{}] detectors {} already included".format( + name, detectors + ), + flush=True, + ) + else: + # Create the data object + self.create( + name, sample_shape=sample_shape, dtype=dtype, detectors=detectors + ) + # Mapping methods def __getitem__(self, key): @@ -830,9 +984,11 @@ def __delitem__(self, key): if key in self._del_callbacks: try: self._del_callbacks[key](key) + del self._del_callbacks[key] except: pass - del self._internal[key] + if key in self._internal: + del self._internal[key] def __setitem__(self, key, value): if not isinstance(value, IntervalList): diff --git a/src/toast/ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt index 0e937edae..f54dc81dc 100644 --- a/src/toast/ops/CMakeLists.txt +++ b/src/toast/ops/CMakeLists.txt @@ -5,8 +5,10 @@ install(FILES __init__.py operator.py pipeline.py - clear.py + delete.py copy.py + reset.py + arithmetic.py memory_counter.py sim_hwp.py sim_tod_noise.py diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index 41da5f208..14b0163d4 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -8,10 +8,14 @@ from .memory_counter import MemoryCounter -from .clear import Clear +from .delete import Delete from .copy import Copy +from .reset import Reset + +from .arithmetic import Add, Subtract + from .pipeline import Pipeline from .sim_satellite import SimSatellite diff --git a/src/toast/ops/arithmetic.py b/src/toast/ops/arithmetic.py new file mode 100644 index 000000000..f7e3cfaae --- /dev/null +++ b/src/toast/ops/arithmetic.py @@ -0,0 +1,151 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import traitlets + +from ..utils import Logger + +from ..mpi import MPI + +from ..traits import trait_docs, Int, Unicode, List + +from .operator import Operator + + +@trait_docs +class Add(Operator): + """Add two detdata timestreams. + + The result is stored in the first detdata object. 
+ + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + first = Unicode(None, allow_none=True, help="The first detdata object") + + second = Unicode(None, allow_none=True, help="The second detdata object") + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + if self.first is None: + msg = "The first trait must be set before calling exec" + log.error(msg) + raise RuntimeError(msg) + + if self.second is None: + msg = "The second trait must be set before calling exec" + log.error(msg) + raise RuntimeError(msg) + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + if self.first not in ob.detdata: + msg = "The first detdata key '{}' does not exist in observation {}".format( + self.first, ob.name + ) + log.error(msg) + raise RuntimeError(msg) + if self.second not in ob.detdata: + msg = "The second detdata key '{}' does not exist in observation {}".format( + self.second, ob.name + ) + log.error(msg) + raise RuntimeError(msg) + for d in dets: + ob.detdata[self.first][d, :] += ob.detdata[self.second][d, :] + + def _finalize(self, data, **kwargs): + return None + + def _requires(self): + req = {"detdata": [self.first, self.second]} + return req + + def _provides(self): + prov = dict() + return prov + + def _accelerators(self): + # Eventually we can copy memory objects on devices... + return list() + + +@trait_docs +class Subtract(Operator): + """Subtract two detdata timestreams. + + The result is stored in the first detdata object. + + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + first = Unicode(None, allow_none=True, help="The first detdata object") + + second = Unicode(None, allow_none=True, help="The second detdata object") + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + if self.first is None: + msg = "The first trait must be set before calling exec" + log.error(msg) + raise RuntimeError(msg) + + if self.second is None: + msg = "The second trait must be set before calling exec" + log.error(msg) + raise RuntimeError(msg) + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + if self.first not in ob.detdata: + msg = "The first detdata key '{}' does not exist in observation {}".format( + self.first, ob.name + ) + log.error(msg) + raise RuntimeError(msg) + if self.second not in ob.detdata: + msg = "The second detdata key '{}' does not exist in observation {}".format( + self.second, ob.name + ) + log.error(msg) + raise RuntimeError(msg) + for d in dets: + ob.detdata[self.first][d, :] -= ob.detdata[self.second][d, :] + + def _finalize(self, data, **kwargs): + return None + + def _requires(self): + req = {"detdata": [self.first, self.second]} + return req + + def _provides(self): + prov = dict() + return prov + + def _accelerators(self): + # Eventually we can copy memory objects on devices... 
+ return list() diff --git a/src/toast/ops/copy.py b/src/toast/ops/copy.py index 3f2dacd45..0432f377d 100644 --- a/src/toast/ops/copy.py +++ b/src/toast/ops/copy.py @@ -40,6 +40,12 @@ class Copy(Operator): None, allow_none=True, help="List of tuples of Observation shared keys to copy" ) + intervals = List( + None, + allow_none=True, + help="List of tuples of Observation intervals keys to copy", + ) + @traitlets.validate("meta") def _check_meta(self, proposal): val = proposal["value"] @@ -138,43 +144,58 @@ def _exec(self, data, detectors=None, **kwargs): ob.shared[out_key]._flat[:] = ob.shared[in_key]._flat if self.detdata is not None: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue for in_key, out_key in self.detdata: if out_key in ob.detdata: - # The key exists- verify that dimensions match - if ( - ob.detdata[out_key].detectors - != ob.detdata[in_key].detectors - ): - msg = "Cannot copy to existing detdata key {} with different detectors".format( - out_key - ) - log.error(msg) - raise RuntimeError(msg) + # The key exists- verify that dimensions / dtype match if ob.detdata[out_key].dtype != ob.detdata[in_key].dtype: msg = "Cannot copy to existing detdata key {} with different dtype".format( out_key ) log.error(msg) raise RuntimeError(msg) - if ob.detdata[out_key].shape != ob.detdata[in_key].shape: - msg = "Cannot copy to existing detdata key {} with different shape".format( + if ( + ob.detdata[out_key].detector_shape + != ob.detdata[in_key].detector_shape + ): + msg = "Cannot copy to existing detdata key {} with different detector shape".format( out_key ) log.error(msg) raise RuntimeError(msg) + if ob.detdata[out_key].detectors != dets: + # The output has a different set of detectors. Reallocate. 
+ print( + "Copy: reset detdata {} for dets {}".format( + out_key, dets + ), + flush=True, + ) + ob.detdata[out_key].change_detectors(dets) else: sample_shape = None shp = ob.detdata[in_key].detector_shape if len(shp) > 1: sample_shape = shp[1:] + print( + "Copy: allocate detdata {} for dets {}".format( + out_key, dets + ), + flush=True, + ) ob.detdata.create( out_key, sample_shape=sample_shape, dtype=ob.detdata[in_key].dtype, - detectors=ob.detdata[in_key].detectors, + detectors=dets, ) - ob.detdata[out_key][:] = ob.detdata[in_key][:] - + # Copy detector data + for d in dets: + ob.detdata[out_key][d, :] = ob.detdata[in_key][d, :] return def _finalize(self, data, **kwargs): @@ -188,16 +209,20 @@ def _requires(self): req["detdata"] = [x[0] for x in self.detdata] if self.shared is not None: req["shared"] = [x[0] for x in self.shared] + if self.intervals is not None: + req["intervals"] = [x[0] for x in self.intervals] return req def _provides(self): prov = dict() if self.meta is not None: - req["meta"] = [x[1] for x in self.meta] + prov["meta"] = [x[1] for x in self.meta] if self.detdata is not None: - req["detdata"] = [x[1] for x in self.detdata] + prov["detdata"] = [x[1] for x in self.detdata] if self.shared is not None: - req["shared"] = [x[1] for x in self.shared] + prov["shared"] = [x[1] for x in self.shared] + if self.intervals is not None: + prov["intervals"] = [x[1] for x in self.intervals] return prov def _accelerators(self): diff --git a/src/toast/ops/clear.py b/src/toast/ops/delete.py similarity index 81% rename from src/toast/ops/clear.py rename to src/toast/ops/delete.py index bf0bbd4ac..8cf90a8a2 100644 --- a/src/toast/ops/clear.py +++ b/src/toast/ops/delete.py @@ -12,10 +12,10 @@ @trait_docs -class Clear(Operator): +class Delete(Operator): """Class to purge data from observations. - This operator takes lists of shared, detdata, and meta keys to delete from + This operator takes lists of shared, detdata, intervals and meta keys to delete from observations. 
""" @@ -36,6 +36,12 @@ class Clear(Operator): None, allow_none=True, help="List of Observation shared keys to delete" ) + intervals = List( + None, + allow_none=True, + help="List of tuples of Observation intervals keys to delete", + ) + def __init__(self, **kwargs): super().__init__(**kwargs) @@ -50,6 +56,10 @@ def _exec(self, data, detectors=None, **kwargs): for key in self.shared: # This ignores non-existant keys del ob.shared[key] + if self.intervals is not None: + for key in self.intervals: + # This ignores non-existant keys + del ob.intervals[key] if self.meta is not None: for key in self.meta: try: @@ -72,6 +82,8 @@ def _requires(self): req["detdata"] = list(self.detdata) if self.shared is not None: req["shared"] = list(self.shared) + if self.intervals is not None: + req["intervals"] = list(self.intervals) return req def _provides(self): diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index 9e74af576..b8ff42247 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -8,6 +8,8 @@ from ..utils import Logger +from ..mpi import MPI + from ..traits import trait_docs, Int, Unicode, Bool, Float, Instance from ..timing import function_timer, Timer @@ -18,10 +20,12 @@ from .pipeline import Pipeline -from .clear import Clear +from .delete import Delete from .copy import Copy +from .arithmetic import Subtract + from .scan_map import ScanMap, ScanMask from .mapmaker_utils import CovarianceAndHits @@ -93,10 +97,6 @@ class MapMaker(Operator): help="For final map, minimum value for inverse pixel condition number cut.", ) - overwrite = Bool( - False, help="Overwrite the input detector data for use as scratch space" - ) - mask = Unicode( None, allow_none=True, @@ -121,6 +121,16 @@ class MapMaker(Operator): help="Binning operator for final map making. Default is same as solver", ) + mc_mode = Bool(False, help="If True, re-use solver flags, sparse covariances, etc") + + save_cleaned = Bool( + False, help="If True, save the template-subtracted detector timestreams" + ) + + overwrite_cleaned = Bool( + False, help="If True and save_cleaned is True, overwrite the input data" + ) + @traitlets.validate("binning") def _check_binning(self, proposal): bin = proposal["value"] @@ -139,7 +149,7 @@ def _check_binning(self, proposal): "shared_flags", "shared_flag_mask", "noise_model", - "save_pointing", + "saved_pointing", "sync_type", ]: if not bin.has_trait(trt): @@ -165,7 +175,7 @@ def _check_map_binning(self, proposal): "shared_flags", "shared_flag_mask", "noise_model", - "save_pointing", + "saved_pointing", "sync_type", ]: if not bin.has_trait(trt): @@ -213,166 +223,208 @@ def _exec(self, data, detectors=None, **kwargs): # Use the same binning used in the solver. self.map_binning = self.binning - # For computing the RHS and also for each iteration of the LHS we will need - # a full detector-data sized buffer for use as scratch space. We can either - # destroy the input data to save memory (useful if this is the last operator - # processing the data) or we can create a temporary set of timestreams. - - timer.start() - - copy_det = None - clear_temp = None - detdata_name = self.det_data - - if not self.overwrite: - self._log_info(comm, rank, "overwrite is False, making data copy") - - # Use a temporary detdata named after this operator - detdata_name = "{}_signal".format(self.name) - # Copy the original data into place, and then use this copy destructively. 
- copy_det = Copy( - detdata=[ - (self.det_data, detdata_name), - ] - ) - copy_det.apply(data, detectors=detectors) - self._log_info(comm, rank, " data copy finished in", timer=timer) - - # Flagging. We create a new set of data flags for the solver that includes: - # - one bit for a bitwise OR of all detector / shared flags - # - one bit for any pixel mask, projected to TOD - # - one bit for any poorly conditioned pixels, projected to TOD + # Binning parameters for the solver. # We use the input binning operator to define the flags that the user has # specified. We will save the name / bit mask for these and restore them later. # Then we will use the binning operator with our solver flags. These input # flags are combined to the first bit (== 1) of the solver flags. - self._log_info(comm, rank, "begin building flags for solver") - - flagname = "{}_flags".format(self.name) - save_det_flags = self.binning.det_flags save_det_flag_mask = self.binning.det_flag_mask save_shared_flags = self.binning.shared_flags save_shared_flag_mask = self.binning.shared_flag_mask - # Use the same data view as the pointing operator in binning - solve_view = self.binning.pointing.view - - for ob in data.obs: - # Get the detectors we are using for this observation - dets = ob.select_local_detectors(detectors) - if len(dets) == 0: - # Nothing to do for this observation - continue - # Create the new solver flags - ob.detdata.create(flagname, dtype=np.uint8, detectors=detectors) - # The data views - views = ob.view[solve_view] - # For each view... - for vw in range(len(views)): - view_samples = None - if views[vw].start is None: - # There is one view of the whole obs - view_samples = ob.n_local_samples - else: - view_samples = views[vw].stop - views[vw].start - starting_flags = np.zeros(view_samples, dtype=np.uint8) - if save_shared_flags is not None: - starting_flags[:] = np.where( - views.shared[save_shared_flags][vw] & save_shared_flag_mask > 0, - 1, - 0, - ) - for d in dets: - views.detdata[flagname][vw][d, :] = starting_flags - if save_det_flags is not None: - views.detdata[flagname][vw][d, :] |= np.where( - views.detdata[save_det_flags][vw][d] & save_det_flag_mask - > 0, - 1, - 0, - ) + # Also save the name of the user-requested output binned map. During the + # solve we will output to a temporary map and then restore this name, in + # case we are using the same binning operator for the solve and the final + # output. + save_binned = self.binning.binned - # Now scan any input mask to this same flag field. We use the second bit (== 2) - # for these mask flags. For the input mask bit we check the first bit of the - # pixel values. This is noted in the help string for the mask trait. + # Data products, prefixed with the name of the operator. 
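As an aside on the save/restore bookkeeping above: the diff stashes the user-facing binning traits in local variables and restores them at the end of _exec. An equivalent, slightly more defensive variant of the same pattern wraps the solver in try/finally so the traits are restored even on error (this is an alternative sketch, not what the patch does):

    save_det_flags = self.binning.det_flags
    save_det_flag_mask = self.binning.det_flag_mask
    save_shared_flags = self.binning.shared_flags
    save_shared_flag_mask = self.binning.shared_flag_mask
    save_binned = self.binning.binned
    try:
        self.binning.det_flags = flagname   # solver-internal flag field
        self.binning.det_flag_mask = 255
        # ... run the solver ...
    finally:
        self.binning.det_flags = save_det_flags
        self.binning.det_flag_mask = save_det_flag_mask
        self.binning.shared_flags = save_shared_flags
        self.binning.shared_flag_mask = save_shared_flag_mask
        self.binning.binned = save_binned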
- # Use the same pointing operator as the binning - scan_pointing = self.binning.pointing + solver_hits_name = "{}_solve_hits".format(self.name) + solver_rcond_name = "{}_solve_rcond".format(self.name) + solver_rcond_mask_name = "{}_solve_rcond_mask".format(self.name) - # Set up operator for optional clearing of the pointing matrices - clear_pointing = Clear(detdata=[scan_pointing.pixels, scan_pointing.weights]) + hits_name = "{}_hits".format(self.name) + rcond_name = "{}_rcond".format(self.name) - scanner = ScanMask( - det_flags=flagname, - pixels=scan_pointing.pixels, - mask_bits=1, - ) + flagname = "{}_flags".format(self.name) + clean_name = "{}_cleaned".format(self.name) - scanner.det_flags_value = 2 - scanner.mask_key = self.mask + timer.start() - scan_pipe = None - if self.binning.save_pointing: - # Process all detectors at once - scan_pipe = Pipeline( - detector_sets=["ALL"], operators=[scan_pointing, scanner] - ) + # Flagging. We create a new set of data flags for the solver that includes: + # - one bit for a bitwise OR of all detector / shared flags + # - one bit for any pixel mask, projected to TOD + # - one bit for any poorly conditioned pixels, projected to TOD + + if self.mc_mode: + # Verify that our flags exist + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + for d in dets: + if d not in ob.detdata[flagname].detectors: + msg = "In MC mode, flags missing for observation {}, det {}".format( + ob.name, d + ) + self._log_info(comm, rank, "MC mode, reusing flags for solver") else: - # Process one detector at a time and clear pointing after each one. - scan_pipe = Pipeline( - detector_sets=["SINGLE"], - operators=[scan_pointing, scanner, clear_pointing], + self._log_info(comm, rank, "begin building flags for solver") + + # Use the same data view as the pointing operator in binning + solve_view = self.binning.pointing.view + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + # Create the new solver flags + ob.detdata.ensure(flagname, dtype=np.uint8, detectors=detectors) + # The data views + views = ob.view[solve_view] + # For each view... + for vw in range(len(views)): + view_samples = None + if views[vw].start is None: + # There is one view of the whole obs + view_samples = ob.n_local_samples + else: + view_samples = views[vw].stop - views[vw].start + starting_flags = np.zeros(view_samples, dtype=np.uint8) + if save_shared_flags is not None: + starting_flags[:] = np.where( + views.shared[save_shared_flags][vw] & save_shared_flag_mask + > 0, + 1, + 0, + ) + for d in dets: + views.detdata[flagname][vw][d, :] = starting_flags + if save_det_flags is not None: + views.detdata[flagname][vw][d, :] |= np.where( + views.detdata[save_det_flags][vw][d] + & save_det_flag_mask + > 0, + 1, + 0, + ) + + # Now scan any input mask to this same flag field. We use the second bit + # (== 2) for these mask flags. For the input mask bit we check the first + # bit of the pixel values. This is noted in the help string for the mask + # trait. Note that we explicitly expand the pointing once here and do not + # save it. Even if we are eventually saving the pointing, we want to do + # that later when building the covariance and the pixel distribution. 
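To make the bit scheme described in the comments above concrete: the solver flags pack three independent conditions into one uint8 field, and the solve cuts any sample with any bit set (hence the det_flag_mask of 255 used below). A small standalone illustration:

    import numpy as np

    FLAG_INPUT = 1   # bitwise OR of detector / shared input flags
    FLAG_MASK = 2    # input pixel mask, scanned to TOD
    FLAG_RCOND = 4   # poorly conditioned pixels, scanned to TOD

    flags = np.zeros(10, dtype=np.uint8)
    flags[3] |= FLAG_INPUT
    flags[7] |= FLAG_MASK | FLAG_RCOND

    # Samples kept in the solve are those with no bit set.
    good = (flags & 255) == 0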
+ + # Use the same pointing operator as the binning + scan_pointing = self.binning.pointing + + scanner = ScanMask( + det_flags=flagname, + pixels=scan_pointing.pixels, + mask_bits=1, ) - if self.mask is not None: - # We actually have an input mask. Scan it. - scan_pipe.apply(data, detectors=detectors) - - self._log_info(comm, rank, " finished flag building in", timer=timer) - - # Now construct the noise covariance, hits, and condition number mask + scanner.det_flags_value = 2 + scanner.mask_key = self.mask - self._log_info(comm, rank, "begin build of solver covariance") + scan_pipe = Pipeline( + detector_sets=["SINGLE"], operators=[scan_pointing, scanner] + ) - solver_hits_name = "{}_solve_hits".format(self.name) - solver_rcond_name = "{}_solve_rcond".format(self.name) - solver_rcond_mask_name = "{}_solve_rcond_mask".format(self.name) + if self.mask is not None: + # We have a mask. Scan it. + scan_pipe.apply(data, detectors=detectors) + + self._log_info(comm, rank, " finished flag building in", timer=timer) + + # Now construct the noise covariance, hits, and condition number mask for + # the solver. + + if self.mc_mode: + # Verify that our covariance and other products exist. + if self.binning.pixel_dist not in data: + msg = "MC mode, pixel distribution '{}' does not exist".format( + self.binning.pixel_dist + ) + log.error(msg) + raise RuntimeError(msg) + if self.binning.covariance not in data: + msg = "MC mode, covariance '{}' does not exist".format( + self.binning.covariance + ) + log.error(msg) + raise RuntimeError(msg) + + self._log_info(comm, rank, "MC mode, reusing covariance for solver") + else: + self._log_info(comm, rank, "begin build of solver covariance") + + solver_cov = CovarianceAndHits( + pixel_dist=self.binning.pixel_dist, + covariance=self.binning.covariance, + hits=solver_hits_name, + rcond=solver_rcond_name, + view=self.binning.pointing.view, + det_flags=flagname, + det_flag_mask=255, + pointing=self.binning.pointing, + noise_model=self.binning.noise_model, + rcond_threshold=self.solve_rcond_threshold, + sync_type=self.binning.sync_type, + save_pointing=self.binning.saved_pointing, + ) - solver_cov = CovarianceAndHits( - pixel_dist=self.binning.pixel_dist, - covariance=self.binning.covariance, - hits=solver_hits_name, - rcond=solver_rcond_name, - view=self.binning.pointing.view, - det_flags=flagname, - det_flag_mask=255, - pointing=self.binning.pointing, - noise_model=self.binning.noise_model, - rcond_threshold=self.solve_rcond_threshold, - sync_type=self.binning.sync_type, - save_pointing=self.binning.save_pointing, - ) + solver_cov.apply(data, detectors=detectors) - solver_cov.apply(data, detectors=detectors) + data[solver_rcond_mask_name] = PixelData( + data[self.binning.pixel_dist], dtype=np.uint8, n_value=1 + ) + data[solver_rcond_mask_name].raw[ + data[solver_rcond_name].raw.array() < self.solve_rcond_threshold + ] = 1 - data[solver_rcond_mask_name] = PixelData( - data[self.binning.pixel_dist], dtype=np.uint8, n_value=1 - ) - data[solver_rcond_mask_name].raw[ - data[solver_rcond_name].raw.array() < self.solve_rcond_threshold - ] = 1 + # Re-use our mask scanning pipeline, setting third bit (== 4) + scanner.det_flags_value = 4 + scanner.mask_key = solver_rcond_mask_name + scan_pipe.apply(data, detectors=detectors) - # Re-use our mask scanning pipeline, setting third bit (== 4) - scanner.det_flags_value = 4 - scanner.mask_key = solver_rcond_mask_name - scan_pipe.apply(data, detectors=detectors) + self._log_info( + comm, rank, " finished build of solver covariance 
in", timer=timer + ) - self._log_info( - comm, rank, " finished build of solver covariance in", timer=timer - ) + local_total = 0 + local_cut = 0 + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + for vw in ob.view[solve_view].detdata[flagname]: + for d in dets: + local_total += len(vw[d]) + local_cut += np.count_nonzero(vw[d]) + total = 0 + cut = 0 + if comm is None: + total = local_total + cut = local_cut + else: + total = comm.reduce(local_total, op=MPI.SUM, root=0) + cut = comm.reduce(local_cut, op=MPI.SUM, root=0) + msg = "Solver flags cut {} / {} = {:0.2f}% of samples".format( + cut, total, 100.0 * (cut / total) + ) + self._log_info(comm, rank, msg) # Compute the RHS. Overwrite inputs, either the original or the copy. @@ -384,17 +436,23 @@ def _exec(self, data, detectors=None, **kwargs): self.binning.det_flags = flagname self.binning.det_flag_mask = 255 + # Set the binning operator to output to temporary map. This will be + # overwritten on each iteration of the solver. + self.binning.binned = "{}_solve_bin".format(self.name) + rhs_amplitude_key = "{}_amplitudes_rhs".format(self.name) self.template_matrix.amplitudes = rhs_amplitude_key rhs_calc = SolverRHS( - det_data=detdata_name, - overwrite=True, + det_data=self.det_data, + overwrite=False, binning=self.binning, template_matrix=self.template_matrix, ) rhs_calc.apply(data, detectors=detectors) + print("RHS = ", data[rhs_amplitude_key], flush=True) + self._log_info(comm, rank, " finished RHS calculation in", timer=timer) # Set up the LHS operator. Use either the original timestreams or the copy @@ -406,7 +464,6 @@ def _exec(self, data, detectors=None, **kwargs): self.template_matrix.amplitudes = amplitude_key lhs_calc = SolverLHS( - det_temp=detdata_name, binning=self.binning, template_matrix=self.template_matrix, ) @@ -430,60 +487,118 @@ def _exec(self, data, detectors=None, **kwargs): self.binning.det_flag_mask = save_det_flag_mask self.binning.shared_flags = save_shared_flags self.binning.shared_flag_mask = save_shared_flag_mask - - self._log_info( - comm, rank, "begin projection of final amplitudes to timestreams" - ) - - # Reset our timestreams to zero - for ob in data.obs: - ob.detdata[detdata_name][:] = 0.0 - - # Project our solved amplitudes into timestreams. We output to either the - # input det_data or our temp space. - - self.template_matrix.transpose = False - self.template_matrix.apply(data, detectors=detectors) - - self._log_info(comm, rank, " finished amplitude projection in", timer=timer) + self.binning.binned = save_binned # Now construct the noise covariance, hits, and condition number mask for the # final binned map. - self._log_info(comm, rank, "begin build of final binning covariance") + if self.mc_mode: + # Verify that our covariance and other products exist. 
+ if self.map_binning.pixel_dist not in data: + msg = "MC mode, pixel distribution '{}' does not exist".format( + self.map_binning.pixel_dist + ) + log.error(msg) + raise RuntimeError(msg) + if self.map_binning.covariance not in data: + msg = "MC mode, covariance '{}' does not exist".format( + self.map_binning.covariance + ) + log.error(msg) + raise RuntimeError(msg) + self._log_info(comm, rank, "MC mode, reusing covariance for final binning") + else: + self._log_info(comm, rank, "begin build of final binning covariance") + + final_cov = CovarianceAndHits( + pixel_dist=self.map_binning.pixel_dist, + covariance=self.map_binning.covariance, + hits=hits_name, + rcond=rcond_name, + view=self.map_binning.pointing.view, + det_flags=self.map_binning.det_flags, + det_flag_mask=self.map_binning.det_flag_mask, + shared_flags=self.map_binning.shared_flags, + shared_flag_mask=self.map_binning.shared_flag_mask, + pointing=self.map_binning.pointing, + noise_model=self.map_binning.noise_model, + rcond_threshold=self.map_rcond_threshold, + sync_type=self.map_binning.sync_type, + save_pointing=self.map_binning.saved_pointing, + ) - hits_name = "{}_hits".format(self.name) - rcond_name = "{}_rcond".format(self.name) + final_cov.apply(data, detectors=detectors) - final_cov = CovarianceAndHits( - pixel_dist=self.map_binning.pixel_dist, - covariance=self.map_binning.covariance, - hits=hits_name, - rcond=rcond_name, - view=self.map_binning.pointing.view, - det_flags=self.map_binning.det_flags, - det_flag_mask=self.map_binning.det_flag_mask, - shared_flags=self.map_binning.shared_flags, - shared_flag_mask=self.map_binning.shared_flag_mask, - pointing=self.map_binning.pointing, - noise_model=self.map_binning.noise_model, - rcond_threshold=self.map_rcond_threshold, - sync_type=self.map_binning.sync_type, - save_pointing=self.map_binning.save_pointing, - ) + self._log_info( + comm, rank, " finished build of final covariance in", timer=timer + ) - final_cov.apply(data, detectors=detectors) + # Project the solved template amplitudes into timestreams and subtract + # from the original. Then make a binned map of the result. - self._log_info( - comm, rank, " finished build of final covariance in", timer=timer - ) + self._log_info(comm, rank, "begin final map binning") - # Make a binned map of these template-subtracted timestreams + temp_project = "{}_temp_project".format(self.name) - self._log_info(comm, rank, "begin final map binning") + # Projecting amplitudes to a temp space + self.template_matrix.transpose = False + self.template_matrix.det_data = temp_project + + if self.map_binning.binned == "binned": + # The user did not modify the default name of the output binned map. + # Set this to something more descriptive, named after our operator + # instance. + self.map_binning.binned = "{}_map".format(self.name) + + # Binning the cleaned data + self.map_binning.det_data = clean_name + + # Operator to copy the input data to the cleaned location + copy_input = Copy(detdata=[(self.det_data, clean_name)]) + + pre_pipe = None + if self.save_cleaned: + # We are going to be saving a full copy of the template-subtracted data + if self.overwrite_cleaned: + # We are going to modify the input data in place + sub_cleaned = Subtract(first=self.det_data, second=temp_project) + pre_pipe = Pipeline( + detector_sets=["SINGLE"], + operators=[ + self.template_matrix, + sub_cleaned, + ], + ) + else: + # We need to create a new full set of timestreams. Do this now + # all at once for all detectors. 
+ copy_input.apply(data, detectors=detectors) + # Pipeline to project one detector at a time and subtract. + sub_cleaned = Subtract(first=clean_name, second=temp_project) + pre_pipe = Pipeline( + detector_sets=["SINGLE"], + operators=[ + self.template_matrix, + sub_cleaned, + ], + ) + else: + # Not saving cleaned timestreams. Use a preprocessing pipeline that + # just projects and subtracts data one detector at a time. + sub_cleaned = Subtract(first=clean_name, second=temp_project) + pre_pipe = Pipeline( + detector_sets=["SINGLE"], + operators=[ + self.template_matrix, + copy_input, + sub_cleaned, + ], + ) - self.map_binning.det_data = detdata_name + # Do the final binning + self.map_binning.pre_process = pre_pipe self.map_binning.apply(data, detectors=detectors) + self.map_binning.pre_process = None self._log_info(comm, rank, " finished final binning in", timer=timer) diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py index 7de8cca37..6e3350a40 100644 --- a/src/toast/ops/mapmaker_binning.py +++ b/src/toast/ops/mapmaker_binning.py @@ -20,7 +20,7 @@ from .pipeline import Pipeline -from .clear import Clear +from .delete import Delete from .mapmaker_utils import BuildHitMap, BuildNoiseWeighted, BuildInverseCovariance @@ -75,6 +75,12 @@ class BinMap(Operator): help="This must be an instance of a pointing operator", ) + pre_process = Instance( + klass=Operator, + allow_none=True, + help="Optional extra operator to run prior to binning", + ) + noise_model = Unicode( "noise_model", help="Observation key containing the noise model" ) @@ -83,9 +89,7 @@ class BinMap(Operator): "allreduce", help="Communication algorithm: 'allreduce' or 'alltoallv'" ) - save_pointing = Bool( - False, help="If True, do not clear detector pointing matrices after use" - ) + saved_pointing = Bool(False, help="If True, use previously computed pointing") @traitlets.validate("det_flag_mask") def _check_flag_mask(self, proposal): @@ -143,9 +147,17 @@ def _exec(self, data, detectors=None, **kwargs): self.pointing.create_dist = None - # Set up clearing of the pointing matrices + # If the binned map already exists in the data, verify the distribution and + # reset to zero. - clear_pointing = Clear(detdata=[self.pointing.pixels, self.pointing.weights]) + if self.binned in data: + if data[self.binned].distribution != data[self.pixel_dist]: + raise RuntimeError( + "Pixel distribution '{}' does not match existing binned map '{}'".format( + self.pixel_dist, self.binned + ) + ) + data[self.binned].raw[:] = 0.0 # Noise weighted map. We output this to the final binned map location, # since we will multiply by the covariance in-place. @@ -166,21 +178,24 @@ def _exec(self, data, detectors=None, **kwargs): # Build a pipeline to expand pointing and accumulate accum = None - if self.save_pointing: - # Process all detectors at once + accum_ops = list() + if self.pre_process is not None: + accum_ops.append(self.pre_process) + if self.saved_pointing: + # Process all detectors at once, using existing pointing accum = Pipeline(detector_sets=["ALL"]) - accum.operators = [self.pointing, build_zmap] + accum_ops.extend([build_zmap]) else: - # Process one detector at a time and clear pointing after each one. + # Process one detector at a time. 
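+            # Process one detector at a time.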
accum = Pipeline(detector_sets=["SINGLE"]) - accum.operators = [self.pointing, build_zmap, clear_pointing] - + accum_ops.extend([self.pointing, build_zmap]) + accum.operators = accum_ops pipe_out = accum.apply(data, detectors=detectors) # Extract the results binned_map = data[self.binned] - # Apply the covariance + # Apply the covariance in place covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv")) return @@ -198,8 +213,6 @@ def _requires(self): def _provides(self): prov = {"meta": [self.binned], "shared": list(), "detdata": list()} - if self.save_pointing: - prov["detdata"].extend([self.pointing.pixels, self.pointing.weights]) return prov def _accelerators(self): diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index 40711d9da..1aa11a62b 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -18,10 +18,12 @@ from .pipeline import Pipeline -from .clear import Clear +from .delete import Delete from .copy import Copy +from .reset import Reset + from .scan_map import ScanMap from .noise_weight import NoiseWeight @@ -81,7 +83,7 @@ def _check_binning(self, proposal): if not isinstance(bin, Operator): raise traitlets.TraitError("binning should be an Operator instance") # Check that this operator has the traits we expect - for trt in ["pointing", "det_data", "binned"]: + for trt in ["pointing", "det_data", "binned", "saved_pointing"]: if not bin.has_trait(trt): msg = "binning operator should have a '{}' trait".format(trt) raise traitlets.TraitError(msg) @@ -121,19 +123,10 @@ def _exec(self, data, detectors=None, **kwargs): "You must set the template_matrix trait before calling exec()" ) - # Build a pipeline to make the binned map, optionally one detector at a time. + # Make a binned map self.binning.det_data = self.det_data - - bin_pipe = None - if self.binning.save_pointing: - # Process all detectors at once - bin_pipe = Pipeline(detector_sets=["ALL"]) - else: - # Process one detector at a time and clear pointing after each one. - bin_pipe = Pipeline(detector_sets=["SINGLE"]) - bin_pipe.operators = [self.binning] - bin_pipe.apply(data, detectors=detectors) + self.binning.apply(data, detectors=detectors) # Build a pipeline for the projection and template matrix application. # First create the operators that we will use. @@ -144,19 +137,14 @@ def _exec(self, data, detectors=None, **kwargs): # Use the same pointing operator as the binning pointing = self.binning.pointing - # Set up operator for optional clearing of the pointing matrices - clear_pointing = Clear(detdata=[pointing.pixels, pointing.weights]) - # Optionally Copy data to a temporary location to avoid overwriting the input. copy_det = None - clear_temp = None if not self.overwrite: copy_det = Copy( detdata=[ (self.det_data, det_temp), ] ) - clear_temp = Clear(detdata=[det_temp]) # The detdata name we will use (either the original or the temp one) detdata_name = self.det_data @@ -178,7 +166,6 @@ def _exec(self, data, detectors=None, **kwargs): ) # Set up template matrix operator. - self.template_matrix.transpose = True self.template_matrix.det_data = detdata_name @@ -186,25 +173,22 @@ def _exec(self, data, detectors=None, **kwargs): # weights and templates. 
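+        # (Schematically this pipeline evaluates the solver right hand side,
+        # M^T N^-1 Z d:  ScanMap subtracts the binned map (the action of Z),
+        # NoiseWeight applies the diagonal N^-1, and the transposed template
+        # matrix accumulates M^T.)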
proj_pipe = None - if self.binning.save_pointing: - # Process all detectors at once + if self.binning.saved_pointing: + # Process all detectors at once, since we have the pointing already proj_pipe = Pipeline(detector_sets=["ALL"]) oplist = list() if not self.overwrite: oplist.append(copy_det) oplist.extend( [ - pointing, scan_map, noise_weight, self.template_matrix, ] ) - if not self.overwrite: - oplist.append(clear_temp) proj_pipe.operators = oplist else: - # Process one detector at a time and clear pointing after each one. + # Process one detector at a time. proj_pipe = Pipeline(detector_sets=["SINGLE"]) oplist = list() if not self.overwrite: @@ -213,19 +197,21 @@ def _exec(self, data, detectors=None, **kwargs): [ pointing, scan_map, - clear_pointing, noise_weight, self.template_matrix, ] ) - if not self.overwrite: - oplist.append(clear_temp) proj_pipe.operators = oplist # Run this projection pipeline. proj_pipe.apply(data, detectors=detectors) + if not self.overwrite: + # Clean up our temp buffer + delete_temp = Delete(detdata=[det_temp]) + delete_temp.apply(data) + return def _finalize(self, data, **kwargs): @@ -298,7 +284,7 @@ def _check_binning(self, proposal): if not isinstance(bin, Operator): raise traitlets.TraitError("binning should be an Operator instance") # Check that this operator has the traits we expect - for trt in ["pointing", "det_data", "binned"]: + for trt in ["pointing", "det_data", "binned", "saved_pointing"]: if not bin.has_trait(trt): msg = "binning operator should have a '{}' trait".format(trt) raise traitlets.TraitError(msg) @@ -360,26 +346,22 @@ def _exec(self, data, detectors=None, **kwargs): "You must set the template_matrix trait before calling exec()" ) - # Build a pipeline to project amplitudes into timestreams and make a binned - # map. + # Project amplitudes into timestreams and make a binned map. + timer.start() self._log_debug(comm, rank, "begin project amplitudes and binning") self.template_matrix.transpose = False self.template_matrix.det_data = self.det_temp - self.binning.det_data = self.det_temp - bin_pipe = None - if self.binning.save_pointing: - # Process all detectors at once - bin_pipe = Pipeline(detector_sets=["ALL"]) - else: - # Process one detector at a time and clear pointing after each one. - bin_pipe = Pipeline(detector_sets=["SINGLE"]) + self.binning.det_data = self.det_temp - bin_pipe.operators = [self.template_matrix, self.binning] + self.binning.pre_process = self.template_matrix + self.binning.apply(data, detectors=detectors) + self.binning.pre_process = None - bin_pipe.apply(data, detectors=detectors) + bd = data[self.binning.binned].data + print("lhs binned map = ", bd[bd != 0], flush=True) self._log_debug(comm, rank, "projection and binning finished in", timer=timer) @@ -391,9 +373,6 @@ def _exec(self, data, detectors=None, **kwargs): # Use the same pointing operator as the binning pointing = self.binning.pointing - # Set up operator for optional clearing of the pointing matrices - clear_pointing = Clear(detdata=[pointing.pixels, pointing.weights]) - # Set up map-scanning operator to project the binned map. scan_map = ScanMap( pixels=pointing.pixels, @@ -415,25 +394,23 @@ def _exec(self, data, detectors=None, **kwargs): # weights and templates. 
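+        # (Schematically this pipeline completes one application of the
+        # solver left hand side matrix, M^T N^-1 Z M a, to the amplitudes
+        # that were just projected and binned above.)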
proj_pipe = None - if self.binning.save_pointing: + if self.binning.saved_pointing: # Process all detectors at once proj_pipe = Pipeline( detector_sets=["ALL"], operators=[ - pointing, scan_map, noise_weight, self.template_matrix, ], ) else: - # Process one detector at a time and clear pointing after each one. + # Process one detector at a time. proj_pipe = Pipeline( detector_sets=["SINGLE"], operators=[ pointing, scan_map, - clear_pointing, noise_weight, self.template_matrix, ], @@ -540,8 +517,8 @@ def solve( residual -= data[lhs_amps] print("RHS ", rhs_amps) - print("Guess", data[lhs_amps]) - print(residual) + print("LHS", data[lhs_amps]) + print("residual", residual) # The preconditioned residual # s = M^-1 * r @@ -549,12 +526,17 @@ def solve( precond_residual.reset() lhs.template_matrix.apply_precond(residual, precond_residual) + print("precond_residual", precond_residual) + # The proposal # d = s proposal = precond_residual.duplicate() + # print("proposal", proposal) + # delta_new = r^T * d sqsum = precond_residual.dot(residual) + print("sqsum = ", sqsum) init_sqsum = sqsum best_sqsum = sqsum @@ -579,26 +561,36 @@ def solve( for k, v in data[lhs_amps].items(): v.local[:] = proposal[k].local + print("LHS input = ", data[lhs_amps], flush=True) + # q = A * d (in place) lhs.apply(data, detectors=detectors) + print("LHS output", data[lhs_amps]) + # alpha = delta_new / (d^T * q) alpha = sqsum alpha /= proposal.dot(data[lhs_amps]) + print("alpha = ", alpha) + # r -= alpha * q data[lhs_amps] *= alpha residual -= data[lhs_amps] + # print("residual", residual) # The preconditioned residual # s = M^-1 * r lhs.template_matrix.apply_precond(residual, precond_residual) + # print("precond_residual", precond_residual) + # delta_old = delta_new sqsum_last = sqsum # delta_new = r^T * s sqsum = precond_residual.dot(residual) + print("sqsum = ", sqsum) if comm is not None: comm.barrier() @@ -619,6 +611,8 @@ def solve( proposal *= beta proposal += precond_residual + # print("proposal", proposal) + # Check for convergence if sqsum < init_sqsum * convergence or sqsum < 1e-30: timer.stop() @@ -631,6 +625,7 @@ def solve( break best_sqsum = min(sqsum, best_sqsum) + print("best_sqsum = ", best_sqsum) if iter % 10 == 0 and iter >= n_iter_min: if last_best < best_sqsum * 2: diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 157e86991..62979cc5e 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -2,6 +2,8 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +from collections import OrderedDict + import traitlets from ..utils import Logger @@ -112,6 +114,21 @@ def _exec(self, data, detectors=None, **kwargs): for tmpl in self.templates: tmpl.det_data = self.det_data + # We loop over detectors. Internally, each template loops over observations + # and ignores observations where the detector does not exist. + + all_dets = None + if detectors is None: + # We don't have an explicit list of detectors- build our superset. + all_dets = OrderedDict() + for ob in data.obs: + for d in ob.local_detectors: + if d not in all_dets: + all_dets[d] = None + all_dets = list(all_dets.keys()) + else: + all_dets = detectors + if self.transpose: if self.amplitudes not in data: # The output template amplitudes do not yet exist. 
Create these with @@ -119,15 +136,9 @@ def _exec(self, data, detectors=None, **kwargs): data[self.amplitudes] = AmplitudesMap() for tmpl in self.templates: data[self.amplitudes][tmpl.name] = tmpl.zeros() - for ob in data.obs: - # Get the detectors we are using for this observation - dets = ob.select_local_detectors(detectors) - if len(dets) == 0: - # Nothing to do for this observation - continue - for d in dets: - for tmpl in self.templates: - tmpl.project_signal(d, data[self.amplitudes][tmpl.name]) + for d in all_dets: + for tmpl in self.templates: + tmpl.project_signal(d, data[self.amplitudes][tmpl.name]) else: if self.amplitudes not in data: msg = "Template amplitudes '{}' do not exist in data".format( @@ -135,15 +146,20 @@ def _exec(self, data, detectors=None, **kwargs): ) log.error(msg) raise RuntimeError(msg) + # Ensure that our output detector data exists in each observation for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) if len(dets) == 0: # Nothing to do for this observation continue + ob.detdata.ensure(self.det_data, detectors=dets) for d in dets: - for tmpl in self.templates: - tmpl.add_to_signal(d, data[self.amplitudes][tmpl.name]) + ob.detdata[self.det_data][d, :] = 0 + + for d in all_dets: + for tmpl in self.templates: + tmpl.add_to_signal(d, data[self.amplitudes][tmpl.name]) return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index 19854bf03..e625c6823 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -24,7 +24,7 @@ from .operator import Operator -from .clear import Clear +from .delete import Delete from .pipeline import Pipeline @@ -96,7 +96,6 @@ def _check_sync_type(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) - self._hits = None @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -119,9 +118,26 @@ def _exec(self, data, detectors=None, **kwargs): "Building hit map with pixel_distribution {}".format(self.pixel_dist) ) - # On first call, get the pixel distribution and create our distributed hitmap - if self._hits is None: - self._hits = PixelData(dist, np.int64, n_value=1) + hits = None + if self.hits in data: + # We have an existing map from a previous call. Verify + # the distribution and nnz. 
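+            # (Re-using the existing object allows repeated calls, for
+            # example in Monte Carlo loops, without reallocating the
+            # distributed pixel data.)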
+ if data[self.hits].distribution != dist: + msg = "Existing hits '{}' has different data distribution".format( + self.hits + ) + log.error(msg) + raise RuntimeError(msg) + if data[self.hits].n_value != 1: + msg = "Existing hits '{}' has {} nnz, not 1".format( + self.hits, data[self.hits].n_value + ) + log.error(msg) + raise RuntimeError(msg) + hits = data[self.hits] + else: + data[self.hits] = PixelData(dist, np.int64, n_value=1) + hits = data[self.hits] for ob in data.obs: # Get the detectors we are using for this observation @@ -157,17 +173,16 @@ def _exec(self, data, detectors=None, **kwargs): 1, local_sm.astype(np.int64), local_pix.astype(np.int64), - self._hits.raw, + hits.raw, ) return def _finalize(self, data, **kwargs): - if self._hits is not None: + if self.hits in data: if self.sync_type == "alltoallv": - self._hits.sync_alltoallv() + data[self.hits].sync_alltoallv() else: - self._hits.sync_allreduce() - data[self.hits] = self._hits + data[self.hits].sync_allreduce() return def _requires(self): @@ -268,9 +283,6 @@ def _check_sync_type(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) - self._invcov = None - self._weight_nnz = None - self._cov_nnz = None @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -295,6 +307,10 @@ def _exec(self, data, detectors=None, **kwargs): ) ) + invcov = None + weight_nnz = None + cov_nnz = None + for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) @@ -325,25 +341,45 @@ def _exec(self, data, detectors=None, **kwargs): # non-zero elements for every detector and every observation. # We check that here, and if this is the first observation and # detector we have worked with we create the PixelData object. - if self._invcov is None: + if invcov is None: # We will store the lower triangle of the covariance. if len(wview.detector_shape) == 1: - self._weight_nnz = 1 + weight_nnz = 1 else: - self._weight_nnz = wview.detector_shape[1] - self._cov_nnz = self._weight_nnz * (self._weight_nnz + 1) // 2 - self._invcov = PixelData( - dist, np.float64, n_value=self._cov_nnz - ) + weight_nnz = wview.detector_shape[1] + cov_nnz = weight_nnz * (weight_nnz + 1) // 2 + if self.inverse_covariance in data: + # We have an existing map from a previous call. Verify + # the distribution and nnz. 
+ if data[self.inverse_covariance].distribution != dist: + msg = "Existing inv cov '{}' has different data distribution".format( + self.inverse_covariance + ) + log.error(msg) + raise RuntimeError(msg) + if data[self.inverse_covariance].n_value != cov_nnz: + msg = "Existing inv cov '{}' has {} nnz, but pointing implies {}".format( + self.inverse_covariance, + data[self.inverse_covariance].n_value, + cov_nnz, + ) + log.error(msg) + raise RuntimeError(msg) + invcov = data[self.inverse_covariance] + else: + data[self.inverse_covariance] = PixelData( + dist, np.float64, n_value=cov_nnz + ) + invcov = data[self.inverse_covariance] else: check_nnz = None if len(wview.detector_shape) == 1: check_nnz = 1 else: check_nnz = wview.detector_shape[1] - if check_nnz != self._weight_nnz: + if check_nnz != weight_nnz: msg = "observation '{}', detector '{}', pointing weights '{}' has {} nnz, not {}".format( - ob.name, det, self.weights, check_nnz, self._weight_nnz + ob.name, det, self.weights, check_nnz, weight_nnz ) raise RuntimeError(msg) @@ -366,22 +402,21 @@ def _exec(self, data, detectors=None, **kwargs): cov_accum_diag_invnpp( dist.n_local_submap, dist.n_pix_submap, - self._weight_nnz, + weight_nnz, local_sm.astype(np.int64), local_pix.astype(np.int64), wview[det].reshape(-1), detweight, - self._invcov.raw, + invcov.raw, ) return def _finalize(self, data, **kwargs): - if self._invcov is not None: + if self.inverse_covariance in data: if self.sync_type == "alltoallv": - self._invcov.sync_alltoallv() + data[self.inverse_covariance].sync_alltoallv() else: - self._invcov.sync_allreduce() - data[self.inverse_covariance] = self._invcov + data[self.inverse_covariance].sync_allreduce() return def _requires(self): @@ -486,8 +521,6 @@ def _check_sync_type(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) - self._zmap = None - self._weight_nnz = None @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -516,6 +549,9 @@ def _exec(self, data, detectors=None, **kwargs): ) ) + zmap = None + weight_nnz = None + for ob in data.obs: # Get the detectors we are using for this observation dets = ob.select_local_detectors(detectors) @@ -546,25 +582,58 @@ def _exec(self, data, detectors=None, **kwargs): # Data for this detector ddata = dview[det] + print("Zmap det {} = {}".format(det, ddata), flush=True) + # We require that the pointing matrix has the same number of # non-zero elements for every detector and every observation. # We check that here, and if this is the first observation and - # detector we have worked with we create the PixelData object. - if self._zmap is None: + # detector we have worked with we create the PixelData object + # if needed. + if zmap is None: if len(wview.detector_shape) == 1: - self._weight_nnz = 1 + weight_nnz = 1 else: - self._weight_nnz = wview.detector_shape[1] - self._zmap = PixelData( - dist, np.float64, n_value=self._weight_nnz - ) + weight_nnz = wview.detector_shape[1] + if self.zmap in data: + # We have an existing map from a previous call. Verify + # the distribution and nnz. 
+ if data[self.zmap].distribution != dist: + msg = "Existing ZMap '{}' has different data distribution".format( + self.zmap + ) + log.error(msg) + raise RuntimeError(msg) + if data[self.zmap].n_value != weight_nnz: + msg = "Existing ZMap '{}' has {} nnz, but pointing has {}".format( + self.zmap, data[self.zmap].n_value, weight_nnz + ) + log.error(msg) + raise RuntimeError(msg) + print( + "Zmap found existing PixelData {}".format(self.zmap), + flush=True, + ) + zmap = data[self.zmap] + else: + print( + "Zmap allocating PixelData {}".format(self.zmap), + flush=True, + ) + data[self.zmap] = PixelData( + dist, np.float64, n_value=weight_nnz + ) + zmap = data[self.zmap] else: + print( + "Zmap PixelData {} already loaded".format(self.zmap), + flush=True, + ) check_nnz = None if len(wview.detector_shape) == 1: check_nnz = 1 else: check_nnz = wview.detector_shape[1] - if check_nnz != self._weight_nnz: + if check_nnz != weight_nnz: msg = "observation {}, detector {}, pointing weights {} has inconsistent number of values".format( ob.name, det, self.weights ) @@ -589,23 +658,26 @@ def _exec(self, data, detectors=None, **kwargs): cov_accum_zmap( dist.n_local_submap, dist.n_pix_submap, - self._zmap.n_value, + zmap.n_value, local_sm.astype(np.int64), local_pix.astype(np.int64), wview[det].reshape(-1), detweight, ddata, - self._zmap.raw, + zmap.raw, ) + zm = zmap.raw.array() + print("Zmap after det {} ".format(det), zm[zm != 0], flush=True) return def _finalize(self, data, **kwargs): - if self._zmap is not None: + if self.zmap in data: + # We have called exec() at least once if self.sync_type == "alltoallv": - self._zmap.sync_alltoallv() + data[self.zmap].sync_alltoallv() else: - self._zmap.sync_allreduce() - data[self.zmap] = self._zmap + data[self.zmap].sync_allreduce() + print("Zmap final sync of {}".format(self.zmap), flush=True) return def _requires(self): @@ -740,7 +812,6 @@ def _check_pointing(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) - self._invcov = None @function_timer def _exec(self, data, detectors=None, **kwargs): @@ -755,13 +826,11 @@ def _exec(self, data, detectors=None, **kwargs): self.pointing.create_dist = None - # Set up clearing of the pointing matrices - - clear_pointing = Clear(detdata=[self.pointing.pixels, self.pointing.weights]) - # If we do not have a pixel distribution yet, we must make one pass through # the pointing to build this first. + pointing_done = False + if self.pixel_dist not in data: if detectors is not None: msg = "A subset of detectors is specified, but the pixel distribution\n" @@ -784,16 +853,13 @@ def _exec(self, data, detectors=None, **kwargs): # We are keeping the pointing, which means we need to run all detectors # at once so they all end up in the detdata for all observations. pixel_dist_pipe = Pipeline(detector_sets=["ALL"]) - pixel_dist_pipe.operators = [ - self.pointing, - ] + pointing_done = True else: # Run one detector a at time and discard. pixel_dist_pipe = Pipeline(detector_sets=["SINGLE"]) - pixel_dist_pipe.operators = [ - self.pointing, - clear_pointing, - ] + pixel_dist_pipe.operators = [ + self.pointing, + ] pipe_out = pixel_dist_pipe.apply(data, detectors=detectors) # Turn pixel distribution creation off again @@ -836,11 +902,15 @@ def _exec(self, data, detectors=None, **kwargs): if self.save_pointing: # Process all detectors at once accum = Pipeline(detector_sets=["ALL"]) - accum.operators = [self.pointing, build_hits, build_invcov] + if pointing_done: + # We already computed the pointing once and saved it. 
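+                # The accumulation pipeline can therefore skip the pointing
+                # operator.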
+ accum.operators = [build_hits, build_invcov] + else: + accum.operators = [self.pointing, build_hits, build_invcov] else: - # Process one detector at a time and clear pointing after each one. + # Process one detector at a time. accum = Pipeline(detector_sets=["SINGLE"]) - accum.operators = [self.pointing, build_hits, build_invcov, clear_pointing] + accum.operators = [self.pointing, build_hits, build_invcov] pipe_out = accum.apply(data, detectors=detectors) diff --git a/src/toast/ops/pipeline.py b/src/toast/ops/pipeline.py index a37ff5fd4..77bef4028 100644 --- a/src/toast/ops/pipeline.py +++ b/src/toast/ops/pipeline.py @@ -2,6 +2,8 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +from collections import OrderedDict + import traitlets from ..utils import Logger @@ -137,6 +139,10 @@ def _exec(self, data, detectors=None, **kwargs): "If using 'ALL' for a detector set, there should only be one set" ) for op in self.operators: + msg = "Pipeline calling operator '{}' exec() with ALL dets".format( + op.name + ) + log.verbose(msg) op.exec(ds, detectors=detectors) elif det_set == "SINGLE": # If this is given, then there should be only one entry @@ -147,13 +153,14 @@ def _exec(self, data, detectors=None, **kwargs): # We are running one detector at a time. We will loop over all # detectors in the superset of detectors across all observations. - all_local_dets = set() + all_local_dets = OrderedDict() for ob in ds.obs: for det in ob.local_detectors: - all_local_dets.add(det) + all_local_dets[det] = None + all_local_dets = list(all_local_dets.keys()) # If we were given a more restrictive list, prune the global list - selected_dets = list(all_local_dets) + selected_dets = all_local_dets if detectors is not None: selected_dets = list() for det in all_local_dets: @@ -161,7 +168,13 @@ def _exec(self, data, detectors=None, **kwargs): selected_dets.append(det) for det in selected_dets: + msg = "Pipeline SINGLE detector {}".format(det) + log.verbose(msg) for op in self.operators: + msg = "Pipeline calling operator '{}' exec()".format( + op.name + ) + log.verbose(msg) op.exec(ds, detectors=[det]) else: # We are running sets of detectors at once. For this detector @@ -172,7 +185,11 @@ def _exec(self, data, detectors=None, **kwargs): for det in det_set: if det in detectors: selected_set.append(det) + msg = "Pipeline detector set {}".format(selected_set) + log.verbose(msg) for op in self.operators: + msg = "Pipeline calling operator '{}' exec()".format(op.name) + log.verbose(msg) op.exec(ds, detectors=selected_set) # Copy to / from accelerator... 
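+            # (A Pipeline applies its operator list over each detector set:
+            # "ALL" passes the full detector list in one call, "SINGLE" loops
+            # over detectors one at a time, and any other entry is treated as
+            # an explicit list of detector names.)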
@@ -180,9 +197,12 @@ def _exec(self, data, detectors=None, **kwargs): return def _finalize(self, data, **kwargs): + log = Logger.get() result = list() if self.operators is not None: for op in self.operators: + msg = "Pipeline calling operator '{}' finalize()".format(op.name) + log.verbose(msg) result.append(op.finalize(data)) return result diff --git a/src/toast/ops/pointing_healpix.py b/src/toast/ops/pointing_healpix.py index 406254bc8..cebc6c16b 100644 --- a/src/toast/ops/pointing_healpix.py +++ b/src/toast/ops/pointing_healpix.py @@ -113,8 +113,6 @@ class PointingHealpix(Operator): help="The output boresight coordinate system ('C', 'E', 'G')", ) - overwrite = Bool(False, help="If True, regenerate pointing even if it exists") - @traitlets.validate("nside") def _check_nside(self, proposal): check = proposal["value"] @@ -170,6 +168,13 @@ def _check_coord_out(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) + # Check that healpix pixels are set up. If the nside / mode are left as + # defaults, then the 'observe' function will not have been called yet. + if not hasattr(self, "_nnz"): + nnz = 1 + if self.mode == "IQU": + nnz = 3 + self._set_hpix(self.nside, self.nside_submap, nnz) @traitlets.observe("nside", "nside_submap", "mode") def _reset_hpix(self, change): @@ -178,10 +183,9 @@ def _reset_hpix(self, change): nside = self.nside nside_submap = self.nside_submap mode = self.mode - self._nnz = 1 + nnz = 1 if mode == "IQU": - self._nnz = 3 - + nnz = 3 # Update to the trait that changed if change["name"] == "nside": nside = change["new"] @@ -189,13 +193,17 @@ def _reset_hpix(self, change): nside_submap = change["new"] if change["name"] == "mode": if change["new"] == "IQU": - self._nnz = 3 + nnz = 3 else: - self._nnz = 1 + nnz = 1 + self._set_hpix(nside, nside_submap, nnz) + + def _set_hpix(self, nside, nside_submap, nnz): self.hpix = HealpixPixels(nside) self._n_pix = 12 * nside ** 2 self._n_pix_submap = 12 * nside_submap ** 2 self._n_submap = (nside // nside_submap) ** 2 + self._nnz = nnz self._local_submaps = None @function_timer @@ -242,29 +250,24 @@ def _exec(self, data, detectors=None, **kwargs): # Nothing to do for this observation continue - if self.pixels in ob.detdata and self.weight in ob.detdata: - # The pointing already exists! - if not self.overwrite: - continue - - # Create output data for the pixels, weights and optionally the + # Create (or re-use) output data for the pixels, weights and optionally the # detector quaternions. if self.single_precision: - ob.detdata.create( + ob.detdata.ensure( self.pixels, sample_shape=(), dtype=np.int32, detectors=dets ) - ob.detdata.create( + ob.detdata.ensure( self.weights, sample_shape=(self._nnz,), dtype=np.float32, detectors=dets, ) else: - ob.detdata.create( + ob.detdata.ensure( self.pixels, sample_shape=(), dtype=np.int64, detectors=dets ) - ob.detdata.create( + ob.detdata.ensure( self.weights, sample_shape=(self._nnz,), dtype=np.float64, @@ -272,7 +275,7 @@ def _exec(self, data, detectors=None, **kwargs): ) if self.quats is not None: - ob.detdata.create( + ob.detdata.ensure( self.quats, sample_shape=(4,), dtype=np.float64, diff --git a/src/toast/ops/reset.py b/src/toast/ops/reset.py new file mode 100644 index 000000000..ae038f489 --- /dev/null +++ b/src/toast/ops/reset.py @@ -0,0 +1,127 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
+
+import numbers
+
+import numpy as np
+
+import traitlets
+
+from ..utils import Logger
+
+from ..traits import trait_docs, Int, Unicode, List
+
+from .operator import Operator
+
+
+@trait_docs
+class Reset(Operator):
+    """Class to reset data from observations.
+
+    This operator takes lists of shared, detdata, intervals, and meta keys to reset.
+    Numerical data objects and arrays are set to zero. String objects are set to an
+    empty string. Any object that defines a `clear()` method will have that called.
+    Any object not matching those criteria will be set to None. Since an IntervalList
+    is not mutable, any specified intervals will simply be deleted.
+
+    """
+
+    # Class traits
+
+    API = Int(0, help="Internal interface version for this operator")
+
+    meta = List(
+        None, allow_none=True, help="List of Observation dictionary keys to reset"
+    )
+
+    detdata = List(
+        None, allow_none=True, help="List of Observation detdata keys to reset"
+    )
+
+    shared = List(
+        None, allow_none=True, help="List of Observation shared keys to reset"
+    )
+
+    intervals = List(
+        None,
+        allow_none=True,
+        help="List of tuples of Observation intervals keys to reset",
+    )
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _exec(self, data, detectors=None, **kwargs):
+        log = Logger.get()
+        for ob in data.obs:
+            if self.detdata is not None:
+                # Get the detectors we are using for this observation
+                dets = ob.select_local_detectors(detectors)
+                if len(dets) == 0:
+                    # Nothing to do for this observation
+                    continue
+                for key in self.detdata:
+                    for d in dets:
+                        ob.detdata[key][d, :] = 0
+                    print(
+                        "Reset detdata {}, dets {}, result = ".format(key, dets),
+                        ob.detdata[key],
+                        flush=True,
+                    )
+            if self.shared is not None:
+                for key in self.shared:
+                    scomm = ob.shared[key].nodecomm
+                    if scomm is None:
+                        # No MPI, just set to zero
+                        ob.shared[key].data[:] = 0
+                    else:
+                        # Only rank zero on each node resets
+                        if scomm.rank == 0:
+                            ob.shared[key]._flat[:] = 0
+                        scomm.barrier()
+            if self.intervals is not None:
+                for key in self.intervals:
+                    # This ignores non-existent keys
+                    del ob.intervals[key]
+            if self.meta is not None:
+                for key in self.meta:
+                    if isinstance(ob[key], np.ndarray):
+                        # This is an array, set to zero
+                        ob[key][:] = 0
+                    elif hasattr(ob[key], "clear"):
+                        # This is some kind of container (list, dict, etc). Clear it.
+                        ob[key].clear()
+                    elif isinstance(ob[key], bool):
+                        # Boolean scalar, set to False
+                        ob[key] = False
+                    elif isinstance(ob[key], numbers.Number):
+                        # This is a scalar numeric value
+                        ob[key] = 0
+                    elif isinstance(ob[key], (str, bytes)):
+                        # This is string-like
+                        ob[key] = ""
+                    else:
+                        # This is something else. Set to None
+                        ob[key] = None
+        return
+
+    def _finalize(self, data, **kwargs):
+        return None
+
+    def _requires(self):
+        req = dict()
+        if self.meta is not None:
+            req["meta"] = list(self.meta)
+        if self.detdata is not None:
+            req["detdata"] = list(self.detdata)
+        if self.shared is not None:
+            req["shared"] = list(self.shared)
+        if self.intervals is not None:
+            req["intervals"] = list(self.intervals)
+        return req
+
+    def _provides(self):
+        return dict()
+
+    def _accelerators(self):
+        # Eventually we can delete memory objects on devices...
+ return list() diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index eaf228608..510735ab2 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -100,8 +100,7 @@ def _exec(self, data, detectors=None, **kwargs): maptod = maptod_raw.array() # If our output detector data does not yet exist, create it - if self.det_data not in ob.detdata: - ob.detdata.create(self.det_data, dtype=np.float64, detectors=dets) + ob.detdata.ensure(self.det_data, detectors=dets) for det in dets: # The pixels, weights, and data. @@ -141,18 +140,27 @@ def _exec(self, data, detectors=None, **kwargs): "Projection supports only float32 and float64 binned maps" ) + print("========= {} ==========".format(det)) + print("Scanned map TOD = ", maptod) + print("Scanned original TOD = ", ddata) + # zero-out if needed if self.zero: ddata[:] = 0.0 + print("Scanned: zero-ing TOD = ", ddata) # Add or subtract. Note that the map scanned timestream will have # zeros anywhere that the pointing is bad, but those samples (and # any other detector flags) should be handled at other steps of the # processing. if self.subtract: - ddata -= maptod + print("Scanned: subtracting TOD") + ddata[:] -= maptod else: - ddata += maptod + print("Scanned: adding TOD") + ddata[:] += maptod + + print("Scanned final = ", ddata) del maptod maptod_raw.clear() diff --git a/src/toast/ops/sim_tod_noise.py b/src/toast/ops/sim_tod_noise.py index 7e0bc2b3d..6c71fb54c 100644 --- a/src/toast/ops/sim_tod_noise.py +++ b/src/toast/ops/sim_tod_noise.py @@ -283,8 +283,8 @@ def _exec(self, data, detectors=None, **kwargs): # detectors within the observation. # Create output if it does not exist - if self.out not in ob.detdata: - ob.detdata.create(self.out, dtype=np.float64) + if (self.out not in ob.detdata) or (dets != ob.detdata[self.out].detectors): + ob.detdata.create(self.out, dtype=np.float64, detectors=dets) (rate, dt, dt_min, dt_max, dt_std) = rate_from_times( ob.shared[self.times].data diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index 5556276e4..3e0470663 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -227,6 +227,8 @@ def _initialize(self, new_data): # Compute the amplitude noise filter and preconditioner for each detector # and each view. 
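+        # (The filter encodes the baseline noise prior derived from each
+        # detector's noise PSD, and the preconditioner is an approximate
+        # inverse of the resulting per-baseline system.)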
+ print("Offset sigmasq = ", self._sigmasq, flush=True) + self._filters = dict() self._precond = dict() @@ -400,13 +402,35 @@ def _add_to_signal(self, detector, amplitudes): continue # The step length for this observation step_length = int(self.step_time * self._obs_rate[iob]) + print( + "Offset input det {}, ob {} = ".format(detector, iob), + ob.view[self.view].detdata[self.det_data], + flush=True, + ) for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] + print( + "Offset input det {}, ob {}, view {} = ".format(detector, iob, ivw), + vw[detector], + flush=True, + ) + print( + "Offset input amplitude range = {} - {}".format( + offset, offset + n_amp_view - 1 + ) + ) template_offset_add_to_signal( step_length, amplitudes.local[offset : offset + n_amp_view], vw[detector], ) + print( + "Offset output det {}, ob {}, view {} = ".format( + detector, iob, ivw + ), + vw[detector], + flush=True, + ) offset += n_amp_view @function_timer diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index cfa8357b9..69ffa50dd 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -21,6 +21,7 @@ install(FILES ops_sim_tod_noise.py ops_mapmaker_utils.py ops_mapmaker_binning.py + ops_mapmaker_solve.py ops_mapmaker.py covariance.py ops_pointing_healpix.py diff --git a/src/toast/tests/_helpers.py b/src/toast/tests/_helpers.py index 9d45dfc23..42fae7de0 100644 --- a/src/toast/tests/_helpers.py +++ b/src/toast/tests/_helpers.py @@ -20,6 +20,8 @@ from ..observation import DetectorData, Observation +from ..pixels import PixelData + from .. import ops as ops @@ -91,6 +93,7 @@ def create_telescope(group_size, sample_rate=10.0 * u.Hz): n_pix=npix, sample_rate=sample_rate, f_min=1.0e-5 * u.Hz, + net=0.5, f_knee=(sample_rate / 2000.0), ) return Telescope("test", focalplane=fp) @@ -150,18 +153,42 @@ def create_satellite_data( tele = create_telescope(toastcomm.group_size, sample_rate=sample_rate) + # Scan fast enough to cover some sky in a short amount of time. Reduce the + # angles to achieve a more compact hit map. sim_sat = ops.SimSatellite( name="sim_sat", num_observations=(toastcomm.ngroups * obs_per_group), telescope=tele, hwp_rpm=10.0, observation_time=obs_time, + spin_period=1.0 * u.minute, + spin_angle=2.0 * u.degree, + prec_period=5.0 * u.minute, + prec_angle=2.0 * u.degree, ) sim_sat.apply(data) return data +def create_fake_sky(data, dist_key, map_key): + np.random.seed(987654321) + dist = data[dist_key] + pix_data = PixelData(dist, np.float64, n_value=3) + # Just replicate the fake data across all local submaps + off = 0 + for submap in range(dist.n_submap): + I_data = 100.0 * np.random.normal(size=dist.n_pix_submap) + Q_data = np.random.normal(size=dist.n_pix_submap) + U_data = np.random.normal(size=dist.n_pix_submap) + if submap in dist.local_submaps: + pix_data.data[off, :, 0] = I_data + pix_data.data[off, :, 1] = Q_data + pix_data.data[off, :, 2] = U_data + off += 1 + data[map_key] = pix_data + + def uniform_chunks(samples, nchunk=100): """Divide some number of samples into chunks. 
diff --git a/src/toast/tests/ops_madam.py b/src/toast/tests/ops_madam.py index 0e28d1933..134140033 100644 --- a/src/toast/tests/ops_madam.py +++ b/src/toast/tests/ops_madam.py @@ -19,7 +19,7 @@ from ..pixels import PixelDistribution, PixelData -from ._helpers import create_outdir, create_satellite_data +from ._helpers import create_outdir, create_satellite_data, create_fake_sky class MadamTest(MPITestCase): @@ -28,15 +28,6 @@ def setUp(self): self.outdir = create_outdir(self.comm, fixture_name) np.random.seed(123456) - def create_fake_sky(self, data, dist_key, map_key): - dist = data[dist_key] - pix_data = PixelData(dist, np.float64, n_value=3) - # Just replicate the fake data across all local submaps - pix_data.data[:, :, 0] = 100.0 - pix_data.data[:, :, 1] = 0.1 - pix_data.data[:, :, 2] = 0.1 - data[map_key] = pix_data - def test_madam_det_out(self): if not ops.Madam.available: print("libmadam not available, skipping tests") @@ -52,7 +43,7 @@ def test_madam_det_out(self): pointing.apply(data) # Create fake polarized sky pixel values locally - self.create_fake_sky(data, "pixel_dist", "fake_map") + create_fake_sky(data, "pixel_dist", "fake_map") # Scan map into timestreams scanner = ops.ScanMap( diff --git a/src/toast/tests/ops_mapmaker.py b/src/toast/tests/ops_mapmaker.py index 7e9073917..5cd9758ab 100644 --- a/src/toast/tests/ops_mapmaker.py +++ b/src/toast/tests/ops_mapmaker.py @@ -25,7 +25,7 @@ from ..pixels_io import write_healpix_fits -from ._helpers import create_outdir, create_satellite_data +from ._helpers import create_outdir, create_satellite_data, create_fake_sky class MapmakerTest(MPITestCase): @@ -36,18 +36,42 @@ def setUp(self): def test_offset(self): # Create a fake satellite data set for testing - data = create_satellite_data(self.comm) + data = create_satellite_data(self.comm, obs_time=20.0 * u.minute) + + # Create some sky signal timestreams. 
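+        # (A random sky map scanned back through the pointing gives an input
+        # signal with a known solution for the mapmaker to recover.)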
+ + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Now clear the pointing and reset things for use with the mapmaking test later + delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + delete_pointing.apply(data) + pointing.create_dist = None # Create an uncorrelated noise model from focalplane detector properties default_model = ops.DefaultNoiseModel(noise_model="noise_model") default_model.apply(data) - # Simulate noise - sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + # Simulate noise and accumulate to signal + sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") sim_noise.apply(data) - # Pointing operator for solver - pointing = ops.PointingHealpix(nside=64, mode="IQU", hwp_angle="hwp_angle") + print(data.obs[0].detdata["signal"]) + print("Done generating starting TOD", flush=True) # Set up binning operator for solving binner = ops.BinMap( @@ -72,7 +96,7 @@ def test_offset(self): # Map maker mapper = ops.MapMaker( - det_data="noise", + det_data="signal", binning=binner, template_matrix=tmatrix, ) @@ -80,8 +104,10 @@ def test_offset(self): # Make the map mapper.apply(data) + print(data) + # Access the output - final_map = data[mapper.binning.binned] + # final_map = data[mapper.binning.binned] del data return diff --git a/src/toast/tests/ops_mapmaker_binning.py b/src/toast/tests/ops_mapmaker_binning.py index 4e8434008..5346f7008 100644 --- a/src/toast/tests/ops_mapmaker_binning.py +++ b/src/toast/tests/ops_mapmaker_binning.py @@ -23,6 +23,8 @@ from ..pixels_io import write_healpix_fits +from ..covariance import covariance_apply + from ._helpers import create_outdir, create_satellite_data @@ -66,50 +68,26 @@ def test_binned(self): binmap = data[binner.binned] - # # Manual check - # - # check_invnpp = PixelData(data["pixel_dist"], np.float64, n_value=6) - # check_invnpp_corr = PixelData(data["pixel_dist"], np.float64, n_value=6) - # - # for ob in data.obs: - # noise = ob["noise_model"] - # noise_corr = ob["noise_model_corr"] - # - # for det in ob.local_detectors: - # detweight = noise.detector_weight(det) - # detweight_corr = noise_corr.detector_weight(det) - # - # wt = ob.detdata["weights"][det] - # local_sm, local_pix = data["pixel_dist"].global_pixel_to_submap( - # ob.detdata["pixels"][det] - # ) - # for i in range(ob.n_local_samples): - # if local_pix[i] < 0: - # continue - # off = 0 - # for j in range(3): - # for k in range(j, 3): - # check_invnpp.data[local_sm[i], local_pix[i], off] += ( - # detweight * wt[i, j] * wt[i, k] - # ) - # check_invnpp_corr.data[local_sm[i], local_pix[i], off] += ( - # detweight_corr * wt[i, j] * wt[i, k] - # ) - # off += 1 - # - # check_invnpp.sync_allreduce() - # check_invnpp_corr.sync_allreduce() - # - # for sm in range(invnpp.distribution.n_local_submap): - # for px in range(invnpp.distribution.n_pix_submap): - # if invnpp.data[sm, px, 0] != 0: - # nt.assert_almost_equal( - # invnpp.data[sm, px], check_invnpp.data[sm, px] - # ) - # if invnpp_corr.data[sm, px, 0] != 0: - # nt.assert_almost_equal( - # invnpp_corr.data[sm, px], check_invnpp_corr.data[sm, px] - # ) + # Manual check + + pointing.apply(data) + + noise_weight = 
ops.BuildNoiseWeighted( + pixel_dist="pixel_dist", + noise_model="noise_model", + pixels=pointing.pixels, + weights=pointing.weights, + det_data="noise", + zmap="zmap", + ) + noise_weight.apply(data) + + covariance_apply(data[cov_and_hits.covariance], data["zmap"]) + + for sm in range(binmap.distribution.n_local_submap): + for px in range(binmap.distribution.n_pix_submap): + nt.assert_almost_equal(binmap.data[sm, px], data["zmap"].data[sm, px]) + del data return diff --git a/src/toast/tests/ops_mapmaker_solve.py b/src/toast/tests/ops_mapmaker_solve.py new file mode 100644 index 000000000..acafff47a --- /dev/null +++ b/src/toast/tests/ops_mapmaker_solve.py @@ -0,0 +1,256 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +import numpy.testing as nt + +from astropy import units as u + +import healpy as hp + +from .mpi import MPITestCase + +from ..noise import Noise + +from ..vis import set_matplotlib_backend + +from .. import ops as ops + +from ..templates import Offset, AmplitudesMap + +from ..ops.mapmaker_solve import SolverRHS, SolverLHS + +from ._helpers import create_outdir, create_satellite_data + + +class MapmakerSolveTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + np.random.seed(123456) + + def test_rhs(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise + sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="noise") + sim_noise.apply(data) + + # Pointing operator + pointing = ops.PointingHealpix(nside=64, mode="IQU", hwp_angle="hwp_angle") + + # Build the covariance and hits + cov_and_hits = ops.CovarianceAndHits( + pixel_dist="pixel_dist", + pointing=pointing, + noise_model=default_model.noise_model, + ) + cov_and_hits.apply(data) + + # Set up binner + binner = ops.BinMap( + pixel_dist="pixel_dist", + covariance=cov_and_hits.covariance, + det_data=sim_noise.out, + pointing=pointing, + noise_model=default_model.noise_model, + save_pointing=False, + ) + + # Set up template matrix with just an offset template. + + # Use 1/10 of an observation as the baseline length. Make it not evenly + # divisible in order to test handling of the final amplitude. + ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + step_seconds = float(int(ob_time / 10.0)) + tmpl = Offset( + times="times", + noise_model=default_model.noise_model, + step_time=step_seconds, + ) + tmatrix = ops.TemplateMatrix(templates=[tmpl]) + tmatrix.amplitudes = "RHS" + + # Set up RHS operator and run it. We set overwrite=False so that a temporary + # detdata object is used. We need the original timestream for our manual check + # below. + + rhs_calc = SolverRHS( + det_data=sim_noise.out, + overwrite=False, + binning=binner, + template_matrix=tmatrix, + ) + rhs_calc.apply(data) + + # Get the output binned map used by the RHS operator. + rhs_binned = data[binner.binned] + + bd = data[binner.binned].data + print("rhs binned map = ", bd[bd != 0]) + + # Manual check. This applies the same operators as the RHS operator, but + # checks things along the way. 
And these lower-level operators are unit
+        # tested elsewhere as well...
+
+        # Make the binned map in a different location
+        binner.binned = "check"
+        binner.det_data = sim_noise.out
+        binner.apply(data)
+
+        check_binned = data[binner.binned]
+        bd = data[binner.binned].data
+        print("check binned map = ", bd[bd != 0], flush=True)
+
+        # Verify that the binned map elements agree
+        np.testing.assert_equal(rhs_binned.raw.array(), check_binned.raw.array())
+
+        # Scan the binned map and subtract from the original detector data.
+        pointing.apply(data)
+
+        scan_map = ops.ScanMap(
+            pixels=pointing.pixels,
+            weights=pointing.weights,
+            map_key=binner.binned,
+            det_data=sim_noise.out,
+            subtract=True,
+        )
+        scan_map.apply(data)
+
+        # Apply diagonal noise weight.
+        nw = ops.NoiseWeight(noise_model=binner.noise_model, det_data=sim_noise.out)
+        nw.apply(data)
+
+        # Project our timestreams to template amplitudes. Store this in a different
+        # data key than the RHS operator.
+
+        tmatrix.amplitudes = "check_RHS"
+        tmatrix.det_data = sim_noise.out
+        tmatrix.apply(data)
+
+        # Verify that the output amplitudes agree
+        np.testing.assert_equal(
+            data["RHS"][tmpl.name].local, data["check_RHS"][tmpl.name].local
+        )
+
+        del data
+        return
+
+    def test_lhs(self):
+        # Create a fake satellite data set for testing
+        data = create_satellite_data(self.comm)
+
+        # Create an uncorrelated noise model from focalplane detector properties
+        default_model = ops.DefaultNoiseModel(noise_model="noise_model")
+        default_model.apply(data)
+
+        # Set up template matrix with just an offset template.
+
+        # Use 1/10 of an observation as the baseline length. Make it not evenly
+        # divisible in order to test handling of the final amplitude.
+        ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0]
+        step_seconds = float(int(ob_time / 10.0))
+        tmpl = Offset(
+            times="times",
+            noise_model=default_model.noise_model,
+            step_time=step_seconds,
+        )
+        tmatrix = ops.TemplateMatrix(templates=[tmpl])
+
+        # For testing the LHS calculation, we first generate fake template
+        # amplitudes. Then we manually check the result by projecting these to
+        # a timestream and running the RHS operator on it. In the case of no noise
+        # prior, this should be equivalent. We use a temperature-only pointing
+        # matrix so that it can be consistent with constant-valued timestreams.
+
+        # Manually set the data for this template (normally done by
+        # TemplateMatrix.exec()) so we can pre-generate amplitudes.
+ tmpl.data = data + data["amplitudes"] = AmplitudesMap() + data["amplitudes"][tmpl.name] = tmpl.zeros() + data["amplitudes"][tmpl.name].local[:] = np.random.uniform( + low=-1000.0, high=1000.0, size=data["amplitudes"][tmpl.name].n_local + ) + + print("amplitudes = ", data["amplitudes"]) + + for ob in data.obs: + ob.detdata.create("signal") + + tmatrix.amplitudes = "amplitudes" + tmatrix.det_data = "signal" + tmatrix.data = data + tmatrix.transpose = False + tmatrix.apply(data) + + for ob in data.obs: + print("signal = ", ob.detdata["signal"]) + + # Pointing operator + pointing = ops.PointingHealpix(nside=64, mode="I", hwp_angle="hwp_angle") + + # Build the covariance and hits + cov_and_hits = ops.CovarianceAndHits( + pixel_dist="pixel_dist", + pointing=pointing, + noise_model=default_model.noise_model, + ) + cov_and_hits.apply(data) + + # Set up binner + binner = ops.BinMap( + pixel_dist="pixel_dist", + covariance=cov_and_hits.covariance, + det_data="signal", + pointing=pointing, + noise_model=default_model.noise_model, + save_pointing=False, + ) + + # Set up RHS operator and run it. + + tmatrix.amplitudes = "amplitudes_check" + binner.binned = "rhs_binned" + rhs_calc = SolverRHS( + det_data="signal", + overwrite=True, + binning=binner, + template_matrix=tmatrix, + ) + rhs_calc.apply(data) + + bd = data[binner.binned].data + print("rhs binned map = ", bd[bd != 0]) + + print("amplitudes_check = ", data["amplitudes_check"]) + + # Now we will run the LHS operator and compare. Re-use the previous detdata + # array for temp space. + + tmatrix.amplitudes = "amplitudes" + binner.binned = "lhs_binned" + lhs_calc = SolverLHS( + det_temp="signal", + binning=binner, + template_matrix=tmatrix, + ) + lhs_calc.apply(data) + + print("amplitudes out = ", data["amplitudes"]) + + # Verify that the output amplitudes agree + np.testing.assert_equal( + data["amplitudes"][tmpl.name].local, + data["amplitudes_check"][tmpl.name].local, + ) + + del data + return diff --git a/src/toast/tests/ops_scan_map.py b/src/toast/tests/ops_scan_map.py index 6524f972d..7e6c4a324 100644 --- a/src/toast/tests/ops_scan_map.py +++ b/src/toast/tests/ops_scan_map.py @@ -15,7 +15,7 @@ from ..pixels import PixelData -from ._helpers import create_outdir, create_satellite_data +from ._helpers import create_outdir, create_satellite_data, create_fake_sky class ScanMapTest(MPITestCase): @@ -24,15 +24,6 @@ def setUp(self): self.outdir = create_outdir(self.comm, fixture_name) np.random.seed(123456) - def create_fake_sky(self, data, dist_key, map_key): - dist = data[dist_key] - pix_data = PixelData(dist, np.float64, n_value=3) - # Just replicate the fake data across all local submaps - pix_data.data[:, :, 0] = 100.0 * np.random.uniform(size=dist.n_pix_submap) - pix_data.data[:, :, 1] = np.random.uniform(size=dist.n_pix_submap) - pix_data.data[:, :, 2] = np.random.uniform(size=dist.n_pix_submap) - data[map_key] = pix_data - def test_scan(self): # Create a fake satellite data set for testing data = create_satellite_data(self.comm) @@ -44,7 +35,7 @@ def test_scan(self): pointing.apply(data) # Create fake polarized sky pixel values locally - self.create_fake_sky(data, "pixel_dist", "fake_map") + create_fake_sky(data, "pixel_dist", "fake_map") # Scan map into timestreams scanner = ops.ScanMap( @@ -75,6 +66,62 @@ def test_scan(self): del data return + def test_scan_add_subtract(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm) + + # Create some detector pointing matrices + pointing = 
ops.PointingHealpix(
+            nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist"
+        )
+        pointing.apply(data)
+
+        # Create fake polarized sky pixel values locally
+        create_fake_sky(data, "pixel_dist", "fake_map")
+
+        # Scan map into timestreams twice, adding once and then subtracting.  Also test
+        # zero option.
+
+        scanner = ops.ScanMap(
+            det_data="signal",
+            pixels=pointing.pixels,
+            weights=pointing.weights,
+            map_key="fake_map",
+        )
+        scanner.apply(data)
+
+        rms = list()
+        for ob in data.obs:
+            for det in ob.local_detectors:
+                rms.append(np.std(ob.detdata["signal"][det]))
+        rms = np.array(rms)
+
+        scanner.zero = True
+        scanner.apply(data)
+
+        trms = list()
+        for ob in data.obs:
+            for det in ob.local_detectors:
+                trms.append(np.std(ob.detdata["signal"][det]))
+        trms = np.array(trms)
+
+        np.testing.assert_equal(trms, rms)
+
+        scanner.zero = False
+        scanner.subtract = True
+        scanner.apply(data)
+
+        trms = list()
+        for ob in data.obs:
+            for det in ob.local_detectors:
+                trms.append(np.std(ob.detdata["signal"][det]))
+        trms = np.array(trms)
+
+        np.testing.assert_equal(trms, np.zeros_like(trms))
+
+        del data
+        return
+
     def test_mask(self):
         # Create a fake satellite data set for testing
         data = create_satellite_data(self.comm)
diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py
index 8a0454d43..8962e8fe8 100644
--- a/src/toast/tests/runner.py
+++ b/src/toast/tests/runner.py
@@ -37,6 +37,7 @@
 from . import ops_sim_tod_noise as test_ops_sim_tod_noise
 from . import ops_mapmaker_utils as test_ops_mapmaker_utils
 from . import ops_mapmaker_binning as test_ops_mapmaker_binning
+from . import ops_mapmaker_solve as test_ops_mapmaker_solve
 from . import ops_mapmaker as test_ops_mapmaker
 from . import ops_scan_map as test_ops_scan_map
 from . import ops_madam as test_ops_madam
@@ -153,6 +154,7 @@ def test(name=None, verbosity=2):
         suite.addTest(loader.loadTestsFromModule(test_ops_sim_tod_noise))
         suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_utils))
         suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_binning))
+        suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker_solve))
         suite.addTest(loader.loadTestsFromModule(test_ops_mapmaker))
         suite.addTest(loader.loadTestsFromModule(test_ops_scan_map))
         suite.addTest(loader.loadTestsFromModule(test_ops_madam))

From 5d3eed462b9adeaed7c0ee0fa9795b8b72d15447 Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Sat, 16 Jan 2021 00:14:34 -0800
Subject: [PATCH 052/690] Many improvements to mapmaker and unit tests.

Unit test for destriping without noise filter now passes and agrees
with madam.
--- src/libtoast/src/toast_template_offset.cpp | 5 + src/toast/ops/__init__.py | 2 +- src/toast/ops/mapmaker.py | 38 +- src/toast/ops/mapmaker_binning.py | 29 ++ src/toast/ops/mapmaker_solve.py | 277 +++++++---- src/toast/ops/mapmaker_templates.py | 40 ++ src/toast/ops/mapmaker_utils.py | 52 ++- src/toast/ops/noise_weight.py | 1 + src/toast/ops/scan_map.py | 152 +++++- src/toast/templates/offset.py | 104 +++-- src/toast/tests/_helpers.py | 11 +- src/toast/tests/ops_mapmaker.py | 510 +++++++++++++++------ src/toast/tests/ops_mapmaker_solve.py | 6 +- 13 files changed, 927 insertions(+), 300 deletions(-) diff --git a/src/libtoast/src/toast_template_offset.cpp b/src/libtoast/src/toast_template_offset.cpp index d7cac81da..9f5f4ccb5 100644 --- a/src/libtoast/src/toast_template_offset.cpp +++ b/src/libtoast/src/toast_template_offset.cpp @@ -45,6 +45,9 @@ void toast::template_offset_project_signal(int64_t step_length, int64_t n_data, #pragma omp simd for (int64_t i = 0; i < n_amp - 1; ++i) { int64_t doff = i * step_length; + + // std::cout << "DBG accum " << step_length << " samples to base " << i << + // std::endl; for (int64_t j = 0; j < step_length; ++j) { amplitudes[i] += data[doff + j]; } @@ -59,6 +62,8 @@ void toast::template_offset_project_signal(int64_t step_length, int64_t n_data, } // Now handle the final amplitude. + // std::cout << "DBG accum " << n_data - (n_amp - 1) * step_length << + // " samples to base " << n_amp - 1 << std::endl; for (int64_t j = (n_amp - 1) * step_length; j < n_data; ++j) { amplitudes[n_amp - 1] += data[j]; } diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index 14b0163d4..fc211e572 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -28,7 +28,7 @@ from .pointing_healpix import PointingHealpix -from .scan_map import ScanMap, ScanMask +from .scan_map import ScanMap, ScanMask, ScanScale from .mapmaker_utils import ( BuildHitMap, diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index b8ff42247..1afa4bedd 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -234,6 +234,7 @@ def _exec(self, data, detectors=None, **kwargs): save_det_flag_mask = self.binning.det_flag_mask save_shared_flags = self.binning.shared_flags save_shared_flag_mask = self.binning.shared_flag_mask + save_covariance = self.binning.covariance # Also save the name of the user-requested output binned map. During the # solve we will output to a temporary map and then restore this name, in @@ -244,15 +245,22 @@ def _exec(self, data, detectors=None, **kwargs): # Data products, prefixed with the name of the operator. solver_hits_name = "{}_solve_hits".format(self.name) + solver_cov_name = "{}_solve_cov".format(self.name) solver_rcond_name = "{}_solve_rcond".format(self.name) solver_rcond_mask_name = "{}_solve_rcond_mask".format(self.name) + solver_result = "{}_solve_amplitudes".format(self.name) + solver_rhs = "{}_solve_rhs".format(self.name) + solver_bin = "{}_solve_bin".format(self.name) hits_name = "{}_hits".format(self.name) + cov_name = "{}_cov".format(self.name) rcond_name = "{}_rcond".format(self.name) flagname = "{}_flags".format(self.name) clean_name = "{}_cleaned".format(self.name) + self.binning.covariance = solver_cov_name + timer.start() # Flagging. We create a new set of data flags for the solver that includes: @@ -438,12 +446,11 @@ def _exec(self, data, detectors=None, **kwargs): # Set the binning operator to output to temporary map. This will be # overwritten on each iteration of the solver. 
-        self.binning.binned = "{}_solve_bin".format(self.name)
-
-        rhs_amplitude_key = "{}_amplitudes_rhs".format(self.name)
+        self.binning.binned = solver_bin
 
-        self.template_matrix.amplitudes = rhs_amplitude_key
+        self.template_matrix.amplitudes = solver_rhs
         rhs_calc = SolverRHS(
+            name="{}_rhs".format(self.name),
             det_data=self.det_data,
             overwrite=False,
             binning=self.binning,
@@ -451,29 +458,31 @@
         )
         rhs_calc.apply(data, detectors=detectors)
 
-        print("RHS = ", data[rhs_amplitude_key], flush=True)
+        print("RHS = ", data[solver_rhs], flush=True)
 
         self._log_info(comm, rank, "  finished RHS calculation in", timer=timer)
 
-        # Set up the LHS operator.  Use either the original timestreams or the copy
-        # as temp space.
+        # Set up the LHS operator.
 
         self._log_info(comm, rank, "begin PCG solver")
 
-        amplitude_key = "{}_amplitudes".format(self.name)
-        self.template_matrix.amplitudes = amplitude_key
-
         lhs_calc = SolverLHS(
+            name="{}_lhs".format(self.name),
             binning=self.binning,
             template_matrix=self.template_matrix,
         )
 
+        # If we eventually want to support an input starting guess of the
+        # amplitudes, we would need to ensure that data[solver_result] is set
+        # at this point...
+
         # Solve for amplitudes.
         solve(
             data,
             detectors,
             lhs_calc,
-            data[rhs_amplitude_key],
+            solver_rhs,
+            solver_result,
             convergence=self.convergence,
             n_iter_max=self.iter_max,
         )
@@ -488,10 +497,14 @@
         self.binning.shared_flags = save_shared_flags
         self.binning.shared_flag_mask = save_shared_flag_mask
         self.binning.binned = save_binned
+        self.binning.covariance = save_covariance
 
         # Now construct the noise covariance, hits, and condition number mask for the
         # final binned map.
 
+        save_covariance = self.map_binning.covariance
+        self.map_binning.covariance = cov_name
+
         if self.mc_mode:
             # Verify that our covariance and other products exist.
             if self.map_binning.pixel_dist not in data:
@@ -543,6 +556,7 @@
         # Projecting amplitudes to a temp space
         self.template_matrix.transpose = False
         self.template_matrix.det_data = temp_project
+        self.template_matrix.amplitudes = solver_result
 
         if self.map_binning.binned == "binned":
             # The user did not modify the default name of the output binned map.
@@ -600,6 +614,8 @@ def _exec(self, data, detectors=None, **kwargs): self.map_binning.apply(data, detectors=detectors) self.map_binning.pre_process = None + self.map_binning.covariance = save_covariance + self._log_info(comm, rank, " finished final binning in", timer=timer) return diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py index 6e3350a40..2422adadc 100644 --- a/src/toast/ops/mapmaker_binning.py +++ b/src/toast/ops/mapmaker_binning.py @@ -195,9 +195,38 @@ def _exec(self, data, detectors=None, **kwargs): # Extract the results binned_map = data[self.binned] + # dist = binned_map.distribution + # print("binned zmap = ") + # for ism, sm in enumerate(dist.local_submaps): + # for spix in range(dist.n_pix_submap): + # if binned_map.data[ism, spix, 0] != 0: + # pix = sm * dist.n_pix_submap + spix + # print( + # "{} {:0.6e} {:0.6e} {:0.6e}".format( + # pix, + # binned_map.data[ism, spix, 0], + # binned_map.data[ism, spix, 1], + # binned_map.data[ism, spix, 2], + # ) + # ) + # Apply the covariance in place covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv")) + # print("binned = ") + # for ism, sm in enumerate(dist.local_submaps): + # for spix in range(dist.n_pix_submap): + # if binned_map.data[ism, spix, 0] != 0: + # pix = sm * dist.n_pix_submap + spix + # print( + # "{} {:0.6e} {:0.6e} {:0.6e}".format( + # pix, + # binned_map.data[ism, spix, 0], + # binned_map.data[ism, spix, 1], + # binned_map.data[ism, spix, 2], + # ) + # ) + return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index 1aa11a62b..c8df476d6 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -28,6 +28,8 @@ from .noise_weight import NoiseWeight +from .mapmaker_templates import TemplateMatrix + @trait_docs class SolverRHS(Operator): @@ -277,6 +279,10 @@ class SolverLHS(Operator): help="This must be an instance of a template matrix operator", ) + out = Unicode( + None, allow_none=True, help="Output Data key for resulting amplitudes" + ) + @traitlets.validate("binning") def _check_binning(self, proposal): bin = proposal["value"] @@ -338,6 +344,8 @@ def _exec(self, data, detectors=None, **kwargs): if comm is not None: rank = comm.rank + print("\n\n==============================================\n\n", flush=True) + # Check that input traits are set if self.binning is None: raise RuntimeError("You must set the binning trait before calling exec()") @@ -345,6 +353,8 @@ def _exec(self, data, detectors=None, **kwargs): raise RuntimeError( "You must set the template_matrix trait before calling exec()" ) + if self.out is None: + raise RuntimeError("You must set the 'out' trait before calling exec()") # Project amplitudes into timestreams and make a binned map. @@ -360,9 +370,6 @@ def _exec(self, data, detectors=None, **kwargs): self.binning.apply(data, detectors=detectors) self.binning.pre_process = None - bd = data[self.binning.binned].data - print("lhs binned map = ", bd[bd != 0], flush=True) - self._log_debug(comm, rank, "projection and binning finished in", timer=timer) # Build a pipeline for the map scanning and template matrix application. @@ -387,8 +394,14 @@ def _exec(self, data, detectors=None, **kwargs): noise_model=self.binning.noise_model, det_data=self.det_temp ) - # Same template matrix operator, but now we are applying the transpose. 
-        self.template_matrix.transpose = True
+        # Make a copy of the template_matrix operator so that we can apply both the
+        # matrix and its transpose in a single pipeline
+
+        template_transpose = self.template_matrix.duplicate()
+        template_transpose.amplitudes = self.out
+        template_transpose.transpose = True
+
+        # print("project input amps = ", data[self.template_matrix.amplitudes])
 
         # Create a pipeline that projects the binned map and applies noise
         # weights and templates.
@@ -399,9 +412,10 @@
             proj_pipe = Pipeline(
                 detector_sets=["ALL"],
                 operators=[
+                    self.template_matrix,
                     scan_map,
                     noise_weight,
-                    self.template_matrix,
+                    template_transpose,
                 ],
             )
         else:
@@ -409,16 +423,18 @@
             proj_pipe = Pipeline(
                 detector_sets=["SINGLE"],
                 operators=[
+                    self.template_matrix,
                     pointing,
                     scan_map,
                     noise_weight,
-                    self.template_matrix,
+                    template_transpose,
                 ],
             )
 
         # Zero out the amplitudes before accumulating the updated values
 
-        data[self.template_matrix.amplitudes].reset()
+        if self.out in data:
+            data[self.out].reset()
 
         # Run the projection pipeline.
@@ -437,11 +453,11 @@ def _requires(self):
         # This operator requires everything that its sub-operators need.
         req = self.binning.requires()
         req.update(self.template_matrix.requires())
-        req["meta"].append(self.amplitudes)
         return req
 
     def _provides(self):
         prov = self.binning.provides()
+        prov["meta"].append(self.out)
         return prov
 
     def _accelerators(self):
@@ -451,9 +467,9 @@
 def solve(
     data,
     detectors,
-    lhs,
-    rhs_amps,
-    guess=None,
+    lhs_op,
+    rhs_key,
+    result_key,
     convergence=1.0e-12,
     n_iter_max=100,
     n_iter_min=3,
@@ -462,20 +478,22 @@
 
     This uses a standard preconditioned conjugate gradient technique (e.g. Shewchuk,
     1994) to solve for the template amplitudes.  The Right Hand Side amplitude values
-    are precomputed and passed to this function.  The starting guess of the solver
-    can be passed in or else zeros are used.
+    are precomputed and stored in the data.  The result key in the Data is either
+    created or used as the starting guess.
 
     Args:
         data (Data):  The distributed data object.
         detectors (list):  The subset of detectors used for the mapmaking.
-        lhs (Operator):  The LHS operator.
-        rhs_amps (Amplitudes):  The RHS value.
-        guess (Amplitudes):  The starting guess.  If None, use all zeros.
+        lhs_op (Operator):  The LHS operator.
+        rhs_key (str):  The Data key containing the RHS value.
+        result_key (str):  The Data key containing the output result and
+            optionally the starting guess.
        convergence (float):  The convergence limit.
         n_iter_max (int):  The maximum number of iterations.
+        n_iter_min (int):  The minimum number of iterations, for detecting a stall.
 
     Returns:
-        None
+        (Amplitudes):  The result.
 
     """
     log = Logger.get()
@@ -490,65 +508,123 @@
     if comm is not None:
         rank = comm.rank
 
-    # Solving A * x = b ...
-
-    # The name of the amplitudes which are updated in place by the LHS operator
-    lhs_amps = lhs.template_matrix.amplitudes
+    if rhs_key not in data:
+        msg = "rhs_key '{}' does not exist in data".format(rhs_key)
+        log.error(msg)
+        raise RuntimeError(msg)
+    rhs = data[rhs_key]
 
-    # The starting guess
-    if guess is None:
+    result = None
+    if result_key not in data:
         # Copy structure of the RHS and set to zero
-        if lhs_amps in data:
-            msg = "LHS amplitudes '{}' already exists in data".format(lhs_amps)
-            log.error(msg)
-            raise RuntimeError(msg)
-        data[lhs_amps] = rhs_amps.duplicate()
-        data[lhs_amps].reset()
+        data[result_key] = rhs.duplicate()
+        data[result_key].reset()
+        result = data[result_key]
     else:
-        # FIXME: add a check that the structure of the guess matches the RHS.
-        data[lhs_amps] = guess
+        result = data[result_key]
+        if not isinstance(result, Amplitudes):
+            raise RuntimeError("starting guess must be an Amplitudes instance")
+        if result.keys() != rhs.keys():
+            raise RuntimeError("starting guess must have same keys as RHS")
+        for k, v in result.items():
+            if v.n_global != rhs[k].n_global:
+                msg = (
+                    "starting guess['{}'] has different n_global than rhs['{}']".format(
+                        k, k
+                    )
+                )
+                raise RuntimeError(msg)
+            if v.n_local != rhs[k].n_local:
+                msg = (
+                    "starting guess['{}'] has different n_local than rhs['{}']".format(
+                        k, k
+                    )
+                )
+                raise RuntimeError(msg)
+
+    # Solving A * x = b ...
+
+    # Temporary variables.  We give these descriptive names here, and note in
+    # comments the symbols used for them in the literature.  Duplicate the
+    # structure of the RHS for these when we first assign below.
+
+    # The residual "r"
+    residual = None
+
+    # The result of the LHS operator "q"
+    lhs_out_key = "{}_out".format(lhs_op.name)
+    if lhs_out_key in data:
+        data[lhs_out_key].clear()
+        del data[lhs_out_key]
+    data[lhs_out_key] = rhs.duplicate()
+    lhs_out = data[lhs_out_key]
+
+    # The result of the preconditioner "s"
+    precond = None
 
-    # Compute q = A * x (in place)
-    lhs.apply(data, detectors=detectors)
+    # The new proposed direction "d"
+    proposal_key = "{}_in".format(lhs_op.name)
+    if proposal_key in data:
+        data[proposal_key].clear()
+        del data[proposal_key]
+    data[proposal_key] = rhs.duplicate()
+    data[proposal_key].reset()
+    proposal = data[proposal_key]
+
+    # One additional temp variable.
Allocate this now for use below + temp = rhs.duplicate() + temp.reset() + + # Compute q = A * x + + # Input is either the starting guess or zero + lhs_op.template_matrix.amplitudes = result_key + lhs_op.out = lhs_out_key + lhs_op.apply(data, detectors=detectors) # The initial residual # r = b - q - residual = rhs_amps.duplicate() - residual -= data[lhs_amps] + residual = rhs.duplicate() + residual -= lhs_out - print("RHS ", rhs_amps) - print("LHS", data[lhs_amps]) + print("RHS ", rhs) + print("LHS ", lhs_out) print("residual", residual) # The preconditioned residual # s = M^-1 * r - precond_residual = residual.duplicate() - precond_residual.reset() - lhs.template_matrix.apply_precond(residual, precond_residual) - - print("precond_residual", precond_residual) + precond = rhs.duplicate() + precond.reset() + lhs_op.template_matrix.apply_precond(residual, precond) + print("Start precond = ", precond) # The proposal # d = s - proposal = precond_residual.duplicate() + for k, v in proposal.items(): + v.local[:] = precond[k].local + print("proposal", proposal) - # print("proposal", proposal) + # Set LHS amplitude inputs to this proposal + lhs_op.template_matrix.amplitudes = proposal_key - # delta_new = r^T * d - sqsum = precond_residual.dot(residual) - print("sqsum = ", sqsum) - - init_sqsum = sqsum - best_sqsum = sqsum + # Epsilon_0 = r^T * r + sqsum = rhs.dot(rhs) + sqsum_init = sqsum + sqsum_best = sqsum last_best = sqsum - sqsum_last = None + # delta_new = delta_0 = r^T * d + delta = proposal.dot(residual) + print("delta = ", delta) + delta_init = delta if comm is not None: comm.barrier() timer.stop() if rank == 0: - msg = "MapMaker initial residual = {}, {:0.2f} s".format(sqsum, timer.seconds()) + msg = "MapMaker initial residual = {}, {:0.2f} s".format( + sqsum_init, timer.seconds() + ) log.info(msg) timer.clear() timer.start() @@ -557,64 +633,51 @@ def solve( if not np.isfinite(sqsum): raise RuntimeError("Residual is not finite") - # Update LHS amplitude inputs - for k, v in data[lhs_amps].items(): - v.local[:] = proposal[k].local - - print("LHS input = ", data[lhs_amps], flush=True) + # q = A * d + lhs_op.apply(data, detectors=detectors) - # q = A * d (in place) - lhs.apply(data, detectors=detectors) - - print("LHS output", data[lhs_amps]) + print("LHS output", lhs_out) # alpha = delta_new / (d^T * q) - alpha = sqsum - alpha /= proposal.dot(data[lhs_amps]) + print("alpha num = ", delta) + print("alpha den = ", proposal.dot(lhs_out)) + alpha = delta / proposal.dot(lhs_out) print("alpha = ", alpha) - # r -= alpha * q - data[lhs_amps] *= alpha - residual -= data[lhs_amps] - # print("residual", residual) - - # The preconditioned residual - # s = M^-1 * r - lhs.template_matrix.apply_precond(residual, precond_residual) - - # print("precond_residual", precond_residual) - - # delta_old = delta_new - sqsum_last = sqsum + # Update the result + # x += alpha * d + temp.reset() + for k, v in temp.items(): + v.local[:] = proposal[k].local + temp *= alpha + result += temp - # delta_new = r^T * s - sqsum = precond_residual.dot(residual) + # Update the residual + # r -= alpha * q + temp.reset() + for k, v in temp.items(): + v.local[:] = lhs_out[k].local + temp *= alpha + residual -= temp + + # Epsilon + sqsum = residual.dot(residual) print("sqsum = ", sqsum) if comm is not None: comm.barrier() timer.stop() if rank == 0: - msg = "MapMaker iteration {:4d}, relative residual = {}, {:0.2f} s".format( - iter, sqsum, timer.seconds() + msg = "MapMaker iteration {:4d}, relative residual = {:0.6e}, {:0.2f} 
s".format( + iter, sqsum / sqsum_init, timer.seconds() ) log.info(msg) timer.clear() timer.start() - # beta = delta_new / delta_old - beta = sqsum / sqsum_last - - # New proposal - # d = s + beta * d - proposal *= beta - proposal += precond_residual - - # print("proposal", proposal) - # Check for convergence - if sqsum < init_sqsum * convergence or sqsum < 1e-30: + if (sqsum / sqsum_init) < convergence or sqsum < 1e-30: timer.stop() timer_full.stop() if rank == 0: @@ -624,11 +687,12 @@ def solve( log.info(msg) break - best_sqsum = min(sqsum, best_sqsum) - print("best_sqsum = ", best_sqsum) + sqsum_best = min(sqsum, sqsum_best) + print("sqsum_best = ", sqsum_best) + # Check for stall / divergence if iter % 10 == 0 and iter >= n_iter_min: - if last_best < best_sqsum * 2: + if last_best < sqsum_best * 2: timer.stop() timer_full.stop() if rank == 0: @@ -637,4 +701,25 @@ def solve( ) log.info(msg) break - last_best = best_sqsum + last_best = sqsum_best + + # The preconditioned residual + # s = M^-1 * r + lhs_op.template_matrix.apply_precond(residual, precond) + + # delta_old = delta_new + delta_last = delta + + # delta_new = r^T * s + delta = precond.dot(residual) + + # beta = delta_new / delta_old + beta = delta / delta_last + print("beta = ", beta) + + # New proposal + # d = s + beta * d + proposal *= beta + proposal += precond + + print("proposal[{}]".format(iter), proposal, flush=True) diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 62979cc5e..1b1ebeebf 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -63,6 +63,29 @@ def __init__(self, **kwargs): super().__init__(**kwargs) self._initialized = False + def duplicate(self): + """Make a shallow copy which contains the same list of templates. + + This is useful when we want to use both a template matrix and its transpose + in the same pipeline. + + Returns: + (TemplateMatrix): A new instance with the same templates. + + """ + ret = TemplateMatrix( + API=self.API, + templates=self.templates, + amplitudes=self.amplitudes, + transpose=self.transpose, + view=self.view, + det_data=self.det_data, + flags=self.flags, + flag_mask=self.flag_mask, + ) + ret._initialized = self._initialized + return ret + def apply_precond(self, amps_in, amps_out): """Apply the preconditioner from all templates to the amplitudes. @@ -133,6 +156,12 @@ def _exec(self, data, detectors=None, **kwargs): if self.amplitudes not in data: # The output template amplitudes do not yet exist. Create these with # all zero values. 
+ print( + "template matrix transpose create amplitudes {}".format( + self.amplitudes + ), + flush=True, + ) data[self.amplitudes] = AmplitudesMap() for tmpl in self.templates: data[self.amplitudes][tmpl.name] = tmpl.zeros() @@ -157,9 +186,20 @@ def _exec(self, data, detectors=None, **kwargs): for d in dets: ob.detdata[self.det_data][d, :] = 0 + # print("template matrix in = ", data[self.amplitudes], flush=True) for d in all_dets: for tmpl in self.templates: tmpl.add_to_signal(d, data[self.amplitudes][tmpl.name]) + # print( + # "template matrix out {} --> {}".format( + # self.det_data, self.templates[0].det_data + # ) + # ) + # print( + # "template matrix out {} = ".format(d), + # ob.detdata[self.det_data][d], + # flush=True, + # ) return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index e625c6823..a9791f9d4 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -582,7 +582,7 @@ def _exec(self, data, detectors=None, **kwargs): # Data for this detector ddata = dview[det] - print("Zmap det {} = {}".format(det, ddata), flush=True) + # print("Zmap det {} = {}".format(det, ddata), flush=True) # We require that the pointing matrix has the same number of # non-zero elements for every detector and every observation. @@ -609,25 +609,25 @@ def _exec(self, data, detectors=None, **kwargs): ) log.error(msg) raise RuntimeError(msg) - print( - "Zmap found existing PixelData {}".format(self.zmap), - flush=True, - ) + # print( + # "Zmap found existing PixelData {}".format(self.zmap), + # flush=True, + # ) zmap = data[self.zmap] else: - print( - "Zmap allocating PixelData {}".format(self.zmap), - flush=True, - ) + # print( + # "Zmap allocating PixelData {}".format(self.zmap), + # flush=True, + # ) data[self.zmap] = PixelData( dist, np.float64, n_value=weight_nnz ) zmap = data[self.zmap] else: - print( - "Zmap PixelData {} already loaded".format(self.zmap), - flush=True, - ) + # print( + # "Zmap PixelData {} already loaded".format(self.zmap), + # flush=True, + # ) check_nnz = None if len(wview.detector_shape) == 1: check_nnz = 1 @@ -654,6 +654,28 @@ def _exec(self, data, detectors=None, **kwargs): if self.det_flags is not None: local_pix[fview[det] & self.det_flag_mask != 0] = -1 + # print( + # "Offset output det {}, ob {} [:100] = ".format(det, ob.name), + # flush=True, + # ) + # for i in range(100): + # print( + # "{} {} weight {} {} {}".format( + # i, + # ddata[i], + # wview[det][i, 0], + # wview[det][i, 1], + # wview[det][i, 2], + # ) + # ) + # print("", flush=True) + + # print( + # "Offset output det {}, ob {} [-100:] = ".format(det, ob.name), + # ob.detdata[self.det_data][detector][-100:], + # flush=True, + # ) + # Accumulate cov_accum_zmap( dist.n_local_submap, @@ -667,7 +689,7 @@ def _exec(self, data, detectors=None, **kwargs): zmap.raw, ) zm = zmap.raw.array() - print("Zmap after det {} ".format(det), zm[zm != 0], flush=True) + # print("Zmap after det {} ".format(det), zm[zm != 0], flush=True) return def _finalize(self, data, **kwargs): @@ -677,7 +699,7 @@ def _finalize(self, data, **kwargs): data[self.zmap].sync_alltoallv() else: data[self.zmap].sync_allreduce() - print("Zmap final sync of {}".format(self.zmap), flush=True) + # print("Zmap final sync of {}".format(self.zmap), flush=True) return def _requires(self): diff --git a/src/toast/ops/noise_weight.py b/src/toast/ops/noise_weight.py index 3fe090427..050117f20 100644 --- a/src/toast/ops/noise_weight.py +++ b/src/toast/ops/noise_weight.py @@ 
-63,6 +63,7 @@ def _exec(self, data, detectors=None, **kwargs): for d in dets: # Get the detector weight from the noise model. detweight = noise.detector_weight(d) + print("NWEIGHT det {} = {}".format(d, detweight)) # Apply ob.detdata[self.det_data][d] *= detweight diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index 510735ab2..4e5a83f8f 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -140,27 +140,27 @@ def _exec(self, data, detectors=None, **kwargs): "Projection supports only float32 and float64 binned maps" ) - print("========= {} ==========".format(det)) - print("Scanned map TOD = ", maptod) - print("Scanned original TOD = ", ddata) + # print("========= {} ==========".format(det)) + # print("Scanned map TOD = ", maptod) + # print("Scanned original TOD = ", ddata) # zero-out if needed if self.zero: ddata[:] = 0.0 - print("Scanned: zero-ing TOD = ", ddata) + # print("Scanned: zero-ing TOD = ", ddata) # Add or subtract. Note that the map scanned timestream will have # zeros anywhere that the pointing is bad, but those samples (and # any other detector flags) should be handled at other steps of the # processing. if self.subtract: - print("Scanned: subtracting TOD") + # print("Scanned: subtracting TOD") ddata[:] -= maptod else: - print("Scanned: adding TOD") + # print("Scanned: adding TOD") ddata[:] += maptod - print("Scanned final = ", ddata) + # print("Scanned final = ", ddata) del maptod maptod_raw.clear() @@ -288,3 +288,141 @@ def _provides(self): def _accelerators(self): return list() + + +@trait_docs +class ScanScale(Operator): + """Operator which uses the pointing matrix to apply pixel weights to timestreams. + + The map must be a PixelData instance with either float32 or float64 values and + one value per pixel. The timestream samples are multiplied by their corresponding + pixel values. 
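+
+    A minimal usage sketch (illustrative only, assuming a one-value weight map
+    has already been stored in the Data under the key "weight_map"):
+
+        scan_scale = ScanScale(det_data="signal", map_key="weight_map")
+        scan_scale.apply(data)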
+ + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + det_data = Unicode( + None, allow_none=True, help="Observation detdata key for the timestream data" + ) + + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") + + weights = Unicode("weights", help="Observation detdata key for Stokes weights") + + map_key = Unicode( + None, + allow_none=True, + help="The Data key where the weight map is located", + ) + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + # Check that the detector data is set + if self.det_data is None: + raise RuntimeError("You must set the det_data trait before calling exec()") + + # Check that the map is set + if self.map_key is None: + raise RuntimeError("You must set the map_key trait before calling exec()") + if self.map_key not in data: + msg = "The map_key '{}' does not exist in the data".format(self.map_key) + raise RuntimeError(msg) + + map_data = data[self.map_key] + if not isinstance(map_data, PixelData): + raise RuntimeError("The map to scan must be a PixelData instance") + if map_data.n_value != 1: + raise RuntimeError("The map to scan must have one value per pixel") + map_dist = map_data.distribution + + for ob in data.obs: + # Get the detectors we are using for this observation + dets = ob.select_local_detectors(detectors) + if len(dets) == 0: + # Nothing to do for this observation + continue + + if self.det_data not in ob.detdata: + msg = "detector data '{}' does not exist in observation {}".format( + self.det_data, ob.name + ) + log.error(msg) + raise RuntimeError(msg) + + # Temporary array, re-used for all detectors + maptod_raw = AlignedF64.zeros(ob.n_local_samples) + maptod = maptod_raw.array() + + for det in dets: + # The pixels, weights, and data. + pix = ob.detdata[self.pixels][det] + ddata = ob.detdata[self.det_data][det] + + # Get local submap and pixels + local_sm, local_pix = map_dist.global_pixel_to_submap(pix) + + # We support projecting from either float64 or float32 maps. We + # use a shortcut here by passing the original timestream values + # as the pointing "weights", so that the output is equal to the + # pixel values times the original timestream. 
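+                # In other words, for each sample i this computes
+                #     maptod[i] = map[pix[i]] * ddata[i]
+                # and the product is copied back into the timestream below.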
+
+                maptod[:] = 0.0
+
+                if map_data.dtype.char == "d":
+                    scan_map_float64(
+                        map_data.distribution.n_pix_submap,
+                        1,
+                        local_sm.astype(np.int64),
+                        local_pix.astype(np.int64),
+                        map_data.raw,
+                        ddata.astype(np.float64).reshape(-1),
+                        maptod,
+                    )
+                elif map_data.dtype.char == "f":
+                    scan_map_float32(
+                        map_data.distribution.n_pix_submap,
+                        1,
+                        local_sm.astype(np.int64),
+                        local_pix.astype(np.int64),
+                        map_data.raw,
+                        ddata.astype(np.float64).reshape(-1),
+                        maptod,
+                    )
+                else:
+                    raise RuntimeError(
+                        "Projection supports only float32 and float64 binned maps"
+                    )
+
+                ddata[:] = maptod
+
+            del maptod
+            maptod_raw.clear()
+            del maptod_raw
+
+        return
+
+    def _finalize(self, data, **kwargs):
+        return
+
+    def _requires(self):
+        req = {
+            "meta": [self.map_key],
+            "shared": list(),
+            "detdata": [self.pixels, self.weights, self.det_data],
+        }
+        return req
+
+    def _provides(self):
+        prov = {"meta": list(), "shared": list(), "detdata": list()}
+        return prov
+
+    def _accelerators(self):
+        return list()
diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py
index 3e0470663..b29ec3a66 100644
--- a/src/toast/templates/offset.py
+++ b/src/toast/templates/offset.py
@@ -7,6 +7,7 @@
 import numpy as np
 
 import scipy
+import scipy.signal
 
 from ..utils import Logger, rate_from_times, AlignedF32
 
@@ -94,7 +95,7 @@ def _initialize(self, new_data):
             self._obs_rate[iob] = rate
 
             # The step length for this observation
-            step_length = int(self.step_time * self._obs_rate[iob])
+            step_length = self._step_length(self.step_time, self._obs_rate[iob])
 
             # Track number of offset amplitudes per view.
             self._obs_views[iob] = list()
@@ -106,6 +107,8 @@ def _initialize(self, new_data):
                 else:
                     slice_len = view_slice.stop - view_slice.start
                 view_n_amp = slice_len // step_length
+                if view_n_amp * step_length < slice_len:
+                    view_n_amp += 1
                 self._obs_views[iob].append(view_n_amp)
 
             # The noise model.
@@ -183,7 +186,7 @@ def _initialize(self, new_data):
                 detnoise = ob[self.noise_model].detector_weight(det)
 
                 # The step length for this observation
                step_length = self._step_length(self.step_time, self._obs_rate[iob])
 
                 # Loop over views
                 views = ob.view[self.view]
@@ -194,7 +197,7 @@ def _initialize(self, new_data):
                         view_samples = ob.n_local_samples
                     else:
                         view_samples = vw.stop - vw.start
-                    n_amp_view = slice_len // step_length
+                    n_amp_view = self._obs_views[iob][ivw]
 
                     # Move this loop to compiled code if it is slow
                     if self.flags is None:
@@ -204,6 +207,7 @@ def _initialize(self, new_data):
                             if amp == n_amp_view - 1:
                                 amplen = view_samples - voff
                             self._sigmasq[offset + amp] = 1.0 / (detnoise * amplen)
+                            voff += step_length
                     else:
                         flags = views.detdata[self.flags][ivw]
                         voff = 0
@@ -227,7 +231,7 @@ def _initialize(self, new_data):
 
         # Compute the amplitude noise filter and preconditioner for each detector
         # and each view.
- print("Offset sigmasq = ", self._sigmasq, flush=True) + # print("Offset sigmasq = ", self._sigmasq, flush=True) self._filters = dict() self._precond = dict() @@ -249,7 +253,7 @@ def _initialize(self, new_data): ) # Log version of offset PSD for interpolation - logfreq = np.log(freq) + logfreq = np.log(self._freq[iob]) logpsd = np.log(offset_psd) logfilter = np.log(1 / offset_psd) @@ -291,7 +295,7 @@ def _truncate(noisefilter, lim=1e-4): view_samples = ob.n_local_samples else: view_samples = vw.stop - vw.start - n_amp_view = self._obs_view[iob][ivw] + n_amp_view = self._obs_views[iob][ivw] sigmasq_slice = self._sigmasq[offset : offset + n_amp_view] # nstep = offset_slice.stop - offset_slice.start @@ -318,7 +322,7 @@ def _truncate(noisefilter, lim=1e-4): ) icenter = noisefilter.size // 2 preconditioner = np.zeros( - [precond_width, nstep], dtype=np.float64 + [precond_width, n_amp_view], dtype=np.float64 ) preconditioner[0] = sigmasq_slice preconditioner[:wband, :] += np.repeat( @@ -394,6 +398,9 @@ def _zeros(self): z.local_flags[:] = np.where(self._amp_flags, 1, 0) return z + def _step_length(self, stime, rate): + return int(stime * rate + 0.5) + @function_timer def _add_to_signal(self, detector, amplitudes): offset = self._det_start[detector] @@ -401,37 +408,35 @@ def _add_to_signal(self, detector, amplitudes): if detector not in ob.local_detectors: continue # The step length for this observation - step_length = int(self.step_time * self._obs_rate[iob]) - print( - "Offset input det {}, ob {} = ".format(detector, iob), - ob.view[self.view].detdata[self.det_data], - flush=True, - ) + step_length = self._step_length(self.step_time, self._obs_rate[iob]) + # print( + # "Offset input det {}, ob {} = ".format(detector, iob), + # ob.view[self.view].detdata[self.det_data], + # flush=True, + # ) for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] - print( - "Offset input det {}, ob {}, view {} = ".format(detector, iob, ivw), - vw[detector], - flush=True, - ) - print( - "Offset input amplitude range = {} - {}".format( - offset, offset + n_amp_view - 1 - ) - ) + # print( + # "Offset input det {}, ob {}, view {} = ".format(detector, iob, ivw), + # vw[detector], + # flush=True, + # ) + # print( + # "Offset input amplitude range = {} - {}".format( + # offset, offset + n_amp_view - 1 + # ) + # ) template_offset_add_to_signal( step_length, amplitudes.local[offset : offset + n_amp_view], vw[detector], ) - print( - "Offset output det {}, ob {}, view {} = ".format( - detector, iob, ivw - ), - vw[detector], - flush=True, - ) offset += n_amp_view + # print( + # "Offset output det {}, ob {} = ".format(detector, iob), + # ob.detdata[self.det_data], + # flush=True, + # ) @function_timer def _project_signal(self, detector, amplitudes): @@ -440,7 +445,7 @@ def _project_signal(self, detector, amplitudes): if detector not in ob.local_detectors: continue # The step length for this observation - step_length = int(self.step_time * self._obs_rate[iob]) + step_length = self._step_length(self.step_time, self._obs_rate[iob]) for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] template_offset_project_signal( @@ -448,6 +453,20 @@ def _project_signal(self, detector, amplitudes): vw[detector], amplitudes.local[offset : offset + n_amp_view], ) + # amplitudes.local[offset : offset + n_amp_view] *= self._sigmasq[ + # offset : offset + n_amp_view + # ] + # print( + # "offset project det {}, amps = ".format(detector), + # 
flush=True, + # ) + # for i in range(n_amp_view): + # offset = self._det_start[detector] + # print( + # "base {} = {} sumsq = {}".format( + # i, amplitudes.local[offset + i], self._sigmasq[offset + i] + # ) + # ) offset += n_amp_view @function_timer @@ -475,7 +494,7 @@ def _apply_precond(self, amplitudes_in, amplitudes_out): # C_a preconditioner for det in self._all_dets: offset = self._det_start[det] - for iob, ob in enumerate(new_data.obs): + for iob, ob in enumerate(self.data.obs): if det not in ob.local_detectors: continue # Loop over views @@ -488,10 +507,10 @@ def _apply_precond(self, amplitudes_in, amplitudes_out): else: view_samples = vw.stop - vw.start - n_amp_view = self._obs_view[iob][ivw] + n_amp_view = self._obs_views[iob][ivw] amp_slice = slice(offset, offset + n_amp_view, 1) - amps_in = amplitudes_in[amp_slice] + amps_in = amplitudes_in.local[amp_slice] amps_out = None if self.precond_width <= 1: # Use C_a prior @@ -508,9 +527,24 @@ def _apply_precond(self, amplitudes_in, amplitudes_out): overwrite_b=False, check_finite=True, ) - amplitudes_out[amp_slice] = amps_out + amplitudes_out.local[amp_slice] = amps_out else: # Diagonal preconditioner + # print("diagonal prec = ") + # ndetbase = amplitudes_in.n_local // 2 + # print( + # "ndetbase = {}, len(sigmasq) = {}".format(ndetbase, len(self._sigmasq)) + # ) + # for det in self._all_dets: + # offset = self._det_start[det] + # for i in range(ndetbase): + # print( + # "det {} base {} = {:0.6e}".format( + # det, i, self._sigmasq[offset + i] + # ), + # flush=True, + # ) + # print(self._sigmasq) amplitudes_out.local[:] = amplitudes_in.local amplitudes_out.local *= self._sigmasq return diff --git a/src/toast/tests/_helpers.py b/src/toast/tests/_helpers.py index 42fae7de0..8197f3590 100644 --- a/src/toast/tests/_helpers.py +++ b/src/toast/tests/_helpers.py @@ -93,6 +93,7 @@ def create_telescope(group_size, sample_rate=10.0 * u.Hz): n_pix=npix, sample_rate=sample_rate, f_min=1.0e-5 * u.Hz, + # net=1.0, net=0.5, f_knee=(sample_rate / 2000.0), ) @@ -130,7 +131,7 @@ def create_distdata(mpicomm, obs_per_group=1, samples=10): def create_satellite_data( - mpicomm, obs_per_group=1, sample_rate=10.0 * u.Hz, obs_time=5.0 * u.minute + mpicomm, obs_per_group=1, sample_rate=10.0 * u.Hz, obs_time=10.0 * u.minute ): """Create a data object with a simple satellite sim. @@ -161,10 +162,10 @@ def create_satellite_data( telescope=tele, hwp_rpm=10.0, observation_time=obs_time, - spin_period=1.0 * u.minute, - spin_angle=2.0 * u.degree, - prec_period=5.0 * u.minute, - prec_angle=2.0 * u.degree, + spin_period=2.0 * u.minute, + spin_angle=5.0 * u.degree, + prec_period=10.0 * u.minute, + prec_angle=10.0 * u.degree, ) sim_sat.apply(data) diff --git a/src/toast/tests/ops_mapmaker.py b/src/toast/tests/ops_mapmaker.py index 5cd9758ab..8a414c64e 100644 --- a/src/toast/tests/ops_mapmaker.py +++ b/src/toast/tests/ops_mapmaker.py @@ -34,155 +34,210 @@ def setUp(self): self.outdir = create_outdir(self.comm, fixture_name) np.random.seed(123456) - def test_offset(self): - # Create a fake satellite data set for testing - data = create_satellite_data(self.comm, obs_time=20.0 * u.minute) - - # Create some sky signal timestreams. 
- - pointing = ops.PointingHealpix( - nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" - ) - pointing.apply(data) - - # Create fake polarized sky pixel values locally - create_fake_sky(data, "pixel_dist", "fake_map") - - # Scan map into timestreams - scanner = ops.ScanMap( - det_data="signal", - pixels=pointing.pixels, - weights=pointing.weights, - map_key="fake_map", - ) - scanner.apply(data) - - # Now clear the pointing and reset things for use with the mapmaking test later - delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) - delete_pointing.apply(data) - pointing.create_dist = None - - # Create an uncorrelated noise model from focalplane detector properties - default_model = ops.DefaultNoiseModel(noise_model="noise_model") - default_model.apply(data) - - # Simulate noise and accumulate to signal - sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") - sim_noise.apply(data) - - print(data.obs[0].detdata["signal"]) - print("Done generating starting TOD", flush=True) - - # Set up binning operator for solving - binner = ops.BinMap( - pixel_dist="pixel_dist", - pointing=pointing, - noise_model=default_model.noise_model, - ) - - # Set up template matrix with just an offset template. - - # Use 1/10 of an observation as the baseline length. Make it not evenly - # divisible in order to test handling of the final amplitude. - ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] - step_seconds = float(int(ob_time / 10.0)) - tmpl = templates.Offset( - times="times", - noise_model=default_model.noise_model, - step_time=step_seconds, - ) - - tmatrix = ops.TemplateMatrix(templates=[tmpl]) - - # Map maker - mapper = ops.MapMaker( - det_data="signal", - binning=binner, - template_matrix=tmatrix, - ) - - # Make the map - mapper.apply(data) - - print(data) - - # Access the output - # final_map = data[mapper.binning.binned] - - del data - return - - # def test_compare_madam(self): - # if not ops.Madam.available: - # print("libmadam not available, skipping binned map comparison") - # return - # + # def test_offset(self): # # Create a fake satellite data set for testing - # data = create_satellite_data(self.comm) + # data = create_satellite_data(self.comm, obs_time=20.0 * u.minute) + # + # # Create some sky signal timestreams. 
+ # + # pointing = ops.PointingHealpix( + # nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + # ) + # pointing.apply(data) + # + # # Create fake polarized sky pixel values locally + # create_fake_sky(data, "pixel_dist", "fake_map") + # + # # Scan map into timestreams + # scanner = ops.ScanMap( + # det_data="signal", + # pixels=pointing.pixels, + # weights=pointing.weights, + # map_key="fake_map", + # ) + # scanner.apply(data) + # + # # Now clear the pointing and reset things for use with the mapmaking test later + # delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + # delete_pointing.apply(data) + # pointing.create_dist = None # # # Create an uncorrelated noise model from focalplane detector properties # default_model = ops.DefaultNoiseModel(noise_model="noise_model") # default_model.apply(data) # - # # Simulate noise - # sim_noise = ops.SimNoise(noise_model="noise_model", out="noise") + # # Simulate noise and accumulate to signal + # sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") # sim_noise.apply(data) # - # # Pointing operator + # print(data.obs[0].detdata["signal"]) + # print("Done generating starting TOD", flush=True) + # + # # Set up binning operator for solving + # binner = ops.BinMap( + # pixel_dist="pixel_dist", + # pointing=pointing, + # noise_model=default_model.noise_model, + # ) + # + # # Set up template matrix with just an offset template. + # + # # Use 1/10 of an observation as the baseline length. Make it not evenly + # # divisible in order to test handling of the final amplitude. + # ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + # step_seconds = float(int(ob_time / 10.0)) + # tmpl = templates.Offset( + # times="times", + # noise_model=default_model.noise_model, + # step_time=step_seconds, + # ) + # + # tmatrix = ops.TemplateMatrix(templates=[tmpl]) + # + # # Map maker + # mapper = ops.MapMaker( + # det_data="signal", + # binning=binner, + # template_matrix=tmatrix, + # ) + # + # # Make the map + # mapper.apply(data) + # + # # Access the output + # # final_map = data[mapper.binning.binned] + # + # del data + # return + + # def test_compare_madam_noprior(self): + # if not ops.Madam.available: + # print("libmadam not available, skipping destriping comparison") + # return + # + # testdir = os.path.join(self.outdir, "compare_madam_noprior") + # if self.comm is None or self.comm.rank == 0: + # os.makedirs(testdir) + # + # # Create a fake satellite data set for testing + # data = create_satellite_data(self.comm, obs_time=30.0 * u.minute) + # + # # Create some sky signal timestreams. 
+ # # pointing = ops.PointingHealpix( - # nside=64, mode="IQU", nest=True, hwp_angle="hwp_angle" + # nside=16, + # nest=True, + # mode="IQU", + # hwp_angle="hwp_angle", + # create_dist="pixel_dist", # ) + # pointing.apply(data) + # + # # Create fake polarized sky pixel values locally + # create_fake_sky(data, "pixel_dist", "fake_map") # - # # Build the covariance and hits - # cov_and_hits = ops.CovarianceAndHits( - # pixel_dist="pixel_dist", pointing=pointing, noise_model="noise_model" + # # Scan map into timestreams + # scanner = ops.ScanMap( + # det_data="signal", + # pixels=pointing.pixels, + # weights=pointing.weights, + # map_key="fake_map", # ) - # cov_and_hits.apply(data) + # scanner.apply(data) + # + # # Now clear the pointing and reset things for use with the mapmaking test later + # delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + # delete_pointing.apply(data) + # pointing.create_dist = None + # + # # Create an uncorrelated noise model from focalplane detector properties + # default_model = ops.DefaultNoiseModel(noise_model="noise_model") + # default_model.apply(data) # - # # Set up binned map + # # Simulate noise and accumulate to signal + # sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") + # sim_noise.apply(data) + # + # print(data.obs[0].detdata["signal"]) + # print("Done generating starting TOD", flush=True) # + # # Set up binning operator for solving # binner = ops.BinMap( # pixel_dist="pixel_dist", - # covariance=cov_and_hits.covariance, - # det_data="noise", # pointing=pointing, - # noise_model="noise_model", + # noise_model=default_model.noise_model, # ) - # binner.apply(data) # - # # Write binned map to disk so we can load the whole thing on one process. + # # Set up template matrix with just an offset template. # - # toast_hit_path = os.path.join(self.outdir, "toast_hits.fits") - # toast_bin_path = os.path.join(self.outdir, "toast_bin.fits") - # toast_cov_path = os.path.join(self.outdir, "toast_cov.fits") - # write_healpix_fits(data[binner.binned], toast_bin_path, nest=True) - # write_healpix_fits(data[cov_and_hits.hits], toast_hit_path, nest=True) - # write_healpix_fits(data[cov_and_hits.covariance], toast_cov_path, nest=True) + # # Use 1/10 of an observation as the baseline length. Make it not evenly + # # divisible in order to test handling of the final amplitude. + # ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + # # step_seconds = float(int(ob_time / 10.0)) + # step_seconds = 5.0 + # tmpl = templates.Offset( + # times="times", + # noise_model=default_model.noise_model, + # step_time=step_seconds, + # ) + # + # tmatrix = ops.TemplateMatrix(templates=[tmpl]) + # + # # Map maker + # mapper = ops.MapMaker( + # name="toastmap", + # det_data="signal", + # binning=binner, + # template_matrix=tmatrix, + # solve_rcond_threshold=1.0e-6, + # map_rcond_threshold=1.0e-6, + # iter_max=10, + # ) + # + # # Make the map + # mapper.apply(data) + # + # print("out amplitudes = ", data["toastmap_solve_amplitudes"][tmpl.name]) + # + # # Outputs + # toast_hits = "toastmap_hits" + # toast_map = "toastmap_map" + # + # # Write map to disk so we can load the whole thing on one process. 
+ # + # toast_hit_path = os.path.join(testdir, "toast_hits.fits") + # toast_map_path = os.path.join(testdir, "toast_map.fits") + # write_healpix_fits(data[toast_map], toast_map_path, nest=True) + # write_healpix_fits(data[toast_hits], toast_hit_path, nest=True) # # # Now run Madam on the same data and compare # # sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) # # pars = {} - # pars["kfirst"] = "F" + # pars["kfirst"] = "T" # pars["iter_max"] = 10 - # pars["base_first"] = 1.0 + # pars["base_first"] = step_seconds # pars["fsample"] = sample_rate # pars["nside_map"] = pointing.nside # pars["nside_cross"] = pointing.nside # pars["nside_submap"] = min(8, pointing.nside) - # pars["write_map"] = "F" - # pars["write_binmap"] = "T" + # pars["good_baseline_fraction"] = tmpl.good_fraction + # pars["pixlim_cross"] = 1.0e-6 + # pars["pixlim_map"] = 1.0e-6 + # pars["write_map"] = "T" + # pars["write_binmap"] = "F" # pars["write_matrix"] = "F" # pars["write_wcov"] = "F" # pars["write_hits"] = "T" + # pars["write_base"] = "T" # pars["kfilter"] = "F" - # pars["path_output"] = self.outdir - # pars["info"] = 0 + # pars["path_output"] = testdir + # pars["info"] = 2 # # madam = ops.Madam( # params=pars, - # det_data="noise", + # det_data="signal", # pixels=pointing.pixels, # weights=pointing.weights, # pixels_nested=pointing.nest, @@ -195,8 +250,8 @@ def test_offset(self): # # Run Madam # madam.apply(data) # - # madam_hit_path = os.path.join(self.outdir, "madam_hmap.fits") - # madam_bin_path = os.path.join(self.outdir, "madam_bmap.fits") + # madam_hit_path = os.path.join(testdir, "madam_hmap.fits") + # madam_map_path = os.path.join(testdir, "madam_map.fits") # # if data.comm.world_rank == 0: # set_matplotlib_backend() @@ -208,43 +263,242 @@ def test_offset(self): # madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) # diff_hits = toast_hits - madam_hits # - # outfile = os.path.join(self.outdir, "madam_hits.png") + # outfile = os.path.join(testdir, "madam_hits.png") # hp.mollview(madam_hits, xsize=1600, nest=True) # plt.savefig(outfile) # plt.close() - # outfile = os.path.join(self.outdir, "toast_hits.png") + # outfile = os.path.join(testdir, "toast_hits.png") # hp.mollview(toast_hits, xsize=1600, nest=True) # plt.savefig(outfile) # plt.close() - # outfile = os.path.join(self.outdir, "diff_hits.png") + # outfile = os.path.join(testdir, "diff_hits.png") # hp.mollview(diff_hits, xsize=1600, nest=True) # plt.savefig(outfile) # plt.close() # - # # Compare binned maps + # # Compare maps # - # toast_bin = hp.read_map(toast_bin_path, field=None, nest=True) - # madam_bin = hp.read_map(madam_bin_path, field=None, nest=True) + # toast_map = hp.read_map(toast_map_path, field=None, nest=True) + # madam_map = hp.read_map(madam_map_path, field=None, nest=True) # # Set madam unhit pixels to zero # for stokes, ststr in zip(range(3), ["I", "Q", "U"]): - # mask = hp.mask_bad(madam_bin[stokes]) - # madam_bin[stokes][mask] = 0.0 - # diff_map = toast_bin[stokes] - madam_bin[stokes] + # mask = hp.mask_bad(madam_map[stokes]) + # madam_map[stokes][mask] = 0.0 + # diff_map = toast_map[stokes] - madam_map[stokes] # print("diff map {} has rms {}".format(ststr, np.std(diff_map))) - # outfile = os.path.join(self.outdir, "madam_bin_{}.png".format(ststr)) - # hp.mollview(madam_bin[stokes], xsize=1600, nest=True) + # outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) + # hp.mollview(madam_map[stokes], xsize=1600, nest=True) # plt.savefig(outfile) # plt.close() - # outfile = 
os.path.join(self.outdir, "toast_bin_{}.png".format(ststr)) - # hp.mollview(toast_bin[stokes], xsize=1600, nest=True) + # outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) + # hp.mollview(toast_map[stokes], xsize=1600, nest=True) # plt.savefig(outfile) # plt.close() - # outfile = os.path.join(self.outdir, "diff_bin_{}.png".format(ststr)) + # outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) # hp.mollview(diff_map, xsize=1600, nest=True) # plt.savefig(outfile) # plt.close() # - # nt.assert_almost_equal(toast_bin[stokes], madam_bin[stokes], decimal=6) + # # nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=6) # # del data # return + + def test_compare_madam_prior(self): + if not ops.Madam.available: + print("libmadam not available, skipping destriping comparison with prior") + return + + testdir = os.path.join(self.outdir, "compare_madam_prior") + if self.comm is None or self.comm.rank == 0: + os.makedirs(testdir) + + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm, obs_time=30.0 * u.minute) + + # Create some sky signal timestreams. + + pointing = ops.PointingHealpix( + nside=16, + nest=True, + mode="IQU", + hwp_angle="hwp_angle", + create_dist="pixel_dist", + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Now clear the pointing and reset things for use with the mapmaking test later + delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + delete_pointing.apply(data) + pointing.create_dist = None + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise and accumulate to signal + sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") + sim_noise.apply(data) + + print(data.obs[0].detdata["signal"]) + print("Done generating starting TOD", flush=True) + + # Set up binning operator for solving + binner = ops.BinMap( + pixel_dist="pixel_dist", + pointing=pointing, + noise_model=default_model.noise_model, + ) + + # Set up template matrix with just an offset template. + + # Use 1/10 of an observation as the baseline length. Make it not evenly + # divisible in order to test handling of the final amplitude. + ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + # step_seconds = float(int(ob_time / 10.0)) + step_seconds = 5.0 + tmpl = templates.Offset( + times="times", + noise_model=default_model.noise_model, + step_time=step_seconds, + use_noise_prior=True, + precond_width=1, + ) + + tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # Map maker + mapper = ops.MapMaker( + name="toastmap", + det_data="signal", + binning=binner, + template_matrix=tmatrix, + solve_rcond_threshold=1.0e-6, + map_rcond_threshold=1.0e-6, + iter_max=10, + ) + + # Make the map + mapper.apply(data) + + print("out amplitudes = ", data["toastmap_solve_amplitudes"][tmpl.name]) + + # Outputs + toast_hits = "toastmap_hits" + toast_map = "toastmap_map" + + # Write map to disk so we can load the whole thing on one process. 
+ + toast_hit_path = os.path.join(testdir, "toast_hits.fits") + toast_map_path = os.path.join(testdir, "toast_map.fits") + write_healpix_fits(data[toast_map], toast_map_path, nest=True) + write_healpix_fits(data[toast_hits], toast_hit_path, nest=True) + + # Now run Madam on the same data and compare + + sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + pars = {} + pars["kfirst"] = "T" + pars["iter_max"] = 10 + pars["base_first"] = step_seconds + pars["fsample"] = sample_rate + pars["nside_map"] = pointing.nside + pars["nside_cross"] = pointing.nside + pars["nside_submap"] = min(8, pointing.nside) + pars["good_baseline_fraction"] = tmpl.good_fraction + pars["pixlim_cross"] = 1.0e-6 + pars["pixlim_map"] = 1.0e-6 + pars["write_map"] = "T" + pars["write_binmap"] = "F" + pars["write_matrix"] = "F" + pars["write_wcov"] = "F" + pars["write_hits"] = "T" + pars["write_base"] = "T" + pars["kfilter"] = "T" + pars["precond_width_min"] = 1 + pars["precond_width_max"] = 1 + pars["use_cgprecond"] = "T" + pars["path_output"] = testdir + pars["info"] = 2 + + madam = ops.Madam( + params=pars, + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + pixels_nested=pointing.nest, + noise_model="noise_model", + ) + + # Generate persistent pointing + pointing.apply(data) + + # Run Madam + madam.apply(data) + + madam_hit_path = os.path.join(testdir, "madam_hmap.fits") + madam_map_path = os.path.join(testdir, "madam_map.fits") + + if data.comm.world_rank == 0: + set_matplotlib_backend() + import matplotlib.pyplot as plt + + # Compare hit maps + + toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) + madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) + diff_hits = toast_hits - madam_hits + + outfile = os.path.join(testdir, "madam_hits.png") + hp.mollview(madam_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "toast_hits.png") + hp.mollview(toast_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "diff_hits.png") + hp.mollview(diff_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + # Compare maps + + toast_map = hp.read_map(toast_map_path, field=None, nest=True) + madam_map = hp.read_map(madam_map_path, field=None, nest=True) + # Set madam unhit pixels to zero + for stokes, ststr in zip(range(3), ["I", "Q", "U"]): + mask = hp.mask_bad(madam_map[stokes]) + madam_map[stokes][mask] = 0.0 + diff_map = toast_map[stokes] - madam_map[stokes] + print("diff map {} has rms {}".format(ststr, np.std(diff_map))) + outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) + hp.mollview(madam_map[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) + hp.mollview(toast_map[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) + hp.mollview(diff_map, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + # nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=6) + + del data + return diff --git a/src/toast/tests/ops_mapmaker_solve.py b/src/toast/tests/ops_mapmaker_solve.py index acafff47a..dd99e20a7 100644 --- a/src/toast/tests/ops_mapmaker_solve.py +++ b/src/toast/tests/ops_mapmaker_solve.py @@ -237,18 +237,20 @@ def test_lhs(self): tmatrix.amplitudes = "amplitudes" binner.binned = "lhs_binned" + out_amps = "out_amplitudes" lhs_calc = SolverLHS( 
det_temp="signal", binning=binner, template_matrix=tmatrix, + out=out_amps, ) lhs_calc.apply(data) - print("amplitudes out = ", data["amplitudes"]) + print("amplitudes out = ", data[out_amps]) # Verify that the output amplitudes agree np.testing.assert_equal( - data["amplitudes"][tmpl.name].local, + data[out_amps][tmpl.name].local, data["amplitudes_check"][tmpl.name].local, ) From a4e0f31f92a9c1ae1dd258cfedb97adb86c595f8 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Wed, 27 Jan 2021 12:54:47 -0800 Subject: [PATCH 053/690] Mapmaker unit tests now consistent with madam for all combinations of supported preconditioner. --- src/toast/observation_data.py | 12 - src/toast/ops/copy.py | 12 - src/toast/ops/madam.py | 21 +- src/toast/ops/mapmaker.py | 2 +- src/toast/ops/mapmaker_solve.py | 49 +- src/toast/ops/mapmaker_templates.py | 38 +- src/toast/ops/noise_weight.py | 1 - src/toast/templates/offset.py | 393 ++++++++------- src/toast/templates/template.py | 4 +- src/toast/tests/ops_mapmaker.py | 746 +++++++++++++++++----------- 10 files changed, 742 insertions(+), 536 deletions(-) diff --git a/src/toast/observation_data.py b/src/toast/observation_data.py index 726619420..a4e1f2540 100644 --- a/src/toast/observation_data.py +++ b/src/toast/observation_data.py @@ -453,7 +453,6 @@ def create(self, name, sample_shape=None, dtype=np.float64, detectors=None): raise RuntimeError(msg) # Create the data object - print("DetDataMgr[{}] allocate for {}".format(name, detectors), flush=True) self._internal[name] = DetectorData(detectors, data_shape, dtype) return @@ -509,18 +508,7 @@ def ensure(self, name, sample_shape=None, dtype=np.float64, detectors=None): if d not in self._internal[name].detectors: change = True if change: - print( - "DetDataMgr[{}] change detectors to {}".format(name, detectors), - flush=True, - ) self._internal[name].change_detectors(detectors) - else: - print( - "DetDataMgr[{}] detectors {} already included".format( - name, detectors - ), - flush=True, - ) else: # Create the data object self.create( diff --git a/src/toast/ops/copy.py b/src/toast/ops/copy.py index 0432f377d..efbd106ae 100644 --- a/src/toast/ops/copy.py +++ b/src/toast/ops/copy.py @@ -169,24 +169,12 @@ def _exec(self, data, detectors=None, **kwargs): raise RuntimeError(msg) if ob.detdata[out_key].detectors != dets: # The output has a different set of detectors. Reallocate. 
- print( - "Copy: reset detdata {} for dets {}".format( - out_key, dets - ), - flush=True, - ) ob.detdata[out_key].change_detectors(dets) else: sample_shape = None shp = ob.detdata[in_key].detector_shape if len(shp) > 1: sample_shape = shp[1:] - print( - "Copy: allocate detdata {} for dets {}".format( - out_key, dets - ), - flush=True, - ) ob.detdata.create( out_key, sample_shape=sample_shape, diff --git a/src/toast/ops/madam.py b/src/toast/ops/madam.py index 688c8298a..05b1e1448 100644 --- a/src/toast/ops/madam.py +++ b/src/toast/ops/madam.py @@ -10,7 +10,7 @@ import numpy as np -from ..utils import Logger, Timer, GlobalTimers, dtype_to_aligned +from ..utils import Logger, Environment, Timer, GlobalTimers, dtype_to_aligned from ..traits import trait_docs, Int, Unicode, Bool, Dict @@ -173,6 +173,21 @@ def _check_det_out(self, proposal): ) return check + @traitlets.validate("params") + def _check_params(self, proposal): + check = proposal["value"] + if "info" not in check: + # The user did not specify the info level- set it from the toast loglevel + env = Environment.get() + level = env.log_level() + if level == "DEBUG": + check["info"] = 2 + elif level == "VERBOSE": + check["info"] = 3 + else: + check["info"] = 1 + return check + def __init__(self, **kwargs): super().__init__(**kwargs) self._cached = False @@ -790,9 +805,7 @@ def _stage_data( psdvals = [] for idet, det in enumerate(all_dets): if det not in psds: - raise RuntimeError( - "Every detector must have at least " "one PSD" - ) + raise RuntimeError("Every detector must have at least one PSD") psdlist = psds[det] npsd[idet] = len(psdlist) for psdstart, psd, detw in psdlist: diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index 1afa4bedd..1317660e3 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -458,7 +458,7 @@ def _exec(self, data, detectors=None, **kwargs): ) rhs_calc.apply(data, detectors=detectors) - print("RHS = ", data[solver_rhs], flush=True) + # print("RHS = ", data[solver_rhs], flush=True) self._log_info(comm, rank, " finished RHS calculation in", timer=timer) diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index c8df476d6..2db9f40c6 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -344,8 +344,6 @@ def _exec(self, data, detectors=None, **kwargs): if comm is not None: rank = comm.rank - print("\n\n==============================================\n\n", flush=True) - # Check that input traits are set if self.binning is None: raise RuntimeError("You must set the binning trait before calling exec()") @@ -372,6 +370,20 @@ def _exec(self, data, detectors=None, **kwargs): self._log_debug(comm, rank, "projection and binning finished in", timer=timer) + # Add noise prior + + self._log_debug(comm, rank, "begin add noise prior") + + # Zero out the amplitudes before accumulating the updated values + if self.out in data: + data[self.out].reset() + + self.template_matrix.add_prior( + data[self.template_matrix.amplitudes], data[self.out] + ) + + self._log_debug(comm, rank, "add noise prior finished in", timer=timer) + # Build a pipeline for the map scanning and template matrix application. # First create the operators that we will use. 
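With this change the solver LHS evaluates, schematically, a' = F^T N^{-1} Z F a + C_a^{-1} a, where F is the template matrix, N the noise covariance, Z the projection that subtracts the binned map, and the new second term is the template noise prior. A minimal sketch of what the prior accumulation amounts to for one detector view of an Offset template, assuming 1-D numpy amplitude arrays and a precomputed real-space inverse-covariance filter (the names here are illustrative, not the TOAST API):

import numpy as np
import scipy.signal

def accumulate_offset_prior(amps_in, amps_out, invcov_filter):
    # The offset template applies C_a^{-1} as a symmetric real-space
    # convolution with its precomputed filter (see templates/offset.py
    # later in this patch).  The prior is accumulated into the output,
    # which is why the reset() of the output amplitudes now happens
    # before this step rather than later in _exec().
    amps_out += scipy.signal.convolve(amps_in, invcov_filter, mode="same")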
@@ -401,8 +413,6 @@ def _exec(self, data, detectors=None, **kwargs): template_transpose.amplitudes = self.out template_transpose.transpose = True - # print("project input amps = ", data[self.template_matrix.amplitudes]) - # Create a pipeline that projects the binned map and applies noise # weights and templates. @@ -431,11 +441,6 @@ def _exec(self, data, detectors=None, **kwargs): ], ) - # Zero out the amplitudes before accumulating the updated values - - if self.out in data: - data[self.out].reset() - # Run the projection pipeline. proj_pipe.apply(data, detectors=detectors) @@ -587,22 +592,20 @@ def solve( residual = rhs.duplicate() residual -= lhs_out - print("RHS ", rhs) - print("LHS ", lhs_out) - print("residual", residual) + # print("RHS ", rhs) + # print("LHS ", lhs_out) + # print("residual", residual) # The preconditioned residual # s = M^-1 * r precond = rhs.duplicate() precond.reset() lhs_op.template_matrix.apply_precond(residual, precond) - print("Start precond = ", precond) # The proposal # d = s for k, v in proposal.items(): v.local[:] = precond[k].local - print("proposal", proposal) # Set LHS amplitude inputs to this proposal lhs_op.template_matrix.amplitudes = proposal_key @@ -615,7 +618,7 @@ def solve( # delta_new = delta_0 = r^T * d delta = proposal.dot(residual) - print("delta = ", delta) + # print("delta = ", delta) delta_init = delta if comm is not None: @@ -636,14 +639,14 @@ def solve( # q = A * d lhs_op.apply(data, detectors=detectors) - print("LHS output", lhs_out) + # print("LHS output", lhs_out) # alpha = delta_new / (d^T * q) - print("alpha num = ", delta) - print("alpha den = ", proposal.dot(lhs_out)) + # print("alpha num = ", delta) + # print("alpha den = ", proposal.dot(lhs_out)) alpha = delta / proposal.dot(lhs_out) - print("alpha = ", alpha) + # print("alpha = ", alpha) # Update the result # x += alpha * d @@ -663,7 +666,7 @@ def solve( # Epsilon sqsum = residual.dot(residual) - print("sqsum = ", sqsum) + # print("sqsum = ", sqsum) if comm is not None: comm.barrier() @@ -688,7 +691,7 @@ def solve( break sqsum_best = min(sqsum, sqsum_best) - print("sqsum_best = ", sqsum_best) + # print("sqsum_best = ", sqsum_best) # Check for stall / divergence if iter % 10 == 0 and iter >= n_iter_min: @@ -715,11 +718,11 @@ def solve( # beta = delta_new / delta_old beta = delta / delta_last - print("beta = ", beta) + # print("beta = ", beta) # New proposal # d = s + beta * d proposal *= beta proposal += precond - print("proposal[{}]".format(iter), proposal, flush=True) + # print("proposal[{}]".format(iter), proposal, flush=True) diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 1b1ebeebf..43345a8c2 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -107,6 +107,27 @@ def apply_precond(self, amps_in, amps_out): for tmpl in self.templates: tmpl.apply_precond(amps_in[tmpl.name], amps_out[tmpl.name]) + def add_prior(self, amps_in, amps_out): + """Apply the noise prior from all templates to the amplitudes. + + This can only be called after the operator has been used at least once so that + the templates are initialized. + + Args: + amps_in (AmplitudesMap): The input amplitudes. + amps_out (AmplitudesMap): The output amplitudes, modified in place. 
+ + Returns: + None + + """ + if not self._initialized: + raise RuntimeError( + "You must call exec() once before applying the noise prior" + ) + for tmpl in self.templates: + tmpl.add_prior(amps_in[tmpl.name], amps_out[tmpl.name]) + @function_timer def _exec(self, data, detectors=None, **kwargs): log = Logger.get() @@ -156,12 +177,6 @@ def _exec(self, data, detectors=None, **kwargs): if self.amplitudes not in data: # The output template amplitudes do not yet exist. Create these with # all zero values. - print( - "template matrix transpose create amplitudes {}".format( - self.amplitudes - ), - flush=True, - ) data[self.amplitudes] = AmplitudesMap() for tmpl in self.templates: data[self.amplitudes][tmpl.name] = tmpl.zeros() @@ -186,20 +201,9 @@ def _exec(self, data, detectors=None, **kwargs): for d in dets: ob.detdata[self.det_data][d, :] = 0 - # print("template matrix in = ", data[self.amplitudes], flush=True) for d in all_dets: for tmpl in self.templates: tmpl.add_to_signal(d, data[self.amplitudes][tmpl.name]) - # print( - # "template matrix out {} --> {}".format( - # self.det_data, self.templates[0].det_data - # ) - # ) - # print( - # "template matrix out {} = ".format(d), - # ob.detdata[self.det_data][d], - # flush=True, - # ) return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/noise_weight.py b/src/toast/ops/noise_weight.py index 050117f20..3fe090427 100644 --- a/src/toast/ops/noise_weight.py +++ b/src/toast/ops/noise_weight.py @@ -63,7 +63,6 @@ def _exec(self, data, detectors=None, **kwargs): for d in dets: # Get the detector weight from the noise model. detweight = noise.detector_weight(d) - print("NWEIGHT det {} = {}".format(d, detweight)) # Apply ob.detdata[self.det_data][d] *= detweight diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index b29ec3a66..5a5e737b7 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -4,6 +4,8 @@ from collections import OrderedDict +import traitlets + import numpy as np import scipy @@ -62,20 +64,52 @@ class Offset(Template): use_noise_prior = Bool( False, - help="Construct the offset noise covariance and use it for a noise prior and as a preconditioner", + help="Use detector PSDs to build the noise prior and preconditioner", ) precond_width = Int(20, help="Preconditioner width in terms of offsets / baselines") + @traitlets.validate("precond_width") + def _check_precond_width(self, proposal): + w = proposal["value"] + if w < 1: + raise traitlets.TraitError("Preconditioner width should be >= 1") + return w + + @traitlets.validate("good_fraction") + def _check_good_fraction(self, proposal): + f = proposal["value"] + if f < 0.0 or f > 1.0: + raise traitlets.TraitError("good_fraction should be a value from 0 to 1") + return f + def __init__(self, **kwargs): super().__init__(**kwargs) + def clear(self): + """Delete the underlying C-allocated memory.""" + if hasattr(self, "_offsetvar"): + del self._offsetvar + if hasattr(self, "_offsetvar_raw"): + self._offsetvar_raw.clear() + del self._offsetvar_raw + + def __del__(self): + self.clear() + @function_timer def _initialize(self, new_data): + # This function is called whenever a new data trait is assigned to the template. + # Clear any C-allocated buffers from previous uses. + self.clear() + # Compute the step boundaries for every observation and the number of # amplitude values on this process. Every process only stores amplitudes # for its locally assigned data. 
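As a rough sketch of that bookkeeping, assuming the convention that a final partial baseline gets its own amplitude (the helper name and the ceiling-division convention are illustrative assumptions, not the TOAST API):

def n_view_amplitudes(view_samples, step_length):
    # One amplitude per full baseline of step_length samples, plus one
    # for any partial baseline at the end of the view.  The loops below
    # handle the shorter final baseline explicitly.
    return (view_samples + step_length - 1) // step_length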
+ if self.use_noise_prior and self.noise_model is None: + raise RuntimeError("cannot use noise prior without specifying noise_model") + # Use this as an "Ordered Set". We want the unique detectors on this process, # but sorted in order of occurrence. all_dets = OrderedDict() @@ -123,7 +157,7 @@ def _initialize(self, new_data): # Determine the binning for the noise prior if self.use_noise_prior: obstime = ob.shared[self.times][-1] - ob.shared[self.times][0] - tbase = step_length + tbase = self.step_time fbase = 1.0 / tbase powmin = np.floor(np.log10(1 / obstime)) - 1 powmax = min(np.ceil(np.log10(1 / tbase)) + 2, self._obs_rate[iob]) @@ -168,11 +202,13 @@ def _initialize(self, new_data): # Boolean flags self._amp_flags = np.zeros(self._n_local, dtype=np.bool) - # For the sigmasq values (offset / baseline variance), we have one per - # amplitude, which can approach the size of the time ordered data. Store these - # in C-allocated memory as 32bit float. - self._sigmasq_raw = AlignedF32.zeros(self._n_local) - self._sigmasq = self._sigmasq_raw.array() + # Here we track the variance of the offsets based on the detector noise weights + # and the number of unflagged / good samples per offset. Since we have one + # value per amplitude, this can approach the size of the time ordered data. + # Store these in C-allocated memory as 32bit float. These are used when + # building both the diagonal and banded preconditioners. + self._offsetvar_raw = AlignedF32.zeros(self._n_local) + self._offsetvar = self._offsetvar_raw.array() offset = 0 for det in self._all_dets: @@ -180,7 +216,7 @@ def _initialize(self, new_data): if det not in ob.local_detectors: continue - # Noise weight + # "Noise weight" (time-domain inverse variance) detnoise = 1.0 if self.noise_model is not None: detnoise = ob[self.noise_model].detector_weight(det) @@ -199,14 +235,16 @@ def _initialize(self, new_data): view_samples = vw.stop - vw.start n_amp_view = self._obs_views[iob][ivw] - # Move this loop to compiled code if it is slow + # Move this loop to compiled code if it is slow... + # Note: we are building the offset amplitude *variance*, which is + # why the "noise weight" (inverse variance) is in the denominator. if self.flags is None: voff = 0 for amp in range(n_amp_view): amplen = step_length if amp == n_amp_view - 1: amplen = view_samples - voff - self._sigmasq[offset + amp] = 1.0 / (detnoise * amplen) + self._offsetvar[offset + amp] = 1.0 / (detnoise * amplen) voff += step_length else: flags = views.detdata[self.flags][ivw] @@ -220,18 +258,22 @@ def _initialize(self, new_data): ) if (n_good / amplen) > self.good_fraction: # Keep this - self._sigmasq[offset + amp] = 1.0 / (detnoise * n_good) + self._offsetvar[offset + amp] = 1.0 / ( + detnoise * n_good + ) else: # Flag it - self._sigmasq[offset + amp] = 0.0 + self._offsetvar[offset + amp] = 0.0 self._amp_flags[offset + amp] = True voff += step_length offset += n_amp_view # Compute the amplitude noise filter and preconditioner for each detector - # and each view. - - # print("Offset sigmasq = ", self._sigmasq, flush=True) + # and each view. The "noise filter" is the real-space inverse amplitude + # covariance, which is constructed from the Fourier domain amplitude PSD. + # + # The preconditioner is either a diagonal one using the amplitude variance, + # or is a banded one using the amplitude covariance plus the diagonal term. 
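Schematically, writing sigma_a^2 for the per-amplitude variance stored in _offsetvar: the diagonal option applies M^{-1} = diag(sigma_a^2) directly, while the banded option builds M = diag(1 / sigma_a^2) + C_a^{-1} (band-limited) and solves M x = b; a worked banded example follows the Cholesky note further below. The diagonal application is just an elementwise product (illustrative sketch, not the TOAST API):

import numpy as np

def apply_diagonal_precond(offset_var, amps_in):
    # offset_var holds 1 / (detector weight * n_good) per amplitude, so
    # multiplying by it applies the inverse of the diagonal of the
    # template block of the design matrix.
    return np.asarray(offset_var) * np.asarray(amps_in)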
self._filters = dict() self._precond = dict() @@ -245,117 +287,144 @@ def _initialize(self, new_data): if iob not in self._filters: self._filters[iob] = dict() self._precond[iob] = dict() - if self.noise_model is not None: - # We have noise information. Get the PSD describing noise - # correlations between offset amplitudes for this observation. - offset_psd = self._get_offset_psd( - ob[self.noise_model], self._freq[iob], self.step_time, det - ) - # Log version of offset PSD for interpolation - logfreq = np.log(self._freq[iob]) - logpsd = np.log(offset_psd) - logfilter = np.log(1 / offset_psd) - - # Helper functions - def _interpolate(x, psd): - result = np.zeros(x.size) - good = np.abs(x) > 1e-10 - logx = np.log(np.abs(x[good])) - logresult = np.interp(logx, logfreq, psd) - result[good] = np.exp(logresult) - return result - - def _truncate(noisefilter, lim=1e-4): - icenter = noisefilter.size // 2 - ind = ( - np.abs(noisefilter[:icenter]) - > np.abs(noisefilter[0]) * lim - ) - icut = np.argwhere(ind)[-1][0] - if icut % 2 == 0: - icut += 1 - noisefilter = np.roll(noisefilter, icenter) - noisefilter = noisefilter[ - icenter - icut : icenter + icut + 1 - ] - return noisefilter - - # Compute the list of filters and preconditioners (one per view) - # For this detector. - self._filters[iob][det] = list() - self._precond[iob][det] = list() - - # Loop over views - views = ob.view[self.view] - for ivw, vw in enumerate(views): - view_samples = None - if vw.start is None: - # This is a view of the whole obs - view_samples = ob.n_local_samples - else: - view_samples = vw.stop - vw.start - n_amp_view = self._obs_views[iob][ivw] - sigmasq_slice = self._sigmasq[offset : offset + n_amp_view] + offset_psd = self._get_offset_psd( + ob[self.noise_model], self._freq[iob], self.step_time, det + ) + + # "Noise weight" (time-domain inverse variance) + detnoise = ob[self.noise_model].detector_weight(det) + + # Log version of offset PSD and its inverse for interpolation + logfreq = np.log(self._freq[iob]) + logpsd = np.log(offset_psd) + logfilter = np.log(1.0 / offset_psd) + + # Compute the list of filters and preconditioners (one per view) + # For this detector. - # nstep = offset_slice.stop - offset_slice.start + self._filters[iob][det] = list() + self._precond[iob][det] = list() - filterlen = n_amp_view * 2 + 1 - filterfreq = np.fft.rfftfreq(filterlen, self.step_time) - noisefilter = _truncate( - np.fft.irfft(_interpolate(filterfreq, logfilter)) + # Loop over views + views = ob.view[self.view] + for ivw, vw in enumerate(views): + view_samples = None + if vw.start is None: + # This is a view of the whole obs + view_samples = ob.n_local_samples + else: + view_samples = vw.stop - vw.start + n_amp_view = self._obs_views[iob][ivw] + offsetvar_slice = self._offsetvar[offset : offset + n_amp_view] + + filterlen = 2 + while filterlen < 2 * n_amp_view: + filterlen *= 2 + filterfreq = np.fft.rfftfreq(filterlen, self.step_time) + + # Recall that the "noise filter" is the inverse amplitude + # covariance, which is why we are using 1/PSD. Also note that + # the truncate function shifts the filter to be symmetric about + # the center, which is needed for use with scipy.signal.convolve + # If we move this application back to compiled FFT based + # methods, we should instead keep this filter in the fourier + # domain. 
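Condensed into one self-contained function, the construction described above (log-log interpolation of 1/PSD onto the FFT grid, then an inverse transform) looks roughly like the sketch below; it mirrors the _interpolate_psd helper defined later in this diff, with illustrative argument names:

import numpy as np

def real_space_inverse_covariance(freq_table, psd_table, n_amp, step_time):
    # Pad the filter length to a power of two at least twice the number
    # of amplitudes, as in the code that follows.
    filterlen = 2
    while filterlen < 2 * n_amp:
        filterlen *= 2
    freq = np.fft.rfftfreq(filterlen, step_time)
    # Interpolate log(1 / PSD) in log-frequency; the zero-frequency bin
    # is excluded by the guard and stays at zero.
    logfreq = np.log(freq_table)
    logfilter = np.log(1.0 / psd_table)
    result = np.zeros(freq.size)
    good = np.abs(freq) > 1e-10
    result[good] = np.exp(np.interp(np.log(freq[good]), logfreq, logfilter))
    # The symmetric truncation (_truncate below) is then applied to this
    # real-space filter before use with scipy.signal.convolve.
    return np.fft.irfft(result)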
+ + noisefilter = self._truncate( + np.fft.irfft( + self._interpolate_psd(filterfreq, logfreq, logfilter) ) - self._filters[iob][det].append(noisefilter) - - # Build the band-diagonal preconditioner - lower = None - if self.precond_width <= 1: - # Compute C_a prior - preconditioner = _truncate( - np.fft.irfft(_interpolate(filterfreq, logpsd)) - ) - else: - # Compute Cholesky decomposition prior - wband = min(self.precond_width, noisefilter.size // 2) - precond_width = max( - wband, min(self.precond_width, n_amp_view) - ) - icenter = noisefilter.size // 2 - preconditioner = np.zeros( - [precond_width, n_amp_view], dtype=np.float64 - ) - preconditioner[0] = sigmasq_slice - preconditioner[:wband, :] += np.repeat( - noisefilter[icenter : icenter + wband, np.newaxis], - n_amp_view, - 1, - ) - lower = True - scipy.linalg.cholesky_banded( - preconditioner, - overwrite_ab=True, - lower=lower, - check_finite=True, + ) + + self._filters[iob][det].append(noisefilter) + + # Build the preconditioner + lower = None + preconditioner = None + + if self.precond_width == 1: + # We are using a Toeplitz preconditioner. The first row + # of the matrix is the inverse FFT of the offset PSD, + # with an added zero-lag component from the detector + # weight. NOTE: the truncate function shifts the real + # space filter to the center of the vector. + preconditioner = self._truncate( + np.fft.irfft( + self._interpolate_psd(filterfreq, logfreq, logpsd) ) - self._precond[iob][det].append((preconditioner, lower)) - offset += n_amp_view + ) + icenter = preconditioner.size // 2 + preconditioner[icenter] += 1.0 / detnoise + else: + # We are using a banded matrix for the preconditioner. + # This contains a Toeplitz component from the inverse + # offset variance in the LHS, and another diagonal term + # from the individual offset variance. + # + # NOTE: Instead of directly solving x = M^{-1} b, we do + # not invert "M" and solve M x = b using the Cholesky + # decomposition of M (*not* M^{-1}). 
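A tiny self-contained illustration of that pattern with scipy's banded routines (toy numbers, not values from the code):

import numpy as np
import scipy.linalg

# Symmetric positive definite band matrix M with bandwidth 2, stored in
# lower-diagonal ordered form: row 0 is the main diagonal, row 1 the
# first sub-diagonal (trailing entry unused).
band = np.array([
    [4.0, 4.0, 4.0, 4.0],
    [1.0, 1.0, 1.0, 0.0],
])
factor = scipy.linalg.cholesky_banded(band, lower=True)
rhs = np.ones(4)
x = scipy.linalg.cho_solve_banded((factor, True), rhs)
# x solves M x = b; M^{-1} is never formed, matching the note above.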
+ icenter = noisefilter.size // 2 + wband = min(self.precond_width, icenter) + precond_width = max( + wband, min(self.precond_width, n_amp_view) + ) + preconditioner = np.zeros( + [precond_width, n_amp_view], dtype=np.float64 + ) + preconditioner[0, :] = 1.0 / offsetvar_slice + preconditioner[:wband, :] += np.repeat( + noisefilter[icenter : icenter + wband, np.newaxis], + n_amp_view, + 1, + ) + lower = True + preconditioner = scipy.linalg.cholesky_banded( + preconditioner, + overwrite_ab=True, + lower=lower, + check_finite=True, + ) + self._precond[iob][det].append((preconditioner, lower)) + offset += n_amp_view return def __del__(self): - if hasattr(self, "_sigmasq"): - del self._sigmasq - if hasattr(self, "_sigmasq_raw"): - self._sigmasq_raw.clear() - del self._sigmasq_raw - - @staticmethod - def _get_offset_psd(noise, freq, step_time, det): + if hasattr(self, "_offsetvar"): + del self._offsetvar + if hasattr(self, "_offsetvar_raw"): + self._offsetvar_raw.clear() + del self._offsetvar_raw + + # Helper functions for noise / preconditioner calculations + + def _interpolate_psd(self, x, lfreq, lpsd): + result = np.zeros(x.size) + good = np.abs(x) > 1e-10 + logx = np.log(np.abs(x[good])) + logresult = np.interp(logx, lfreq, lpsd) + result[good] = np.exp(logresult) + return result + + def _truncate(self, noisefilter, lim=1e-4): + icenter = noisefilter.size // 2 + ind = np.abs(noisefilter[:icenter]) > np.abs(noisefilter[0]) * lim + icut = np.argwhere(ind)[-1][0] + if icut % 2 == 0: + icut += 1 + noisefilter = np.roll(noisefilter, icenter) + noisefilter = noisefilter[icenter - icut : icenter + icut + 1] + return noisefilter + + def _get_offset_psd(self, noise, freq, step_time, det): """Compute the PSD of the baseline offsets.""" psdfreq = noise.freq(det) psd = noise.psd(det) rate = noise.rate(det) + # Remove the white noise component from the PSD - psd = psd.copy() * np.sqrt(rate) + psd = psd.copy() psd -= np.amin(psd[psdfreq > 1.0]) psd[psd < 1e-30] = 1e-30 @@ -365,14 +434,6 @@ def _get_offset_psd(noise, freq, step_time, det): logfreq = np.log(psdfreq) logpsd = np.log(psd) - def interpolate_psd(x): - result = np.zeros(x.size) - good = np.abs(x) > 1e-10 - logx = np.log(np.abs(x[good])) - logresult = np.interp(logx, logfreq, logpsd) - result[good] = np.exp(logresult) - return result - def g(x): bad = np.abs(x) < 1e-10 good = np.logical_not(bad) @@ -383,10 +444,18 @@ def g(x): tbase = step_time fbase = 1.0 / tbase - offset_psd = interpolate_psd(freq) * g(freq * tbase) + + offset_psd = np.zeros_like(freq) + offset_psd[:] += self._interpolate_psd(freq, logfreq, logpsd) * g(freq * tbase) + for m in range(1, 2): - offset_psd += interpolate_psd(freq + m * fbase) * g(freq * tbase + m) - offset_psd += interpolate_psd(freq - m * fbase) * g(freq * tbase - m) + offset_psd[:] += self._interpolate_psd( + freq + m * fbase, logfreq, logpsd + ) * g(freq * tbase + m) + offset_psd[:] += self._interpolate_psd( + freq - m * fbase, logfreq, logpsd + ) * g(freq * tbase - m) + offset_psd *= fbase return offset_psd @@ -409,34 +478,14 @@ def _add_to_signal(self, detector, amplitudes): continue # The step length for this observation step_length = self._step_length(self.step_time, self._obs_rate[iob]) - # print( - # "Offset input det {}, ob {} = ".format(detector, iob), - # ob.view[self.view].detdata[self.det_data], - # flush=True, - # ) for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] - # print( - # "Offset input det {}, ob {}, view {} = ".format(detector, iob, 
ivw), - # vw[detector], - # flush=True, - # ) - # print( - # "Offset input amplitude range = {} - {}".format( - # offset, offset + n_amp_view - 1 - # ) - # ) template_offset_add_to_signal( step_length, amplitudes.local[offset : offset + n_amp_view], vw[detector], ) offset += n_amp_view - # print( - # "Offset output det {}, ob {} = ".format(detector, iob), - # ob.detdata[self.det_data], - # flush=True, - # ) @function_timer def _project_signal(self, detector, amplitudes): @@ -453,36 +502,23 @@ def _project_signal(self, detector, amplitudes): vw[detector], amplitudes.local[offset : offset + n_amp_view], ) - # amplitudes.local[offset : offset + n_amp_view] *= self._sigmasq[ - # offset : offset + n_amp_view - # ] - # print( - # "offset project det {}, amps = ".format(detector), - # flush=True, - # ) - # for i in range(n_amp_view): - # offset = self._det_start[detector] - # print( - # "base {} = {} sumsq = {}".format( - # i, amplitudes.local[offset + i], self._sigmasq[offset + i] - # ) - # ) offset += n_amp_view @function_timer def _add_prior(self, amplitudes_in, amplitudes_out): - if self.noise_model is None: - # No noise model is specified, so no prior is used. + if not self.use_noise_prior: + # Not using the noise prior term return for det in self._all_dets: - offset = self._det_start[detector] + offset = self._det_start[det] for iob, ob in enumerate(self.data.obs): if det not in ob.local_detectors: continue for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): n_amp_view = self._obs_views[iob][ivw] - amps_in = amplitudes_in[offset : offset + n_amp_view] - amps_out = amplitudes_out[offset : offset + n_amp_view] + amp_slice = slice(offset, offset + n_amp_view, 1) + amps_in = amplitudes_in.local[amp_slice] + amps_out = amplitudes_out.local[amp_slice] amps_out[:] += scipy.signal.convolve( amps_in, self._filters[iob][det][ivw], mode="same" ) @@ -491,7 +527,8 @@ def _add_prior(self, amplitudes_in, amplitudes_out): @function_timer def _apply_precond(self, amplitudes_in, amplitudes_out): if self.use_noise_prior: - # C_a preconditioner + # Our design matrix includes a term with the inverse offset covariance. + # This means that our preconditioner should include this term as well. for det in self._all_dets: offset = self._det_start[det] for iob, ob in enumerate(self.data.obs): @@ -513,14 +550,16 @@ def _apply_precond(self, amplitudes_in, amplitudes_out): amps_in = amplitudes_in.local[amp_slice] amps_out = None if self.precond_width <= 1: - # Use C_a prior + # We are using a Toeplitz preconditioner. # scipy.signal.convolve will use either `convolve` or # `fftconvolve` depending on the size of the inputs amps_out = scipy.signal.convolve( - amps_in, self._precond[iob][det][ivw], mode="same" + amps_in, self._precond[iob][det][ivw][0], mode="same" ) else: - # Use pre-computed Cholesky decomposition + # Use pre-computed Cholesky decomposition. Note that this + # is the decomposition of the actual preconditioner (not + # its inverse), since we are solving Mx=b. 
amps_out = scipy.linalg.cho_solve_banded( self._precond[iob][det][ivw], amps_in, @@ -529,22 +568,8 @@ def _apply_precond(self, amplitudes_in, amplitudes_out): ) amplitudes_out.local[amp_slice] = amps_out else: - # Diagonal preconditioner - # print("diagonal prec = ") - # ndetbase = amplitudes_in.n_local // 2 - # print( - # "ndetbase = {}, len(sigmasq) = {}".format(ndetbase, len(self._sigmasq)) - # ) - # for det in self._all_dets: - # offset = self._det_start[det] - # for i in range(ndetbase): - # print( - # "det {} base {} = {:0.6e}".format( - # det, i, self._sigmasq[offset + i] - # ), - # flush=True, - # ) - # print(self._sigmasq) + # Since we do not have a noise filter term in our LHS, our diagonal + # preconditioner is just the application of offset variance. amplitudes_out.local[:] = amplitudes_in.local - amplitudes_out.local *= self._sigmasq + amplitudes_out.local *= self._offsetvar return diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index dd95da455..ff281db07 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -191,7 +191,9 @@ def _apply_precond(self, amplitudes_in, amplitudes_out): def apply_precond(self, amplitudes_in, amplitudes_out): """Apply the template preconditioner. - This performs: + Formally, the preconditioner "M" is an approximation to the "design matrix" + (the "A" matrix in "Ax = b"). This function applies the inverse preconditioner + to the template amplitudes: .. math:: a' += M^{-1} \\cdot a diff --git a/src/toast/tests/ops_mapmaker.py b/src/toast/tests/ops_mapmaker.py index 8a414c64e..9370d6acf 100644 --- a/src/toast/tests/ops_mapmaker.py +++ b/src/toast/tests/ops_mapmaker.py @@ -34,282 +34,85 @@ def setUp(self): self.outdir = create_outdir(self.comm, fixture_name) np.random.seed(123456) - # def test_offset(self): - # # Create a fake satellite data set for testing - # data = create_satellite_data(self.comm, obs_time=20.0 * u.minute) - # - # # Create some sky signal timestreams. - # - # pointing = ops.PointingHealpix( - # nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" - # ) - # pointing.apply(data) - # - # # Create fake polarized sky pixel values locally - # create_fake_sky(data, "pixel_dist", "fake_map") - # - # # Scan map into timestreams - # scanner = ops.ScanMap( - # det_data="signal", - # pixels=pointing.pixels, - # weights=pointing.weights, - # map_key="fake_map", - # ) - # scanner.apply(data) - # - # # Now clear the pointing and reset things for use with the mapmaking test later - # delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) - # delete_pointing.apply(data) - # pointing.create_dist = None - # - # # Create an uncorrelated noise model from focalplane detector properties - # default_model = ops.DefaultNoiseModel(noise_model="noise_model") - # default_model.apply(data) - # - # # Simulate noise and accumulate to signal - # sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") - # sim_noise.apply(data) - # - # print(data.obs[0].detdata["signal"]) - # print("Done generating starting TOD", flush=True) - # - # # Set up binning operator for solving - # binner = ops.BinMap( - # pixel_dist="pixel_dist", - # pointing=pointing, - # noise_model=default_model.noise_model, - # ) - # - # # Set up template matrix with just an offset template. - # - # # Use 1/10 of an observation as the baseline length. Make it not evenly - # # divisible in order to test handling of the final amplitude. 
- # ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] - # step_seconds = float(int(ob_time / 10.0)) - # tmpl = templates.Offset( - # times="times", - # noise_model=default_model.noise_model, - # step_time=step_seconds, - # ) - # - # tmatrix = ops.TemplateMatrix(templates=[tmpl]) - # - # # Map maker - # mapper = ops.MapMaker( - # det_data="signal", - # binning=binner, - # template_matrix=tmatrix, - # ) - # - # # Make the map - # mapper.apply(data) - # - # # Access the output - # # final_map = data[mapper.binning.binned] - # - # del data - # return - - # def test_compare_madam_noprior(self): - # if not ops.Madam.available: - # print("libmadam not available, skipping destriping comparison") - # return - # - # testdir = os.path.join(self.outdir, "compare_madam_noprior") - # if self.comm is None or self.comm.rank == 0: - # os.makedirs(testdir) - # - # # Create a fake satellite data set for testing - # data = create_satellite_data(self.comm, obs_time=30.0 * u.minute) - # - # # Create some sky signal timestreams. - # - # pointing = ops.PointingHealpix( - # nside=16, - # nest=True, - # mode="IQU", - # hwp_angle="hwp_angle", - # create_dist="pixel_dist", - # ) - # pointing.apply(data) - # - # # Create fake polarized sky pixel values locally - # create_fake_sky(data, "pixel_dist", "fake_map") - # - # # Scan map into timestreams - # scanner = ops.ScanMap( - # det_data="signal", - # pixels=pointing.pixels, - # weights=pointing.weights, - # map_key="fake_map", - # ) - # scanner.apply(data) - # - # # Now clear the pointing and reset things for use with the mapmaking test later - # delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) - # delete_pointing.apply(data) - # pointing.create_dist = None - # - # # Create an uncorrelated noise model from focalplane detector properties - # default_model = ops.DefaultNoiseModel(noise_model="noise_model") - # default_model.apply(data) - # - # # Simulate noise and accumulate to signal - # sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") - # sim_noise.apply(data) - # - # print(data.obs[0].detdata["signal"]) - # print("Done generating starting TOD", flush=True) - # - # # Set up binning operator for solving - # binner = ops.BinMap( - # pixel_dist="pixel_dist", - # pointing=pointing, - # noise_model=default_model.noise_model, - # ) - # - # # Set up template matrix with just an offset template. - # - # # Use 1/10 of an observation as the baseline length. Make it not evenly - # # divisible in order to test handling of the final amplitude. - # ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] - # # step_seconds = float(int(ob_time / 10.0)) - # step_seconds = 5.0 - # tmpl = templates.Offset( - # times="times", - # noise_model=default_model.noise_model, - # step_time=step_seconds, - # ) - # - # tmatrix = ops.TemplateMatrix(templates=[tmpl]) - # - # # Map maker - # mapper = ops.MapMaker( - # name="toastmap", - # det_data="signal", - # binning=binner, - # template_matrix=tmatrix, - # solve_rcond_threshold=1.0e-6, - # map_rcond_threshold=1.0e-6, - # iter_max=10, - # ) - # - # # Make the map - # mapper.apply(data) - # - # print("out amplitudes = ", data["toastmap_solve_amplitudes"][tmpl.name]) - # - # # Outputs - # toast_hits = "toastmap_hits" - # toast_map = "toastmap_map" - # - # # Write map to disk so we can load the whole thing on one process. 
- # - # toast_hit_path = os.path.join(testdir, "toast_hits.fits") - # toast_map_path = os.path.join(testdir, "toast_map.fits") - # write_healpix_fits(data[toast_map], toast_map_path, nest=True) - # write_healpix_fits(data[toast_hits], toast_hit_path, nest=True) - # - # # Now run Madam on the same data and compare - # - # sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) - # - # pars = {} - # pars["kfirst"] = "T" - # pars["iter_max"] = 10 - # pars["base_first"] = step_seconds - # pars["fsample"] = sample_rate - # pars["nside_map"] = pointing.nside - # pars["nside_cross"] = pointing.nside - # pars["nside_submap"] = min(8, pointing.nside) - # pars["good_baseline_fraction"] = tmpl.good_fraction - # pars["pixlim_cross"] = 1.0e-6 - # pars["pixlim_map"] = 1.0e-6 - # pars["write_map"] = "T" - # pars["write_binmap"] = "F" - # pars["write_matrix"] = "F" - # pars["write_wcov"] = "F" - # pars["write_hits"] = "T" - # pars["write_base"] = "T" - # pars["kfilter"] = "F" - # pars["path_output"] = testdir - # pars["info"] = 2 - # - # madam = ops.Madam( - # params=pars, - # det_data="signal", - # pixels=pointing.pixels, - # weights=pointing.weights, - # pixels_nested=pointing.nest, - # noise_model="noise_model", - # ) - # - # # Generate persistent pointing - # pointing.apply(data) - # - # # Run Madam - # madam.apply(data) - # - # madam_hit_path = os.path.join(testdir, "madam_hmap.fits") - # madam_map_path = os.path.join(testdir, "madam_map.fits") - # - # if data.comm.world_rank == 0: - # set_matplotlib_backend() - # import matplotlib.pyplot as plt - # - # # Compare hit maps - # - # toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) - # madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) - # diff_hits = toast_hits - madam_hits - # - # outfile = os.path.join(testdir, "madam_hits.png") - # hp.mollview(madam_hits, xsize=1600, nest=True) - # plt.savefig(outfile) - # plt.close() - # outfile = os.path.join(testdir, "toast_hits.png") - # hp.mollview(toast_hits, xsize=1600, nest=True) - # plt.savefig(outfile) - # plt.close() - # outfile = os.path.join(testdir, "diff_hits.png") - # hp.mollview(diff_hits, xsize=1600, nest=True) - # plt.savefig(outfile) - # plt.close() - # - # # Compare maps - # - # toast_map = hp.read_map(toast_map_path, field=None, nest=True) - # madam_map = hp.read_map(madam_map_path, field=None, nest=True) - # # Set madam unhit pixels to zero - # for stokes, ststr in zip(range(3), ["I", "Q", "U"]): - # mask = hp.mask_bad(madam_map[stokes]) - # madam_map[stokes][mask] = 0.0 - # diff_map = toast_map[stokes] - madam_map[stokes] - # print("diff map {} has rms {}".format(ststr, np.std(diff_map))) - # outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) - # hp.mollview(madam_map[stokes], xsize=1600, nest=True) - # plt.savefig(outfile) - # plt.close() - # outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) - # hp.mollview(toast_map[stokes], xsize=1600, nest=True) - # plt.savefig(outfile) - # plt.close() - # outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) - # hp.mollview(diff_map, xsize=1600, nest=True) - # plt.savefig(outfile) - # plt.close() - # - # # nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=6) - # - # del data - # return - - def test_compare_madam_prior(self): + def test_offset(self): + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm, obs_time=20.0 * u.minute) + + # Create some sky signal timestreams. 
+ + pointing = ops.PointingHealpix( + nside=64, mode="IQU", hwp_angle="hwp_angle", create_dist="pixel_dist" + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Now clear the pointing and reset things for use with the mapmaking test later + delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + delete_pointing.apply(data) + pointing.create_dist = None + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise and accumulate to signal + sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") + sim_noise.apply(data) + + # Set up binning operator for solving + binner = ops.BinMap( + pixel_dist="pixel_dist", + pointing=pointing, + noise_model=default_model.noise_model, + ) + + # Set up template matrix with just an offset template. + + # Use 1/10 of an observation as the baseline length. Make it not evenly + # divisible in order to test handling of the final amplitude. + ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + step_seconds = float(int(ob_time / 10.0)) + tmpl = templates.Offset( + times="times", + noise_model=default_model.noise_model, + step_time=step_seconds, + ) + + tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # Map maker + mapper = ops.MapMaker( + det_data="signal", + binning=binner, + template_matrix=tmatrix, + ) + + # Make the map + mapper.apply(data) + + # Access the output + # final_map = data[mapper.binning.binned] + + del data + return + + def test_compare_madam_noprior(self): if not ops.Madam.available: - print("libmadam not available, skipping destriping comparison with prior") + print("libmadam not available, skipping destriping comparison") return - testdir = os.path.join(self.outdir, "compare_madam_prior") + testdir = os.path.join(self.outdir, "compare_madam_noprior") if self.comm is None or self.comm.rank == 0: os.makedirs(testdir) @@ -352,8 +155,193 @@ def test_compare_madam_prior(self): sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") sim_noise.apply(data) - print(data.obs[0].detdata["signal"]) - print("Done generating starting TOD", flush=True) + # Set up binning operator for solving + binner = ops.BinMap( + pixel_dist="pixel_dist", + pointing=pointing, + noise_model=default_model.noise_model, + ) + + # Set up template matrix with just an offset template. + + # Use 1/10 of an observation as the baseline length. Make it not evenly + # divisible in order to test handling of the final amplitude. 
+ ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + # step_seconds = float(int(ob_time / 10.0)) + step_seconds = 5.0 + tmpl = templates.Offset( + times="times", + noise_model=default_model.noise_model, + step_time=step_seconds, + ) + + tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # Map maker + mapper = ops.MapMaker( + name="toastmap", + det_data="signal", + binning=binner, + template_matrix=tmatrix, + solve_rcond_threshold=1.0e-6, + map_rcond_threshold=1.0e-6, + iter_max=10, + ) + + # Make the map + mapper.apply(data) + + # Outputs + toast_hits = "toastmap_hits" + toast_map = "toastmap_map" + + # Write map to disk so we can load the whole thing on one process. + + toast_hit_path = os.path.join(testdir, "toast_hits.fits") + toast_map_path = os.path.join(testdir, "toast_map.fits") + write_healpix_fits(data[toast_map], toast_map_path, nest=True) + write_healpix_fits(data[toast_hits], toast_hit_path, nest=True) + + # Now run Madam on the same data and compare + + sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + pars = {} + pars["kfirst"] = "T" + pars["iter_max"] = 10 + pars["base_first"] = step_seconds + pars["fsample"] = sample_rate + pars["nside_map"] = pointing.nside + pars["nside_cross"] = pointing.nside + pars["nside_submap"] = min(8, pointing.nside) + pars["good_baseline_fraction"] = tmpl.good_fraction + pars["pixlim_cross"] = 1.0e-6 + pars["pixlim_map"] = 1.0e-6 + pars["write_map"] = "T" + pars["write_binmap"] = "F" + pars["write_matrix"] = "F" + pars["write_wcov"] = "F" + pars["write_hits"] = "T" + pars["write_base"] = "T" + pars["kfilter"] = "F" + pars["path_output"] = testdir + + madam = ops.Madam( + params=pars, + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + pixels_nested=pointing.nest, + noise_model="noise_model", + ) + + # Generate persistent pointing + pointing.apply(data) + + # Run Madam + madam.apply(data) + + madam_hit_path = os.path.join(testdir, "madam_hmap.fits") + madam_map_path = os.path.join(testdir, "madam_map.fits") + + if data.comm.world_rank == 0: + set_matplotlib_backend() + import matplotlib.pyplot as plt + + # Compare hit maps + + toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) + madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) + diff_hits = toast_hits - madam_hits + + outfile = os.path.join(testdir, "madam_hits.png") + hp.mollview(madam_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "toast_hits.png") + hp.mollview(toast_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "diff_hits.png") + hp.mollview(diff_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + # Compare maps + + toast_map = hp.read_map(toast_map_path, field=None, nest=True) + madam_map = hp.read_map(madam_map_path, field=None, nest=True) + # Set madam unhit pixels to zero + for stokes, ststr in zip(range(3), ["I", "Q", "U"]): + mask = hp.mask_bad(madam_map[stokes]) + madam_map[stokes][mask] = 0.0 + diff_map = toast_map[stokes] - madam_map[stokes] + print("diff map {} has rms {}".format(ststr, np.std(diff_map))) + outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) + hp.mollview(madam_map[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) + hp.mollview(toast_map[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = 
os.path.join(testdir, "diff_map_{}.png".format(ststr)) + hp.mollview(diff_map, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=4) + + del data + return + + def test_compare_madam_diagpre(self): + if not ops.Madam.available: + print("libmadam not available, skipping comparison with noise prior") + return + + testdir = os.path.join(self.outdir, "compare_madam_diagpre") + if self.comm is None or self.comm.rank == 0: + os.makedirs(testdir) + + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm, obs_time=30.0 * u.minute) + + # Create some sky signal timestreams. + + pointing = ops.PointingHealpix( + nside=16, + nest=True, + mode="IQU", + hwp_angle="hwp_angle", + create_dist="pixel_dist", + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Now clear the pointing and reset things for use with the mapmaking test later + delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + delete_pointing.apply(data) + pointing.create_dist = None + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise and accumulate to signal + sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") + sim_noise.apply(data) # Set up binning operator for solving binner = ops.BinMap( @@ -387,14 +375,12 @@ def test_compare_madam_prior(self): template_matrix=tmatrix, solve_rcond_threshold=1.0e-6, map_rcond_threshold=1.0e-6, - iter_max=10, + iter_max=50, ) # Make the map mapper.apply(data) - print("out amplitudes = ", data["toastmap_solve_amplitudes"][tmpl.name]) - # Outputs toast_hits = "toastmap_hits" toast_map = "toastmap_map" @@ -412,7 +398,8 @@ def test_compare_madam_prior(self): pars = {} pars["kfirst"] = "T" - pars["iter_max"] = 10 + pars["basis_order"] = 0 + pars["iter_max"] = 50 pars["base_first"] = step_seconds pars["fsample"] = sample_rate pars["nside_map"] = pointing.nside @@ -430,9 +417,206 @@ def test_compare_madam_prior(self): pars["kfilter"] = "T" pars["precond_width_min"] = 1 pars["precond_width_max"] = 1 + pars["use_cgprecond"] = "F" + pars["use_fprecond"] = "T" + pars["path_output"] = testdir + + madam = ops.Madam( + params=pars, + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + pixels_nested=pointing.nest, + noise_model="noise_model", + ) + + # Generate persistent pointing + pointing.apply(data) + + # Run Madam + madam.apply(data) + + madam_hit_path = os.path.join(testdir, "madam_hmap.fits") + madam_map_path = os.path.join(testdir, "madam_map.fits") + + if data.comm.world_rank == 0: + set_matplotlib_backend() + import matplotlib.pyplot as plt + + # Compare hit maps + + toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) + madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) + diff_hits = toast_hits - madam_hits + + outfile = os.path.join(testdir, "madam_hits.png") + hp.mollview(madam_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "toast_hits.png") + hp.mollview(toast_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = 
os.path.join(testdir, "diff_hits.png") + hp.mollview(diff_hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + # Compare maps + + toast_map = hp.read_map(toast_map_path, field=None, nest=True) + madam_map = hp.read_map(madam_map_path, field=None, nest=True) + # Set madam unhit pixels to zero + for stokes, ststr in zip(range(3), ["I", "Q", "U"]): + mask = hp.mask_bad(madam_map[stokes]) + madam_map[stokes][mask] = 0.0 + diff_map = toast_map[stokes] - madam_map[stokes] + print("diff map {} has rms {}".format(ststr, np.std(diff_map))) + outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) + hp.mollview(madam_map[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) + hp.mollview(toast_map[stokes], xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) + hp.mollview(diff_map, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + + nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=2) + + del data + return + + def test_compare_madam_bandpre(self): + if not ops.Madam.available: + print( + "libmadam not available, skipping comparison with banded preconditioner" + ) + return + + testdir = os.path.join(self.outdir, "compare_madam_bandpre") + if self.comm is None or self.comm.rank == 0: + os.makedirs(testdir) + + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm, obs_time=30.0 * u.minute) + + # Create some sky signal timestreams. + + pointing = ops.PointingHealpix( + nside=16, + nest=True, + mode="IQU", + hwp_angle="hwp_angle", + create_dist="pixel_dist", + ) + pointing.apply(data) + + # Create fake polarized sky pixel values locally + create_fake_sky(data, "pixel_dist", "fake_map") + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data="signal", + pixels=pointing.pixels, + weights=pointing.weights, + map_key="fake_map", + ) + scanner.apply(data) + + # Now clear the pointing and reset things for use with the mapmaking test later + delete_pointing = ops.Delete(detdata=[pointing.pixels, pointing.weights]) + delete_pointing.apply(data) + pointing.create_dist = None + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + # Simulate noise and accumulate to signal + sim_noise = ops.SimNoise(noise_model=default_model.noise_model, out="signal") + sim_noise.apply(data) + + # Set up binning operator for solving + binner = ops.BinMap( + pixel_dist="pixel_dist", + pointing=pointing, + noise_model=default_model.noise_model, + ) + + # Set up template matrix with just an offset template. + + # Use 1/10 of an observation as the baseline length. Make it not evenly + # divisible in order to test handling of the final amplitude. 
+ ob_time = data.obs[0].shared["times"][-1] - data.obs[0].shared["times"][0] + # step_seconds = float(int(ob_time / 10.0)) + step_seconds = 5.0 + tmpl = templates.Offset( + times="times", + noise_model=default_model.noise_model, + step_time=step_seconds, + use_noise_prior=True, + precond_width=10, + ) + + tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # Map maker + mapper = ops.MapMaker( + name="toastmap", + det_data="signal", + binning=binner, + template_matrix=tmatrix, + solve_rcond_threshold=1.0e-6, + map_rcond_threshold=1.0e-6, + iter_max=50, + ) + + # Make the map + mapper.apply(data) + + # Outputs + toast_hits = "toastmap_hits" + toast_map = "toastmap_map" + + # Write map to disk so we can load the whole thing on one process. + + toast_hit_path = os.path.join(testdir, "toast_hits.fits") + toast_map_path = os.path.join(testdir, "toast_map.fits") + write_healpix_fits(data[toast_map], toast_map_path, nest=True) + write_healpix_fits(data[toast_hits], toast_hit_path, nest=True) + + # Now run Madam on the same data and compare + + sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + pars = {} + pars["kfirst"] = "T" + pars["basis_order"] = 0 + pars["iter_max"] = 50 + pars["base_first"] = step_seconds + pars["fsample"] = sample_rate + pars["nside_map"] = pointing.nside + pars["nside_cross"] = pointing.nside + pars["nside_submap"] = min(8, pointing.nside) + pars["good_baseline_fraction"] = tmpl.good_fraction + pars["pixlim_cross"] = 1.0e-6 + pars["pixlim_map"] = 1.0e-6 + pars["write_map"] = "T" + pars["write_binmap"] = "F" + pars["write_matrix"] = "F" + pars["write_wcov"] = "F" + pars["write_hits"] = "T" + pars["write_base"] = "T" + pars["kfilter"] = "T" + pars["precond_width_min"] = 10 + pars["precond_width_max"] = 10 pars["use_cgprecond"] = "T" + pars["use_fprecond"] = "F" pars["path_output"] = testdir - pars["info"] = 2 madam = ops.Madam( params=pars, @@ -498,7 +682,7 @@ def test_compare_madam_prior(self): plt.savefig(outfile) plt.close() - # nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=6) + nt.assert_almost_equal(toast_map[stokes], madam_map[stokes], decimal=2) del data return From 368fd1b1bec19e9f2660f4afd18d14f87fb69c29 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 28 Jan 2021 09:30:36 -0800 Subject: [PATCH 054/690] Template amplitudes now support arbitrary distribution. Add unit tests for the different cases. --- src/toast/ops/scan_map.py | 9 - src/toast/templates/fourier2d.py | 320 +++++++++++++++++++++++- src/toast/templates/subharmonic.py | 6 +- src/toast/templates/template.py | 124 +++++++-- src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/ops_madam.py | 1 - src/toast/tests/ops_mapmaker_binning.py | 1 - src/toast/tests/runner.py | 25 +- src/toast/tests/template.py | 92 +++++++ 9 files changed, 517 insertions(+), 62 deletions(-) create mode 100644 src/toast/tests/template.py diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index 4e5a83f8f..7aacbc5dd 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -140,28 +140,19 @@ def _exec(self, data, detectors=None, **kwargs): "Projection supports only float32 and float64 binned maps" ) - # print("========= {} ==========".format(det)) - # print("Scanned map TOD = ", maptod) - # print("Scanned original TOD = ", ddata) - # zero-out if needed if self.zero: ddata[:] = 0.0 - # print("Scanned: zero-ing TOD = ", ddata) # Add or subtract. 
Note that the map scanned timestream will have # zeros anywhere that the pointing is bad, but those samples (and # any other detector flags) should be handled at other steps of the # processing. if self.subtract: - # print("Scanned: subtracting TOD") ddata[:] -= maptod else: - # print("Scanned: adding TOD") ddata[:] += maptod - # print("Scanned final = ", ddata) - del maptod maptod_raw.clear() del maptod_raw diff --git a/src/toast/templates/fourier2d.py b/src/toast/templates/fourier2d.py index b52f6c307..5fceb4bfd 100644 --- a/src/toast/templates/fourier2d.py +++ b/src/toast/templates/fourier2d.py @@ -2,6 +2,7 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +from astropy import units as u from ..utils import Logger @@ -24,30 +25,333 @@ class Fourier2D(Template): # data : The Data instance we are working with # view : The timestream view we are using # det_data : The detector data key with the timestreams - # det_flags : Optional detector flags - # det_flag_mask : Bit mask for detector flags - # shared_flags : Optional shared flags - # shared_flag_mask : Bit mask for shared flags + # flags : Optional detector solver flags + # flag_mask : Bit mask for detector solver flags # + correlation_length = Quantity(10.0 * u.second, help="Correlation length in time") + + correlation_amplitude = Float(10.0, help="Scale factor of the filter") + + order = Int(1, help="The filter order") + + fit_subharmonics = Bool(True, help="If True, fit subharmonics") + + noise_model = Unicode( + None, + allow_none=True, + help="Observation key containing the optional noise model", + ) + + @traitlets.validate("order") + def _check_order(self, proposal): + od = proposal["value"] + if od < 1: + raise traitlets.TraitError("Filter order should be >= 1") + return od + def __init__(self, **kwargs): super().__init__(**kwargs) def _initialize(self, new_data): + self._norder = self.order + 1 + self._nmode = (2 * self.order) ** 2 + 1 + if self.fit_subharmonics: + self._nmode += 2 + + def evaluate_template(theta, phi, radius): + """Helper function to get the template values for a detector.""" + values = np.zeros(self._nmode) + values[0] = 1 + offset = 1 + if self.fit_subharmonics: + values[1:3] = theta / radius, phi / radius + offset += 2 + if self.order > 0: + rinv = np.pi / radius + orders = np.arange(self.order) + 1 + thetavec = np.zeros(self.order * 2) + phivec = np.zeros(self.order * 2) + thetavec[::2] = np.cos(orders * theta * rinv) + thetavec[1::2] = np.sin(orders * theta * rinv) + phivec[::2] = np.cos(orders * phi * rinv) + phivec[1::2] = np.sin(orders * phi * rinv) + values[offset:] = np.outer(thetavec, phivec).ravel() + return values + + # The detector templates and norms for each observation + self.templates = dict() + self.norms = dict() + + # Amplitude lengths of all views for each obs + self._obs_view_namp = dict() + + # Starting amplitude for each view within each obs + self._obs_view_offset = dict() + + # Sample rate for each obs. + self._obs_rate = dict() + + offset = 0 + + for iob, ob in enumerate(new_data.obs): + # Compute sample rate from timestamps + (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(ob.shared[self.times]) + self._obs_rate[iob] = rate + + # Focalplane radius + radius = np.radians(ob.telescope.focalplane.radius) + + noise = None + if self.noise_model in ob: + noise = ob[self.noise_model] + + # Track number of offset amplitudes per view. 
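For scale, the mode count defined in _initialize above grows quadratically with the filter order; a small helper restating it (the values follow directly from the _nmode expression, the helper name is illustrative):

def fourier2d_n_modes(order, fit_subharmonics=True):
    # One constant mode, plus the outer product of 2*order sine/cosine
    # factors in theta and phi, plus (optionally) two subharmonic
    # gradient modes.  Each sample carries this many amplitudes.
    nmode = (2 * order) ** 2 + 1
    if fit_subharmonics:
        nmode += 2
    return nmode

# order=1 -> 7 amplitudes per sample, order=2 -> 19.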
+ self._obs_view_namp[iob] = list() + self._obs_view_offset[iob] = list() + + for view_slice in ob.view[self.view]: + slice_len = None + if view_slice.start is None: + # This is a view of the whole obs + slice_len = ob.n_local_samples + else: + slice_len = view_slice.stop - view_slice.start + + view_norms = np.zeros((slice_len, self._nmode)) + view_templates = dict() + + for det in ob.local_detectors: + detweight = 1.0 + if noise is not None: + detweight = noise.detector_weight(det) + det_quat = ob.focalplane.detector_quats[det] + x, y, z = qa.rotate(det_quat, ZAXIS) + theta, phi = np.arcsin([x, y]) + view_templates[det] = evaluate_template(theta, phi, radius) + + view_n_amp = slice_len * self._nmode + self._obs_view_namp[iob].append(view_n_amp) + self._obs_view_offset[iob].append(offset) + offset += view_n_amp + + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + common_flags = tod.local_common_flags(self.common_flags) + common_flags = (common_flags & self.common_flag_mask) != 0 + nsample = tod.total_samples + obs_templates = {} + focalplane = obs["focalplane"] + if self.focalplane_radius: + radius = np.radians(self.focalplane_radius) + else: + try: + radius = np.radians(focalplane.radius) + except AttributeError: + # Focalplane is just a dictionary + radius = np.radians(obs["fpradius"]) + norms = np.zeros([nsample, self.nmode]) + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + for det in tod.local_dets: + flags = tod.local_flags(det, self.flags) + good = ((flags & self.flag_mask) | common_flags) == 0 + detweight = self.detweights[iobs][det] + det_quat = focalplane[det]["quat"] + x, y, z = qa.rotate(det_quat, ZAXIS) + theta, phi = np.arcsin([x, y]) + obs_templates[det] = evaluate_template(theta, phi, radius) + norms[todslice] += np.outer(good, obs_templates[det] ** 2 * detweight) + self.comm.allreduce(norms) + good = norms != 0 + norms[good] = 1 / norms[good] + self.norms.append(norms.ravel()) + self.templates.append(obs_templates) + self.namplitude += nsample * self.nmode + + self.norms = np.hstack(self.norms) + + self._get_templates() + if correlation_length: + self._get_prior() + return + + @function_timer + def _get_prior(self): + """Evaluate C_a^{-1} for the 2D polynomial coefficients based + on the correlation length. + """ + if self.correlation_length: + # Correlation length is given in seconds and we cannot assume + # that each observation has the same sampling rate. Therefore, + # we will build the filter for each observation + self.filters = [] # all observations + self.preconditioners = [] # all observations + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + times = tod.local_times() + corr = ( + np.exp((times[0] - times) / self.correlation_length) + * self.correlation_amplitude + ) + ihalf = times.size // 2 + corr[ihalf + 1 :] = corr[ihalf - 1 : 0 : -1] + fcorr = np.fft.rfft(corr) + invcorr = np.fft.irfft(1 / fcorr) + self.filters.append(invcorr) + # Scale the filter by the prescribed correlation strength + # and the number of modes at each angular scale + self.filter_scale = np.zeros(self.nmode) + self.filter_scale[0] = 1 + offset = 1 + if self.fit_subharmonics: + self.filter_scale[1:3] = 2 + offset += 2 + self.filter_scale[offset:] = 4 + self.filter_scale *= self.correlation_amplitude + return + + @function_timer + def _get_templates(self): + """Evaluate and normalize the polynomial templates. 
+ + Each template corresponds to a fixed value for each detector + and depends on the position of the detector. + """ + self.templates = [] + + def evaluate_template(theta, phi, radius): + values = np.zeros(self._nmode) + values[0] = 1 + offset = 1 + if self.fit_subharmonics: + values[1:3] = theta / radius, phi / radius + offset += 2 + if self.order > 0: + rinv = np.pi / radius + orders = np.arange(self.order) + 1 + thetavec = np.zeros(self.order * 2) + phivec = np.zeros(self.order * 2) + thetavec[::2] = np.cos(orders * theta * rinv) + thetavec[1::2] = np.sin(orders * theta * rinv) + phivec[::2] = np.cos(orders * phi * rinv) + phivec[1::2] = np.sin(orders * phi * rinv) + values[offset:] = np.outer(thetavec, phivec).ravel() + return values + + self.norms = [] + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + common_flags = tod.local_common_flags(self.common_flags) + common_flags = (common_flags & self.common_flag_mask) != 0 + nsample = tod.total_samples + obs_templates = {} + focalplane = obs["focalplane"] + if self.focalplane_radius: + radius = np.radians(self.focalplane_radius) + else: + try: + radius = np.radians(focalplane.radius) + except AttributeError: + # Focalplane is just a dictionary + radius = np.radians(obs["fpradius"]) + norms = np.zeros([nsample, self.nmode]) + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + for det in tod.local_dets: + flags = tod.local_flags(det, self.flags) + good = ((flags & self.flag_mask) | common_flags) == 0 + detweight = self.detweights[iobs][det] + det_quat = focalplane[det]["quat"] + x, y, z = qa.rotate(det_quat, ZAXIS) + theta, phi = np.arcsin([x, y]) + obs_templates[det] = evaluate_template(theta, phi, radius) + norms[todslice] += np.outer(good, obs_templates[det] ** 2 * detweight) + self.comm.allreduce(norms) + good = norms != 0 + norms[good] = 1 / norms[good] + self.norms.append(norms.ravel()) + self.templates.append(obs_templates) + self.namplitude += nsample * self.nmode + + self.norms = np.hstack(self.norms) + return def _zeros(self): raise NotImplementedError("Derived class must implement _zeros()") def _add_to_signal(self, detector, amplitudes): - raise NotImplementedError("Derived class must implement _add_to_signal()") + poly_amplitudes = amplitudes[self.name] + amplitude_offset = 0 + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + nsample = tod.total_samples + # For each observation, sample indices start from 0 + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + obs_amplitudes = poly_amplitudes[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode])[todslice] + for det in tod.local_dets: + templates = self.templates[iobs][det] + signal[iobs, det, todslice] += np.sum(obs_amplitudes * templates, 1) + amplitude_offset += nsample * self.nmode def _project_signal(self, detector, amplitudes): - raise NotImplementedError("Derived class must implement _project_signal()") + poly_amplitudes = amplitudes[self.name] + amplitude_offset = 0 + for iobs, obs in enumerate(self.data.obs): + tod = obs["tod"] + nsample = tod.total_samples + # For each observation, sample indices start from 0 + local_offset, local_nsample = tod.local_samples + todslice = slice(local_offset, local_offset + local_nsample) + obs_amplitudes = poly_amplitudes[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode]) + if self.comm is not None: 
+ my_amplitudes = np.zeros_like(obs_amplitudes) + else: + my_amplitudes = obs_amplitudes + for det in tod.local_dets: + templates = self.templates[iobs][det] + my_amplitudes[todslice] += np.outer( + signal[iobs, det, todslice], templates + ) + if self.comm is not None: + self.comm.allreduce(my_amplitudes) + obs_amplitudes += my_amplitudes + amplitude_offset += nsample * self.nmode def _add_prior(self, amplitudes_in, amplitudes_out): - # Not all Templates implement the prior + if self.correlation_length: + poly_amplitudes_in = amplitudes_in[self.name] + poly_amplitudes_out = amplitudes_out[self.name] + amplitude_offset = 0 + for obs, noisefilter in zip(self.data.obs, self.filters): + tod = obs["tod"] + nsample = tod.total_samples + obs_amplitudes_in = poly_amplitudes_in[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode]) + obs_amplitudes_out = poly_amplitudes_out[ + amplitude_offset : amplitude_offset + nsample * self.nmode + ].reshape([nsample, self.nmode]) + # import pdb + # import matplotlib.pyplot as plt + # pdb.set_trace() + for mode in range(self.nmode): + scale = self.filter_scale[mode] + obs_amplitudes_out[:, mode] += scipy.signal.convolve( + obs_amplitudes_in[:, mode], + noisefilter * scale, + mode="same", + ) + amplitude_offset += nsample * self.nmode return def _apply_precond(self, amplitudes_in, amplitudes_out): - raise NotImplementedError("Derived class must implement _apply_precond()") + poly_amplitudes_in = amplitudes_in[self.name] + poly_amplitudes_out = amplitudes_out[self.name] + poly_amplitudes_out[:] = poly_amplitudes_in * self.norms diff --git a/src/toast/templates/subharmonic.py b/src/toast/templates/subharmonic.py index d32ca1746..63c2f6c14 100644 --- a/src/toast/templates/subharmonic.py +++ b/src/toast/templates/subharmonic.py @@ -27,10 +27,8 @@ class SubHarmonic(Template): # data : The Data instance we are working with # view : The timestream view we are using # det_data : The detector data key with the timestreams - # det_flags : Optional detector flags - # det_flag_mask : Bit mask for detector flags - # shared_flags : Optional shared flags - # shared_flag_mask : Bit mask for shared flags + # flags : Optional detector solver flags + # flag_mask : Bit mask for detector solver flags # def __init__(self, **kwargs): diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index ff281db07..d9a7aa6ac 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -8,6 +8,8 @@ import traitlets +from ..mpi import MPI + from ..utils import Logger, AlignedU8, AlignedF32, AlignedF64, dtype_to_aligned from ..traits import TraitConfig, Instance, Unicode, Int @@ -324,11 +326,13 @@ def __init__(self, comm, n_global, n_local, local_indices=None, dtype=np.float64 self._global_last = self._n_local - 1 else: if self._local_indices is None: - check = [self._n_local] + check = None rank = 0 if self._comm is not None: - check = self._comm.allgather(check) + check = self._comm.allgather(self._n_local) rank = self._comm.rank + else: + check = [self._n_local] if np.sum(check) != self._n_global: msg = "Total amplitudes on all processes does not equal n_global" raise RuntimeError(msg) @@ -479,7 +483,7 @@ def n_local_flagged(self): """The number of local amplitudes that are flagged.""" return np.count_nonzero(self.local_flags) - def _get_global_values(comm_offset, send_buffer): + def _get_global_values(self, comm_offset, send_buffer): n_buf = len(send_buffer) if self._full: # Shortcut if we have all 
global amplitudes locally @@ -509,11 +513,17 @@ def _get_global_values(comm_offset, send_buffer): send_view[:] = self.local[local_off : local_off + n_copy] send_view[self.local_flags[local_off : local_off + n_copy] != 0] = 0 else: - # Need to efficiently do the lookup. Pull existing techniques from - # old code when we need this. - raise NotImplementedError("sync of explicitly indexed amplitudes") - - def _set_global_values(comm_offset, recv_buffer): + selected = np.logical_and( + np.logical_and( + self._local_indices >= comm_offset, + self._local_indices < comm_offset + n_buf, + ), + self.local_flags == 0, + ) + buf_indices = self._local_indices[selected] - comm_offset + send_buffer[buf_indices] = self.local[selected] + + def _set_global_values(self, comm_offset, recv_buffer): n_buf = len(recv_buffer) if self._full: # Shortcut if we have all global amplitudes locally @@ -541,9 +551,15 @@ def _set_global_values(comm_offset, recv_buffer): buf_off : buf_off + n_copy ] else: - # Need to efficiently do the lookup. Pull existing techniques from - # old code when we need this. - raise NotImplementedError("sync of explicitly indexed amplitudes") + selected = np.logical_and( + np.logical_and( + self._local_indices >= comm_offset, + self._local_indices < comm_offset + n_buf, + ), + self.local_flags == 0, + ) + buf_indices = self._local_indices[selected] - comm_offset + self.local[selected] = recv_buffer[buf_indices] def sync(self, comm_bytes=10000000): """Perform an Allreduce across all processes. @@ -556,14 +572,15 @@ def sync(self, comm_bytes=10000000): None """ - if self._comm is None or self._local_indices is None: - # We have either one process or every process has a disjoint set of - # amplitudes. Nothing to sync. + if self._comm is None or (not self._full and self._local_indices is None): + # Either no MPI or fully disjoint set of amplitudes. return log = Logger.get() n_comm = int(comm_bytes / self._itemsize) n_total = self._n_global + if n_comm > n_total: + n_comm = n_total # Create persistent buffers for the reduction @@ -591,7 +608,7 @@ def sync(self, comm_bytes=10000000): del send_raw del recv_raw - def dot(self, other): + def dot(self, other, comm_bytes=10000000): """Perform a dot product with another Amplitudes object. The other instance must have the same data distribution. The two objects are @@ -601,6 +618,9 @@ def dot(self, other): Args: other (Amplitudes): The other instance. + comm_bytes (int): The maximum number of bytes to communicate in each + call to Allreduce. Only used in the case of explicitly indexed + amplitudes on each process. Result: (float): The dot product. @@ -610,20 +630,82 @@ def dot(self, other): raise RuntimeError("Amplitudes must have the same number of values") if other.n_local != self.n_local: raise RuntimeError("Amplitudes must have the same number of local values") - local_result = np.dot(self.local, other.local) result = None if self._comm is None or self._full: # Only one process, or every process has the full set of values. - result = local_result + result = np.dot( + np.where(self.local_flags == 0, self.local, 0), + np.where(other.local_flags == 0, other.local, 0), + ) else: if self._local_indices is None: # Every process has a unique set of amplitudes. Reduce the local # dot products. 
- result = MPI.allreduce(local_result, op=MPI.SUM) + local_result = np.dot( + np.where(self.local_flags == 0, self.local, 0), + np.where(other.local_flags == 0, other.local, 0), + ) + result = self._comm.allreduce(local_result, op=MPI.SUM) else: - # More complicated, since we need to reduce each amplitude only - # once. Implement techniques from other existing code when needed. - raise NotImplementedError("dot of explicitly indexed amplitudes") + # Each amplitude must only contribute once to the dot product. Every + # amplitude will be processed by the lowest-rank process which has + # that amplitude. We do this in a buffered way so that we don't need + # store this amplitude assignment information for the whole data at + # once. + n_comm = int(comm_bytes / self._itemsize) + n_total = self._n_global + if n_comm > n_total: + n_comm = n_total + + local_raw = AlignedI32.zeros(n_comm) + assigned_raw = AlignedI32.zeros(n_comm) + local = local_raw.array() + assigned = assigned_raw.array() + + local_result = 0 + + comm_offset = 0 + while comm_offset < n_total: + if comm_offset + n_comm > n_total: + n_comm = n_total - comm_offset + local[:] = self._comm.size + + selected = np.logical_and( + self._local_indices >= comm_offset, + self._local_indices < comm_offset + n_buf, + ) + buf_indices = self._local_indices[selected] - comm_offset + local[buf_indices] = self._comm.rank + self._comm.Allreduce(local, assigned, op=MPI.MIN) + + # Compute local dot product of just our assigned, unflagged elements + local_result += np.dot( + np.where( + np.logical_and( + self.local_flags == 0, assigned == self._comm.rank + ), + self.local, + 0, + ), + np.where( + np.logical_and( + other.local_flags == 0, assigned == self._comm.rank + ), + other.local, + 0, + ), + ) + comm_offset += n_comm + + result = self._comm.allreduce(local_result, op=MPI.SUM) + + del local + del assigned + local_raw.clear() + assigned_raw.clear() + del local_raw + del assigned_raw + return result diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 69ffa50dd..772a47f14 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -29,6 +29,7 @@ install(FILES ops_memory_counter.py ops_scan_map.py ops_madam.py + template.py template_offset.py DESTINATION ${PYTHON_SITE}/toast/tests ) diff --git a/src/toast/tests/ops_madam.py b/src/toast/tests/ops_madam.py index 134140033..5821c2651 100644 --- a/src/toast/tests/ops_madam.py +++ b/src/toast/tests/ops_madam.py @@ -177,7 +177,6 @@ def test_madam_det_out(self): pars["write_hits"] = "T" pars["kfilter"] = "F" pars["path_output"] = self.outdir - pars["info"] = 2 # FIXME: add a view here once our test data includes it diff --git a/src/toast/tests/ops_mapmaker_binning.py b/src/toast/tests/ops_mapmaker_binning.py index 5346f7008..689929f42 100644 --- a/src/toast/tests/ops_mapmaker_binning.py +++ b/src/toast/tests/ops_mapmaker_binning.py @@ -157,7 +157,6 @@ def test_compare_madam(self): pars["write_hits"] = "T" pars["kfilter"] = "F" pars["path_output"] = self.outdir - pars["info"] = 0 madam = ops.Madam( params=pars, diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 8962e8fe8..af956b44f 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -45,17 +45,13 @@ from . import covariance as test_covariance +from . import template as test_template from . import template_offset as test_template_offset -# -# from . import cache as testcache -# -# from . import tod as testtod # # from . import psd_math as testpsdmath # # from . 
import ops_dipole as testopsdipole -# from . import ops_simnoise as testopssimnoise # from . import ops_sim_sss as testopssimsss # # from . import ops_polyfilter as testopspolyfilter @@ -64,17 +60,10 @@ # from . import ops_gainscrambler as testopsgainscrambler # from . import ops_applygain as testopsapplygain # -# -# -# from . import ops_madam as testopsmadam -# from . import ops_mapmaker as testopsmapmaker -# # from . import map_satellite as testmapsatellite # # from . import map_ground as testmapground # -# from . import binned as testbinned -# # from . import sim_focalplane as testsimfocalplane # from . import tod_satellite as testtodsat # @@ -161,25 +150,25 @@ def test(name=None, verbosity=2): suite.addTest(loader.loadTestsFromModule(test_covariance)) + suite.addTest(loader.loadTestsFromModule(test_template)) suite.addTest(loader.loadTestsFromModule(test_template_offset)) - # - # suite.addTest(loader.loadTestsFromModule(testtod)) # # suite.addTest(loader.loadTestsFromModule(testopssimsss)) + # suite.addTest(loader.loadTestsFromModule(testopsapplygain)) - # suite.addTest(loader.loadTestsFromModule(testcov)) + # suite.addTest(loader.loadTestsFromModule(testopsdipole)) # suite.addTest(loader.loadTestsFromModule(testopsgroundfilter)) # suite.addTest(loader.loadTestsFromModule(testsimfocalplane)) # suite.addTest(loader.loadTestsFromModule(testopspolyfilter)) # suite.addTest(loader.loadTestsFromModule(testopsgainscrambler)) + # suite.addTest(loader.loadTestsFromModule(testpsdmath)) - # suite.addTest(loader.loadTestsFromModule(testopsmadam)) - # suite.addTest(loader.loadTestsFromModule(testopsmapmaker)) + # suite.addTest(loader.loadTestsFromModule(testmapsatellite)) # suite.addTest(loader.loadTestsFromModule(testmapground)) - # suite.addTest(loader.loadTestsFromModule(testbinned)) + # suite.addTest(loader.loadTestsFromModule(testopsatm)) # # # These tests segfault locally. Re-enable once we are doing bandpass diff --git a/src/toast/tests/template.py b/src/toast/tests/template.py new file mode 100644 index 000000000..7f770729b --- /dev/null +++ b/src/toast/tests/template.py @@ -0,0 +1,92 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +import numpy.testing as nt + +from astropy import units as u + +from .mpi import MPITestCase + +from ..utils import rate_from_times + +from ..dist import distribute_uniform + +from .. 
import ops + +from ..templates import Amplitudes, AmplitudesMap + +from ._helpers import create_outdir, create_comm + + +class TemplateTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, fixture_name) + np.random.seed(123456) + + def test_amplitudes(self): + # Create a toast communicator with groups if possible + comm = create_comm(self.comm) + + # Global number of amplitudes + n_global = 1000 + + # Test every process with independent amplitudes + + dist = distribute_uniform(n_global, comm.world_size) + n_local = dist[comm.world_rank][1] + + amps = Amplitudes(comm.comm_world, n_global, n_local, dtype=np.int32) + + amps.local[:] = 1 + amps.sync() + + np.testing.assert_equal(amps.local, np.ones_like(amps.local)) + + amps.clear() + del amps + + # Test every process with full overlap + + n_local = n_global + amps = Amplitudes(comm.comm_world, n_global, n_local, dtype=np.int32) + amps.local[:] = 1 + amps.sync() + + np.testing.assert_equal(amps.local, comm.world_size * np.ones_like(amps.local)) + + amps.clear() + del amps + + # Test arbitrary distribution + + n_local = n_global // 2 + local_indices = 2 * np.arange(n_local, dtype=np.int32) + local_indices += comm.world_rank % 2 + + amps = Amplitudes( + comm.comm_world, + n_global, + n_local, + local_indices=local_indices, + dtype=np.int32, + ) + amps.local[:] = 1 + amps.sync() + + check_even = (1 + comm.world_size) // 2 + check_odd = comm.world_size // 2 + + if comm.world_rank % 2 == 0: + np.testing.assert_equal(amps.local, check_even * np.ones_like(amps.local)) + else: + np.testing.assert_equal(amps.local, check_odd * np.ones_like(amps.local)) + + amps.clear() + del amps + + return From 0e8440aadf847bd56577e13a7e1839655577e3b6 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Sat, 30 Jan 2021 16:48:49 -0800 Subject: [PATCH 055/690] Clean up sync and dot product of distributed amplitudes. Add many more extensive unit tests, which pass when using multiple groups and a variety of communication buffer sizes. 
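To make the communication pattern concrete: the cleaned-up sync walks the global amplitude indices in fixed-size buffers, each process fills the buffer slots it owns, the buffers are summed across processes, and the totals are copied back into local storage. The following minimal sketch shows that pattern outside the library; the function name, the list of in-memory "processes" standing in for an MPI communicator, and the numbers are all illustrative assumptions, and flag handling is omitted.

import numpy as np

def sync_ranges(n_global, procs, buf_len):
    """Buffered all-reduce of sparse amplitudes (illustration only).

    ``procs`` is a list of (local_ranges, local_values) pairs, one per fake
    process, where local_ranges is a sorted list of (offset, n) tuples and
    local_values is the concatenation of the amplitudes in those ranges.
    """
    comm_offset = 0
    while comm_offset < n_global:
        n_buf = min(buf_len, n_global - comm_offset)
        # Each "process" fills a send buffer with the amplitudes it owns
        # that fall inside [comm_offset, comm_offset + n_buf).
        buffers = []
        for ranges, values in procs:
            send = np.zeros(n_buf)
            local_off = 0
            for off, n in ranges:
                lo = max(off, comm_offset)
                hi = min(off + n, comm_offset + n_buf)
                if lo < hi:
                    send[lo - comm_offset : hi - comm_offset] = values[
                        local_off + (lo - off) : local_off + (hi - off)
                    ]
                local_off += n
            buffers.append(send)
        # Stand-in for MPI Allreduce with op=SUM.
        total = np.sum(buffers, axis=0)
        # Copy the reduced values back into each local array.
        for ranges, values in procs:
            local_off = 0
            for off, n in ranges:
                lo = max(off, comm_offset)
                hi = min(off + n, comm_offset + n_buf)
                if lo < hi:
                    values[local_off + (lo - off) : local_off + (hi - off)] = total[
                        lo - comm_offset : hi - comm_offset
                    ]
                local_off += n
        comm_offset += n_buf

# Two fake processes sharing global amplitudes [40, 60); after the sync both
# copies hold the sum, mimicking the unit tests added in this commit.
p0 = ([(0, 60)], np.ones(60))
p1 = ([(40, 60)], np.ones(60))
sync_ranges(100, [p0, p1], buf_len=32)
assert np.all(p0[1][40:] == 2) and np.all(p1[1][:20] == 2)

The real Amplitudes.sync() below performs the same walk with persistent aligned buffers and an MPI Allreduce, and additionally zeroes flagged amplitudes before each reduction.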
--- src/toast/ops/mapmaker_templates.py | 3 + src/toast/templates/CMakeLists.txt | 1 + src/toast/templates/__init__.py | 4 +- src/toast/templates/amplitudes.py | 804 +++++++++++++++++++++++++ src/toast/templates/fourier2d.py | 53 +- src/toast/templates/offset.py | 6 +- src/toast/templates/subharmonic.py | 2 + src/toast/templates/template.py | 598 +----------------- src/toast/tests/CMakeLists.txt | 2 +- src/toast/tests/runner.py | 4 +- src/toast/tests/template.py | 92 --- src/toast/tests/template_amplitudes.py | 232 +++++++ 12 files changed, 1102 insertions(+), 699 deletions(-) create mode 100644 src/toast/templates/amplitudes.py delete mode 100644 src/toast/tests/template.py create mode 100644 src/toast/tests/template_amplitudes.py diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 43345a8c2..5d2daa721 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -183,6 +183,9 @@ def _exec(self, data, detectors=None, **kwargs): for d in all_dets: for tmpl in self.templates: tmpl.project_signal(d, data[self.amplitudes][tmpl.name]) + # Synchronize the result + for tmpl in self.templates: + data[self.amplitudes][tmpl.name].sync() else: if self.amplitudes not in data: msg = "Template amplitudes '{}' do not exist in data".format( diff --git a/src/toast/templates/CMakeLists.txt b/src/toast/templates/CMakeLists.txt index 0ad0fb829..188467f69 100644 --- a/src/toast/templates/CMakeLists.txt +++ b/src/toast/templates/CMakeLists.txt @@ -4,6 +4,7 @@ install(FILES __init__.py template.py + amplitudes.py offset.py DESTINATION ${PYTHON_SITE}/toast/templates ) diff --git a/src/toast/templates/__init__.py b/src/toast/templates/__init__.py index 40b197511..fd233c6e9 100644 --- a/src/toast/templates/__init__.py +++ b/src/toast/templates/__init__.py @@ -4,6 +4,8 @@ # Import Templates into our public API -from .template import Template, Amplitudes, AmplitudesMap +from .template import Template + +from .amplitudes import Amplitudes, AmplitudesMap from .offset import Offset diff --git a/src/toast/templates/amplitudes.py b/src/toast/templates/amplitudes.py new file mode 100644 index 000000000..ea5faa971 --- /dev/null +++ b/src/toast/templates/amplitudes.py @@ -0,0 +1,804 @@ +# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +from collections.abc import MutableMapping + +import numpy as np + +from ..mpi import MPI + +from ..utils import ( + Logger, + AlignedU8, + AlignedF32, + AlignedF64, + AlignedI32, + dtype_to_aligned, +) + + +class Amplitudes(object): + """Class for distributed template amplitudes. + + In the general case, template amplitudes exist as sparse, non-unique values across + all processes. This object provides methods for describing the local distribution + of amplitudes and for doing global reductions and dot products. + + There are 4 supported cases: + + 1. If n_global == n_local, then every process has a full copy of the amplitude + values. + + 2. If n_global != n_local and both local_indices and local_ranges are None, + then every process has a disjoint set of amplitudes. The sum of n_local + across all processes must equal n_global. + + 3. If n_global != n_local and local_ranges is not None, then local_ranges + specifies the contiguous global slices that are concatenated to form the + local data. The sum of the lengths of the slices must equal n_local. + + 4. 
If n_global != n_local and local_indices is not None, then local_indices + is an array of the global indices of all the local data. The length of + local_indices must equal n_local. WARNING: this case is more costly in + terms of storage and reduction. Avoid it if possible. + + Because different process groups have different sets of observations, there are + some types of templates which may only have shared amplitudes within the group + communicator. If use_group is True, the group communicator is used instead of the + world communicator, and n_global is interpreted as the number of amplitudes in the + group. This information is needed whenever working with the full set of amplitudes + (for example when doing I/O). + + Args: + comm (toast.Comm): The toast communicator. + n_global (int): The number of global values across all processes. + n_local (int): The number of values on this process. + local_indices (array): If not None, the explicit indices of the local + amplitudes within the global array. + local_ranges (list): If not None, a list of tuples with the (offset, n_amp) + amplitude ranges stored locally. + dtype (dtype): The amplitude dtype. + use_group (bool): If True, use the group rather than world communicator. + + """ + + def __init__( + self, + comm, + n_global, + n_local, + local_indices=None, + local_ranges=None, + dtype=np.float64, + use_group=False, + ): + self._comm = comm + self._n_global = n_global + self._n_local = n_local + self._local_indices = local_indices + self._local_ranges = local_ranges + self._use_group = use_group + if use_group: + self._mpicomm = self._comm.comm_group + else: + self._mpicomm = self._comm.comm_world + self._dtype = np.dtype(dtype) + self._storage_class, self._itemsize = dtype_to_aligned(dtype) + self._full = False + self._global_first = None + self._global_last = None + if self._n_global == self._n_local: + self._full = True + self._global_first = 0 + self._global_last = self._n_local - 1 + else: + if (self._local_indices is None) and (self._local_ranges is None): + check = None + rank = 0 + if self._mpicomm is not None: + check = self._mpicomm.allgather(self._n_local) + rank = self._mpicomm.rank + else: + check = [self._n_local] + if np.sum(check) != self._n_global: + msg = "Total amplitudes on all processes does not equal n_global" + raise RuntimeError(msg) + self._global_first = 0 + for i in range(rank): + self._global_first += check[i] + self._global_last = self._global_first + self._n_local - 1 + elif self._local_ranges is not None: + # local data is specified by ranges + check = 0 + last = 0 + for off, n in self._local_ranges: + check += n + if off < last: + msg = "local_ranges must not overlap and must be sorted" + raise RuntimeError(msg) + last = off + n + if last > self._n_global: + msg = "local_ranges extends beyond the number of global amps" + raise RuntimeError(msg) + if check != self._n_local: + raise RuntimeError("local_ranges must sum to n_local") + self._global_first = self._local_ranges[0][0] + self._global_last = ( + self._local_ranges[-1][0] + self._local_ranges[-1][1] - 1 + ) + else: + # local data has explicit global indices + if len(self._local_indices) != self._n_local: + msg = "Length of local_indices must match n_local" + raise RuntimeError(msg) + self._global_first = self._local_indices[0] + self._global_last = self._local_indices[-1] + self._raw = self._storage_class.zeros(self._n_local) + self.local = self._raw.array() + + # Support flagging of template amplitudes. 
This can be used to flag some
+ # amplitudes if too many timestream samples contributing to the amplitude value
+ # are bad. We will be passing these flags to compiled code, and there
+ # is no easy way to do this using numpy bool and C++ bool. So we waste
+ # a bit of memory and use a whole byte per amplitude.
+ self._raw_flags = AlignedU8.zeros(self._n_local)
+ self.local_flags = self._raw_flags.array()
+
+ def clear(self):
+ """Delete the underlying memory.
+
+ This will forcibly delete the C-allocated memory and invalidate all python
+ references to this object. DO NOT CALL THIS unless you are sure all references
+ are no longer being used and you are about to delete the object.
+
+ """
+ if hasattr(self, "local"):
+ del self.local
+ if hasattr(self, "_raw"):
+ self._raw.clear()
+ del self._raw
+ if hasattr(self, "local_flags"):
+ del self.local_flags
+ if hasattr(self, "_raw_flags"):
+ self._raw_flags.clear()
+ del self._raw_flags
+
+ def __del__(self):
+ self.clear()
+
+ def __repr__(self):
+ val = "<Amplitudes n_global={} n_local={} comm={} local={} flags={}>".format(
+ self.n_global, self.n_local, self.comm, self.local, self.local_flags
+ )
+ return val
+
+ def __eq__(self, value):
+ if isinstance(value, Amplitudes):
+ return self.local == value.local
+ else:
+ return self.local == value
+
+ # Arithmetic. These assume that flagging is consistent between the pairs of
+ # Amplitudes (always true when used in the mapmaking) or that the flagged values
+ # have been zeroed out.
+
+ def __iadd__(self, other):
+ if isinstance(other, Amplitudes):
+ self.local[:] += other.local
+ else:
+ self.local[:] += other
+ return self
+
+ def __isub__(self, other):
+ if isinstance(other, Amplitudes):
+ self.local[:] -= other.local
+ else:
+ self.local[:] -= other
+ return self
+
+ def __imul__(self, other):
+ if isinstance(other, Amplitudes):
+ self.local[:] *= other.local
+ else:
+ self.local[:] *= other
+ return self
+
+ def __itruediv__(self, other):
+ if isinstance(other, Amplitudes):
+ self.local[:] /= other.local
+ else:
+ self.local[:] /= other
+ return self
+
+ def __add__(self, other):
+ result = self.duplicate()
+ result += other
+ return result
+
+ def __sub__(self, other):
+ result = self.duplicate()
+ result -= other
+ return result
+
+ def __mul__(self, other):
+ result = self.duplicate()
+ result *= other
+ return result
+
+ def __truediv__(self, other):
+ result = self.duplicate()
+ result /= other
+ return result
+
+ def reset(self):
+ """Set all amplitude values to zero."""
+ self.local[:] = 0
+
+ def reset_flags(self):
+ """Set all flag values to zero."""
+ self.local_flags[:] = 0
+
+ def duplicate(self):
+ """Return a copy of the data."""
+ ret = Amplitudes(
+ self._comm,
+ self._n_global,
+ self._n_local,
+ local_indices=self._local_indices,
+ local_ranges=self._local_ranges,
+ dtype=self._dtype,
+ use_group=self._use_group,
+ )
+ ret.local[:] = self.local
+ ret.local_flags[:] = self.local_flags
+ return ret
+
+ @property
+ def comm(self):
+ """The toast communicator in use."""
+ return self._comm
+
+ @property
+ def n_global(self):
+ """The total number of amplitudes."""
+ return self._n_global
+
+ @property
+ def n_local(self):
+ """The number of locally stored amplitudes."""
+ return self._n_local
+
+ @property
+ def n_local_flagged(self):
+ """The number of local amplitudes that are flagged."""
+ return np.count_nonzero(self.local_flags)
+
+ @property
+ def local_indices(self):
+ """The global indices of the local amplitudes, or None."""
+ return self._local_indices
+
+ @property
+ def local_ranges(self):
+ """The global slices 
covered by local amplitudes, or None."""
+ return self._local_ranges
+
+ @property
+ def use_group(self):
+ """Whether to use the group communicator rather than the global one."""
+ return self._use_group
+
+ def sync(self, comm_bytes=10000000):
+ """Perform an Allreduce across all processes.
+
+ Args:
+ comm_bytes (int): The maximum number of bytes to communicate in each
+ call to Allreduce.
+
+ Returns:
+ None
+
+ """
+ if self._mpicomm is None:
+ # Nothing to do
+ return
+
+ if not self._full and (
+ self._local_indices is None and self._local_ranges is None
+ ):
+ # Disjoint set of amplitudes, no communication needed.
+ return
+
+ log = Logger.get()
+
+ n_comm = int(comm_bytes / self._itemsize)
+ n_total = self._n_global
+ if n_comm > n_total:
+ n_comm = n_total
+
+ # Create persistent buffers for the reduction
+
+ send_raw = self._storage_class.zeros(n_comm)
+ send_buffer = send_raw.array()
+ recv_raw = self._storage_class.zeros(n_comm)
+ recv_buffer = recv_raw.array()
+
+ # Buffered Allreduce
+
+ # For each buffer, the local indices of relevant data
+ local_selected = None
+
+ # For each buffer, the indices of relevant data in the buffer
+ buffer_selected = None
+
+ comm_offset = 0
+ while comm_offset < n_total:
+ if comm_offset + n_comm > n_total:
+ n_comm = n_total - comm_offset
+
+ if self._full:
+ # Shortcut if we have all global amplitudes locally
+ send_buffer[:] = self.local[comm_offset : comm_offset + n_comm]
+ send_buffer[
+ self.local_flags[comm_offset : comm_offset + n_comm] != 0
+ ] = 0
+ else:
+ # Need to compute our overlap with the global amplitude range.
+ send_buffer[:] = 0
+ if (self._global_last >= comm_offset) and (
+ self._global_first < comm_offset + n_comm
+ ):
+ # We have some overlap
+ if self._local_ranges is not None:
+ sel_start = None
+ n_sel = 0
+
+ # current local offset of the range
+ range_off = 0
+
+ # build up the corresponding buffer indices
+ buffer_selected = list()
+
+ for off, n in self._local_ranges:
+ if off >= comm_offset + n_comm:
+ range_off += n
+ continue
+ if off + n <= comm_offset:
+ range_off += n
+ continue
+ # This range has some overlap... 
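# (Worked example of the overlap arithmetic below, with hypothetical
# values: let the buffer cover global amplitudes [100, 150), i.e.
# comm_offset = 100 and n_comm = 50. A local range (off, n) = (80, 40)
# overlaps in [100, 120): comm_offset > off, so local_off advances by 20
# into the range, buf_off = 0, start_indx = 100, and
# n_copy = off + n - start_indx = 20 amplitudes are copied. A second
# range (130, 40) instead gives buf_off = 30, start_indx = 130, and
# n_copy = comm_offset + n_comm - start_indx = 20.)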
+ + # This is the starting local memory offset of this range: + local_off = range_off + + # Copy offset into the buffer + buf_off = 0 + + # The global starting index of the copy + start_indx = None + + if comm_offset > off: + local_off += comm_offset - off + start_indx = comm_offset + else: + buf_off = off - comm_offset + start_indx = off + + if sel_start is None: + # this is the first range with some overlap + sel_start = local_off + + n_copy = None + if comm_offset + n_comm > off + n: + n_copy = off + n - start_indx + else: + n_copy = comm_offset + n_comm - start_indx + + n_sel += n_copy + + buffer_selected.append( + np.arange(buf_off, buf_off + n_copy, 1, dtype=np.int64) + ) + send_view = send_buffer[buf_off : buf_off + n_copy] + send_view[:] = self.local[local_off : local_off + n_copy] + send_view[ + self.local_flags[local_off : local_off + n_copy] != 0 + ] = 0 + range_off += n + + local_selected = slice(sel_start, sel_start + n_sel, 1) + buffer_selected = np.concatenate(buffer_selected) + + elif self._local_indices is not None: + local_selected = np.logical_and( + np.logical_and( + self._local_indices >= comm_offset, + self._local_indices < comm_offset + n_comm, + ), + self.local_flags == 0, + ) + buffer_selected = ( + self._local_indices[local_selected] - comm_offset + ) + send_buffer[buffer_selected] = self.local[local_selected] + else: + raise RuntimeError( + "should never get here- non-full, disjoint data requires no sync" + ) + + self._mpicomm.Allreduce(send_buffer, recv_buffer, op=MPI.SUM) + + if self._full: + # Shortcut if we have all global amplitudes locally + self.local[comm_offset : comm_offset + n_comm] = recv_buffer + else: + if (self._global_last >= comm_offset) and ( + self._global_first < comm_offset + n_comm + ): + self.local[local_selected] = recv_buffer[buffer_selected] + + comm_offset += n_comm + + # Cleanup + del send_buffer + del recv_buffer + send_raw.clear() + recv_raw.clear() + del send_raw + del recv_raw + + def dot(self, other, comm_bytes=10000000): + """Perform a dot product with another Amplitudes object. + + The other instance must have the same data distribution. The two objects are + assumed to have already been synchronized, so that any amplitudes that exist + on multiple processes have the same values. This further assumes that any + flagged amplitudes have been set to zero. + + Args: + other (Amplitudes): The other instance. + comm_bytes (int): The maximum number of bytes to communicate in each + call to Allreduce. Only used in the case of explicitly indexed + amplitudes on each process. + + Result: + (float): The dot product. + + """ + if other.n_global != self.n_global: + raise RuntimeError("Amplitudes must have the same number of values") + if other.n_local != self.n_local: + raise RuntimeError("Amplitudes must have the same number of local values") + + if self._mpicomm is None or self._full: + # Only one process, or every process has the full set of values. + return np.dot( + np.where(self.local_flags == 0, self.local, 0), + np.where(other.local_flags == 0, other.local, 0), + ) + + if (self._local_ranges is None) and (self._local_indices is None): + # Every process has a unique set of amplitudes. Reduce the local + # dot products. + local_result = np.dot( + np.where(self.local_flags == 0, self.local, 0), + np.where(other.local_flags == 0, other.local, 0), + ) + result = self._mpicomm.allreduce(local_result, op=MPI.SUM) + return result + + # Each amplitude must only contribute once to the dot product. 
Every + # amplitude will be processed by the lowest-rank process which has + # that amplitude. We do this in a buffered way so that we don't need + # store this amplitude assignment information for the whole data at + # once. + n_comm = int(comm_bytes / self._itemsize) + n_total = self._n_global + if n_comm > n_total: + n_comm = n_total + + local_raw = AlignedI32.zeros(n_comm) + assigned_raw = AlignedI32.zeros(n_comm) + local = local_raw.array() + assigned = assigned_raw.array() + + local_result = 0 + + # For each buffer, the local indices of relevant data + local_selected = None + + # For each buffer, the indices of relevant data in the buffer + buffer_selected = None + + comm_offset = 0 + while comm_offset < n_total: + if comm_offset + n_comm > n_total: + n_comm = n_total - comm_offset + local[:] = self._mpicomm.size + + if (self._global_last >= comm_offset) and ( + self._global_first < comm_offset + n_comm + ): + # We have some overlap + if self._local_ranges is not None: + sel_start = None + n_sel = 0 + + # current local offset of the range + range_off = 0 + + # build up the corresponding buffer indices + buffer_selected = list() + + for off, n in self._local_ranges: + if off >= comm_offset + n_comm: + range_off += n + continue + if off + n <= comm_offset: + range_off += n + continue + # This range has some overlap... + + # This is the starting local memory offset of this range: + local_off = range_off + + # Copy offset into the buffer + buf_off = 0 + + # The global starting index of the copy + start_indx = None + + if comm_offset > off: + local_off += comm_offset - off + start_indx = comm_offset + else: + buf_off = off - comm_offset + start_indx = off + + if sel_start is None: + # this is the first range with some overlap + sel_start = local_off + + n_set = None + if comm_offset + n_comm > off + n: + n_set = off + n - start_indx + else: + n_set = comm_offset + n_comm - start_indx + + n_sel += n_set + + buffer_selected.append( + np.arange(buf_off, buf_off + n_set, 1, dtype=np.int64) + ) + local_view = local[buf_off : buf_off + n_set] + local_view[:] = self._mpicomm.rank + local_view[ + self.local_flags[local_off : local_off + n_set] != 0 + ] = self._mpicomm.size + range_off += n + + local_selected = slice(sel_start, sel_start + n_sel, 1) + buffer_selected = np.concatenate(buffer_selected) + + elif self._local_indices is not None: + local_selected = np.logical_and( + np.logical_and( + self._local_indices >= comm_offset, + self._local_indices < comm_offset + n_comm, + ), + self.local_flags == 0, + ) + buffer_selected = self._local_indices[local_selected] - comm_offset + local[buffer_selected] = self._mpicomm.rank + else: + raise RuntimeError( + "should never get here- non-full, disjoint data requires no sync" + ) + + self._mpicomm.Allreduce(local, assigned, op=MPI.MIN) + + if (self._global_last >= comm_offset) and ( + self._global_first < comm_offset + n_comm + ): + # Compute local dot product of just our assigned, unflagged elements + local_result += np.dot( + np.where( + np.logical_and( + self.local_flags[local_selected] == 0, + assigned[buffer_selected] == self._mpicomm.rank, + ), + self.local[local_selected], + 0, + ), + np.where( + np.logical_and( + other.local_flags[local_selected] == 0, + assigned[buffer_selected] == self._mpicomm.rank, + ), + other.local[local_selected], + 0, + ), + ) + + comm_offset += n_comm + + result = self._mpicomm.allreduce(local_result, op=MPI.SUM) + + del local + del assigned + local_raw.clear() + assigned_raw.clear() + del local_raw + del assigned_raw + 
+ return result + + +class AmplitudesMap(MutableMapping): + """Helper class to provide arithmetic operations on a collection of Amplitudes. + + This simply provides syntactic sugar to reduce duplicated code when working with + a collection of Amplitudes in the map making. + + """ + + def __init__(self): + self._internal = dict() + + # Mapping methods + + def __getitem__(self, key): + return self._internal[key] + + def __delitem__(self, key): + del self._internal[key] + + def __setitem__(self, key, value): + if not isinstance(value, Amplitudes): + raise RuntimeError( + "Only Amplitudes objects may be assigned to an AmplitudesMap" + ) + self._internal[key] = value + + def __iter__(self): + return iter(self._internal) + + def __len__(self): + return len(self._internal) + + def __repr__(self): + val = "= comm_offset + n_buf - ): - # No overlap with our local data - return - if self._local_indices is None: - local_off = 0 - buf_off = 0 - if comm_offset > self._global_first: - local_off = comm_offset - self._global_first - else: - buf_off = self._global_first - comm_offset - n_copy = None - if comm_offset + n_buf > self._global_last: - n_copy = self._global_last + 1 - local_off - else: - n_copy = n_buf - buf_off - send_view = send_buffer[buf_off : buf_off + n_copy] - send_view[:] = self.local[local_off : local_off + n_copy] - send_view[self.local_flags[local_off : local_off + n_copy] != 0] = 0 - else: - selected = np.logical_and( - np.logical_and( - self._local_indices >= comm_offset, - self._local_indices < comm_offset + n_buf, - ), - self.local_flags == 0, - ) - buf_indices = self._local_indices[selected] - comm_offset - send_buffer[buf_indices] = self.local[selected] - - def _set_global_values(self, comm_offset, recv_buffer): - n_buf = len(recv_buffer) - if self._full: - # Shortcut if we have all global amplitudes locally - self.local[comm_offset : comm_offset + n_buf] = recv_buffer - else: - # Need to compute our overlap with the global range. - if (self._global_last < comm_offset) or ( - self._global_first >= comm_offset + n_buf - ): - # No overlap with our local data - return - if self._local_indices is None: - local_off = 0 - buf_off = 0 - if comm_offset > self._global_first: - local_off = comm_offset - self._global_first - else: - buf_off = self._global_first - comm_offset - n_copy = None - if comm_offset + n_buf > self._global_last: - n_copy = self._global_last + 1 - local_off - else: - n_copy = n_buf - buf_off - self.local[local_off : local_off + n_copy] = recv_buffer[ - buf_off : buf_off + n_copy - ] - else: - selected = np.logical_and( - np.logical_and( - self._local_indices >= comm_offset, - self._local_indices < comm_offset + n_buf, - ), - self.local_flags == 0, - ) - buf_indices = self._local_indices[selected] - comm_offset - self.local[selected] = recv_buffer[buf_indices] - - def sync(self, comm_bytes=10000000): - """Perform an Allreduce across all processes. - - Args: - comm_bytes (int): The maximum number of bytes to communicate in each - call to Allreduce. - - Returns: - None - - """ - if self._comm is None or (not self._full and self._local_indices is None): - # Either no MPI or fully disjoint set of amplitudes. 
- return - log = Logger.get() - - n_comm = int(comm_bytes / self._itemsize) - n_total = self._n_global - if n_comm > n_total: - n_comm = n_total - - # Create persistent buffers for the reduction - - send_raw = self._storage_class.zeros(n_comm) - send_buffer = send_raw.array() - recv_raw = self._storage_class.zeros(n_comm) - recv_buffer = recv_raw.array() - - # Buffered Allreduce - - comm_offset = 0 - while comm_offset < n_total: - if comm_offset + n_comm > n_total: - n_comm = n_total - comm_offset - self._get_global_values(comm_offset, send_buffer) - self._comm.Allreduce(send_buffer, recv_buffer, op=MPI.SUM) - self._set_global_values(comm_offset, recv_buffer) - comm_offset += n_comm - - # Cleanup - del send_buffer - del recv_buffer - send_raw.clear() - recv_raw.clear() - del send_raw - del recv_raw - - def dot(self, other, comm_bytes=10000000): - """Perform a dot product with another Amplitudes object. - - The other instance must have the same data distribution. The two objects are - assumed to have already been synchronized, so that any amplitudes that exist - on multiple processes have the same values. This further assumes that any - flagged amplitudes have been set to zero. - - Args: - other (Amplitudes): The other instance. - comm_bytes (int): The maximum number of bytes to communicate in each - call to Allreduce. Only used in the case of explicitly indexed - amplitudes on each process. - - Result: - (float): The dot product. - - """ - if other.n_global != self.n_global: - raise RuntimeError("Amplitudes must have the same number of values") - if other.n_local != self.n_local: - raise RuntimeError("Amplitudes must have the same number of local values") - result = None - if self._comm is None or self._full: - # Only one process, or every process has the full set of values. - result = np.dot( - np.where(self.local_flags == 0, self.local, 0), - np.where(other.local_flags == 0, other.local, 0), - ) - else: - if self._local_indices is None: - # Every process has a unique set of amplitudes. Reduce the local - # dot products. - local_result = np.dot( - np.where(self.local_flags == 0, self.local, 0), - np.where(other.local_flags == 0, other.local, 0), - ) - result = self._comm.allreduce(local_result, op=MPI.SUM) - else: - # Each amplitude must only contribute once to the dot product. Every - # amplitude will be processed by the lowest-rank process which has - # that amplitude. We do this in a buffered way so that we don't need - # store this amplitude assignment information for the whole data at - # once. 
- n_comm = int(comm_bytes / self._itemsize) - n_total = self._n_global - if n_comm > n_total: - n_comm = n_total - - local_raw = AlignedI32.zeros(n_comm) - assigned_raw = AlignedI32.zeros(n_comm) - local = local_raw.array() - assigned = assigned_raw.array() - - local_result = 0 - - comm_offset = 0 - while comm_offset < n_total: - if comm_offset + n_comm > n_total: - n_comm = n_total - comm_offset - local[:] = self._comm.size - - selected = np.logical_and( - self._local_indices >= comm_offset, - self._local_indices < comm_offset + n_buf, - ) - buf_indices = self._local_indices[selected] - comm_offset - local[buf_indices] = self._comm.rank - self._comm.Allreduce(local, assigned, op=MPI.MIN) - - # Compute local dot product of just our assigned, unflagged elements - local_result += np.dot( - np.where( - np.logical_and( - self.local_flags == 0, assigned == self._comm.rank - ), - self.local, - 0, - ), - np.where( - np.logical_and( - other.local_flags == 0, assigned == self._comm.rank - ), - other.local, - 0, - ), - ) - comm_offset += n_comm - - result = self._comm.allreduce(local_result, op=MPI.SUM) - - del local - del assigned - local_raw.clear() - assigned_raw.clear() - del local_raw - del assigned_raw - - return result - - -class AmplitudesMap(MutableMapping): - """Helper class to provide arithmetic operations on a collection of Amplitudes. - - This simply provides syntactic sugar to reduce duplicated code when working with - a collection of Amplitudes in the map making. - - """ - - def __init__(self): - self._internal = dict() - - # Mapping methods - - def __getitem__(self, key): - return self._internal[key] - - def __delitem__(self, key): - del self._internal[key] - - def __setitem__(self, key, value): - if not isinstance(value, Amplitudes): - raise RuntimeError( - "Only Amplitudes objects may be assigned to an AmplitudesMap" - ) - self._internal[key] = value - - def __iter__(self): - return iter(self._internal) - - def __len__(self): - return len(self._internal) - - def __repr__(self): - val = " Date: Mon, 1 Feb 2021 11:25:58 -0800 Subject: [PATCH 056/690] Initial port of fourier2d and subharmonic templates, plus beginnings of unit tests. 
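As orientation for the rewritten fourier2d.py: each detector is assigned a fixed vector of 2D mode values evaluated at its focalplane position, and every sample carries one amplitude per mode. The sketch below reproduces the evaluate_template helper from this diff in standalone form; the wrapper signature and the example numbers are illustrative assumptions, not part of the patch.

import numpy as np

def evaluate_template(theta, phi, radius, order=1, fit_subharmonics=True):
    """Evaluate the 2D Fourier (plus optional subharmonic) modes at one
    detector position, following the helper defined in this patch."""
    nmode = (2 * order) ** 2 + 1 + (2 if fit_subharmonics else 0)
    values = np.zeros(nmode)
    values[0] = 1  # constant mode
    offset = 1
    if fit_subharmonics:
        # Linear gradients across the focalplane
        values[1:3] = theta / radius, phi / radius
        offset += 2
    if order > 0:
        rinv = np.pi / radius
        orders = np.arange(order) + 1
        thetavec = np.zeros(order * 2)
        phivec = np.zeros(order * 2)
        thetavec[::2] = np.cos(orders * theta * rinv)
        thetavec[1::2] = np.sin(orders * theta * rinv)
        phivec[::2] = np.cos(orders * phi * rinv)
        phivec[1::2] = np.sin(orders * phi * rinv)
        # All cross terms cos/sin(k * theta) x cos/sin(m * phi)
        values[offset:] = np.outer(thetavec, phivec).ravel()
    return values

# A detector 0.5 degrees off-axis on a 5 degree focalplane:
vals = evaluate_template(np.radians(0.5), 0.0, np.radians(5.0))
assert vals.shape == ((2 * 1) ** 2 + 1 + 2,)  # 7 modes for order 1

For order 1 with subharmonics this yields (2 * 1) ** 2 + 1 + 2 = 7 modes per sample, matching the _nmode computation in _initialize.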
--- src/toast/templates/CMakeLists.txt | 2 + src/toast/templates/__init__.py | 4 + src/toast/templates/fourier2d.py | 533 ++++++++++++------------ src/toast/templates/offset.py | 23 +- src/toast/templates/subharmonic.py | 168 +++++++- src/toast/tests/CMakeLists.txt | 2 + src/toast/tests/runner.py | 5 + src/toast/tests/template_fourier2d.py | 75 ++++ src/toast/tests/template_subharmonic.py | 75 ++++ 9 files changed, 594 insertions(+), 293 deletions(-) create mode 100644 src/toast/tests/template_fourier2d.py create mode 100644 src/toast/tests/template_subharmonic.py diff --git a/src/toast/templates/CMakeLists.txt b/src/toast/templates/CMakeLists.txt index 188467f69..4b132e420 100644 --- a/src/toast/templates/CMakeLists.txt +++ b/src/toast/templates/CMakeLists.txt @@ -6,5 +6,7 @@ install(FILES template.py amplitudes.py offset.py + fourier2d.py + subharmonic.py DESTINATION ${PYTHON_SITE}/toast/templates ) diff --git a/src/toast/templates/__init__.py b/src/toast/templates/__init__.py index fd233c6e9..27ac31d99 100644 --- a/src/toast/templates/__init__.py +++ b/src/toast/templates/__init__.py @@ -9,3 +9,7 @@ from .amplitudes import Amplitudes, AmplitudesMap from .offset import Offset + +from .fourier2d import Fourier2D + +from .subharmonic import SubHarmonic diff --git a/src/toast/templates/fourier2d.py b/src/toast/templates/fourier2d.py index 9cc14ce8f..9c6a8998e 100644 --- a/src/toast/templates/fourier2d.py +++ b/src/toast/templates/fourier2d.py @@ -2,11 +2,24 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +from collections import OrderedDict + +import numpy as np + +import scipy +import scipy.signal + from astropy import units as u -from ..utils import Logger +import traitlets + +from ..mpi import MPI -from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float +from ..utils import Logger, AlignedF64 + +from .. import qarray as qa + +from ..traits import trait_docs, Int, Unicode, Bool, Instance, Float, Quantity from ..data import Data @@ -17,8 +30,11 @@ @trait_docs class Fourier2D(Template): - """This class represents atmospheric fluctuations in front of the focalplane - as 2D Fourier modes. + """This class models 2D Fourier modes across the focalplane. + + Since the modes are shared across detectors, our amplitudes are organized by + observation and views within each observation. Each detector projection + will traverse all the local amplitudes. """ @@ -31,6 +47,8 @@ class Fourier2D(Template): # flag_mask : Bit mask for detector solver flags # + times = Unicode("times", help="Observation shared key for timestamps") + correlation_length = Quantity(10.0 * u.second, help="Correlation length in time") correlation_amplitude = Float(10.0, help="Scale factor of the filter") @@ -55,49 +73,118 @@ def _check_order(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) + def clear(self): + """Delete the underlying C-allocated memory.""" + if hasattr(self, "_norms"): + del self._norms + if hasattr(self, "_norms_raw"): + self._norms_raw.clear() + del self._norms_raw + + def __del__(self): + self.clear() + def _initialize(self, new_data): + zaxis = np.array([0.0, 0.0, 1.0]) + + # This function is called whenever a new data trait is assigned to the template. + # Clear any C-allocated buffers from previous uses. + self.clear() + self._norder = self.order + 1 self._nmode = (2 * self.order) ** 2 + 1 if self.fit_subharmonics: self._nmode += 2 - # Determine the total number of amplitudes. 
The rank zero process in each - # group does this. + # Every process determines their local amplitude ranges. - comm = new_data.comm + # The local ranges of amplitudes (in terms of global indices) + self._local_ranges = list() - self._group_start = 0 - self._total_amps = 0 + # Starting local amplitude for each view within each obs + self._obs_view_local_offset = dict() + + # Starting global amplitude for each view within each obs + self._obs_view_global_offset = dict() + + # Number of amplitudes in each local view for each obs + self._obs_view_namp = dict() - if comm.group_rank == 0: - group_n_amp = list() - for iob, ob in enumerate(new_data.obs): - group_n_amp.append(ob.n_all_samples * self._nmode) + # This is the total number of amplitudes for each observation, across all + # views. + self._obs_total_namp = dict() - group_n_amp = np.array(group_n_amp, dtype=np.int64) + # Use this as an "Ordered Set". We want the unique detectors on this process, + # but sorted in order of occurrence. + all_dets = OrderedDict() + + local_offset = 0 + global_offset = 0 + + for iob, ob in enumerate(new_data.obs): + self._obs_view_namp[iob] = list() + self._obs_view_local_offset[iob] = list() + self._obs_view_global_offset[iob] = list() - all_n_amp = None - if comm.comm_rank is None: - all_n_amp = [group_n_amp] - else: - all_n_amp = comm.comm_rank.allgather(group_n_amp) + # Build up detector list + for d in ob.local_detectors: + if d not in all_dets: + all_dets[d] = None - for gamps in all_n_amp: - self._total_amps += np.sum(gamps) + obs_n_amp = 0 - # Rank 0 of every group figures out the offset for their group - for g in range(comm.ngroups): - if g == comm.group: - # This is our group - break + views = ob.view[self.view] + for ivw, vw in enumerate(views): + # First obs sample of this view + obs_start = ob.local_index_offset + + view_len = None + if vw.start is None: + # This is a view of the whole obs + view_len = ob.n_local_samples else: - # Increment our offset - self._group_start += np.sum(all_n_amp[g]) + view_len = vw.stop - vw.start + obs_start += vw.start + + obs_offset = obs_start * self._nmode + + self._obs_view_local_offset[iob].append(local_offset) + self._obs_view_global_offset[iob].append(global_offset + obs_offset) + + view_n_amp = view_len * self._nmode + obs_n_amp += view_n_amp + self._obs_view_namp[iob].append(view_n_amp) + + self._local_ranges.append((global_offset + obs_offset, view_n_amp)) + + local_offset += view_n_amp + + # To get the total number of amplitudes in this observation, we must + # accumulate across the grid row communicator. + if ob.comm_row is not None: + obs_n_amp = ob.comm_row.allreduce(obs_n_amp) + self._obs_total_namp[iob] = obs_n_amp + global_offset += obs_n_amp + + self._all_dets = list(all_dets.keys()) + + # The global number of amplitudes for our process group and our local process. + # Since different groups have different observations, their amplitude values + # are completely disjoint. We create Amplitudes with the `use_group` option + # and so only have to consider the full set if we are doing things like I/O + # (i.e. nothing needed by this class). + + self._n_global = np.sum( + [self._obs_total_namp[x] for x, y in enumerate(new_data.obs)] + ) - if comm.comm_group is not None: - # Broadcast to the rest of the group - self._group_start = comm.comm_group.bcast(self._group_start, root=0) - self._total_amps = comm.comm_group.bcast(self._total_amps, root=0) + self._n_local = np.sum([x[1] for x in self._local_ranges]) + + # Allocate norms. 
This data is the same size as a set of amplitudes, + # so we allocate it in C memory. + + self._norms_raw = AlignedF64.zeros(self._n_local) + self._norms = self._norms_raw.array() def evaluate_template(theta, phi, radius): """Helper function to get the template values for a detector.""" @@ -119,284 +206,180 @@ def evaluate_template(theta, phi, radius): values[offset:] = np.outer(thetavec, phivec).ravel() return values - # The detector templates and norms for each observation - self.templates = dict() - self.norms = dict() - - # Amplitude lengths of all views for each obs - self._obs_view_namp = dict() - - # Starting local amplitude for each view within each obs - self._obs_view_local_offset = dict() - - # Starting global amplitude for each view within each obs - self._obs_view_global_offset = dict() - - # Sample rate for each obs. - self._obs_rate = dict() + # The detector templates for each observation + self._templates = dict() - offset = 0 + # The noise filter for each observation + self._filters = dict() for iob, ob in enumerate(new_data.obs): - # Compute sample rate from timestamps - (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(ob.shared[self.times]) - self._obs_rate[iob] = rate + # Focalplane for this observation + fp = ob.telescope.focalplane # Focalplane radius - radius = np.radians(ob.telescope.focalplane.radius) + radius = np.radians(fp.radius) noise = None if self.noise_model in ob: noise = ob[self.noise_model] - # Track number of offset amplitudes per view. - self._obs_view_namp[iob] = list() - self._obs_view_local_offset[iob] = list() - self._obs_view_global_offset[iob] = list() + self._templates[iob] = list() + self._filters[iob] = list() + + obs_local_namp = 0 - for view_slice in ob.view[self.view]: - slice_len = None - if view_slice.start is None: + views = ob.view[self.view] + for ivw, vw in enumerate(views): + view_len = None + if vw.start is None: # This is a view of the whole obs - slice_len = ob.n_local_samples + view_len = ob.n_local_samples else: - slice_len = view_slice.stop - view_slice.start + view_len = vw.stop - vw.start - view_norms = np.zeros((slice_len, self._nmode)) - view_templates = dict() - - for det in ob.local_detectors: - detweight = 1.0 - if noise is not None: - detweight = noise.detector_weight(det) - det_quat = ob.focalplane.detector_quats[det] - x, y, z = qa.rotate(det_quat, ZAXIS) - theta, phi = np.arcsin([x, y]) - view_templates[det] = evaluate_template(theta, phi, radius) + # Build the filter for this view - view_n_amp = slice_len * self._nmode - self._obs_view_namp[iob].append(view_n_amp) - self._obs_view_local_offset[iob].append(offset) - self._obs_view_global_offset[iob].append(offset + self._group_start) - offset += view_n_amp - - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - common_flags = tod.local_common_flags(self.common_flags) - common_flags = (common_flags & self.common_flag_mask) != 0 - nsample = tod.total_samples - obs_templates = {} - focalplane = obs["focalplane"] - if self.focalplane_radius: - radius = np.radians(self.focalplane_radius) - else: - try: - radius = np.radians(focalplane.radius) - except AttributeError: - # Focalplane is just a dictionary - radius = np.radians(obs["fpradius"]) - norms = np.zeros([nsample, self.nmode]) - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - for det in tod.local_dets: - flags = tod.local_flags(det, self.flags) - good = ((flags & self.flag_mask) | common_flags) == 0 - detweight = 
self.detweights[iobs][det] - det_quat = focalplane[det]["quat"] - x, y, z = qa.rotate(det_quat, ZAXIS) - theta, phi = np.arcsin([x, y]) - obs_templates[det] = evaluate_template(theta, phi, radius) - norms[todslice] += np.outer(good, obs_templates[det] ** 2 * detweight) - self.comm.allreduce(norms) - good = norms != 0 - norms[good] = 1 / norms[good] - self.norms.append(norms.ravel()) - self.templates.append(obs_templates) - self.namplitude += nsample * self.nmode - - self.norms = np.hstack(self.norms) - - self._get_templates() - if correlation_length: - self._get_prior() - return - - @function_timer - def _get_prior(self): - """Evaluate C_a^{-1} for the 2D polynomial coefficients based - on the correlation length. - """ - if self.correlation_length: - # Correlation length is given in seconds and we cannot assume - # that each observation has the same sampling rate. Therefore, - # we will build the filter for each observation - self.filters = [] # all observations - self.preconditioners = [] # all observations - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - times = tod.local_times() + corr_len = self.correlation_length.to_value(u.second) + times = views.shared[self.times][ivw] corr = ( - np.exp((times[0] - times) / self.correlation_length) - * self.correlation_amplitude + np.exp((times[0] - times) / corr_len) * self.correlation_amplitude ) ihalf = times.size // 2 corr[ihalf + 1 :] = corr[ihalf - 1 : 0 : -1] fcorr = np.fft.rfft(corr) invcorr = np.fft.irfft(1 / fcorr) - self.filters.append(invcorr) - # Scale the filter by the prescribed correlation strength - # and the number of modes at each angular scale - self.filter_scale = np.zeros(self.nmode) - self.filter_scale[0] = 1 - offset = 1 - if self.fit_subharmonics: - self.filter_scale[1:3] = 2 - offset += 2 - self.filter_scale[offset:] = 4 - self.filter_scale *= self.correlation_amplitude - return + self._filters[iob].append(invcorr) - @function_timer - def _get_templates(self): - """Evaluate and normalize the polynomial templates. + # Now compute templates and norm for this view - Each template corresponds to a fixed value for each detector - and depends on the position of the detector. 
- """ - self.templates = [] + view_templates = dict() - def evaluate_template(theta, phi, radius): - values = np.zeros(self._nmode) - values[0] = 1 - offset = 1 - if self.fit_subharmonics: - values[1:3] = theta / radius, phi / radius - offset += 2 - if self.order > 0: - rinv = np.pi / radius - orders = np.arange(self.order) + 1 - thetavec = np.zeros(self.order * 2) - phivec = np.zeros(self.order * 2) - thetavec[::2] = np.cos(orders * theta * rinv) - thetavec[1::2] = np.sin(orders * theta * rinv) - phivec[::2] = np.cos(orders * phi * rinv) - phivec[1::2] = np.sin(orders * phi * rinv) - values[offset:] = np.outer(thetavec, phivec).ravel() - return values + good = np.empty(view_len, dtype=np.float64) + norm_slice = slice( + self._obs_view_local_offset[iob][ivw], + self._obs_view_local_offset[iob][ivw] + + self._obs_view_namp[iob][ivw], + 1, + ) + norms_view = self._norms[norm_slice].reshape((-1, self._nmode)) + + for det in ob.local_detectors: + detweight = 1.0 + if noise is not None: + detweight = noise.detector_weight(det) + det_quat = fp.detector_quats[det] + x, y, z = qa.rotate(det_quat, zaxis) + theta, phi = np.arcsin([x, y]) + view_templates[det] = evaluate_template(theta, phi, radius) - self.norms = [] - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - common_flags = tod.local_common_flags(self.common_flags) - common_flags = (common_flags & self.common_flag_mask) != 0 - nsample = tod.total_samples - obs_templates = {} - focalplane = obs["focalplane"] - if self.focalplane_radius: - radius = np.radians(self.focalplane_radius) - else: - try: - radius = np.radians(focalplane.radius) - except AttributeError: - # Focalplane is just a dictionary - radius = np.radians(obs["fpradius"]) - norms = np.zeros([nsample, self.nmode]) - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - for det in tod.local_dets: - flags = tod.local_flags(det, self.flags) - good = ((flags & self.flag_mask) | common_flags) == 0 - detweight = self.detweights[iobs][det] - det_quat = focalplane[det]["quat"] - x, y, z = qa.rotate(det_quat, ZAXIS) - theta, phi = np.arcsin([x, y]) - obs_templates[det] = evaluate_template(theta, phi, radius) - norms[todslice] += np.outer(good, obs_templates[det] ** 2 * detweight) - self.comm.allreduce(norms) - good = norms != 0 - norms[good] = 1 / norms[good] - self.norms.append(norms.ravel()) - self.templates.append(obs_templates) - self.namplitude += nsample * self.nmode - - self.norms = np.hstack(self.norms) - - return + good[:] = 1.0 + if self.flags is not None: + flags = views.detdata[self.flags][ivw][det] + good[(flags & self.flag_mask) != 0] = 0 + norms_view += np.outer(good, view_templates[det] ** 2 * detweight) + + obs_local_namp += self._obs_view_namp[iob][ivw] + self._templates[iob].append(view_templates) + + # Reduce norm values across the process grid column + norm_slice = slice( + self._obs_view_local_offset[iob][0], + self._obs_view_local_offset[iob][0] + obs_local_namp, + 1, + ) + norms_view = self._norms[norm_slice] + if ob.comm_col is not None: + temp = np.array(norms_view) + ob.comm_col.Allreduce(temp, norms_view, op=MPI.SUM) + del temp + + # Invert norms + good = norms_view != 0 + norms_view[good] = 1.0 / norms_view[good] + + # Set the filter scale by the prescribed correlation strength + # and the number of modes at each angular scale + self._filter_scale = np.zeros(self._nmode) + self._filter_scale[0] = 1 + offset = 1 + if self.fit_subharmonics: + self._filter_scale[1:3] = 2 + offset += 2 + 
self._filter_scale[offset:] = 4 + self._filter_scale *= self.correlation_amplitude + + def _detectors(self): + return self._all_dets def _zeros(self): - raise NotImplementedError("Derived class must implement _zeros()") + # Return amplitudes distributed over the group communicator and using our + # local ranges. + z = Amplitudes( + self.data.comm, + self._n_global, + self._n_local, + local_ranges=self._local_ranges, + ) + # Amplitude flags are not used by this template- if some samples are flagged + # across all detectors then they will just not contribute to the projection. + # z.local_flags[:] = np.where(self._amp_flags, 1, 0) + return z def _add_to_signal(self, detector, amplitudes): - poly_amplitudes = amplitudes[self.name] - amplitude_offset = 0 - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - nsample = tod.total_samples - # For each observation, sample indices start from 0 - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - obs_amplitudes = poly_amplitudes[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode])[todslice] - for det in tod.local_dets: - templates = self.templates[iobs][det] - signal[iobs, det, todslice] += np.sum(obs_amplitudes * templates, 1) - amplitude_offset += nsample * self.nmode + for iob, ob in enumerate(self.data.obs): + if detector not in ob.local_detectors: + continue + views = ob.view[self.view] + for ivw, vw in enumerate(views): + amp_slice = slice( + self._obs_view_local_offset[iob][ivw], + self._obs_view_local_offset[iob][ivw] + + self._obs_view_namp[iob][ivw], + 1, + ) + views.detdata[self.det_data][ivw][detector] += np.sum( + amplitudes.local[amp_slice].reshape((-1, self._nmode)) + * self._templates[iob][ivw][detector], + 1, + ) def _project_signal(self, detector, amplitudes): - poly_amplitudes = amplitudes[self.name] - amplitude_offset = 0 - for iobs, obs in enumerate(self.data.obs): - tod = obs["tod"] - nsample = tod.total_samples - # For each observation, sample indices start from 0 - local_offset, local_nsample = tod.local_samples - todslice = slice(local_offset, local_offset + local_nsample) - obs_amplitudes = poly_amplitudes[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode]) - if self.comm is not None: - my_amplitudes = np.zeros_like(obs_amplitudes) - else: - my_amplitudes = obs_amplitudes - for det in tod.local_dets: - templates = self.templates[iobs][det] - my_amplitudes[todslice] += np.outer( - signal[iobs, det, todslice], templates + for iob, ob in enumerate(self.data.obs): + if detector not in ob.local_detectors: + continue + views = ob.view[self.view] + for ivw, vw in enumerate(views): + amp_slice = slice( + self._obs_view_local_offset[iob][ivw], + self._obs_view_local_offset[iob][ivw] + + self._obs_view_namp[iob][ivw], + 1, + ) + amp_view = amplitudes.local[amp_slice].reshape((-1, self._nmode)) + amp_view[:] += np.outer( + views.detdata[self.det_data][ivw][detector], + self._templates[iob][ivw][detector], ) - if self.comm is not None: - self.comm.allreduce(my_amplitudes) - obs_amplitudes += my_amplitudes - amplitude_offset += nsample * self.nmode def _add_prior(self, amplitudes_in, amplitudes_out): - if self.correlation_length: - poly_amplitudes_in = amplitudes_in[self.name] - poly_amplitudes_out = amplitudes_out[self.name] - amplitude_offset = 0 - for obs, noisefilter in zip(self.data.obs, self.filters): - tod = obs["tod"] - nsample = tod.total_samples - obs_amplitudes_in = 
poly_amplitudes_in[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode]) - obs_amplitudes_out = poly_amplitudes_out[ - amplitude_offset : amplitude_offset + nsample * self.nmode - ].reshape([nsample, self.nmode]) - # import pdb - # import matplotlib.pyplot as plt - # pdb.set_trace() - for mode in range(self.nmode): - scale = self.filter_scale[mode] - obs_amplitudes_out[:, mode] += scipy.signal.convolve( - obs_amplitudes_in[:, mode], - noisefilter * scale, + for iob, ob in enumerate(self.data.obs): + views = ob.view[self.view] + for ivw, vw in enumerate(views): + amp_slice = slice( + self._obs_view_local_offset[iob][ivw], + self._obs_view_local_offset[iob][ivw] + + self._obs_view_namp[iob][ivw], + 1, + ) + in_view = amplitudes_in.local[amp_slice].reshape((-1, self._nmode)) + out_view = amplitudes_out.local[amp_slice].reshape((-1, self._nmode)) + for mode in range(self._nmode): + scale = self._filter_scale[mode] + out_view[:, mode] += scipy.signal.convolve( + in_view[:, mode], + self._filters[iob][ivw] * scale, mode="same", ) - amplitude_offset += nsample * self.nmode - return def _apply_precond(self, amplitudes_in, amplitudes_out): - poly_amplitudes_in = amplitudes_in[self.name] - poly_amplitudes_out = amplitudes_out[self.name] - poly_amplitudes_out[:] = poly_amplitudes_in * self.norms + amplitudes_out[:] = amplitudes_in * self._norms diff --git a/src/toast/templates/offset.py b/src/toast/templates/offset.py index b47e80ce2..4082bcf7c 100644 --- a/src/toast/templates/offset.py +++ b/src/toast/templates/offset.py @@ -32,11 +32,10 @@ class Offset(Template): """This class represents noise fluctuations as a step function. - Every process stores the offsets for its local data. Although our data is arranged - in observations and then in terms of detectors, we will often be projecting our - template for a single detector at a time. Because of this, we will arrange our - template amplitudes in "detector major" order and store offsets into this for each - observation. + Every process stores the amplitudes for its local data, which is disjoint from the + amplitudes on other processes. We project amplitudes one detector at a time, and + so we arrange our template amplitudes in "detector major" order and store offsets + into this for each observation. """ @@ -133,17 +132,17 @@ def _initialize(self, new_data): # The step length for this observation step_length = self._step_length(self.step_time, self._obs_rate[iob]) - # Track number of offset amplitudes per view. + # Track number of offset amplitudes per view, per det. self._obs_views[iob] = list() for view_slice in ob.view[self.view]: - slice_len = None + view_len = None if view_slice.start is None: # This is a view of the whole obs - slice_len = ob.n_local_samples + view_len = ob.n_local_samples else: - slice_len = view_slice.stop - view_slice.start - view_n_amp = slice_len // step_length - if view_n_amp * step_length < slice_len: + view_len = view_slice.stop - view_slice.start + view_n_amp = view_len // step_length + if view_n_amp * step_length < view_len: view_n_amp += 1 self._obs_views[iob].append(view_n_amp) @@ -509,7 +508,7 @@ def _project_signal(self, detector, amplitudes): @function_timer def _add_prior(self, amplitudes_in, amplitudes_out): if not self.use_noise_prior: - # Not using the noise prior term + # Not using the noise prior term, nothing to accumulate to output. 
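+            # Skipping the accumulation treats the inverse amplitude
+            # covariance C_a^{-1} as zero, i.e. no prior constraint.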
return for det in self._all_dets: offset = self._det_start[det] diff --git a/src/toast/templates/subharmonic.py b/src/toast/templates/subharmonic.py index f357682b9..41cc56811 100644 --- a/src/toast/templates/subharmonic.py +++ b/src/toast/templates/subharmonic.py @@ -2,6 +2,11 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. +from collections import OrderedDict + +import numpy as np + +from ..mpi import MPI from ..utils import Logger @@ -22,6 +27,11 @@ class SubHarmonic(Template): modes is lower than 1/T where T is the length of the interval being fitted. + Every process stores the amplitudes for its local data, which is disjoint from the + amplitudes on other processes. We project amplitudes one detector at a time, and + so we arrange our template amplitudes in "detector major" order and store offsets + into this for each observation. + """ # Notes: The TraitConfig base class defines a "name" attribute. The Template @@ -33,24 +43,170 @@ class SubHarmonic(Template): # flag_mask : Bit mask for detector solver flags # + times = Unicode("times", help="Observation shared key for timestamps") + + order = Int(1, help="The filter order") + + noise_model = Unicode( + None, + allow_none=True, + help="Observation key containing the optional noise model", + ) + def __init__(self, **kwargs): super().__init__(**kwargs) def _initialize(self, new_data): - return + # Use this as an "Ordered Set". We want the unique detectors on this process, + # but sorted in order of occurrence. + all_dets = OrderedDict() + + for iob, ob in enumerate(new_data.obs): + # Build up detector list + for d in ob.local_detectors: + if d not in all_dets: + all_dets[d] = None + + self._all_dets = list(all_dets.keys()) + + # Go through the data one local detector at a time and compute the offsets into + # the amplitudes. + + # The starting amplitude for each detector within the local amplitude data. + self._det_start = dict() + + offset = 0 + for det in self._all_dets: + self._det_start[det] = offset + for iob, ob in enumerate(new_data.obs): + if det not in ob.local_detectors: + continue + # We have one set of amplitudes for each detector in each view + offset += len(ob.view[self.view]) * (self.order + 1) + + # Now we know the total number of amplitudes. + + self._n_local = offset + if new_data.comm.comm_world is None: + self._n_global = self._n_local + else: + self._n_global = new_data.comm.comm_world.allreduce( + self._n_local, op=MPI.SUM + ) + + # The templates for each view of each obs + self._templates = dict() + + # The preconditioner for each obs / view / detector + self._precond = dict() + + # We are not constructing any data objects that are in the same order as the + # amplitudes (we are just building dictionaries for lookups). In this case, + # it is easier to just build these by looping in observation order rather than + # detector order. + + for iob, ob in enumerate(new_data.obs): + # Build the templates and preconditioners for every view. 
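+            # The templates built below are Legendre polynomials in r, the
+            # view samples rescaled to [-1, 1], generated with Bonnet's
+            # recurrence:
+            #   n * P_n(r) = (2n - 1) * r * P_{n-1}(r) - (n - 1) * P_{n-2}(r)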
+            self._templates[iob] = list()
+            self._precond[iob] = dict()
+            norder = self.order + 1
+
+            noise = None
+            if self.noise_model in ob:
+                noise = ob[self.noise_model]
+
+            views = ob.view[self.view]
+            for ivw, vw in enumerate(views):
+                view_len = None
+                if vw.start is None:
+                    # This is a view of the whole obs
+                    view_len = ob.n_local_samples
+                else:
+                    view_len = vw.stop - vw.start
+
+                templates = np.zeros((norder, view_len), dtype=np.float64)
+                r = np.linspace(-1.0, 1.0, view_len)
+                for order in range(norder):
+                    if order == 0:
+                        templates[order] = 1.0
+                    elif order == 1:
+                        templates[order] = r
+                    else:
+                        templates[order] = (
+                            (2 * order - 1) * r * templates[order - 1]
+                            - (order - 1) * templates[order - 2]
+                        ) / order
+                self._templates[iob].append(templates)
+
+                self._precond[iob][ivw] = dict()
+                for det in ob.local_detectors:
+                    detweight = 1.0
+                    if noise is not None:
+                        detweight = noise.detector_weight(det)
+
+                    # Select the unflagged samples for this detector
+                    good = slice(0, view_len, 1)
+                    if self.flags is not None:
+                        flags = views.detdata[self.flags][ivw][det]
+                        good = (flags & self.flag_mask) == 0
+
+                    prec = np.zeros((norder, norder), dtype=np.float64)
+                    for row in range(norder):
+                        for col in range(row, norder):
+                            prec[row, col] = np.dot(
+                                templates[row][good], templates[col][good]
+                            )
+                            prec[row, col] *= detweight
+                            if row != col:
+                                prec[col, row] = prec[row, col]
+                    self._precond[iob][ivw][det] = np.linalg.inv(prec)
+
+    def _detectors(self):
+        return self._all_dets
 
     def _zeros(self):
-        raise NotImplementedError("Derived class must implement _zeros()")
+        z = Amplitudes(self.data.comm, self._n_global, self._n_local)
+        # No explicit flagging of amplitudes in this template...
+        # z.local_flags[:] = np.where(self._amp_flags, 1, 0)
+        return z
 
     def _add_to_signal(self, detector, amplitudes):
-        raise NotImplementedError("Derived class must implement _add_to_signal()")
+        norder = self.order + 1
+        offset = self._det_start[detector]
+        for iob, ob in enumerate(self.data.obs):
+            if detector not in ob.local_detectors:
+                continue
+            for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
+                amp_view = amplitudes.local[offset : offset + norder]
+                for order in range(norder):
+                    vw[detector] += self._templates[iob][ivw][order] * amp_view[order]
+                offset += norder
 
     def _project_signal(self, detector, amplitudes):
-        raise NotImplementedError("Derived class must implement _project_signal()")
+        norder = self.order + 1
+        offset = self._det_start[detector]
+        for iob, ob in enumerate(self.data.obs):
+            if detector not in ob.local_detectors:
+                continue
+            for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]):
+                amp_view = amplitudes.local[offset : offset + norder]
+                for order, template in enumerate(self._templates[iob][ivw]):
+                    amp_view[order] = np.dot(vw[detector], template)
+                offset += norder
 
     def _add_prior(self, amplitudes_in, amplitudes_out):
-        # Not all Templates implement the prior
+        # No prior for this template, nothing to accumulate to output.
         return
 
     def _apply_precond(self, amplitudes_in, amplitudes_out):
-        raise NotImplementedError("Derived class must implement _apply_precond()")
+        norder = self.order + 1
+        for det in self._all_dets:
+            offset = self._det_start[det]
+            for iob, ob in enumerate(self.data.obs):
+                if det not in ob.local_detectors:
+                    continue
+                views = ob.view[self.view]
+                for ivw, vw in enumerate(views):
+                    amps_in = amplitudes_in.local[offset : offset + norder]
+                    amps_out = amplitudes_out.local[offset : offset + norder]
+                    amps_out[:] = np.dot(self._precond[iob][ivw][det], amps_in)
+                    offset += norder
diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt
index 284a73b13..31e2b70f2 100644
--- a/src/toast/tests/CMakeLists.txt
+++ b/src/toast/tests/CMakeLists.txt
@@ -31,5 +31,7 @@ install(FILES
     ops_madam.py
     template_amplitudes.py
     template_offset.py
+    template_fourier2d.py
+    template_subharmonic.py
     DESTINATION ${PYTHON_SITE}/toast/tests
 )
diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py
index a7fe6fb9d..95c92e353 100644
--- a/src/toast/tests/runner.py
+++ b/src/toast/tests/runner.py
@@ -47,6 +47,9 @@
 from . import template_amplitudes as test_template_amplitudes
 from . import template_offset as test_template_offset
 
+from . import template_fourier2d as test_template_fourier2d
+from . import template_subharmonic as test_template_subharmonic
+
 #
 # from . import psd_math as testpsdmath
 
@@ -152,6 +155,8 @@ def test(name=None, verbosity=2):
 
     suite.addTest(loader.loadTestsFromModule(test_template_amplitudes))
     suite.addTest(loader.loadTestsFromModule(test_template_offset))
+    suite.addTest(loader.loadTestsFromModule(test_template_fourier2d))
+    suite.addTest(loader.loadTestsFromModule(test_template_subharmonic))
 
 #
 #    suite.addTest(loader.loadTestsFromModule(testopssimsss))
diff --git a/src/toast/tests/template_fourier2d.py b/src/toast/tests/template_fourier2d.py
new file mode 100644
index 000000000..88c5380df
--- /dev/null
+++ b/src/toast/tests/template_fourier2d.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import os
+
+import numpy as np
+import numpy.testing as nt
+
+from astropy import units as u
+
+from .mpi import MPITestCase
+
+from ..utils import rate_from_times
+
+from .. import ops
+
+from ..templates import Fourier2D
+
+from ._helpers import create_outdir, create_satellite_data
+
+
+class TemplateFourier2DTest(MPITestCase):
+    def setUp(self):
+        fixture_name = os.path.splitext(os.path.basename(__file__))[0]
+        self.outdir = create_outdir(self.comm, fixture_name)
+        np.random.seed(123456)
+
+    def test_projection(self):
+        # Create a fake satellite data set for testing
+        data = create_satellite_data(self.comm)
+
+        # Create a default noise model
+        noise_model = ops.DefaultNoiseModel()
+        noise_model.apply(data)
+
+        # Create some empty detector data
+        for ob in data.obs:
+            ob.detdata.create("signal", dtype=np.float64)
+
+        tmpl = Fourier2D(
+            det_data="signal",
+            times="times",
+            noise_model=noise_model.noise_model,
+        )
+
+        # Set the data
+        tmpl.data = data
+
+        # Get some amplitudes and set to one
+        amps = tmpl.zeros()
+        amps.local[:] = 1.0
+
+        # Project.
+        # (add_to_signal iterates over all observations internally, so one
+        # call per detector is sufficient.)
+        for det in tmpl.detectors():
+            tmpl.add_to_signal(det, amps)
+
+        # Verify
+        if self.comm is None or self.comm.rank == 0:
+            print("\n\nNOTE: Fourier2D template unit tests incomplete\n", flush=True)
+        # for ob in data.obs:
+        #     for det in ob.local_detectors:
+        #         np.testing.assert_equal(ob.detdata["signal"][det], 1.0)
+
+        # Accumulate amplitudes
+        for det in tmpl.detectors():
+            tmpl.project_signal(det, amps)
+
+        # Verify
+        # FIXME...
+
+        del data
+        return
diff --git a/src/toast/tests/template_subharmonic.py b/src/toast/tests/template_subharmonic.py
new file mode 100644
index 000000000..70a5520dd
--- /dev/null
+++ b/src/toast/tests/template_subharmonic.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import os
+
+import numpy as np
+import numpy.testing as nt
+
+from astropy import units as u
+
+from .mpi import MPITestCase
+
+from ..utils import rate_from_times
+
+from .. import ops
+
+from ..templates import SubHarmonic
+
+from ._helpers import create_outdir, create_satellite_data
+
+
+class TemplateSubHarmonicTest(MPITestCase):
+    def setUp(self):
+        fixture_name = os.path.splitext(os.path.basename(__file__))[0]
+        self.outdir = create_outdir(self.comm, fixture_name)
+        np.random.seed(123456)
+
+    def test_projection(self):
+        # Create a fake satellite data set for testing
+        data = create_satellite_data(self.comm)
+
+        # Create a default noise model
+        noise_model = ops.DefaultNoiseModel()
+        noise_model.apply(data)
+
+        # Create some empty detector data
+        for ob in data.obs:
+            ob.detdata.create("signal", dtype=np.float64)
+
+        tmpl = SubHarmonic(
+            det_data="signal",
+            times="times",
+            noise_model=noise_model.noise_model,
+        )
+
+        # Set the data
+        tmpl.data = data
+
+        # Get some amplitudes and set to one
+        amps = tmpl.zeros()
+        amps.local[:] = 1.0
+
+        # Project.  As above, add_to_signal handles all observations itself.
+        for det in tmpl.detectors():
+            tmpl.add_to_signal(det, amps)
+
+        # Verify
+        if self.comm is None or self.comm.rank == 0:
+            print("\n\nNOTE: SubHarmonic template unit tests incomplete\n", flush=True)
+        # for ob in data.obs:
+        #     for det in ob.local_detectors:
+        #         np.testing.assert_equal(ob.detdata["signal"][det], 1.0)
+
+        # Accumulate amplitudes
+        for det in tmpl.detectors():
+            tmpl.project_signal(det, amps)
+
+        # Verify
+        # FIXME...
+
+        del data
+        return
From 42956853ea1c5d98aeccc6d4fa7695902557c466 Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Tue, 2 Feb 2021 20:14:13 -0800
Subject: [PATCH 057/690] Merge commit 074666b from master branch

---
 src/toast/schedule_build.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/toast/schedule_build.py b/src/toast/schedule_build.py
index 69cf985f0..a3b590430 100644
--- a/src/toast/schedule_build.py
+++ b/src/toast/schedule_build.py
@@ -503,6 +503,7 @@ def update(self, observer):
 
 class HorizontalPatch(Patch):
     elevations = None
+
     def __init__(self, name, weight, azmin, azmax, el, scantime):
         self.name = name
         self.weight = weight
@@ -915,6 +916,8 @@ def attempt_scan_pole(
             if success:
                 # Still the same scan
                 patch.hits -= 1
+                patch.rising_hits -= 1
+                patch.setting_hits -= 1
             try:
                 t, subscan = add_scan(
                     args,
From 3917c46c304f4ae97df0385141abd9841f06d010 Mon Sep 17 00:00:00 2001
From: Theodore Kisner
Date: Tue, 2 Feb 2021 20:24:05 -0800
Subject: [PATCH 058/690] Manually merge commit 348056a from master.
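
This moves the elevation parsing out of the patch constructor into a
reusable parse_elevations() method, so that SSOPatch can also accept
elevation limits.  A minimal sketch of the extended SSOPatch signature,
with hypothetical values (not a tested example):

    patch = SSOPatch(
        "Moon",
        weight=1.0,
        radius=np.radians(5.0),
        el_min=np.radians(30.0),
        el_max=np.radians(70.0),
    )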
--- src/toast/schedule_build.py | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/src/toast/schedule_build.py b/src/toast/schedule_build.py index a3b590430..70b6d2afa 100644 --- a/src/toast/schedule_build.py +++ b/src/toast/schedule_build.py @@ -109,6 +109,15 @@ def __init__( el_max = np.pi / 2 - np.abs(corner._dec - self.site_lat) if el_max < site_el_max: site_el_max = el_max + self.parse_elevations(elevations, site_el_max) + if el_step != 0: + self.nstep_el = int((self.el_max0 - self.el_min0 + 1e-3) // el_step) + 1 + self.el_max = self.el_max0 + self.el_lim = self.el_min0 + self.step_azel() + return + + def parse_elevations(self, elevations, site_el_max=np.pi / 2): if elevations is None: if site_el_max < self.el_max0: self.el_max0 = site_el_max @@ -152,12 +161,7 @@ def __init__( sys.exit() self.el_min0 = np.amin(self.elevations) self.el_max0 = np.amax(self.elevations) - if el_step != 0: - self.nstep_el = int((self.el_max0 - self.el_min0 + 1e-3) // el_step) + 1 self.elevations0 = self.elevations - self.el_max = self.el_max0 - self.el_lim = self.el_min0 - self.step_azel() return def oscillate(self): @@ -386,10 +390,23 @@ def update(self, *args, **kwargs): class SSOPatch(Patch): - def __init__(self, name, weight, radius): + def __init__( + self, + name, + weight, + radius, + el_min=0, + el_max=np.pi / 2, + elevations=None, + ): self.name = name self.weight = weight self.radius = radius + self._area = np.pi * radius ** 2 / (4 * np.pi) + self.el_min0 = el_min + self.el_min = el_min + self.el_max0 = el_max + self.parse_elevations(elevations) try: self.body = getattr(ephem, name)() except: From 528132c9fc64ae0ae34c0c31bf844cec56bfd242 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Wed, 3 Feb 2021 18:08:53 -0800 Subject: [PATCH 059/690] Add data view support to some of the mapmaking utility operators that were missing them. Small cleanups to ported unit tests. 
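
The shared pattern, sketched here with the trait names used in the diffs
below (a sketch under those assumptions, not a tested example), is to loop
over the view slices of each observation instead of the full sample range:

    views = ob.view[self.view]
    for ivw, vw in enumerate(views):
        for det in dets:
            # numpy views covering only this data view's samples
            pix = views.detdata[self.pixels][ivw][det]
            ddata = views.detdata[self.det_data][ivw][det]
            # ... operate on just these samples ...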
--- src/libtoast/tests/toast_test_utils.cpp | 5 + src/toast/data.py | 3 + src/toast/ops/madam_utils.py | 12 -- src/toast/ops/mapmaker.py | 18 +- src/toast/ops/mapmaker_binning.py | 56 ++--- src/toast/ops/mapmaker_solve.py | 75 +++++-- src/toast/ops/mapmaker_utils.py | 44 +--- src/toast/ops/noise_weight.py | 23 +- src/toast/ops/scan_map.py | 269 ++++++++++++++---------- src/toast/ops/sim_ground.py | 2 + src/toast/tests/covariance.py | 3 +- src/toast/tests/dist.py | 4 +- src/toast/tests/observation.py | 2 +- src/toast/tests/ops_mapmaker_solve.py | 14 +- src/toast/tests/ops_mapmaker_utils.py | 14 +- src/toast/tests/ops_scan_map.py | 4 +- 16 files changed, 285 insertions(+), 263 deletions(-) diff --git a/src/libtoast/tests/toast_test_utils.cpp b/src/libtoast/tests/toast_test_utils.cpp index 474714516..e847daaf6 100644 --- a/src/libtoast/tests/toast_test_utils.cpp +++ b/src/libtoast/tests/toast_test_utils.cpp @@ -12,6 +12,8 @@ TEST_F(TOASTutilsTest, logging) { auto & env = toast::Environment::get(); + std::string orig_level = env.log_level(); + std::cout << "Testing level CRITICAL" << std::endl; env.set_log_level("CRITICAL"); auto & log = toast::Logger::get(); @@ -91,6 +93,9 @@ TEST_F(TOASTutilsTest, logging) { log.info("This message level is INFO at ", here); log.debug("This message level is DEBUG"); log.debug("This message level is DEBUG at ", here); + + // Restore original log level + env.set_log_level(orig_level.c_str()); } TEST_F(TOASTutilsTest, singletimer) { diff --git a/src/toast/data.py b/src/toast/data.py index 037cf815e..898b31819 100644 --- a/src/toast/data.py +++ b/src/toast/data.py @@ -4,6 +4,8 @@ from collections.abc import MutableMapping +from collections import OrderedDict + import numpy as np from .mpi import Comm @@ -216,6 +218,7 @@ def split(self, key, require=False): for value, obslist in selected.items(): new_data = Data(comm=self._comm) + new_data._internal = self._internal for ob in obslist: new_data.obs.append(ob) datasplit[value] = new_data diff --git a/src/toast/ops/madam_utils.py b/src/toast/ops/madam_utils.py index 27de9d847..1cf2362d6 100644 --- a/src/toast/ops/madam_utils.py +++ b/src/toast/ops/madam_utils.py @@ -135,7 +135,6 @@ def stage_in_turns( if nodecomm.rank % n_copy_groups == copying: # Our turn to copy data storage, _ = dtype_to_aligned(madam_dtype) - print("Allocate local of len ", nsamp * len(dets) * nnz) raw = storage.zeros(nsamp * len(dets) * nnz) wrapped = raw.array() stage_local( @@ -180,10 +179,7 @@ def restore_local( ob.detdata.create(detdata_name, dtype=detdata_dtype) else: ob.detdata.create(detdata_name, dtype=detdata_dtype, sample_shape=(nnz,)) - print("Created detdata {} = {}".format(detdata_name, ob.detdata[detdata_name])) - print("madam buffer has shape = ", madam_buffer.shape) for vw in ob.view[view].detdata[detdata_name]: - print("copying view {}".format(vw)) offset = interval_starts[interval] for idet, det in enumerate(dets): if det not in ob.local_detectors: @@ -193,14 +189,6 @@ def restore_local( (idet * nsamp + offset + len(vw[idet])) * nnz, 1, ) - print("vw[idet].shape = ", vw[idet].shape) - print( - "idet = {}, nsamp = {}, offset = {} len = {}, nnz = {}".format( - idet, nsamp, offset, len(vw[idet]), nnz - ) - ) - print(slc) - print("madam[slc].shape = ", madam_buffer[slc].shape) if nnz > 1: vw[idet] = madam_buffer[slc].reshape((-1, nnz)) else: diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index 1317660e3..a087de29b 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -338,6 +338,7 @@ def 
_exec(self, data, detectors=None, **kwargs): scanner = ScanMask( det_flags=flagname, pixels=scan_pointing.pixels, + view=solve_view, mask_bits=1, ) @@ -381,7 +382,6 @@ def _exec(self, data, detectors=None, **kwargs): covariance=self.binning.covariance, hits=solver_hits_name, rcond=solver_rcond_name, - view=self.binning.pointing.view, det_flags=flagname, det_flag_mask=255, pointing=self.binning.pointing, @@ -421,17 +421,19 @@ def _exec(self, data, detectors=None, **kwargs): for d in dets: local_total += len(vw[d]) local_cut += np.count_nonzero(vw[d]) - total = 0 - cut = 0 + total = None + cut = None + msg = None if comm is None: total = local_total cut = local_cut else: total = comm.reduce(local_total, op=MPI.SUM, root=0) cut = comm.reduce(local_cut, op=MPI.SUM, root=0) - msg = "Solver flags cut {} / {} = {:0.2f}% of samples".format( - cut, total, 100.0 * (cut / total) - ) + if comm.rank == 0: + msg = "Solver flags cut {} / {} = {:0.2f}% of samples".format( + cut, total, 100.0 * (cut / total) + ) self._log_info(comm, rank, msg) # Compute the RHS. Overwrite inputs, either the original or the copy. @@ -456,9 +458,8 @@ def _exec(self, data, detectors=None, **kwargs): binning=self.binning, template_matrix=self.template_matrix, ) - rhs_calc.apply(data, detectors=detectors) - # print("RHS = ", data[solver_rhs], flush=True) + rhs_calc.apply(data, detectors=detectors) self._log_info(comm, rank, " finished RHS calculation in", timer=timer) @@ -528,7 +529,6 @@ def _exec(self, data, detectors=None, **kwargs): covariance=self.map_binning.covariance, hits=hits_name, rcond=rcond_name, - view=self.map_binning.pointing.view, det_flags=self.map_binning.det_flags, det_flag_mask=self.map_binning.det_flag_mask, shared_flags=self.map_binning.shared_flags, diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py index 2422adadc..74ee064ff 100644 --- a/src/toast/ops/mapmaker_binning.py +++ b/src/toast/ops/mapmaker_binning.py @@ -125,8 +125,13 @@ def __init__(self, **kwargs): def _exec(self, data, detectors=None, **kwargs): log = Logger.get() + data.comm.comm_world.barrier() + if data.comm.world_rank == 0: + log.verbose(" BinMap building pipeline") + if self.covariance not in data: msg = "Data does not contain noise covariance '{}'".format(self.covariance) + log.error(msg) raise RuntimeError(msg) cov = data[self.covariance] @@ -137,11 +142,11 @@ def _exec(self, data, detectors=None, **kwargs): # Sanity check that the covariance pixel distribution agrees if cov.distribution != data[self.pixel_dist]: - raise RuntimeError( - "Pixel distribution '{}' does not match the one used by covariance '{}'".format( - self.pixel_dist, self.covariance - ) + msg = "Pixel distribution '{}' does not match the one used by covariance '{}'".format( + self.pixel_dist, self.covariance ) + log.error(msg) + raise RuntimeError(msg) # Set outputs of the pointing operator @@ -152,11 +157,11 @@ def _exec(self, data, detectors=None, **kwargs): if self.binned in data: if data[self.binned].distribution != data[self.pixel_dist]: - raise RuntimeError( - "Pixel distribution '{}' does not match existing binned map '{}'".format( - self.pixel_dist, self.binned - ) + msg = "Pixel distribution '{}' does not match existing binned map '{}'".format( + self.pixel_dist, self.binned ) + log.error(msg) + raise RuntimeError(msg) data[self.binned].raw[:] = 0.0 # Noise weighted map. We output this to the final binned map location, @@ -189,44 +194,21 @@ def _exec(self, data, detectors=None, **kwargs): # Process one detector at a time. 
accum = Pipeline(detector_sets=["SINGLE"]) accum_ops.extend([self.pointing, build_zmap]) + accum.operators = accum_ops + + if data.comm.world_rank == 0: + log.verbose(" BinMap running pipeline") pipe_out = accum.apply(data, detectors=detectors) # Extract the results binned_map = data[self.binned] - # dist = binned_map.distribution - # print("binned zmap = ") - # for ism, sm in enumerate(dist.local_submaps): - # for spix in range(dist.n_pix_submap): - # if binned_map.data[ism, spix, 0] != 0: - # pix = sm * dist.n_pix_submap + spix - # print( - # "{} {:0.6e} {:0.6e} {:0.6e}".format( - # pix, - # binned_map.data[ism, spix, 0], - # binned_map.data[ism, spix, 1], - # binned_map.data[ism, spix, 2], - # ) - # ) - # Apply the covariance in place + if data.comm.world_rank == 0: + log.verbose(" BinMap applying covariance") covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv")) - # print("binned = ") - # for ism, sm in enumerate(dist.local_submaps): - # for spix in range(dist.n_pix_submap): - # if binned_map.data[ism, spix, 0] != 0: - # pix = sm * dist.n_pix_submap + spix - # print( - # "{} {:0.6e} {:0.6e} {:0.6e}".format( - # pix, - # binned_map.data[ism, spix, 0], - # binned_map.data[ism, spix, 1], - # binned_map.data[ism, spix, 2], - # ) - # ) - return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index 2db9f40c6..8f8a0e54d 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -111,9 +111,33 @@ def _check_matrix(self, proposal): def __init__(self, **kwargs): super().__init__(**kwargs) + def _log_debug(self, comm, rank, msg, timer=None): + """Helper function to log a DEBUG level message from rank zero""" + log = Logger.get() + if comm is not None: + comm.barrier() + if timer is not None: + timer.stop() + if rank == 0: + if timer is None: + msg = "MapMaker RHS {}".format(msg) + else: + msg = "MapMaker RHS {} {:0.2f} s".format(msg, timer.seconds()) + log.debug(msg) + if timer is not None: + timer.clear() + timer.start() + @function_timer def _exec(self, data, detectors=None, **kwargs): log = Logger.get() + timer = Timer() + + # The global communicator we are using (or None) + comm = data.comm.comm_world + rank = 0 + if comm is not None: + rank = comm.rank # Check that the inputs are set if self.det_data is None: @@ -127,12 +151,19 @@ def _exec(self, data, detectors=None, **kwargs): # Make a binned map + timer.start() + self._log_debug(comm, rank, "begin binned map") + self.binning.det_data = self.det_data self.binning.apply(data, detectors=detectors) + self._log_debug(comm, rank, "binned map finished in", timer=timer) + # Build a pipeline for the projection and template matrix application. # First create the operators that we will use. + self._log_debug(comm, rank, "begin create projection pipeline") + # Name of the temporary detdata created if we are not overwriting inputs det_temp = "temp_RHS" @@ -157,6 +188,7 @@ def _exec(self, data, detectors=None, **kwargs): scan_map = ScanMap( pixels=pointing.pixels, weights=pointing.weights, + view=pointing.view, map_key=self.binning.binned, det_data=detdata_name, subtract=True, @@ -164,12 +196,15 @@ def _exec(self, data, detectors=None, **kwargs): # Set up noise weighting operator noise_weight = NoiseWeight( - noise_model=self.binning.noise_model, det_data=detdata_name + noise_model=self.binning.noise_model, + det_data=detdata_name, + view=pointing.view, ) # Set up template matrix operator. 
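+        # Transposed application projects timestreams onto the template
+        # amplitudes, which is the direction needed for the RHS.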
self.template_matrix.transpose = True self.template_matrix.det_data = detdata_name + self.template_matrix.view = pointing.view # Create a pipeline that projects the binned map and applies noise # weights and templates. @@ -205,15 +240,25 @@ def _exec(self, data, detectors=None, **kwargs): ) proj_pipe.operators = oplist + self._log_debug(comm, rank, "projection pipeline created in", timer=timer) + # Run this projection pipeline. + self._log_debug(comm, rank, "begin run projection pipeline") + proj_pipe.apply(data, detectors=detectors) + self._log_debug(comm, rank, "projection pipeline finished in", timer=timer) + + self._log_debug(comm, rank, "begin cleanup temporary detector data") + if not self.overwrite: # Clean up our temp buffer delete_temp = Delete(detdata=[det_temp]) delete_temp.apply(data) + self._log_debug(comm, rank, "cleanup finished in", timer=timer) + return def _finalize(self, data, **kwargs): @@ -354,6 +399,9 @@ def _exec(self, data, detectors=None, **kwargs): if self.out is None: raise RuntimeError("You must set the 'out' trait before calling exec()") + # Pointing operator used in the binning + pointing = self.binning.pointing + # Project amplitudes into timestreams and make a binned map. timer.start() @@ -361,6 +409,7 @@ def _exec(self, data, detectors=None, **kwargs): self.template_matrix.transpose = False self.template_matrix.det_data = self.det_temp + self.template_matrix.view = pointing.view self.binning.det_data = self.det_temp @@ -389,13 +438,11 @@ def _exec(self, data, detectors=None, **kwargs): self._log_debug(comm, rank, "begin scan map and accumulate amplitudes") - # Use the same pointing operator as the binning - pointing = self.binning.pointing - # Set up map-scanning operator to project the binned map. scan_map = ScanMap( pixels=pointing.pixels, weights=pointing.weights, + view=pointing.view, map_key=self.binning.binned, det_data=self.det_temp, subtract=True, @@ -403,7 +450,9 @@ def _exec(self, data, detectors=None, **kwargs): # Set up noise weighting operator noise_weight = NoiseWeight( - noise_model=self.binning.noise_model, det_data=self.det_temp + noise_model=self.binning.noise_model, + det_data=self.det_temp, + view=pointing.view, ) # Make a copy of the template_matrix operator so that we can apply both the @@ -592,10 +641,6 @@ def solve( residual = rhs.duplicate() residual -= lhs_out - # print("RHS ", rhs) - # print("LHS ", lhs_out) - # print("residual", residual) - # The preconditioned residual # s = M^-1 * r precond = rhs.duplicate() @@ -618,7 +663,6 @@ def solve( # delta_new = delta_0 = r^T * d delta = proposal.dot(residual) - # print("delta = ", delta) delta_init = delta if comm is not None: @@ -639,15 +683,9 @@ def solve( # q = A * d lhs_op.apply(data, detectors=detectors) - # print("LHS output", lhs_out) - # alpha = delta_new / (d^T * q) - # print("alpha num = ", delta) - # print("alpha den = ", proposal.dot(lhs_out)) alpha = delta / proposal.dot(lhs_out) - # print("alpha = ", alpha) - # Update the result # x += alpha * d temp.reset() @@ -666,7 +704,6 @@ def solve( # Epsilon sqsum = residual.dot(residual) - # print("sqsum = ", sqsum) if comm is not None: comm.barrier() @@ -691,7 +728,6 @@ def solve( break sqsum_best = min(sqsum, sqsum_best) - # print("sqsum_best = ", sqsum_best) # Check for stall / divergence if iter % 10 == 0 and iter >= n_iter_min: @@ -718,11 +754,8 @@ def solve( # beta = delta_new / delta_old beta = delta / delta_last - # print("beta = ", beta) # New proposal # d = s + beta * d proposal *= beta proposal += precond - - # 
print("proposal[{}]".format(iter), proposal, flush=True) diff --git a/src/toast/ops/mapmaker_utils.py b/src/toast/ops/mapmaker_utils.py index a9791f9d4..20663b59f 100644 --- a/src/toast/ops/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils.py @@ -582,8 +582,6 @@ def _exec(self, data, detectors=None, **kwargs): # Data for this detector ddata = dview[det] - # print("Zmap det {} = {}".format(det, ddata), flush=True) - # We require that the pointing matrix has the same number of # non-zero elements for every detector and every observation. # We check that here, and if this is the first observation and @@ -609,25 +607,13 @@ def _exec(self, data, detectors=None, **kwargs): ) log.error(msg) raise RuntimeError(msg) - # print( - # "Zmap found existing PixelData {}".format(self.zmap), - # flush=True, - # ) zmap = data[self.zmap] else: - # print( - # "Zmap allocating PixelData {}".format(self.zmap), - # flush=True, - # ) data[self.zmap] = PixelData( dist, np.float64, n_value=weight_nnz ) zmap = data[self.zmap] else: - # print( - # "Zmap PixelData {} already loaded".format(self.zmap), - # flush=True, - # ) check_nnz = None if len(wview.detector_shape) == 1: check_nnz = 1 @@ -645,7 +631,7 @@ def _exec(self, data, detectors=None, **kwargs): # Get the detector weight from the noise model. detweight = noise.detector_weight(det) - # Samples with telescope pointing problems are already flagged in the + # Samples with telescope pointing problems are already flagged in # the pointing operators by setting the pixel numbers to a negative # value. Here we optionally apply detector flags to the local # pixel numbers to flag more samples. @@ -654,28 +640,6 @@ def _exec(self, data, detectors=None, **kwargs): if self.det_flags is not None: local_pix[fview[det] & self.det_flag_mask != 0] = -1 - # print( - # "Offset output det {}, ob {} [:100] = ".format(det, ob.name), - # flush=True, - # ) - # for i in range(100): - # print( - # "{} {} weight {} {} {}".format( - # i, - # ddata[i], - # wview[det][i, 0], - # wview[det][i, 1], - # wview[det][i, 2], - # ) - # ) - # print("", flush=True) - - # print( - # "Offset output det {}, ob {} [-100:] = ".format(det, ob.name), - # ob.detdata[self.det_data][detector][-100:], - # flush=True, - # ) - # Accumulate cov_accum_zmap( dist.n_local_submap, @@ -689,7 +653,6 @@ def _exec(self, data, detectors=None, **kwargs): zmap.raw, ) zm = zmap.raw.array() - # print("Zmap after det {} ".format(det), zm[zm != 0], flush=True) return def _finalize(self, data, **kwargs): @@ -699,7 +662,6 @@ def _finalize(self, data, **kwargs): data[self.zmap].sync_alltoallv() else: data[self.zmap].sync_allreduce() - # print("Zmap final sync of {}".format(self.zmap), flush=True) return def _requires(self): @@ -767,10 +729,6 @@ class CovarianceAndHits(Operator): help="The Data key where the inverse condition number should be stored", ) - view = Unicode( - None, allow_none=True, help="Use this view of the data in all observations" - ) - det_flags = Unicode( None, allow_none=True, help="Observation detdata key for flags to use" ) diff --git a/src/toast/ops/noise_weight.py b/src/toast/ops/noise_weight.py index 3fe090427..e5e5c70a8 100644 --- a/src/toast/ops/noise_weight.py +++ b/src/toast/ops/noise_weight.py @@ -34,6 +34,10 @@ class NoiseWeight(Operator): "noise_model", help="The observation key containing the noise model" ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + det_data = Unicode( None, allow_none=True, help="Observation detdata key for the 
timestream data" ) @@ -60,19 +64,26 @@ def _exec(self, data, detectors=None, **kwargs): noise = ob[self.noise_model] - for d in dets: - # Get the detector weight from the noise model. - detweight = noise.detector_weight(d) + for vw in ob.view[self.view].detdata[self.det_data]: + for d in dets: + # Get the detector weight from the noise model. + detweight = noise.detector_weight(d) - # Apply - ob.detdata[self.det_data][d] *= detweight + # Apply + vw[d] *= detweight return def _finalize(self, data, **kwargs): return def _requires(self): - req = {"meta": [self.noise_model], "detdata": [self.det_data]} + req = { + "meta": [self.noise_model], + "detdata": [self.det_data], + "intervals": list(), + } + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): diff --git a/src/toast/ops/scan_map.py b/src/toast/ops/scan_map.py index 7aacbc5dd..578a0fba5 100644 --- a/src/toast/ops/scan_map.py +++ b/src/toast/ops/scan_map.py @@ -37,6 +37,10 @@ class ScanMap(Operator): None, allow_none=True, help="Observation detdata key for the timestream data" ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") weights = Unicode("weights", help="Observation detdata key for Stokes weights") @@ -95,67 +99,76 @@ def _exec(self, data, detectors=None, **kwargs): log.error(msg) raise RuntimeError(msg) - # Temporary array, re-used for all detectors - maptod_raw = AlignedF64.zeros(ob.n_local_samples) - maptod = maptod_raw.array() - # If our output detector data does not yet exist, create it ob.detdata.ensure(self.det_data, detectors=dets) - for det in dets: - # The pixels, weights, and data. - pix = ob.detdata[self.pixels][det] - wts = ob.detdata[self.weights][det] - ddata = ob.detdata[self.det_data][det] - - # Get local submap and pixels - local_sm, local_pix = map_dist.global_pixel_to_submap(pix) - - # We support projecting from either float64 or float32 maps. - - maptod[:] = 0.0 - - if map_data.dtype.char == "d": - scan_map_float64( - map_data.distribution.n_pix_submap, - map_data.n_value, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - map_data.raw, - wts.astype(np.float64).reshape(-1), - maptod, - ) - elif map_data.dtype.char == "f": - scan_map_float32( - map_data.distribution.n_pix_submap, - map_data.n_value, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - map_data.raw, - wts.astype(np.float64).reshape(-1), - maptod, - ) + views = ob.view[self.view] + for ivw, vw in enumerate(views): + view_samples = None + if vw.start is None: + # This is a view of the whole obs + view_samples = ob.n_local_samples else: - raise RuntimeError( - "Projection supports only float32 and float64 binned maps" - ) - - # zero-out if needed - if self.zero: - ddata[:] = 0.0 - - # Add or subtract. Note that the map scanned timestream will have - # zeros anywhere that the pointing is bad, but those samples (and - # any other detector flags) should be handled at other steps of the - # processing. - if self.subtract: - ddata[:] -= maptod - else: - ddata[:] += maptod - - del maptod - maptod_raw.clear() - del maptod_raw + view_samples = vw.stop - vw.start + + # Temporary array, re-used for all detectors + maptod_raw = AlignedF64.zeros(view_samples) + maptod = maptod_raw.array() + + for det in dets: + # The pixels, weights, and data. 
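+                    # (These detdata lookups through the view return numpy
+                    # slices covering only this view's samples.)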
+ pix = views.detdata[self.pixels][ivw][det] + wts = views.detdata[self.weights][ivw][det] + ddata = views.detdata[self.det_data][ivw][det] + + # Get local submap and pixels + local_sm, local_pix = map_dist.global_pixel_to_submap(pix) + + # We support projecting from either float64 or float32 maps. + + maptod[:] = 0.0 + + if map_data.dtype.char == "d": + scan_map_float64( + map_data.distribution.n_pix_submap, + map_data.n_value, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + map_data.raw, + wts.astype(np.float64).reshape(-1), + maptod, + ) + elif map_data.dtype.char == "f": + scan_map_float32( + map_data.distribution.n_pix_submap, + map_data.n_value, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + map_data.raw, + wts.astype(np.float64).reshape(-1), + maptod, + ) + else: + raise RuntimeError( + "Projection supports only float32 and float64 binned maps" + ) + + # zero-out if needed + if self.zero: + ddata[:] = 0.0 + + # Add or subtract. Note that the map scanned timestream will have + # zeros anywhere that the pointing is bad, but those samples (and + # any other detector flags) should be handled at other steps of the + # processing. + if self.subtract: + ddata[:] -= maptod + else: + ddata[:] += maptod + + del maptod + maptod_raw.clear() + del maptod_raw return @@ -167,7 +180,10 @@ def _requires(self): "meta": [map_key], "shared": list(), "detdata": [self.pixels, self.weights, self.det_data], + "intervals": list(), } + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): @@ -201,6 +217,10 @@ class ScanMask(Operator): 1, help="The detector flag value to set where the mask result is non-zero" ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") mask_key = Unicode( @@ -248,17 +268,19 @@ def _exec(self, data, detectors=None, **kwargs): if self.det_flags not in ob.detdata: ob.detdata.create(self.det_flags, dtype=np.uint8, detectors=dets) - for det in dets: - # The pixels and flags. - pix = ob.detdata[self.pixels][det] - dflags = ob.detdata[self.det_flags][det] + views = ob.view[self.view] + for ivw, vw in enumerate(views): + for det in dets: + # The pixels and flags. + pix = views.detdata[self.pixels][ivw][det] + dflags = views.detdata[self.det_flags][ivw][det] - # Get local submap and pixels - local_sm, local_pix = mask_dist.global_pixel_to_submap(pix) + # Get local submap and pixels + local_sm, local_pix = mask_dist.global_pixel_to_submap(pix) - # We could move this to compiled code if it is too slow... - masked = mask_data[local_sm, local_pix, 0] & self.mask_bits - dflags[masked != 0] |= self.det_flags_value + # We could move this to compiled code if it is too slow... 
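+                    # A sample is masked when its mask map pixel has any of
+                    # the mask_bits set; those samples get det_flags_value
+                    # OR'd into the detector flags.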
+ masked = mask_data[local_sm, local_pix, 0] & self.mask_bits + dflags[masked != 0] |= self.det_flags_value return @@ -270,7 +292,10 @@ def _requires(self): "meta": [mask_key], "shared": list(), "detdata": [self.pixels, self.det_flags], + "intervals": list(), } + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): @@ -299,6 +324,10 @@ class ScanScale(Operator): None, allow_none=True, help="Observation detdata key for the timestream data" ) + view = Unicode( + None, allow_none=True, help="Use this view of the data in all observations" + ) + pixels = Unicode("pixels", help="Observation detdata key for pixel indices") weights = Unicode("weights", help="Observation detdata key for Stokes weights") @@ -348,55 +377,64 @@ def _exec(self, data, detectors=None, **kwargs): log.error(msg) raise RuntimeError(msg) - # Temporary array, re-used for all detectors - maptod_raw = AlignedF64.zeros(ob.n_local_samples) - maptod = maptod_raw.array() - - for det in dets: - # The pixels, weights, and data. - pix = ob.detdata[self.pixels][det] - ddata = ob.detdata[self.det_data][det] - - # Get local submap and pixels - local_sm, local_pix = map_dist.global_pixel_to_submap(pix) - - # We support projecting from either float64 or float32 maps. We - # use a shortcut here by passing the original timestream values - # as the pointing "weights", so that the output is equal to the - # pixel values times the original timestream. - - maptod[:] = 0.0 - - if map_data.dtype.char == "d": - scan_map_float64( - map_data.distribution.n_pix_submap, - 1, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - map_data.raw, - ddata.astype(np.float64).reshape(-1), - maptod, - ) - elif map_data.dtype.char == "f": - scan_map_float32( - map_data.distribution.n_pix_submap, - 1, - local_sm.astype(np.int64), - local_pix.astype(np.int64), - map_data.raw, - ddata.astype(np.float64).reshape(-1), - maptod, - ) + views = ob.view[self.view] + for ivw, vw in enumerate(views): + view_samples = None + if vw.start is None: + # This is a view of the whole obs + view_samples = ob.n_local_samples else: - raise RuntimeError( - "Projection supports only float32 and float64 binned maps" - ) - - ddata[:] = maptod - - del maptod - maptod_raw.clear() - del maptod_raw + view_samples = vw.stop - vw.start + + # Temporary array, re-used for all detectors + maptod_raw = AlignedF64.zeros(view_samples) + maptod = maptod_raw.array() + + for det in dets: + # The pixels, weights, and data. + pix = views.detdata[self.pixels][ivw][det] + ddata = views.detdata[self.det_data][ivw][det] + + # Get local submap and pixels + local_sm, local_pix = map_dist.global_pixel_to_submap(pix) + + # We support projecting from either float64 or float32 maps. We + # use a shortcut here by passing the original timestream values + # as the pointing "weights", so that the output is equal to the + # pixel values times the original timestream. 
+ + maptod[:] = 0.0 + + if map_data.dtype.char == "d": + scan_map_float64( + map_data.distribution.n_pix_submap, + 1, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + map_data.raw, + ddata.astype(np.float64).reshape(-1), + maptod, + ) + elif map_data.dtype.char == "f": + scan_map_float32( + map_data.distribution.n_pix_submap, + 1, + local_sm.astype(np.int64), + local_pix.astype(np.int64), + map_data.raw, + ddata.astype(np.float64).reshape(-1), + maptod, + ) + else: + raise RuntimeError( + "Projection supports only float32 and float64 binned maps" + ) + + ddata[:] = maptod + + del maptod + maptod_raw.clear() + del maptod_raw return @@ -408,7 +446,10 @@ def _requires(self): "meta": [map_key], "shared": list(), "detdata": [self.pixels, self.weights, self.det_data], + "intervals": list(), } + if self.view is not None: + req["intervals"].append(self.view) return req def _provides(self): diff --git a/src/toast/ops/sim_ground.py b/src/toast/ops/sim_ground.py index 481be6f43..1d8e6d1da 100644 --- a/src/toast/ops/sim_ground.py +++ b/src/toast/ops/sim_ground.py @@ -32,6 +32,8 @@ from ..instrument import Telescope +from ..schedule import Schedule + from ..healpix import ang2vec from .operator import Operator diff --git a/src/toast/tests/covariance.py b/src/toast/tests/covariance.py index c81f70768..c6b6c7fb6 100644 --- a/src/toast/tests/covariance.py +++ b/src/toast/tests/covariance.py @@ -53,7 +53,8 @@ def create_invnpp(self): build_invnpp = ops.BuildInverseCovariance( pixel_dist="pixel_dist", noise_model="noise_model" ) - invnpp = build_invnpp.apply(data) + build_invnpp.apply(data) + invnpp = data[build_invnpp.inverse_covariance] del data return invnpp diff --git a/src/toast/tests/dist.py b/src/toast/tests/dist.py index a7b778f34..01a0d2a26 100644 --- a/src/toast/tests/dist.py +++ b/src/toast/tests/dist.py @@ -174,14 +174,14 @@ def test_split(self): # Verify that the observations are shared sum1 = 0 - for value, site_data in datasplit_site: + for value, site_data in datasplit_site.items(): for obs in site_data.obs: assert "var1" not in obs obs["var1"] = 1 sum1 += 1 sum2 = 0 - for value, season_data in datasplit_season: + for value, season_data in datasplit_season.items(): for obs in season_data.obs: sum2 += obs["var1"] diff --git a/src/toast/tests/observation.py b/src/toast/tests/observation.py index 1bf9376ff..b69bb7368 100644 --- a/src/toast/tests/observation.py +++ b/src/toast/tests/observation.py @@ -312,7 +312,7 @@ def test_observation(self): ) # ... Or you can access it as one big array (first dimension is detector) - print("\n", obs.detdata["signal"].data, "\n") + # print("\n", obs.detdata["signal"].data, "\n") def test_view(self): np.random.seed(12345) diff --git a/src/toast/tests/ops_mapmaker_solve.py b/src/toast/tests/ops_mapmaker_solve.py index dd99e20a7..194e5f338 100644 --- a/src/toast/tests/ops_mapmaker_solve.py +++ b/src/toast/tests/ops_mapmaker_solve.py @@ -95,7 +95,6 @@ def test_rhs(self): rhs_binned = data[binner.binned] bd = data[binner.binned].data - print("rhs binned map = ", bd[bd != 0]) # Manual check. This applies the same operators as the RHS operator, but # checks things along the way. 
And these lower-level operators are unit @@ -108,7 +107,6 @@ def test_rhs(self): check_binned = data[binner.binned] bd = data[binner.binned].data - print("check binned map = ", bd[bd != 0], flush=True) # Verify that the binned map elements agree np.testing.assert_equal(rhs_binned.raw.array(), check_binned.raw.array()) @@ -180,8 +178,6 @@ def test_lhs(self): low=-1000.0, high=1000.0, size=data["amplitudes"][tmpl.name].n_local ) - print("amplitudes = ", data["amplitudes"]) - for ob in data.obs: ob.detdata.create("signal") @@ -191,9 +187,6 @@ def test_lhs(self): tmatrix.transpose = False tmatrix.apply(data) - for ob in data.obs: - print("signal = ", ob.detdata["signal"]) - # Pointing operator pointing = ops.PointingHealpix(nside=64, mode="I", hwp_angle="hwp_angle") @@ -228,9 +221,6 @@ def test_lhs(self): rhs_calc.apply(data) bd = data[binner.binned].data - print("rhs binned map = ", bd[bd != 0]) - - print("amplitudes_check = ", data["amplitudes_check"]) # Now we will run the LHS operator and compare. Re-use the previous detdata # array for temp space. @@ -238,6 +228,8 @@ def test_lhs(self): tmatrix.amplitudes = "amplitudes" binner.binned = "lhs_binned" out_amps = "out_amplitudes" + data[out_amps] = data["amplitudes"].duplicate() + lhs_calc = SolverLHS( det_temp="signal", binning=binner, @@ -246,8 +238,6 @@ def test_lhs(self): ) lhs_calc.apply(data) - print("amplitudes out = ", data[out_amps]) - # Verify that the output amplitudes agree np.testing.assert_equal( data[out_amps][tmpl.name].local, diff --git a/src/toast/tests/ops_mapmaker_utils.py b/src/toast/tests/ops_mapmaker_utils.py index 9554228ad..c0975396c 100644 --- a/src/toast/tests/ops_mapmaker_utils.py +++ b/src/toast/tests/ops_mapmaker_utils.py @@ -105,13 +105,17 @@ def test_inv_cov(self): # Build an inverse covariance from both build_invnpp = ops.BuildInverseCovariance( - pixel_dist="pixel_dist", noise_model="noise_model" + pixel_dist="pixel_dist", + noise_model="noise_model", + inverse_covariance="invnpp_out", ) build_invnpp.apply(data) invnpp = data[build_invnpp.inverse_covariance] build_invnpp_corr = ops.BuildInverseCovariance( - pixel_dist="pixel_dist", noise_model="noise_model_corr" + pixel_dist="pixel_dist", + noise_model="noise_model_corr", + inverse_covariance="invnpp_out_corr", ) build_invnpp_corr.apply(data) invnpp_corr = data[build_invnpp_corr.inverse_covariance] @@ -194,7 +198,10 @@ def test_zmap(self): # Build a noise weighted map from both build_zmap = ops.BuildNoiseWeighted( - pixel_dist="pixel_dist", noise_model="noise_model", det_data="noise" + pixel_dist="pixel_dist", + noise_model="noise_model", + det_data="noise", + zmap="zmap", ) build_zmap.apply(data) zmap = data[build_zmap.zmap] @@ -203,6 +210,7 @@ def test_zmap(self): pixel_dist="pixel_dist", noise_model="noise_model_corr", det_data="noise_corr", + zmap="zmap_corr", ) build_zmap_corr.apply(data) zmap_corr = data[build_zmap_corr.zmap] diff --git a/src/toast/tests/ops_scan_map.py b/src/toast/tests/ops_scan_map.py index 7e6c4a324..cd4ea3048 100644 --- a/src/toast/tests/ops_scan_map.py +++ b/src/toast/tests/ops_scan_map.py @@ -77,7 +77,7 @@ def test_scan_add_subtract(self): pointing.apply(data) # Create fake polarized sky pixel values locally - self.create_fake_sky(data, "pixel_dist", "fake_map") + create_fake_sky(data, "pixel_dist", "fake_map") # Scan map into timestreams twice, adding once and then subtracting. Also test # zero option. 
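For reference, the convention these test updates follow is that an operator
writes its products into the Data container under a key named by one of its
traits, and callers fetch the product from the container after apply() rather
than from a return value.  A minimal sketch of that idiom, reusing the trait
values shown above and assuming "data" is an already-populated toast Data
instance:

    from toast import ops

    # The trait names the Data key where the product will be stored.
    build_invnpp = ops.BuildInverseCovariance(
        pixel_dist="pixel_dist",
        noise_model="noise_model",
        inverse_covariance="invnpp_out",
    )

    # apply() executes the operator on the data; it does not return the
    # product.
    build_invnpp.apply(data)

    # Fetch the result through the operator's own trait, so the lookup
    # stays in sync with the configuration above.
    invnpp = data[build_invnpp.inverse_covariance]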
@@ -133,7 +133,7 @@ def test_mask(self): pointing.apply(data) # Create fake polarized sky pixel values locally - self.create_fake_sky(data, "pixel_dist", "fake_map") + create_fake_sky(data, "pixel_dist", "fake_map") # Generate a mask data["fake_mask"] = PixelData(data["pixel_dist"], np.uint8, n_value=1) From ccfbb6eed19f7928a9df19db57b104f363f5498c Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Thu, 4 Feb 2021 12:25:27 -0800 Subject: [PATCH 060/690] Add a note --- src/toast/ops/sim_tod_noise.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/toast/ops/sim_tod_noise.py b/src/toast/ops/sim_tod_noise.py index 6c71fb54c..7535f2ef0 100644 --- a/src/toast/ops/sim_tod_noise.py +++ b/src/toast/ops/sim_tod_noise.py @@ -202,6 +202,10 @@ class SimNoise(Operator): want to enforce reproducibility of a given sample, even when using different-sized observations. + This operator intentionally does not provide a "view" trait. To avoid + discontinuities, the full observation must be simulated regardless of any data + views that will be used for subsequent analysis. + """ # Class traits From 00dd87bc718d721bf9c6c98e0df734d2f4ad5877 Mon Sep 17 00:00:00 2001 From: Theodore Kisner Date: Sun, 7 Feb 2021 12:15:39 -0800 Subject: [PATCH 061/690] Rework Weather and Site classes. Small weather historical data files are now included in package. --- setup.py | 7 +- src/toast/CMakeLists.txt | 7 + src/toast/__init__.py | 2 +- src/toast/aux/weather/atacama.h5 | Bin 0 -> 1106744 bytes src/toast/aux/weather/convert.py | 42 +++ src/toast/aux/weather/south_pole.h5 | Bin 0 -> 1106744 bytes src/toast/dist.py | 13 +- src/toast/instrument.py | 136 ++++--- src/toast/observation.py | 201 ++++++++++- src/toast/ops/sim_satellite.py | 2 +- src/toast/ops/sim_tod_noise.py | 6 +- src/toast/schedule.py | 3 + src/toast/tests/CMakeLists.txt | 1 + src/toast/tests/_helpers.py | 41 ++- src/toast/tests/config.py | 4 +- src/toast/tests/dist.py | 13 +- src/toast/tests/observation.py | 98 +++++- src/toast/tests/runner.py | 2 + src/toast/weather.py | 527 +++++++++++++--------------- 19 files changed, 727 insertions(+), 378 deletions(-) create mode 100644 src/toast/aux/weather/atacama.h5 create mode 100644 src/toast/aux/weather/convert.py create mode 100644 src/toast/aux/weather/south_pole.h5 diff --git a/setup.py b/setup.py index dad27b98a..5b5e2282c 100644 --- a/setup.py +++ b/setup.py @@ -233,11 +233,12 @@ def readme(): "traitlets>=5.0", "numpy", "scipy", - "pshmem", - "healpy", "matplotlib", - "ephem", "h5py", + "pshmem", + "astropy", + "healpy", + "skyfield", ] conf["extras_require"] = {"mpi": ["mpi4py>=3.0"]} conf["packages"] = find_packages("src") diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index 4fcb0ab53..52a34bb32 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -110,6 +110,13 @@ install(FILES DESTINATION ${PYTHON_SITE}/toast ) +# Install package data + +install(DIRECTORY + aux + DESTINATION ${PYTHON_SITE}/toast +) + # Process the sub directories add_subdirectory(tests) add_subdirectory(ops) diff --git a/src/toast/__init__.py b/src/toast/__init__.py index 75df72834..a9f696abe 100644 --- a/src/toast/__init__.py +++ b/src/toast/__init__.py @@ -76,7 +76,7 @@ from .config import load_config -from .instrument import Telescope, Focalplane, Site +from .instrument import Telescope, Focalplane, GroundSite, SpaceSite from .instrument_sim import fake_hexagon_focalplane diff --git a/src/toast/aux/weather/atacama.h5 b/src/toast/aux/weather/atacama.h5 new file mode 100644 index 
0000000000000000000000000000000000000000..d949e36ba72d44927621236f83ef4850bef7f167
GIT binary patch
literal 1106744
[base85-encoded binary patch data omitted]
z`r`Cn{qgOMLvZg3srWv|`Exb#w#6@P+!P(Q|@z7`W@$g~^xJ5xC9?(FA z*Dp-M6TE7Cvnq$f!$kaL9l&FIYjO5?j2mY*!plxK#$iy0Q!2E=yP}eDD_1i1s@met z`#a!24tB)L*e)2H=z{yN?20S2?2a3x^}rJj8}P!>z40Kn58m;qFHY>#A8*PYfYZtk z!meS1@h3bKUoJKrcPo{KNAFC-H>-@ol^c)6!#9t|^Ux$5lrjasIFX49s%7Ik7iQqT zV`t;M3v+Q9JRft*R^TON>D=oc zOT%+!rs0h%OnBmlQCMg)8c*0f20yJa9uHkG5!<^>!npbrT%>#^9`Q93n@VS6N9;75 z7h=H&hgtBcA=9y{*KE9S**vU`U4WB2FT=rfCU+>@f+>{rw&=?BU>z~JJOis02R zf^n2N1h;5j9P2xW;jfEJ;8AI1@b%V_*xyu+GZj&I$dqUtcc~JtbGi!dQCJmgMpVZa zQ4M@7y*7R`BMuiluY-fO#p7S+;&J$b`ndC{2DsJchB*FLBHp|%3HxSiaDxDywQmjZ z{{32fOos8fgBZ_AXpH;!r>8lNY>LH0O|f=XbNnk>hao)=&eu|{@sNJ4anI;v{QFfh z4t@J44{eLnJG93m%XPr*dg}4hW1VoT=q`A|pssjtc6Z$FTTi^PM+&aqp$~q%urDsy z-XHTD2H3I37>3CD=8Tjd^8Q6Ml4t^RtA18cYfQ!Fdf@dCI zfwe$I<_`a?jU1#!(S9>hzFmS^om`7H)ZByGtaPCLQ%<52XV0UAa@SD1g|`sw`w(@% z_6D6C_YTda&f1u>-_fr!zmW225N0j}<69q!;)ol?aOK!g{5~`c7wap-%-NE7?db?y z=6gBJwJ(pOIz{7S?h1G=R~cU&s=yna3f$ynEN&cM4euLJ9c%41@#9~$@T*U?apZa> zzFMR%ev%iDo0h7N8@F$O+mA`W(~l(LLo3v{`g;v-dWFR!2Xgq}TMjRN%i~2w0q(g@ zi?gn2am5sbhkQf$=sJu?xEtYa^jsRPRyDz~F8XhCQ~cv`Gkjxjb9{5K4tGA=5^n>o zu=Y-C9Ib1E55l(i3*Eo6erS7a@7@82me%7XyYx76d?$RZpfgUH+!c4H=eqGs>4}dq zDfq^ozL*~~0H^O8fMetXagm)vaQVuqxPRd=JTgBGS3f@jrze|m)Qi#BRb&$0^L`3` zH9He)*=)Q!ZaTJTW@43nDSmpd2|54Lt}R>^fqsfQ^m$_!CN_z3)v& zaed|@*^PxL$0wnp>h*MQ_jdH*+b;C`%zkuq!%=jstP3sSkD-?@-RS1c6KLa_lPLDq zDfFcFIrM4B1$3_bCB%%nj5dT{MJ)#0Km${5qJBdP5GZ~dO+9rVZ5ZrBHI;>^VxSO( z6?u*p-+hh_dfuQBhu)&x@Q>*0*iUG}w9n|m`S0jx>0jtwyC6JjR1th8JQ%lZM)_;w zqVzmZ#c+>bA$YX6IDVB9iWB>W;TAd>=HJM0W3D82b}WVa=Z53%jY{Lu?$UV3f--au zNLd`!Hv-QX8HqKU<@ocXa`*?GGaMc$kMZ0X{NYCoK2OiT(eFz|ylizPe9>10XUD|i zA6=^A+-249_wm*7_)<0T{js(1wg+*zx~vY~+pI3`m=cczUF+d<9qQvQ8yaBUj)wSf zBf4Lyg&G%=1e|+9#QPU(@rdC%Jk6Sn8=cVOA(6fC%ngHZTOkdXWy&yr-eSZ4ejQ0% z;_vq*8O;27Wyl~VjFHo;=C;?jYuvGuzSG~HOuv+kMf9hC-Y}C?f=T)v{I_rD^Z)O+ z|MhX&{qypb|A)tUi~iTgY4(479JAcs!Z&dmJx;@aysr?ZBz*^MJ9hfpLCVlK^UwG5 z=R5f4`xy9-xAPzWfAPPqN5^)Z^-Y>FOiWPF-&XL?Rs8$k{QW5Z{QEyIhwJ$JkC-BV zJJvt1fcuXF|3B{j`>qe2Yx(t+A_`ocw-xPQE;=*oXz#sB;o|MTP6 ze|)nx`=1E>=imOvW9}ahImQ3}c>jQoiJSVrKi+>F`G4l4|Gv-F`0f7_KUe>K>(TAX z|Gysp`8w>}=l_Z8@y{s!|H^gPgJ|{oa~=O#UCR8Uu8(i|XBB6d`F~&Jzdinc^NT7q zx`De#NRnaPn%t4H@ziTp z@1`5oBbU_}NDfly23W43H1^wU~f@j!E!91XhsQX!7 z)IW}~bu-ocyaJ^SM|3Xf5vXtqVe#0Rmm92Kh^)0$OP z@51`(PhyP+XS2F@YgxzZ9jyM$eO6U5i1Xx@;uJM&ay~)L`TF(ZY(r;oo;#a3x8(=t znHJ4kWsP~mu0FgcX*utGxsBI%JI!0KdU;Fh(gIT^R?sQcf-16wAaC1U&^5{sm}HwE zPuncm+5Lif$0fmC&L?;b!J__7xajTJQ1l5}(Q`*98f861reV71&z~l$`mYk*`3}+8 z<%($9^G1|;!+@%PIpFJ86EHPYfGN`$*ao))fiEM0qw!3j3SI(~Gu8o*%?>;>@&MER z3NQ{W0H!uSf#q`?#iJgSPt-xCOlRnLZG_4l6qn{JpG@fnx!~cMg4;Y(<0o%kEEkx#x`M~g2}Yw{ zkS3%G_Tm;nMwSV(2HOQG?7HCZ`%>@}78SkbGNQ>7BYNH{McbJqQ8yEa3^P)+&6p@M zI7ifj#iA#9t>{g?AO>`IL`L^rG&U~+0>eszz~C4F-y@)Eq6glJg8&nn4GdkE0sH42 zfN?p2ea$-{YZ4AUh8SqnG=|KVc2IVyFNC+IL22$HXmhNGo;e4gt>G=m42#sN7B$hD zI`^Y-BK?;!v7Wow&@|*)g1*m0mBrj6$@8&tDG!}$Vpd6ytxeMGQyED+4y-U|K&jL4J;6;0{kqM==N(YiNI z^alY^nyC|Y!-tCUc@sp%vFV~7Zx@-}`$e<-s_4_d6s0}Ez?W8r=CWEePLhG4d`|!$ zjs~*g5|DOo1G3-O06h2<7}|XSj&BjrxTz8JPwq}*B@J4FXF|PWK9rtsp_7rPpy|d} zs7SA;)y1S~nfV9)OPF}#&GE{l)rp=y9aXxmYgEwIBgq(bImv&cncBW=j#|dn*7&wH z(J+fgY5dCh8dd(r$X0fKKr&!B~qMYnkI_1U~XVF|BK-pVuG_VNCiM|od` z^Sp23HQrzKGp`y{TriFe7fi5%VAX2`-?!0%@#ZYSQDLFrpRih>XX6uWeI5zYA0Gro zvy!52U^I;z70q9q=#jS(eRcYZs%x2|!#PLP{jpeNPHYyfiF-v;<_XcV=)R~w5fG)X zkw94^7ASVs1@hNMp#L%+7-iYOnzs-rukQqbXCB~9{Q;x}BGg9@f?m}KXss|0+8!){ zo|WsN(obv4kB89yRHZfK4A3%f&;4ag+~tbZF^=1uVDBoZ>>W3%bTJi^lySF`q^enJ z%l$8E!-z5(hf$<)k)okJT8(VkS&d=yZ;jg$$y!>ltkejyOhISXyWs%qu6deORQ|}S zR)lbh&UHE2wLzS6+7!;PW(lqRyEym2{hYVsb9s~iVPideyDF?+>( 
zlIg5IZZ9hhxxh-#AF^=UFZwu&v#zhlDY}p3eBWkpfy=8o57^5&>fGbJ*S>Q)PZ)2i zC+C@u4S1h`czHxJFMDd><@%`!X3=<_ut; zx(@he?E(Jhhk> zE8(u^%PNx~DNv%)@G{XBZ%|oA3rVWTk4eU{qtxzI$JGvJn8sF^u8|E|reR!nH41f6 z)?%(k>$r}!)SJ#Sq03nNnr*D_mV-4{^0HRxE-M{-&UzPxb8uE8PBA={GZmY`Nkg`A zs%7UnYmoxZLQnb_2(8IWcRAh|lFWNg_u=*2Xx{NUhiC9w-u-hY?+DN1<(sbZ#x5Ut zPjqR)uq#S1EQ_W&s=A=uEl^%&6y#kp1y9KZw61Ir9Buat9`1pl`d(CodEug}x?Hr> ztxCBu6fNU=h~|$YMFX27GH2$COyN4wa(kobUa(y>+7FAe$a^9)@`-5p{9CkbkpumO zYJgeG0#CFNm^@j)qSydns}n#9x(k?*UxB-O323UUfSynU^-MNY&0Y$PwIt|mum_qv zSD1y%KtfR5VV1E!v+}0`?X(f1RX!Ij#$^E}c%XnGFK5y8)9+Ykq}r=r3CV>IZ9} z6f+2Vm`v!KwF+8}Y=g#o4js9V)6 z$@1uR67#X0+5^_A%}E}$`A(2V)ltycb0%n*3u`oi+!q>8XjRtTtSRe>@6H;djI83* za#ptNI2*YBnKf!Xi7Q;=%1 z0=;yIU`R<3tnV`f_xN5nxeq-r6MpI8vy?S2w>hY zV12OyFc}2M>lB0j%+k;>H5z(b*Q0UL1{zOgLFw^QXfEl6wrL8QyW0QDnEVsMVH0;Z z^h}$T5GdH6XqdW5Wf4EBOzADDW;~Gu_ZC;{)(uoEClNKXwy1`gpQ_P6n5B{DUD7Cr zz0f$i)MY(>$a=2#q1t2~D=UACmDFLJXG=|5*C_|?GlnxYTfljj<#NV@+qppd{haUC zF)k4Olrv3!$Ej+6=K^QS^0pHddE?F6ydCLi?i$Z4FU+AFc@@w2X%1U`if5Q-yfrIG zFue;C0`sc~)^oKfCiMkX*H(h;eow*HZ7AhqqXo~ic|xFIjo`VkM=;xt3V{c1!MN$7 z;K{v3^V>&3H?yqh%c?Ch2E7=F>m~+b(Kb2&$zopXOW%^7b0 z!5K9_IC432!JJEU30u5PX{`CPV5A0SpCe1)bns)>SZS1_;u|Y{C3vuAnQmQZPmx zrF`tX;F)t(khl6ISR+D2%j`0uZ6w8{WF4Bj1W_5?RP;UUCOUdgr?EkG|J^+zbNQHP zSoBp?Jt_uN#mfU@paGD!HU{>bHo*5J1E?x)0haIAfXeL!fkKArl?cd8i-WTAD(Gn3 z0($&Iq3382&0XrhjLDzzBF#V8z<5fZAYZyt<%ut!6j(ny$=H0L+B-E{tvpjgqf+(I zIOb((n3WeamW~OmVc;~@7&ysVykA+>hfvPYx-4haCvpC-T__d{IM0q;j)^6l^5|7g zUjHY@Op@`cV2(Fb?7+jUVZ3e3Sl)dylQ(LX@$%-Yc<-g{yt~*_-c9CJ;PyWbBuNdCYUqN#d;0^msytL6m`L4uU&Tr*awH>^_(KX)J{4p=rGSq{l zeoFfqg7GR4%qKbumPg%bZ5S!2woVtAjK!35trFz-D93tuSnyOjCpeDZ75v@b2=c3; zv{sj=en}XF}TXa0Jin3h`L?+2jxiQU8YdxZV z@EuWF{#Inx%7EuY1W-IUUy zUw|jMG_YzN%R91hf2Hf^6zRRUkQ1 zZ9V5xGxOs$rt}UP*`ci(h3}b0iqWu)zY{APH;=U)m2uXHG)`G^GN-Dzic`Eg#@W`M z=S-{abB3vY&X^X=s}@D^etrmVd_9?WTW0dcCrf$#{>?m;Zt_OyCvRL@R*-&H6l`k~ z1%Df=HR1;fiiC85nV%&{ujUK-FRKJ^i7kS6_CZ0FdQMOj`-6JKMMPL6M0C$ALH(>~ zTC1r?yd+VyJ!nRGQxDOo9Vfb#7ExLy(HPk(N|TR@o}y<(gLp@T+X_YdptquXZVA8? zMgcvi0A`&MxYOf-yO#!d3@w0dSXUs0^#%6(nLw3o1)gC`s28~tSc*HSZn*~h@HUY4 zd$e5aICdA*t-BAI2NnMkCf@RGR80GsDk*GJ zlKbR9wNE&xmbGi9@vNMsp*%$6uDMI&+ndN*D|BKN>GN2o%tqF{@)^ZO&6$R$bJqH^ zD0e!-`KrC&jjnxPc%*_Pm-4r^}0%;#}Ox*cQg>CmRiyDvz_R#I*@vd zsiLP&2G!>l(eP%b=r>WbAwVwj(YZkLZK}* z0$R??q0t!)jcsc~^PL158$+P|b2>D=%7&i#YoYA(ZYWL2r@qTI7;wCR3~ul*VdD7q zB*8Fso{D)JqV_lHu4cFsYR9j78gE%E?UVe{=oyjqtQ^jIuPtVcH;=I1&pkN*yJ;Nb zx6^pY=gb*pdCTl7ylFxwURpkvSDf6*+YfnpV|^cQUH_Fg_KOgdbD9f^^p1kB)gYRa zOw==+M|Iv-L2+Qepgj7S`tUymsaGkg-D-&Dc#i7oHkAJiqeeM{qR7S7767+87u4Av?fW104G zoMq8ss{LG?d4GV@nZkHxOby;bYqi%rmY1Dg%FDbq>U-qzhMs46|GTd=KB5Kr^mM@! 
zO8vpt>jh)uqk?bYbs@mKqo4l@V&GX7y2f=xUj$1Zr-+{YizzoJLEquXE7y@)9)sz6LGk z=TJHupq@&QR-RW>t2$Cq>$8j0gY5q=Ve%&(@PNh%mZuk!=<(H_fK_8JA4Yp2>skNT zSFB-BE6(Ge!r71P;Ebn^QjH;_{f-7aGk!2{nw!Jx{#ef&#~$QOXJ61bi4~;hoDc}@ zD#)i~(wJB#NDB@MzH_Gp%d%Gj%nA`r`8BCW+eg&(nnLgAimDPzMDvN&BIDXAGPx&3 zFL@x^i@y_PBN)KMMFO8)2`qI*z)WfZ;S?ZcW&(423-ExM!1Hzu zu%vDU-VcX>boK)6OWgvpcK3n1^9SJ142DWw8JgG2LC5Nf(34yfD(K6xry$6jX$&3n z+C%UCZqV9zI8-c|02$XLDEpla6+34`-P!rj;aminqbsQQz7r~Z`=E?|N;sG!7H35s{*=M=h$!+KQ@qLDKBMeL$N7vsBZLS&gw4{rn{D}k=pVmW*cMFY+-B6Ws2-?mZhPvlY%B#*m zZ;Ol2v$z2IvK~hSm8I!;kXQHX* zq9k)t9kuaZo?7}kQNw(ZvAPbES$V&GEc4(yYcNM}s@o>co3osgr|svMF;6)7z7%g- zP?N?@8{Qh2%zMUc;1wO*ysG0PUN@5yl>UB#YMCUc77X6qnat8b=R6V9YbB`QB3f z9R^j4Dnod)CiKTpT&_2Q3UxA+ucrON3d5kM{RG-a%z+M#73#TMdcP9d)^35!4~k1& z8Y2ykP@V3k{yfFx$32?6p3&U(4a!Into!TZgLyT%KB)|V18Cwl&_8x6+;m1ZFCUj_7S4u@odpB`!<-7t65;#*A(d1bpyJx!|1Go1;8GQfUogp;5Y4| zI>HT%H@(2n^C@t&eGB|ugP`M=oW@B_+Pkd_tz9^%Uz-fA&D%n0NN1=oYJ#4^@z9%- z1ol}CxCZSloX2@~7=atZQ8nOd5>O6_~WXl&1VY69c)HJ+yRS>w`V7FJtD`z1cs zbD=sH$W7vy!uFiKc2~~LW^kUPi#g^&J}1pA;FOoYa)0&>Ei0PvCfJ_$oJi-b)t2$z z`xkh5$V;A?@ssz=i4@#7Y6nK?=$gEGt&f9Jhh?-Ccq*^fH}W2?)O9 z;iB80 z=-Xr9c-RXV8cYL*MKh=lTm%AVRsntJ4&eUd2(VwGGb#hF1HbPF<-x&F-=`!r&!`Ed z<%!U~s}+=5>Y<};7pQvF8>&=;pl$p}s5+2Az4&=hU$_=B4QFJJ5LX z5i~{xpsGn}txZ=&s|#wW)&Iz#adP7?Ve)6JsOp9!Ft@s>3=O^|d10p7|BlsoH%!zR zr{B~_zl*TG^^vSTL&qA{tYw+YZq}}N!77yDoV#-+PIU=zOvZ3dk!hiQLo26%D>(a` zV;r;PChe1y<9+lLi&lMqUe<9e&y2KCk834wJ9C`ZKX}8tm&gQ9Mt$mu06|i>7bL#F z5SU{Yn30)+LA6$}CY}`RrGE=LU#KXZEF~J&BvP&2NHnIUh^CVk>VwP`twxGZ{tD62 zd@G$Px+qG+ivaV05MQ0>VL*w)(P|@r+l+>lQx|V9Kz3w6{^We^3#^g_2{_2g~G^(VH&D5Up0kuP$ zsPUYuOXpX9v8r*?IscHNy!>Hj-Vrj9SD}s6TZt7+R}uterGbJcZVK&tED$_pRtuK% zXQ@WNM!npZf~h*y=LNk)-Ks3n{&^1740ZW|pzdW(~V~a;ntd)cdN&`+9Vs{lxRUrSu1?EyD!)uP&6Ar3y?z zwqRqZht=vB&1-K3c#--g&nk+l*|kN7J(>0ZyHKw-L-hD*Z!mU)Xzx#Zf|t&Ve*OXV zvA$AYt0?f7tq3fSssTlM9lG`j!0@yQ<;z1TH=YdqVL3p0J`Whka;o3g0!zvsU~5Qc zw+sZ(*#YV)dV!>T0F3%4w9oz?IIb3lh7oewJFG}KfC8%CG^Bk)4Rp^HDCbRvs(qcP zFW(#bOAUu!_h`u6nE}l!XVd&OANr5wLb+@$G%T}Ijd_=9pbr$6fA6hSjqp0kYjPD+ zI6KKKpQVP|nrrkwc2W5<(!Z z9IYpn1hc%8;HfcI(1%VJeCs#SnXWT}aUCt z12xvu-o{qZ7qXA`rmoQb`VUd(D*}v~Fgi0=9@xet0$lc@ei)mkK1(1ht2ZsI!fnv#d>hE5qGZIf}etQW#)4l_H?GUIqSqgeCL_pQ(C}>Wu z0GW}Mq3&HQtuqavPXN%D*Mefumc~j7WG<)DUhg!hYi6Y$%W7!)L^<31)BhGsHm}~K zY*nC(mnOfa^Arn{%o#n^QbG|r%ff1u{^c60=7q+c--49}&1S6+Z?K9|l{metD`#A| zfHO}2!ddTC<`s{Vc>C?9yrNhp?^#Rdh2DSQC6h|fk8dH^&yN)tZkeE1vXb@<572sZ zk=7M2orMUdGYlmu7D}pfsK$AgAub zC3k4=_Zh|WIqmtfFRSJW<<*u^L~VS!2{5(CBx6)~MdZ z(cIOEwI*(69lvg{_DA)pcR!3%Syys~lqYnaxFqlXltlYfU3p!jT;ALBBCiiEB^bWf zpuVt5kUK{R*7do9WH}|6UX`MAa|)_6H6pXWiD-+a^QY6&slT2>_0Ce!J$apI;u@t!j+c;$&< zyuve+SM57QV<%Q%=#Dgf=wLy%d75CazFJV6bWyM7p5T2{T(qsNF1j~D(H5Ib=Nt^8 zvC2%*cW#BKs%WRPk4Htxb3zQvzb?u?KNjsdpG9L-S>XAh1Tqyzxo{I;K&^qNQxED3 zkD_ZlmHLpGGzMk@d)@hT?H2?6$6Vk)yb{nk0ASjA1gJ(F2fkK!K%mS+>Zb)!%~^(W z-s+ThQJ+Px3~Mahr^_cdg3WFZE(=BWJSiplhtOydq~2`*XVPHagdR zmh(P($QkE_)47QHyv03=&U7#34LN`C%-|sET~-sM!+_4$cN6rx=L^!^&2%2ZEy&(P zQExI?v@}Q+jZ5j=$%Lrf`wrAsMZlgNp$872mEC+^ND}i~;R^WfPAE?k-VEB9k zC_6o&xh??wL&{PQrV`cpvCw^h#tBoOYEU&)J%!K%no(SOKwHLW>JMi?CU)t+6_Y>V z@4JJkBON(iM`{wO((xHdrn%Ybz{-Hy-yLdHE3!16_>~$=8EomWP5}qrtx2k!kmfKxCN! z1ugqQ=6nG(H8O4a{Y4m@(E{;rpF+RyH;6ya_{NIioc&HU<`a(QR37!}l8{u-P*Ee-;yQk3h4%d)3E= z9hJ3jQjvBDUbWdFJjQ zjP|TEqlu7be+M7O3%g#37*kg! 
z3_f{L=#}?ESfC)rG)*mtKf`*k!D$e*H5}46#6bD4bmqI~f>`k}2n(oyI`3wv^XP@H z_unD7N1XFhkY)_9GUxYOhvWS$II$E*<{wVv$|IO2O4V)EQ0h| ztHJE}Hr5j-GoDMt*pm_nF06un)$7popc&E!`X=-+Nv+pOE7E{N0N#Et{+uOOYD}7v8 z#~-!>A8on^c{n_G@_ z3HeQ*g@enO-f~u(@z{n;-**Jj6+bpp1~Kh<34|S73tsoPL;8zsXqs^d?3F7SKhIdY z&zGQUZ5QY_F^@)P1j`XMrhn;hVHfo|uQH|w1luw_Q^0&;H^vP4Fm1-4?F2311~12Q zT{3K^E1&t58Aq9yT+RiTU*uFKw{c+-FS#K7ZyaOV*o>mXrw7~f{dXxJyd{I3T_^r0 zF=@FiXX*jzJ?=IRy_6brySL=q%be)0wgH&V%zi zoVyaFTvYDqx`?%Sy7YU@VOoErO9AG%#Ls4Y3+Z6ZNunUkHcudS_yS{F z7>~L}Qdm={!B}liroqe*iZr8z=}xtD)h{ASL6>kdvveGg~0CW~p$OsmOadcgczF8Jgf##1ma zK=&7?@>i9QS2yCtioJP#+x@KXy7WJZ$$xUh=i39bpqo+_b=?Mz`W>B)!3lGm?3Wfg zmG3|8BvQTOB+C2lB&J~DoG!l3S^vilXMS6z^Wd5%OutljNuR&NrR!F)i^!nXMI`v+ zBGMcqD9DNz@GnXPQQMi$FL96UXo(Bs1GR--z5-$YoJqo(!TCb5-#dlzD_AVnwzFNW z;gFu80!_c!ZpqY1(7$^P)AkuJF@^17?ahX$#v`Ee`5M#q?}M1>Fy@Czb9@imVd!C> z|7v5_<2p0um*?spGX20|k z*je?7OMmp2vzL|O3qFqIWlXe~XKBLMNRH*j#*AmP;$*(SXBP8{xA24G4zM}qf0HMn zLzbF84~#MIa!YcEm)+~w)F@!g*Al1J+O1AvAJd%5@Ao-%iL}_RgN<|8%8kzbidoL0 z1>c;@Z+kF?dq3lcpSXm@OE8~hk|1bGh`@g9PJ!5)69UoZ$AYGDVnVSoEHCD}34^9B z76v_w6V_}?7xstc2t}VB2vyRCf_)tG^rr&!-<;3(7SkbEvg_GI3D{2X4UburWW zm+|(stNB6GNcR0swv)=@;giRD;WPh}JehZTw`s3YjIEetwms{O9Qa4^j$)$<995h~ zJN1X9JK4(?Fb?UTlgRggvtQCk7yWQsmpX}f7m?W^7g5PemwrWALBF)WpkUuN#(C`) z1aG@67?f&b|4ImhJ`iEe+&rP!<$FT?hku3q6LSa)WBOL?V#dg&fXw*Az)M~S8TGp? zKi)IFYY3-*P=e_tid=l0J}1*)!Pz%Bab98mT-V89PR4mNtJiy2pOnXHa|z3Zb6ok- ztDL^Xb=C`aGQQ`n#Wj{ReasoaK=?c@lh?zzyFiOa$*M`X1SZqD>>}!D&z3jY5KbqrD>I#I^iDemsrNVs&zI`zA zF8(~k-(hEeq#Tg* zn&ijkzM2i5Ja6U0W_8AyMh3AR?D@Qz=^DOHf_e9`G0cbD#s^g;@Pkq*?B}U`>v^`P zb?E?Og!A~C7svQ?8OC{8G7fT7(|2o=mBGv;f+raW+ zC#T<)!!^kr;Z!8q3^90)^ILO~6D@Dx#17nKz0e~r{mC;nZwzo|1wXmqX6BRl$nfcY znv5MXW*(viFSgf#uL}^c{)p$(7mnve20nc7bU)^M1hINBm+#uLfX##UJ zw|~oKj%VBXsF-A4tR|iDV0)O4wU6&F5%GibkMQ=lOW6NE!1OA2w=+yL;au4MEYq_r1pNo!u|0(tq3CXg zuz)eu@dFQpUIKlnTOxoaIbW!mzMSpbFlNO!7YdM_|IHs6QzXkZ#b~e@&6MM(6IOqx za6!-Jv7R%W)BhUJYHb?RLNXaARKNwjEakk~YB;mT%Ush~=6~#OXMN9Gj#vB6v=4Fi z8A~vxVK~!1WSP&T$j9$gW@iZF=om*7wb+V}KjFf(cVP1z&lj|~GLDPogm5a~r53;k z4+imkNH8Cqwt^QO4d?r9H?w@&#@N$%o-a*c{zww<7n902E#AcsPR-)`hve}=QwrJs zNd>DP)r_IIz!wx;#+n89&om!&IpZkCsXFl)ElyDjPqI4; zrn0>&b_YRdioowiGhxQCw?c95-kh&*p(WTv+%Ku4(=W&c3Re zE7!lq{KQ6Xa0X+jcQLP{_&4K8hwy`Mq}V+UO1vmQi&vSh$EWWwV!!vMyxCq0KKQ*Q z@AuS(u_lg;PZaR_M=3l1-FcbUY`&|W#urpDHX>yP(@ADCuWBCO@4bxGm-W0^5Q|Ag zEMH@_oy9eo59>(h`@in`|NIEjXUtK3F5Ag2U}xGf<_VwRTvxfxn&pxwW_K;Al;(^di zu2U#BC8LJaKHjTy%^7iyX}vfZ0h#?>8VJ?brxdG_l+yC6(QV*AhIbU6J5W=unO zm8W>;lis}AzydJlW%RG#JH7dERHkz@*{KD zXFQ)T|FDQ}3JvC?u7$F5XDweoEu1fJ*u;zIR$itdhH(=K%wyci+Ye>jyn6=kbu^QY z8g_sWGsxrP2afT{qUO~{Aw}PhMCxlTCo-q&Php9u`WbE%^X+CNhW3GYqP)|%4 zi)zj5FK}jTDytoj-1+nnFIImiv-x5M(+?N099YcGoMo)$uzmM^>-g5&%>St0%!mD9 zyQ7N|`1B`;V+^&jr#yG|DJO;1Z%y1T~x$sQ4X1fXAtY)*mswR+eUNiWv=|PN5 zozH5}6258J3f^mGIIA~|QOVlQoB1U0Uhj6WGcT3Z^K@R+wwEto#P$})v6`ZLfH&KI zglY3-yqE1c<~3g9>mD}o{G~@MPyYT-VzT#z(5!RXF|z`xXb1as^^Sw9N}QUi*$(ip zNSC_q43`=|c|lYtn>)@GF#YN-ocMvd6)l!l%j7q$|?kILw ziXks1Wya1VrcIR)#yfhl-+#t+NzdR5k{0qo4U732<7JFr31#d<82dhw*}l7S$2sJD!R+gOvXsWeEXloiMXnHXIhhJUwJX8wVSFJv)V-p2a10XEA>Fm6khH{+D}nh+Jfb&)ndjq80&{4 z*na$3wvYW$#&vURB2n0rN8FUFiTk-5q%`#r3D9mMV^4LF9TR)Vk*fD(ZNqoceQyXV z38nCSwk&ElDPp0&DvoW?!NS+0@JQZheC=+I-p1A#JBr;Mn&pU6=UkB21+4i;(NNVJ z=bBEzpfQ0sXT@yvc3*~Bt3q+#xHY)wZ8#cj+Jg1P?B3*O+t5vUI~G(XVR_DOyfR}S zhKjN7;Ot3N_RI(Q7&AT8a$AvYr+)imRU+vewi}e(4|8Aa#ZP@c^dSltQO7r zq(k#R=~KVkqp1BSLppt!5uGbzOy9gSrGk%U^z=V-`bEK-rnK15MfdFJ@VoXj&Buu@ zG34mUyF9)A(Tz4-@T8+WC)3F}n}~qDV=Z>KfE7M*@Uc}KPQILo zkp(H3ygm(+8#D08;(d6&JsZC|=AiBLLpYjUBwVt)09Oc);?KMyj9OZX9^qy9+2#bk 
z^{T?~>1Qxi^*pLZ)}m6vRa~_38jdq*LK1WjmmYhB+s)fCJoqU}&wY+{$}dpKs2iJZ z^y3lvFZg2jcl7c6gY=LXo!}u(XOEJgk$%!N{)Q|a{#K4QD37H2n-%C^XGO}*RHnr* zm1*21b=qvCNt=4K>9-|%6sC=$|Kvu~t9fIniH$KGRcS&iluW5vpc&n*G?q?!YDG7v z*wWA+_EdSjfX@9Yq(zyO9$)TGm*soW(7&??XBkFq zwD8*BG5Ev91|!xupr6nMmuI`;+G2`l`@HZ$;3O<{_Q%ITGx0{)d|dfrA*y~^hGVC% zMX`P1XmxQby0O^o%u2w}hMjmgI30gEWT3=`OuVoz8^aS1;MuB!nDsmtKYloj5!;X8 z$fP1nop2oY?J383Y9~>z`V=m|bRIjRFJqM1HM~)D9gSKWant^0Jm+x-Cy#7F`%jOs z%Af<^DLupPGjH)}>PI}N`5kxU|Hcss;#6NklG+##r*o{NXmqtSUFso6x9pdr_S*8a zDt83+%pXZhS>BY6RiPogIDMy47zC^NgW_!9M&WV0^2Wl10)B6Libl|=REj4u|`K zP&-U|H-DJc*C-lf zYDgz$kEXe)##CF)gqo{Z&@J<=sm2*wDlP9wH9Vc^JPjc|+0WC$EuOSP-Hb#Yb0*_s z=90F5F@)XZL}GUBCiJ(6T+S&XL7OXx*7QoUMWco&Ub;X|6kQ?WTGz> zwwvs*`$9HX{3M!pC6Mot$DnD77@ehpcOI$Z#14IQYB#`@3(c|5-X6U-5Ij>h4ows% z;J3HFc<^}uT2%yLiuppc-W|;9NhsR9S&5qVYjAt*TFl6b!rHZ)*}EEX7*Lvs&A!R# zEw&RqOj9uM=Wdkt+KZ}BMR@6c0ZJV!!fvfn^!{Cj$#bi5#jn$N@ydBLYPpCmHka{6 z-(?KeuE%@&H*tAaBZ{ARfadrZPewe$8kcuiu>T`w9Q=YafB(WCl0)b>O>r8hJ&f+q zlAu~85_H@_Y5MQJ3=M3SqjhUWQ1|&G>H1_v>ei}6z3wYh!3kAr)u=(EY_#ac+d6c= zwJv?7r$>`Mj-qO(N7FZc#x(T33B5eboGQ+kuA21xv|ubQ zVYNS75|DZ#o-!SWU4}lW7C8-<+0Q|jIrDLa>>{+8w+y5GL$Sv{3`4~?pw8rIbRscm zB^`%XFU6yKe;>OTuVoVRcG^K&+=CqoQrP&v(sO1wIx;V^{&ez~+ zs?d|3-ZhdudaFSu9kn8YpFt$}?*el6XAHSEemnUSmP&qB>?Pr>&aA6DL^Pm~NM@Ch zmV>9sSC7jC`frf6^-Y8;en7Owz9eDm-VvRk-z4|96#fYsf!mZ-&~>5~#xK>wdtHY3 zD#{G&9Bgp=2|IKdD?n*6VDIrvM(ssYuzd71Jg+bl8&}Ll)8ct}J~|jhx}jJdw+5e2 zUXMXLA~9bj7S$}bydUA(u6A_k=s;cRmnie*HF}(Tk13}L{%8G3!S9IcunPpzJfq?_dx>9*x)GsGVo>8Rf^fpq{wwuTe*+<;( zXAw=CJmNXKkUUi`C($>mNb1isM0DdOiMscYh?cz|_WrNQTX8X*{$Uup-jl%>uJSm0 ztpYAh(8R_&dbstT5x#k5hGtVOkeh0W8OhElwj1%m)(L1`IR*QcPD2avAiN_s7i(h{ zV4>#{)O@o7FWp>+6I>#&)-MY6^tYhtqc}|El5o`hov3&y4M!?u;0rDbzdt>Q2R7&9 zSwSK8`V^s{uLRrXmEpsVO8neVjX_J#Am4BqRjO~{R-Jn|8}DP?rYE?py9+%=zQ%19 z{kS~;BR+ih8HeB?-kkmurRM%YP#r=)G>FsfeYbuWJ)PAUIjc^6WHjmD5^efZUzaYIV`tcdQS|Ag(bR3UF}2Mzp?}23 z(xRo7bnbF1dibg>O;zUTPn`+W#9o55t0)o&TYXYK&W+rg;6u8*XA(XCVA9hbN8bDG zAlBzI$h&LVq@nZx@p*Tc7_}9WMV6(+KjQ@Pde=aT*WM+l*-kb*dr2N$`aqg2zL5ut z!|`LkEUufVf?oTyaiz9Cz6C=ZoiG}=H5%dQq2?H0X^n69I^!~3SDdrg3)|xSaLk

hJ>Ot>;H~2}uA3qttL&5xyxT0$S@8*2NUg@8>``!>by zs0_^vl%tQI$Wv`zfsQCvq*W)CY2X-DsvDz5+h3?t-xf_e&03pYnW#%|+v?HS--dL! zmNE7DWkm$#-SvN6C+ChJrAM_%PKzB`-{eMii6)TC zy}=~mYAky%HJ)5rw}Z&MN+U*tS!7b4(4Vh`a-qg=fCDG#??D8{gt$MMI@atzvY3b#D2!RxcnqQr-b*j{rL zFMhv;@v-;uOk4*pt9pep(mkl6_8Q;seS@!+-(ujK_qfXU6ZV;W#g!G`QTX5=n}3GV z%-6%H{$WYF>D_SptANF3rz}l;Bu}+06zD{6B^oeWnXW#rLj9JgQOSIDT6I;E#+vC+ znQ8je+slCFn~$N2X(n{~D+}sBZ!AsuZb^q0+0x6?9jMKS2~_XnfNT8|X%ewToBS&@ zAp=$}WMQNm*^s`1e7&%qjPBk)`Y)hDhFy#yMf8 zcr?upcdu~7@m~cPvdaDUN*|f~S<$;4ag( ztR6+6hhj8pFW-i*nvzh^kcKktnP}{J00TefV!l&84&Q$id&5ewduSO>T6+R-y{yJd zPtKxE%0vXXEPbg}8Lt z66~0}3@dxq;QJ@xn7JbwFSf+sgHP<8B#qtJ_GLfXo)Y24i3iZqH4m%e@-fcnC?;ti zN3-)~sJ;6H?vX!@lU|=e#kvcaHnRbf^KPTz&lZe*_yA=J9;1(12Y%9jiAIL6@x+K; zER1=J^-Ay2)$S9v?){8mOTMDwzHj(>%OA8pC{Ald!)P$8`!nCk(D!z7bQPOj9y*Pr zxqn7dnWu_W*GHAgCuvZ9IZgUuln$*Pt51!-jiQ|AXnMZah>o3PPJ41K>E5Z<)G*M2 zs@?IV%XOF;i_OU)g(H=^I~@Ue%vukPgMq?ES=#*&}edmm*dw zsi5vObsVRnjXNauQ2e+d=EWJ~TUkpalWj2Sq7!PGdEmxt-uU5*54NODLGN3B`1#Z{ zlq{czyH_v9hbXpI~>gafWKM?Flxb1%y9pWMQ{J%)n;*8wnUP~&y%7rcgj$g zp>p(Us~lbIHj-}Lrbz!LsL<#!>h$M#b(-U$MGwmA(v3fLsYaqc4Qw!=Gq0P{3y$V= zTd^g5|Hqoj4?56)f81&D6){rQrbdL1$B-m7XObQ{p49D|OU@i$MH17t6N8KtGBZ7s z{4Ey|la5?cv7~@JwkjbGQ!9yP-$fF5rH;rX+#r&=Pl^2YE^^)fCz)v}iR;UxQQ21x z!U~#=~QvLz*vHc zZyipdO@`_=$kXVFBdKYH0{vO8Ot%-SPy=3#zUWb>fBdy*@)sTY(m;>Cs?(<@q{q+? zQ_bkllNPjJ!HQ0Fl4f3p!2&)q@QfJa!X z)s7e6KgD|oI`QPNZoIvs7fb8kp;^O6ymII>_IrKBBbNrzBlj0hR}-f((E1EK4Kzj-cCY6{&@s5*2Gup_x@`G$ccvmRf1hqUBoj(LHVYsY;ioN*mCv z28Q(eL?fCTYEBSfY-^)~5TbC>9;KO%~EJ4lglF9}}PPtt{7Nzm<~_;=ZG z+-|9ghV2??a84IrY8j!VofWRNvqb|pJDhdH9urpsUUH_GB{c!n4E%7s`z-tsG8?mW z=b-P{rP%s36kn@GVEC6!7^}7oU;D+QOxzAMTe%ZOv8kB+JRM_62D**PLWe^+xMuia zJg;AhxZyaq7oEVks*@QlEh26WjB zL)u+0p9e86#Crq4wA-Y`WZh81IhAM~^$D z7-?LIbxTj<`tGw>G~oh9J-UdSHeSUI%Wq<`VIu~|-$5UT`)FMA7^}-!ZYzX`lqpg3kt#GRQjH2OtJBRAnsmWoExMsthkpO9OOOB1qts>;UFSH41~VUJN0BjY zi#MSkew)&uDs!6ueJpi7W=XI7u%dH`E!{uHnF_55otcl+Idc-zG&RVV-lb&wkQm~f zc7OygJ3?I6R+As5SIB&qEN z$MA(sIWEvWg<1jUakxz#7FORthxfN}u0j)@9NCPH6)jj}){3uRJ;Aj3op>bX6;{@~ zLHDVBIO|hC+N!)q>8GEtz55HU-}MVG{1c;jlZMg(&tcRtTY?%s9!`q`q^Seb=Upa@ zpfwE&w1L$KcxW4TuJORo*}?Xab9-0W#Uumg4f<46ZW1XRL-r++rO(yiWJwDHSSY9pgc z_GHW@@$8Lo3s$?c!m7#PN!N&!;3gR^-a>}1ZX?0NJIR$1Z^&H9AvovAFsynciI-KS zP=mc&x_XE*#^&v)-Z<6sefxq1)-tJn-!d<1PL z7GqCh8EP6=U|IS}^qqDJQ#$MLMg9%UoY;sJHFxpSrTdtD=`mW-PK?Ov!Z!0B+;9FG zhsE|{clUdg{5*i;?tH`KNxv}Q%MfZ_GL$~e8b*KZlc4wa4W|RFhq}-pLkD8zXochm z`ggnnJ^MhBHpr{e9tjQlv_y-366w+>etPtLhyh)xHk$5?F`_GTnO@RiN?m2m=^bwi zy3f#(8dzFWn=3Zdyv3eg^>?7pS{|~rHIY1Xg*^Lyog7%vLasNo5%>L_x%96-TkSbo)p>>c4*!T@^Bhwlx~j`Y;n}vB#7q+E~!5 zK4WQ*v^DKKVND0$*wXY=dpcRxkzTBHq_H(l)cd569u4HE$!nfom`3QFRUXt$W;`AD zVgfavJ&Be@&Y%gg+T{BBWRh1@L<*0eCjz%xGNiMaytC>gf{iaoz@a{J%Jm~Di1|+5 zREpuTWC=TEf1sQ(Ia>@l+EQ8MNE@Cj;@y~F=lfuj0h(8ZA|2RqTQK%f&7dAd^xX}ce#RUsZU z=8G4#zdVUHrcI?`;enJ~abq6u0g{z|jz}2S62ae_Wc1r65;FM(@#^a#0r?+^Q_?r0 z%KEAn1u2|uDTmfQN|^RV9gV#WP?{KHR=D^Du=}>6xBwK!+Uw-FtgSR z{~Aq3*~I~<K!J6k-X8Rha9q7gOlzxoT{D8$%KcQpqH>|b!i&qVYQn?5T z>Yy;3O74@T)3oHN%i$5U;tJDzS1Qv8F;&W=I{h?HhsJB_(FVm)v>?`imZcif#Mz_i z(Rw3#@1+SnEN)Ky6D;UNVntJLT2be@Hgw^1dpbhSf#wrO>ZQp%#WIezc)3!q7D`JF zjH5dzjHg!)PNYh1Q|UdGnbch=iqwxfMiSSZBSWih67#$FNbJHVB)+zr3|PMOV^4LXU}PV9`}gC}_201XQf&Up9wQYQ=gMYH1L%%-F(=TKKgD>hslkl72#I&$TurG?3OhZRN7EE>Od!FI@2jn zIXdYi(oqub^l2yuxPk;K%Z~AzT4RoL9rf%u#5a!u-R(#wejM7DGI3@U(#gHY7UZhuf~K z&bVX!3=jNyX*@;?eNg9%KR*368}n3`pqqRM-b`JI_Epq zDd-i!2FVZUIKD9xuZ#BMmRu1YH$Q~Q$MdoCSRvNtmEz!si z7r)}GiQn<<{fd+`V(g)iKb>lqfu$vQU+h;$z?B8|ap6)*39ks8*XVDVyc%KXiZ^=V- zx-v*Nsln)3Iv_q%4+4gdhUH4e@U+nshM5CW#UK_+^^wd@J7 
zk(&fb*C)f5P+zDz6#!@62ZGF{S#b0F9I&pR3t_@Vux4NhG-!mtDXo?8*=#lVY+3`~ zriR1fj}b7XYa`T)qG8UJ&0tp@1CLB%Vb`HJ7%1BguhZiJ3lpGPHwl`zCBxGtJK@N$ zoiNKh73#OLHx|@)!O_ND;P@p2LO*9h)t>#Zd~X&wy~zdx^#hRn{Q!Iu=EL;81@Q80 zAr#Ch0_%sxFnF&7ByW_$jJjh`nR^^^#+Ad0LzQ5VUIlaPs=+n(6a;slhPGWb;KAOb z(f)o8HXgYE8-HB_{|%Sn<@3v6UR(#V_pX8Ig$D4Ax(!waci{ZzhcJ({LPcO3C_Hb2 zr1o~mb?AVc^k-03^%6R~yTL}e2j(s7h4hjSpxZN)GkAMf_jZ3NfaO+m539I6xT;JJ?g4vpeKK9hht!5JbS0&L=cY7B zWA_eJkKP4?Uw6T}F&W@HdoQe-o(bKbGU1kb7D&Gl!7S5*(4vqFJm!JijyzBvS^&;@ zg|KyB5y(ZCz_!Fv7^-v(v{)=wEiHrW!g9EJp#mDBPeSWh_SW2;Y7oYqhBq>2Vavv| zaMJS}7(BWFOV(Tj_lQ~$J8>DV$X|uonf2f$Y=91_8!+MeEf}JA2ijd8fSzG1yh(3` z?^ciD?Z6YL4ebB}_C{LA)92t9^#XQzyn-FuUc>0Kz0f`=&h5X|BwP^wRp|8kukh0i z8SuF~0$xa~!mLzPIGMrjUT_=*w`Lf@>vCgwV`mMq_Z;Awr3-|;=HPv#E1VzZ35Scl zpsj2I%sw*-ME-uT$$S>%{+SJzb>_j-VGH0=&tho&xD@IOLP5fE6|CJK2Bmf35H&gi z!k$FJqlPG$`Ftx(JQE8s@3+CwRolVeG#-k)5@1j=3GTKh!G`!9FnnMq$Sh5TiA&Nz zEo2w0dcF%DzuOI|?HO>UeGeRz$%NCv`@s3ee(;UVf|QXtP?nGb#?uaf+NxX#u{jLc z#(6M*E@Skc7s7%|MeuTF2}FD>1(k$j&~I4=d2MANVS55XH7enicNJ8)RKtds)u2A? zG}}+Dfiq!e!OQL(C}~}QO|LIN*5`|Gq_GybBXwZyQ4f1hUWdOyHz9FVBaHrZ2ciV` z!1eik;9ood9gRm&)BPCA?mmHm#105}{0t^+?t%j|Uc!iOb{}8xTM+yd<8;=X6Dowf z6%Kv>Qz#}o3|4QK2F+f1n6Z8&#LicRcMCLN^?4mgXZMw)^cq9EojLT~vxjnBCs@AN z8HUsY+zEAu#ULb9VViSy67!8$oHv_kr-LKUi3#%OBpudluRRi1MT1PyLyOjXbOOn9o_YNqG zO@RidRH(d{28(rfL+s<-FkWX57`W|)&GRy$*J2-3i}pdx)cvrlH48j1WkdD%9I)7Y z5c(b+g2~Sg!@W~SpyOWw?0s_-?i3Zn_@||yaqT$h3CiF@QyCn;dICnzu4H#fpJbn9 z6-c*K!}tfM;Gx+W*lAb;vH3Od``B4{AASK!H(UZpu7!lO%b*Zn2S2=6u5?@nzg4&3 z&D17%(B1-<-`s^gI`?6r;1PK3ZG{m1Coqb%!_wPNAx-W%bgb)wW8E*HIK%eM9uC*a}-ZX}k zU*_=Wr3LssvV;vDHt@C93GV#`xEe4Ho~5#Ps4n<|qeTE5NehIJXJ$jixl$A@nOhxSd-b2AF6CvJw@@3w&U?pPqPaWJAa9)286 z0E=ZwAU-DSqYcgszCB>HE_zO;UJuW;(;3I+RW}VQn~=c-m$Nii=dZL3pe*&g~r@EFtDqK z$!l-GP?y_~x~&<~LR(-<&pjx~dH|{+55cLc6{=Gn!^mw<;8|@4ct3j%U9)@OcTOMd zxce0r>`4^{rq&5_es>CeCv*#aS-oG-^*|?(v5O zA7?xlE zFk#Ie&_A#rmW|2+V~1?WI3t2rh6mx;j9ds!%maC|Be0|M2rL^>2y-G!AT;6_`01BH z?kg6Tyh>2>ItjkRtH3v)8q7jY!ybb(P&#-9mIJFTKIh@Yt_$!ojn(6JIcNk7X&nhE2bI7{OARip)q(dOV?g)MSh(VD4{N{)de#XbqQ(t6?|8uP zN#1bj^+d2eKN*sLOa-=s2G1V^!pK!K!OdefbYGhb5+j1a?@S1!PY#9pvnwIw`x=;1 zv=&CLSP!{FH$i$;H2jQ?fs03CVYcZu=$pA6#xIJ8@`VY|IwcXR!;_)n_72#;VJFxw zN`am2sc_0}7nFOj+}V%;(RF)Z--=9_W5Mdax(JLO?*h~tcM>74e)8j4LE9j6BH_MLE@4|h@R90mo~RRZr*+HRe1=PXSaf8(PJ>Y z{{$>vcEZBf&mqRB50a#RfTrmh;WhOl;o{&6LL2uh!cfU;!du=ALcyiG!o~j{38Q*C zg{OYM7M65;5Do@^6~=k|6UNv`!Hb8ou-Z%+%KwkO_l$~Sjn;J$vw}Hb&KN;OMXYIp zseYI+fnrWwg%K443I;;w1_VJ-K~WSFiV7$qilRs|D`3E!BWA^XUzcl-amE>E?{UwM zd(Zi?*BG^=yQ{0K<}=@S&iR$n_v%r#5lv`Id{YwDT2QIL_B3dk4ef8*nc60Hp&oM_ zC}1#=_uW2JeHBw&&;Xi&@7U{g+J$m14kznFuC%=U1X>$EjkX6)r;IFxwhx^_-=x_T zhJQyFHOAE>9xAo5?gOFNIztrBZm^G#dFljml2O@BNyXPJ14m zp!&;CQO_5d6zqdqvN?;&j?5;tHix2H<&xc^a}?6$JhfbVkuG^(rkHhC>7u5P-tNCf z){k#cnEf5PIPei2&U->78$Zy)@{6QFOH9(Qv0Ef({{(4tB|lrO!*O$+xf_ zjdil6qLj{LF{uk(FKth4V|&n`GQBADT`y|9z>zka51>bCK`TBCA(DsDmhW!(PLuJ} z)ijy*_PQQ38iXz7@x#8-T%-Ys9Udg(`w3j?SRek-u` z11%*q)6=Gddb+mCK!Nj2l$9JrQQw1Uwq+PSuvtUDbJvmWib(vUnDw-%ZxmgRkD)We zV@dh4jc(1^K?yOtXm`~;v>+y)R+dSi83Fq#booK5T|0^DIVV$o%_Fq`ekzp;Nu%N} z$I0iyagrvUpyG%4J$dN6iZ*4^sr6Y@+$)=!9LT0ABl9S;F&CG3O!IjDK%YisF8#YoDq zi>9V!Hq#?j4D~hJN&^eG(dhAU6hC<+%ZK#l^m}SBU6Hz(v{FW1 zr1bp24k=^HK1o$JQF_!kO>%yiCQUhWK{D%oM_RM;x#aKgRkCpZDNQRbMN-4^wAiUK zeU7L}pQ9Vl5^hT66IxQ8;nrkPWK97F+mc9XN0++U(1{0j(OmYsE=_?tw@X}!s2 zY(GkxG=MgFJJXSRgUR#32%2T(M(-Amp#bZ#KT3J!PsWRC%6qM$TBmh1Yq)`i6&vZ+qG0l#x0(tbuA$Hk;bb;2 zk~Su7pcj5oRG7AjhK$-wXJfWf&v|jwBW?%1tGkmNG`na`#XWRBZ6BpZ@27q<4${

wrjd~6*qR6wisbSV*@?Tg? zm1a6fIe}xP-nJ&`9KNGX(_yRh>&|}3&i;t>?*1{!`rAcm;o<^mSfjgA(7~sY@OmLN zKVF7HuU8;-^-2`-zB*mWt3^-k>QSxmMwIiq2`wAaf@LEqYOQjlNiGa3UqSKe_oi)0c*W>FqQZatj(k!|g`V#Rslbstf)d_OVIi z6t1GtZZaJRMW3ddO_e$C= z`GxGEd++1v#rg!)h=X+NK_Zz4B$1^{GO1S`ruh|*P;Z-5dUG+2h95spIg`?@Qa4Sh%{8J1GdY788zu9l+oWY- zJEfj(2c)O(j!2D;>pdSDxfrcOg|@ zxrBPmUrOE&d}zYXK#E(arG+Irn)$;(4ikf@T+3kE*(!`GM1)h+>kTBCM^VA_C|WXl z6K(n(Lp#H_(!#^r$azK_z4G2co2u=i(Mh}MWa)ScdJ<37&+MhPYL-SpSC7-I6(`8G$w}JtGK1DGI!(*kWKm_#rdi{1$ssk5 z8kIXoZ_1q~qy7SIh`mf5^fzd?`8}#uQbl^x%UX)vU?U~%>?VVr+0NEdU4Z{Y}*f{Mv;Tb{NOOm zz@x~kl^dOo9!DL1Orl+_r&6ZZG}5NdA+cgHnQNEQtOS4Rn-)m1i#60K1OA3Jvxe_X z6x@6@UCj)|EV+iv??%w{kLyUAxt@majG~IeqG`vQXv&?knMQLA`53p*vaMSwY|%DK zS%&qpz&NT|VHZ)`Jv8BXJZ6{#D(AePt|c6x=iuaYJ(->@OQE?lQmI$sQ5rns81ag9 zvJxlBKRbh*o}8wjk!R>(qiixu&86_^7iiLjd>VW6CN+3gPpaO-M!MG8L2`2%A(?qj zml~G!mOec4mxj#HO8&*GrQ&VjQty`=r6F!Rr0=o2q)(gUp_>DePu5|nK~S1RN$FBz z`#fpJ)2q_KX1ArO%il^J7k!jEC4H5KHaDaEh|;vBusm&@P?^?Tu%vBGs?myCwdl5? z4tX_hNNH(}>GkzyWIebQO&ZgNeps}l2d6tw`WYLlYS|gHj2+#)+KsCA?@6Jh`_iEk z{ptB4L6heUrf(yMQJt0}&=0$k=BqnJSDZv$^HtP*>ujpNej(kQwUo4LSCH34e_CG` zx@n-MRwWvWxu+wW2S#c-Du^P_1k;Z?t0^ojjJ%eI)BMvBG;doZwMD2M(VyRipI9ihvM>X7b(g4+NdTJg|^}g?=n$-@_ZkI&Lay?Ad z?NjK^{S>UprP8o#$0+`JI{9Xuz#Ms!hA0_ydEOZscRhz<^KkIUH+;wt@ZYot= z+)J7_bfDzebfPrulecvFvPSZ8G)WWhhDn>>L`ko_wn!;OagtTtJ(Bai{n9$kA*u47 zV^Z>g3~8lvw$#ccN3y$HAXN;0B6%jgl0wqINQXE7lvWm-(crYQbTOtpkuTP0+E%5H z+p*^CQiodlT2bx&jmfrBb6T;vC9V42nm&~3Km$(L(qW6vSbw*pDb8K#_`|NKz1^u& zh(tR*defRA{iy2%LA&Lll$19d-`6mTj{3RLna%DLaCibGmY+(ok7W8>xPYq1E}{Ge zzO=7oC2g69^?+c1a?cH*h-+#ZAEc$sjz${1*+jFzREsLURCpXau?G|$XxP_Yhh$X4Yb~-k2JDInMqyDvaQ1`^$RCZH5P4?SMg&PuR ze(?bsnv+DK)`zKY*bxerQt8UqqvUb?7>%xSg65Z;pbEMSnpy7@9ad#iCy`5;L(UQY z-7B4ZTu4EkT1ZLifztAdk1)M(P1?W^$nwi zW@~6?>^d5AIFkG=qo{Oz6djzsiAKl9(C?}6t=IToh@CrW`Jr9((|r%+FWF0TzwD(v z^8~up@&N6reUKXWO`?pNhiJ6JVS4YBLU+?s>Bdp4A%9M%=MyvN%%oFvq~d96@gtK? 
z9?GUUHO|r<-35AI|1ypHo=-1d7g43iCeo+Rqoh!OS<1fWD-8@+OZjzHOCSBCr54q< zNRfW~q`~bENVzLhq^~>Eq&^kXrQ6wA(&Xyrr5m@eO0^e1kVZv4lWKi`A^ANhkpjYg zOX|=v)XK90?F_9%^Ilp|Zu=UvW=SpD=u?LZ4_MK7mj<+QdtO&Apih0~+@YpK!BNLtZo1NAA4B8rHnCbu_}Pu;DQ;IxhGhi|7~hd8qMu$K%2 z_fy5#gS6ExiK1PT$>(-5sa_ue+b9;F?F({O($oua%mC@ndYoZFwF z?r*c`Y1=$1JaLvzjJZq=u3w@4Pp{H*ivn6rH)zq%`}DX|eQDQeH;JE2m$cpiQu62^ zsk6r#sqftN((U+dQt77qrMBf0rHvJiNi)x#kn%cb;&+gqm#$1MlnQs>l+JH`C|NIh zBXvIgLHc^`o8(z+j&mwUR$djT*{n)byxWrM?5;w?e$}9{-Rsa=7c0srs!#8iHKill zTG4Trw$$yG4Y?k)BL}-~bT(e1+8Mp6<3LB6mDi7!bP#l@$56VLIh;OvkEEpjZZuLe zhD_Jo>2>5p+WKP(jov?vavWt^*laHObed0=o0rj6`<2wQ)hgOQ)t}M^sHsw_8hLD@ zX_zHD_Ya{qFIUsHmSMDXVK^n&tRqkOl&^jx`Itr1vq76F=uQl++qjiZOxaEi19s52 zxZU)yFrFTN+ea&-6X$X$m_}xY7%jjPNyBCRp#l`yzB}3+%ba!)uZqREqzw0e3z4$iyoVrI#O>gM>j@nYf z$1&27Cex%PPXZ+_!S|?Vgi719qNKF$+octC4oat*q)0P&oRF$d$&}i@I4kvCds%u@ z<%VQ6@3A!e-dpLY{YUAk`&VgO>Tjt-DRU~qd*L1m}b zrD;J8skOc_WpXnr+odIW`nRUN;T^~%b)=cLUFh^S2iiCZ-@#z(Nab_;(~&98^la}y zYCHt%|EIymWjMuXM$_Ov?v!12JVlM1N*fQz)OFJg`aNzAJ+3p4oPK*zZd0sNc2twk zBt2cXGSK?QCOTDUqNc%EJ9Y>q?_q0b_2>u+YqpMdzK^75aT};4cq3VaMU(zi41KMR z->DO}m5MLN(a#w>Xm_7IWL~nDYMehnDPBo5owo9mx=%Ms_D?oQm2Pa7KKDtKD(F)sb9|TOfR(w@ z_(6HnaEtR&<;{1b=aueB(_@}UFN3~G8Q*?O?;DxZ`1fV#_^$Hw*`^Ymdr_IBo|a^l zRGmH>YthnQb?92<`m||SBQh&!N|nr8Qkek&fC6s_|J-~?WY8+6W8jLME6j?pWCHSzpJTa zzu+iUdXP>D3r^DP-zRDDi43az^c30NK242gWK$;Az4o`gK+j8GrtgEUP)4Of>K%BC zhPd9QMdpvFVB1rgaPc_}b9qe(^?#FjTSw{2pe2%9)d1;8_G;-#T9lM=ZL73Emnb=m zKP6QenJ3k{bWVEjRw$LRzb+L7J&<~|ek1K{@Kx$o*Nmu5X}XNRThZo61)9>hGM(;V zNt=IKlHapx6#uIx&Awwrnoo^r|D2{Y?|O6cK59)>4z{LgS?wtDh7H}mXG@khI+7I7 zjgBNa&|-~5_t*9!r*pl@CA%;EnBI>{Hxm@ra|q3zKY}_1xzW!DW2o`I@swF=3U#|W zow|F>q%KY7Q$OE@)X~R>X6#=@A5H#rw^<+^yQQXzFEljAT1RO;^b{Ikq}I3>Hi*OM zebsPs%fgIrvyp0xD2ltfi6Yx>p~tOa$>mcl+4}6DDHgkEZeTnOciBtHGxpKT8~f;j z<{;HOnn<_X9Hv{xQ%KW0jbdz$Q>Cuy)b4sZReX~{m9&|Zmz_nL@Em%Od!F3IWs*@h zQY^1ggNU0{2lwP_pSnv8`#hvgRxc^->1%57`YpBT`I}f{kdij9lU65S7IZ%--7l9R zjh}f=dSY`)+9KbR_UnqJ#$oTJCte?=sJB0)m=Wg4qe^sfmL)B8sYdlD*QPaQ_2_ZG z1{Bq+AswQ|G@@4v^7U>-n&{S)JgpslKHZ7d_;w-Hp>DMBRCgM7*MVxRl*q1DFFI)L zM7}?rXhz@uO<1EbV6GzhMP&c~$ax{%>;ZBWhCepSAlc`r}6?H@{ zw=P;h3viEe94#kX?F#Cx^&_8~el&1?0F}%Sq)tBq$?!u%4naDaKg~d@nIRNWHH>V0 z*HEX}2y*@&NliG4sv2wd1JOydBi$!w%fr-9=3g?V-$wc$!#uFJ-## zqxq%;x_IayDIrOed*Bcqu}Yx>f9`K}K1L1g(olDky=?jBt|@_=-%FR1j-*L0}#2U0HoqFcITsqpnRsp_al z`0mNKQq^|9BtxCj^juV+^<^yR`6o+SpsGnt2Gyn4i|WztoW^u{P)n-XrZqJ`-=5sx zcBHrzJ1T#*3+-*{Kwhpr>1(szq*QjK4G)=Y=Lkx_H<+FUxzN}%!|^@Jqe%Q5MQz@@ zk>`jp^!nIX%DFq9-o;L)vz4ckDNsfF;RM*Po@D>Zi>~flNUiTLp;I}& zv?6r{JwD?{H7+Ce-_^8xzLugJ>F7#=j(X?e?-=sNK70-4EPvT(g%{T@pxRe}MK) zKS;MHCDNlhNi_Ft68%n0rr%3a=t;R$nmYL?^{;ZA3h$htu@^IFc77(AJv~E9x8%}j z>vJ^r>P32&d6jr{A!Qx7NgLkYq61y;(xI03C=fl?_KYV~ z%*vUS!tXr$`@cVbwE6SMtd#jb&zf2N^T^Ch{`b%S`EhCN@n_HKO`DjN`DdS|W_2r= z)m`=P{$@|6@Fkto!eH{EzWif9(Ipc#Lux)~}RVS+i>Y zI+maRUmyL?4D3JrG&JD9-mlZY2WZpy?^XY2$Lss=3(igd>;0Vn-Os(*f4$#-_BsDw zKVSXd2rhl__xz?*{u>_)v%de-nNnsI&1%6dhL0TPG|F|P>wlb2^`?FPtss5Px>h#p z`n&YsSN`Yd{;!WSZ214)ao+a%UmvIc|Lf!U)=CifebhKkr+=@joS6l#z-5%{KL@FX za`?9*_WkG7{^L6SqaIiLH%{vx{f*QAZnbP&Mixt($zo+!S&Bw08fmc_DES>zv+MeTH1EXcqyPRXKSrYvq|%c5zXEc%?2h08@*9KI}z z*@d!5xG9U(MY7m^R~DNd$)d{>S+sjDi|H?A(fO4utl!Dv?R!~V`GE8LgzbwgioavO zvWlouQ4uMYib$`f2xDzUl&PbLBXt$=zMdlHHB>~sri!T6Oc8xrDx!TGMYL_Fi2b&T zi0Pz=P&-Am?5c=O_KG-8ioiEziWPknq3@@NTLZA~U`1>hs)&8V6j6JWBF>Ca#Cll~ z!)Geuw5KBM7AoS~B1J6pRz&7xL{kA^x_A|F(ztoX4^VekzNf*p?#x{8kq45&MrH@&2!} zID*(O_$7;U<%qcjgqKKl|iZ})iSC=Tl-w)@Z!EsHB7#gAomoP=tUyHn02VOCXShiCUGxsXO z^AKJ;p@`H=*uMaucMUmkTM^sac?jccmFV6@78QEQV#FX>3k?4i8 
zsOKw-Ibb9F@VXJPgg)kngU1H&+bD}CVAE!+EE?~GzV^zZ#z9%+f{XEpEM|hs8fe1@ z+Hi+9BB6_^XTk6sxc^~uNf!6OXB5~xDUii9=;JN;d;*^s@OcL|FTiEqOFR!Y%faS2 zw2=iq)4=D{XR!Vzi@=}IvAH6CfR7uP9Ik3*FdC*7s zdC&v6{9LMtQ!Bvhe&h`_a^<8VtaEUlVDkK# zBA(fLh|xDx;=QFThBk+PbVaU1FV6=cuLsM*!d(`%#zPO25XYIa2$_d{z$j@c;s8#E z!6-057LVX%mB4C1C|(Dvs0dlyhNlgQlEnq^ssmnL+hnl?dinxCo3>9D&d^lr1G4x9 ze&&au0kG3TQ)^E^SJ<|JA7#m+T&^rE!Ot9?R_Tf?4uYKq+&Y6>@=aN+zYWe{cjN)y z3tyX8jC=v7RB%$j={GoKl|dXTApX#ib!A1ofR?6$nJ1Wag{Rd8H?FUUcMTPh40iEg z7Y}xgIw+z5dfE$a4WTC$w4~{+h*IEnh2dlHvIXE316Bc}713?HB4Vd2LJRFYoC}_d zzyR7&`NAi`=~kd3JoJiq9HfXi=;q-T>>m%`JFJM6XB5#FdI@RlA=c%oMCWHJQUA9} zc-E1{l*Z5r_-uopx%PmU^pZtpUs+7-k82%#dfgsS|W>`K48027Nr6aBQ1PQ2aV`uu_H(pC!n`k;P`Y6j=xS857*1$ z!A2Z!6LMjTEUJKG`#4#If#ptcoD7DG6J@b039(NGSFrq)B8zI!Ve?b?Z1C%RNfsZ# zt~NCH3CzAgZ%@JP`+Xem333LSi+&-Cuh3k`8(Da~lZDwQY$e#f<9xu;@i*9;DdJE$ z=n5RKKzGyYpf-Tr6=+Ur1U-UXjW&wN2RC=rpf%8%H}sY5q==^jP;;CWkuV5MhoY_w z2M=hg%NRwh@_b$%7y-x^-u39+#UBQQJAQx=UJWnst2-+rhU17y+F zSr(U|vsz#%je_RDFMK?_2YS0WO%_%%ava;{88{w1a=j;FJ|8`X7kqA!Ec$t)#xFz9 zupB&~#Q^o+TDBJ~+k@qoU@!=Ue%GSjgJVK8Vi_xo728p3z%Y3a&It_h%Y1|u%vzJ@aj{5;O>qAYsBNAs`BqAggCd4T<%;ksTzqtM@= zx7hbRJo_U&8T>kcTLQdt88qlpRS|x5QNy4&HMpGsw>{9?gqDh^&>nth15blL9&|tt zfEsnm1=>Mf(!=*gcwv9&=_wd#Z-SS%N?1m##MBg(IF*6zs!CkFt`gpFRN~ARl_(EB zpMR)C#nQ;La`693vM5s%^|dbQLp@p8Ls#RPqBm$Ii{93_78_hkN7RSTh!42kbU-X5 z#FAw35*tBtW5Dh!xa}T<8Vz=RU1U*nBz)5qEZo4!9o#42eN$v{3+&o~U8~uM8`wF6 zT^(?%1r54^n+LcpTY)%2i`D_~Jq_|4?52WS+coIP!ewy>?Bq!FF&pq0{C;e~=k5e^ zuzRo<$4rog>7Xn^lV$M+3<>#`_&8`^t_Yk}_S z!85boz(c>_oW3F_e&9K1?T>H%ETf1q&{_BDs73I~a$r>iPIuvl3rP`;!Dk@&?3oN7 zT!0#X5&kNxL@T38JcM3;B&kH+F_q|cRwW#vn+EW-Jug%u=oL7Z=)MZBu{Ly5AGr-qb(_NnTf)!4$+->kwu3ARvH5mIe7mCtfz{MM zjC#xBC^*GKXVd#bLtxcTWX|+xk4UAw3R&zt~d0@489o`R4i(>E|Xzbk%?7s{B zNW3ie?8ECxIEQ1C%GKYpv*eYV2J^JCkifHDJ{%ndOTrViXM}aniz-0&G z4<=8*WL=I*n4$LT?&3B0ndz-c{DP;|{j3uAzpKRcUn)_(H2eZgE>)66dGIh)1Dl$t zKXuSQ)sw}chBy~+aRHN_U^1ya7(hQQ?7#?IhJuMVn4Iece{+Jj^g|p4@@p{a#W2Kw zxGX%Ood8$xAB*^d$;_$HjS8EGES$ij#vH_F9_rp=e3rK?a=>Eda(LRy5sRFUgP(%Shdrn{2W0W=Fphm3k58hP z$wb_<@ctb19A{-w{vsG%h2{#;yF7qSpWymlpnn06hVSv-Ptfak^g`g#7yjmty7U4} zorK~Ic$yE&=x*q3qJva zSN51MB;;^UXsI_?LKjQ>AxKBcTnTA>h2A#k_ zH4D7KLAyv6i9YCw5&MCNdjxdQMvJ)Wz#~W&=R*-o=pZ&i7T4Dy=hq{KQQ#7dngdN- z*@_r~!J^&h*Y_fq65z9mvKXEWjw#3uaJUZ+E1`*p7jVt+tZ!GqtN?y`9XV2jS?)G^ zh`TuFN4O4f*zy5-1%uT;p%HK>D2q7+9QL5jtU{01u{QcI=wk|4oNtHT7y7V3?;SZ< z5tGojoj{-V)E9l)Q`GsDDnaooap<&4bSYAanfFwp1~kzI99+%e2^G;Z)xdRGfq4U2 z+(q15BIXaUz3YU&%MNkuE{p6Q@GF8Y9FfOPsPp|1cjSCJ^1b6o#MDg|l@ad@cN}v( zI3VV>Q&G{T>yRzTUF7-)#9FlvxpUyJ|NQB-2OU97Q*qoh}N8_u)f`_t(eZ^&GzU3jX{KIf&R-{0y)A3Sa#NFUHI|u)HFiEHK9) z_A^o6d!uHgVn+Ql4;^T$h_UFWgSw&b?uA;<=z|4jOmHZfposgk&@+3YhYLpkh8bpg zB@bZ>jnv6i33Y)=?1nczKs{ex6SV+&U#dBByDf6CJ$f7m%o@n`eFT3-o;wXetcT(` z#5)i1P9KYX#v?Bg>n>BE6~uX|g6sE0-|L0zLT(o##-)7GH?4rq5aURH^jq-z*J|hp zIX=jMYel@hLy^m2=%F{lzmVJg5bG7H)T;C@!o)VXCT&Bo}rg`32nRv2gLi}7n~d7EFsUw z!2jDJ)`KddP9xu+z;_nGclsjkR1dR1;(o9(e5Wbaa4^esh7Wlm?#IwqokUOE9Ye(Q*H?FMf~{s;GjM>s*70{s#4-T@rGxZtrX@?|ROA!=@E8S(ajw;|7W zfkEX3=ua0zJKpdZ#D50jz8x{&Z$dAGc)vlsAFly}wV1; z-+`G0@qdE2|CuZM9zySy0{&@Wf%rGffCjSAXPia8BKFO%z(4chIR)suZ=fC^_Fi}4 zpNRcR#QqENe8XE=97B#j`G$Rd;u;YDipcXOm_t4z_E#-2zrkx3!e`o|uWbjf@oWMX z@Eb40{Z|{r4!+Y6@jqjy2um=C#VnZt4>|+}USOadg0(ixB1=#oW=~W^pa=AVI{W?nPgUX(G_uJG`O^V@&=o!k z7TJ9f&;FS0z~ayd^hn@PdK`2!5q&RM44wfkfWzlGsNwVBL5mQxCGhg)s4LLGH4SQZ z5Htr4b=HCHdT0r`zcvc~3=aJ^!Fyv+^O64=#D56l@0bJ+ONMVD{%2Fc;y82w1{Pp2 zBolQOHFg#_gq+8^1Nn)Nnr>&mUSdKxdfc z*MiBXU5a>=h;_DH)SCkIVJ$tx1Te`%zZ+H-J$JpowYg?5T&p8yc}C3wlg`jaESS7> zLwy62D0ox?xY$ocU6FCkV4?++@Wq(#eGnHgdA3p(XSC?c^r#yls6o(0{#s}-615Oa 
zj9?M76?z1ZFLBVwZut3Lcn`L92QdqQ&4MG)!!h(u;L_(bYDzYk=@#nyZOkIzavEIPfr;M>oZl;13@e7;e#HB~fg!Zf4qU>@DB=NX@#~6; zs04jDfJq4S@yDB*H2gcO_iBN;yA9S|z@=|TtlNT12k4_Mv@soAT>3y;;IeTLm_Z}e z!Q>M(QXM|EbSi3$3hU6Q-zyeiPQ+a33$0WNK^H0h_n}$Wa}R2R3$L&_M(|4!YU69^MfR*6^$&s29iJSLMJ) z1Rr z^P+C70iUI<6k!8K^`M(p(9GIDb2-?I2Afh~vv2^`(!fXO0xhAASdE2Gp_VKIqX-4F z{!B0i8#^`p4F0uxwIY6R0G~ZrJ55$Z;ybM2>{5vm%W z&V=Xs;QGNv;|$%8L`@ux*=a0TK`%KIP~Rs(6Vsp_@c9NdPIFPSJ>lD4(9sgq7OxdhGR>yQr-V7?7{%ST;` zP>C;?5ko$yuy%tOS3v(^iTc_A`Du-7X^-5sLyYXv8}>v$=!kv#Bc9Mrf9NJ`Bx(vg zEy*1@2u`CW;(g#VSOJ$An7?OYuA2@0fzjXv$X)o`E9gkQ9LHG!&H3RtfzX&5vj$i> z7~r2FsNLb#P=e-Mc37Yx@W_C%?BN!b#hCU9pM{`mZ zBT z#_yoZPq=6H1HJ$~4XFSfp%2T2hR$RDyIT*vENYNd6ZE{z;eB8=uPx>c^aA$i1s;OY zpLG-y7&Y&YpNC_uWdhda!6suidSGwVnw8KHysRN=O9#v+yYK~A`!OFDV|~knUU+hO z^!!zkV>PipQ6HYu2EJ@73y+S7Z)env?&yKRXC=dX1g?KD;_U)`3`1Wq9KJsaISNjN zW1z9|i0u@tX~^&-aPkKy-5kUbjDF37pMcX)FB}J~T$i90LsR#B!Nd=}lm>kO7`-z= zi_p|7u<`>ZX)V@$H-Jx+EP_y%7Qy$P??mm}3%2mOv_$w`GG>WG=*3f@XKah1DHAl+ z^)%j-jeT>`Tc5>wUVw*zll2w&Bv|zUtB`AmJy<#3f-m01?DGJ+1}7J2D)~Kn3+SmP zwB!vPjW2_BDtMhee9i$n>QxnW0exEoaB>ABA23RV*G;rhL{spYhyEZ4Y*q|~e&B6O zz~-Widoc6yJT#MMLLatVB}QU?A9+nB)`ClAteu3I!<)dRy#;c&2K=}Y&Z8ybWrMog z34Ki$^iUG6zc*sp7d0Q+Iq8I214aqZ&?Io`I0V-OK6}BZ#yDIP^piXp+@|8a(-8OR zhyytF2dBuHn7`-1-@wUpK3I97=7Wy}KGVRb@hZ#~0XPpe)-CnuufXU-F!+Wc{*m~c zXy|Se>N@yr-iA823!K5{3AUaII2Jga0i(W$QI}Go)no9(bgU_y#5qGx7SK{l@ELRt zeaJ<8E*NEl(Nge9hL(=sMD4qW{CEhDe1!aZ1AhgZ`e1YYGiup)qZFX7tOk>;%2?N^gKMt`4o$EI+y>eKkF%ZO17PBBk2r(NhhFG~!DTRbYy=Op zfyl){=!1vC14f{hk49fS7JdRI0pPJ6EDBV(R?kcV?|d}yZh0{9j-^&<4p z-pJ#nvgi&b9{!kVpqb-3^aIe%aU*;ZOxlP3^|t2V@u&U_*@2z}Jeu!@rsAQEeK^1U z@UuktFSIfT{+?EE+WV_o3Yi2qPGc*b~Hl%IfkWD;g+#D9ke)_G^c zBM|qE^AXF1m?;+{hKTv(Wr#asUa%7TK?~k~V1@V}K>X*K(65C+W1)ys7~a1IYc}h! zCcFW?HW*CXf_QKJ+ei^a=D-8F+jexi=}qmcHntKccR@Zizk`99n_HLU0J}f}c@Os`phyoqkx49E|(SsP|^m(66Bn zlf0lMU(EifC(#L5o57mOuVR&$UKVwrHr9Epq4&;ML+Os3>7Vr@a|yv+-e*L zaSn{Y^O1th`U6J!uf8knwW9`KpYgNeocZhjvRrI>GP?zCJqY!%&eCY!G=v^!Py%yAqgXp~& z@t@Y|@A~3w)Qk}L({pF&Wf*EV`YtnYn1;S9U^?d2xmbHyhP7fp+(XeTVskkB3_NCB zK!5g1C3aLo4nY&&x`BZsW{iRG1=NAkZm6Y*y~|`go`!gU!EnUA1i2oJ9PhgvHNhX( z7J!*Rk2->wA4kl8A-4}8-UATt28j1(#QS;-HpF`Uc4!{){he=~jd^vfl_1ach;w)YtP`Tw zHNtZyphk~qriioXqvFv=9k#`ciMU61#jMx;@875JMC_OL|9fxhvI}?($2^G|;ez^| zgPCNU3h#k04W193EQEfRf~OkywoLf^wTjre89LeprpZ{lNyjYvQV}G;w@9XD91OR?oHU^?a@;wt;#c;I8L=Q}kSE zo}Ry~)bkR(o_&J!yk`wwi_!D*9eN&}tmiu?^_-Na=Yd!CJpYcK8$H(Zx@UTR`A*NJ zO7!gYUC$lO4Scnnfz_44*c@`$BI?z-H_ z78?Ay!N{+78~I1Nk&m7?a?`g)ZvE59Qgah;O*HX~H5xuwsA1nyT2ASpWuNX^HjLEr z#qnBB^#QLCEqB$3w1y{T&^DyszW**E-(w zMaQ=+^*pbEo)cQ?Ij*&y$F$RP%g%asv;(Vd;L}~tCs@xzUG)5HlAb5b(DU*IdiM0z za~)s2rp9|$>-oq=Jzw0U=i=>pZgNP^_fqxjlB4HrFm`#R=h!j^4yb0})nMplXW)C_ zcc!<2D-SYoJoqgiXW)Lb3_N`?So{8E7anY2U6_H_tw$cj8hFxP13x-yV4DjDPRutj z-!bsvCk7t>#lRtEMlP~6a_yR6*T~50z|O+n$YpvOSqE+tz^%yF$nEt;o^iy;S0ob$ ze^&F%RvIoIsNn@h4X3Qr@Wjm;ei^S}(F7q8Uv9-WqLBeZMp>cY9m1c7eSNv3|w4nVB2p7&M$4` z*>#Ql4V-4Mk;OtIpR_cwv_#Dj;c9MrP|eA&)$H_J&6Ub(cwhq!zwfT$njcxpMqSIhVvNvzud9d3bN?$+|V!_Z@zmM5Lo@{>ziwuBbHJ=F63m)JgQIlDCY zL5n|X!55qASl?F1VI6h6ske?9-dc8;j+c&u#-{4HvaI7%&}D`XIBB6xaNG)pO;g~# z**cDYtYg2*dfos}ecu+oDe3uoe?4CrtLH)R(&w|G$Ax;X24+jvLSNhTyaL*4kgn(Z zXQ0osdN#kP=k)7(UiwhaA#e1&tD=E>!#B4#gXTIJIF1ZFPZ;>~FaxK~G;qa*2A;JF z`de+_H|q>M@}PnHBpWyr+M1DV;2ZG7h!4;mxVVJE%gU-bV~Co=#;UoKR?VMGYF-wj z=2Z!59+acz;n&o>=9!w8{800W${OxgSHqt=X*kDO!&aj-ymtbas5Jc4Tf-X-8h#kA z;YJDIm7?Ja=QUg=U&B7o+WWiM=Z%It|JJa7Eoh}Nyt1{H)t$Ax%R$Rypu3boT7EDL z9Hwje`Ft(E_S5nzcxOxSbK9iladFV+AuTsV?Q3~X%Ta||E&xCIwwAr1!|KIaE(1SJ zh7O0-(eZkCDTCcw=Um8! 
zJ&$+Nvwno0FN0P5d_8~g2Op!JUq$M9##TL_->K(OhxJ_Jgr3V>*0V){p4S(l9zMnP z0k4-f@M&mlqLqPTI~e!`YS=2&uVr2a{scx5dekOpsYi@~k0jywibg&M%|zq{^23^H zw(g?lkwVSa!6#van%BFldBActA2+CZ>uNQRhn7ahsW~xO%`Yyfxy5ZYJG@nMj}L0T z3|>vDYB;gEhTC<~aN%GLFBuPw$r>IGjg?!X;Sm}Qy98@EI!eQNyEJT*s$p?P!*_Bu z{PBW@LoaLi+q0CuOKJ-Z*!Wli`bdY5HR;57nu%R|TB8+H6D z9=%E)bOc6Dg*x8$qk`J#RxzQ?-Zhp*L9!MmvV-`TICMPn@P_OL*XvdGN+% zdOoCq*246B7G8KFR?p-1qi2C1Za)W(5A?j~r=B;X|5)4?9tI7KhKA<5Ln|uOBKTgU z5xN4G^FIx|Ej^HrmIU&RGHM>v91Pm2+1p0VdhjuhgigjI<`WTrMa}i*Ktrq4TtTnq zCt%clkD7ZYskv3An)_W+^X8js&U~Zh&t)`hSyRL3Yiam$OT@jchSMbt59y=f76UYF z0YB_L7Ozjza1%wtKW1zAosWj+t=90fEgIepo!vU3;nv_K-_r1tM;c!I8lL$74 zmdF0ll9`UHHP<1|s6AtJJZGtnd%^4ahwFF}{4P8}hc9(O|A@Xar%1=Yigi2-b!t@^ z=*U9PRqI0|t@Qk(1ANUHz2s!{8?v6+6TRXhJzML+35@!00ke2LYfz_*@H<;*=TI&5 z4CoCdXeX#UdO*}B7iRou&2*6_*$8uSesesvbwyQ<+5c%t!vhNnN)a8oeTm(jAdg_fzlmS?uqvO{k$8Uk*U zwft<3mbb6aa#FCCM?y8~CWstGP^(n%BKl^CUlS2tGIgD(1$0Xw@#f9$ejZMS@S!Pd)EY$ zhHBnptLB%T;bY)2q@S91jZkw{@JN}i=2%(HV;87dS*+$dOV#{%xtcAZnNsKvJfWGO z2sJm_2yX)$yLh~o3VmgxUR}j=H`To84m9>u&Cg${x#R;l!{@GpOAxe^R|maaBMq-= zqv7Y^bD%r4;e3<<;LH~qWTnDjO zlBMOcsL9I

<0|Era(DeSjK{9JcVdV6M|Q;w z(+{%@W}Kg+&}X^pcrRji0Une%U&jMhVqeUx3s>v7HR?sXy*e($yxQspYRO$4<|iFn zqPDLvL%x^CoQayz&=GMJ=)Fdwo{z^m#7sRu!c00T9P0?^kq;g~-9D!0smS>a*?Rtb z3-#i$o-^LV$G+*=8{Xtu9X(YS1DEMz;9scgJ`*w9p!a%;Ic6MY7~@(4d!!n;FvGx3 zm^u4&HS*7J1FJIOVHJ#QYKV1!rbg~eMy}My$W2BW`M_w*p0bh0dmA}!CHiNBk@KUC ze0IB$f9*2z>U~CjlVaq+-Lxb6Be=wJ11+&fBU^d+dX6=Vy&ix$BFU>=^ zc9js;S%q-B_92|!C4@&gg>cT$5MDebgul!W;o=n`{KPMW{~waB1Fq)(dy8z5WM*ZP zy`pj7?>bhp_sZUs>{)i*z3*)xDP*RMgu3q!QFcj4*?T6NQ2*!q|Gm6=sXM#pInO!g zeV*sToiHu!*`cMrN3_IGX=%a*Ej7u~(!(2CT6#xI?Vf1q<0~!pNm}~wOH2FyXep+g zjxJWyQCU45Rd&_U;`Tb4@1~M}r6IX!39!#ZAzW!*m_}U7({=Yjm_Q zOh+%(I%*K9qo^1iW$x2a$3r@*mx$+`)X^WIBlBe))x55wcZE8NeS-U5>ZntZjzT}< zJ-+Gab_G2Rsimieb@jBjxt`v1(Nhg~J>g%sqiVi-YBEqyPDAxHX_lT0i}aMaTu;Fv zdTOWA(8<$lo9`sxO8>d4coqeA?S9Ukha7{61rgPvY>)6>I2dRjV7Psiu$>DDSewbbJ0 zJ$m|?pr_eMdfISBPdf_mI~D8cP!$8sXo%PDXrRV20~xu2F5rFj!we*i#QQHY&?A+B z5-kSWpNRX<;C{(K$1)96?S_Grxp>??1HCCQ(5ANr8c}SZz!C%fD>HywMhdQFBvn%* zb#7s#m@YL6AcbD(etGy+7W7^v1${wH=F3>9uqY_Vxsgk6ZvJBC@RN9hC&l{d}$*7 z+eFLk%+#%gnLPWLsmUla-B@6zty|3WAj3@8>@1Yo&qC8X>uB{@JtO(`?ah?d-AvPZo3Up$(|>=W`%yjje znGAo-q$)L2QGE;LHMY?C))pGu*+O-DS!jf(g*--EsN*CH=5-6rS!JR5DhriHTZkRB zP{9!k>LMO@%|f!L7Haa=LVLeksAfegZLMvk3Z1NU+S^Le1Fe)g)k<>~SSe+Pl^iZx z>GC}*J#QF64}Bu2f+B*dXX{8FfgJ2=q$++!`h<9QQGq|Fjg))UNTW)SYc3|5hFn?d z2Aq4F=;uHatwc;O#hS=F-9)D@B6hhZ>hjD)9v|_z?7TuYvKv}x zcXJEvce7CWZWbEV7jYP9p`_u6%{U8vn`j~BEDNP9w$SS37HX=skZOm8&K|N**P|AC zd)h+onHKz>78-EhLWz$pw7Ce6`(>eaHLcXUft5P8w9>iOR#LRLQfy}{UGHP1+1Nsj zA7iD*Q>^r9HojkOrGU*=@-td#^iC`7JZ_~8*R8bwk(GkqS!w1UD~+odLCtGKP`O?a zG+|T(HJcVej)4&ryDWlS5%c@85oAk=Acq$blvXa1Y&?>}rbp6?m`K`IDT>rFqlsqnPc_rbb7ty)(@gIk zA^soDbl|s{LTXs(ZX*kQ=xL!cKMS=Si~K{(OOc~j=2<9anT2+(w9v6|3q4g@sJX#H zlPng>LcVHKEMz{9{6@Zp-@;=HknhObgwGb*`WrYPZ^t-WX`PFeHo98rO`i26A|ApjD_-UfxD3KMZvW_4)luBfZ#WL~k~d z;+v6vR5j7fMkear$3(uUMa?FeFn6H7EHY94S`&o=x5^e1*`qEE17@>OQ$q_(bPn|= z0-VX|U?$ftX6oo}CeF>2G{{Ut#+k`}s+raVn&}a0j>~E@{X~sj3NGbY&2%^3Oz(l= zPhfZmHK*)>nNEH&(-+j4O;y1WCkwrBvCxmE7I4`@sqWx~w}oyG0j8r+LxAVyMZhf3 zLdi>k=V}YB+H9d|+br}&j^{)GcW|oCehbY$Y@zGNEmUH&kV}q*#skxVA93F|3vIWv z(wFL18U#$MxmoF;yOrt>wo(Uh$a9_*Jsx-}t@Ps%YEz<>?qvQCw<8~|qy}b3>O@cm zFbkRwO!N`7ZC?ak&5odi;gR%VM)VF4*3{(~UxA3chTG*o(HZan#j_5NJjWh@xTd^9wWP_3FZ#Po5 z)=2A2M!L8kIG#YSxo)Ixx!~XnBOU!}B!BdrarJ?pD|#mI?Zr*>$=^gH#+xVsINzIT zqRuPOE73oXhnr}o$wcjdbKX7^m8F`f6#OiA#Y9ulLsj=o)CWDN$$xe8tBKqzn8_Kv z=S&lDv!$6fqIT}+g+7El_<gXc$ljUa01&_;ZHj^8;yk2W2j|ejj zhyl-!m?`x*dT^SV8l$!jzKtIJ&`jIj;4yXg^UHpcAK1#=WL5bF;pkT~kYx4|rDc?W|uMMOq!VH33Sogy~$L%mDVFsI6(?}jo zfO&f(;eTr&rvXNqJj6(6rx__^j*%)Xz#N4dZ>->JrI_j(TJEQzK{$_0jMvbQ=Nbxi*3y7RTKa}FN5m*CDQ0VF z%W^HbZ`6{c($WvJmd+g3(vDOud0*DjLYyyJfp2M_wbb~hmeh7QBh=7Q+qOE|(@jUi zaeinuMn`QX>&P@$M{}3xNV`%;VcT`ITdSiU@i8Y{5p4=wmw%K|b z2L2ZX>#;Z0Q(%~$B6NBZ$f3+AJq<=4l|O*zC+q3z89m)WPFXMLY2qb4^}3;_XSsS( zz0{Lbgmc9Ye3j~{Z&d^NI~a)9Hqb!in;+_lJ8~}2-9W!F_pcxWZScqU;|!D)fIM1^ zd|YQBlMc7V8pu}|s3zw8BFy>SP-`wBw>I=KLfgT7h1vaE9W&XZ2NaQ#I)-X~@S(LxY=Z=ygk+UvYN5+eJh6eKhpZTSM&zY3S4t z4c!~3p_Ks|dN)TykwF@&zD`5iHfrefb`3StYAD8nGi#)V%J0#T(|!#NwQ1<+c@5z| zu%-34HROIzLvv3J$d zvT!YpH)_d$mzGSixIalt^Ha3cJ3~tyuW0Gm11(*6sioo%T3Ym5OP4C(%-cXmLva3G zDbrEK**coNTt_ZA+m1V`qtd53S`S>eU>|wS2YNF!WqarAXxueydc}W21@+x2ou=3VRm4iuUbP z(XRv*ybmghPF7Js3VaKxDzdk!sQq~r)yu>l=Z=cDJXg^Z>}8G?tH_L<%)L_VaVo3n zo`ag&)kUs0LC&^T)6x!Vx;sQo0Tb0U3454UfoiJ15&IbIUtEu2Cxe_`_F7G6YHKJ7 zxiz{;IQ1Cbo6FC|-Wqzr^8)PXisbYTdRsZ@p-rp7 zyVDkagDwhs*;hd$eHGNk4<4SO(B7cGRhyBm#ytY zc1qx>q&e`D+=EXczlD+k&)l}hqj zr=%z)aud1d7NMkdF~~*aUwPzS2y$*EaxFVwNwuCK&yZWG#Y$>~+^ShlMIRki>QPHU)1bjMX$ntPO9icI1MHynTyj&; 
zfv)g;V80sE=YJlA3;lp4F#IzF7-HZ020ULeRzdxN=isUE{mcMf^Kl<`utBR7l(-gN z5%@_QH^KjdeQeWkysjE}8sTk#x8(I6__yK})Ern}2G+%i3hDy9qk;F-bOrUvg0}%! zf4dF+_OXJ_zf{nx_wX5g!_V*xwJL??UPDPko$yspNw<;v0lV%0G;8d*H$xN6h99JD2zUf;rG`PTR>?`Dl~Z4foQCg&uPz4K&p!CNz_qr? z|MQF7!!A4&o{`O&a%yo?P7T4iHuvS!6(Fm!1#GL1=aLWkirYzADMzq4p7iTXnfs(>tbNqW)8Gi;Hh4u zpfN!T(k}s)%N6ti9;rP@tAa-VRggD4dNZ7rl;(mS0Ic#RfNwe_tpUfTV}Cw)go>0d&_Lb7DE@F5#biVC zaYX%U49{DuaPo)7wr42(IM5(^1j1LPgnv#SPT_mP>G`Q}s(&80Lkr~v(6XOGD}`nq z1U>W)FzXMFdDQ`C^`T3D<_Ty+r@=>Xa~<$&4*afP zk<*oIIo$<*WzaRMJ(bg9@N>#Xa2@)_QDFEA7=8za1*oMxf#FwR=+#(3A6*r+2^?Jo z{CWYuF+G7%U--Cu&~pbXsKOWp{hp$rr8B|LrFi~kV64UMW?&VAS__=^9)LcQ4BrEA zT7sHd44ySbf6jWSpwJfzYV;A<1D_SBlk0u)1^xu>R?;SLWi-4#r;4HJL5FGv&FRxM zctxPIO@yA*u3R`(h6d^je0+VvY5H(rGzp%LrRY7-KxYD@7+`cN240*);S_xw+7L9i za%bUhybw;QIpL�!`^1d{@Bh9yFsHsEajA!m0N^cv>sKX9_(o0Jzm^2G2oT^eNQH zuE1=lJMi*AyuHD-{^&u&5bqIk8i{_UoDQE3YNtDR7q}Xl_Xaur+XO7Yxj4O?Iz&Kk z+6B(;hKB^0-909!BjDXHV3va(SN@Wm(zEbf@NRA%a^W7Z1@Fp%bFM|;<2UG7Ki~~2 z1NPOB^T294aQd&F4r>RmV-I-k;A=Poe5!+2n#rhRGZZuc+`6*?exdEq3w6LG68P){ zmcXbHxU>fNe7dNhDd5o8N2rnDPj%G0wN2qu^#@-9mGlFelWY8ToDa8Cxt{QLYy)57 z;K6}^xGyxn7X{F<(QhoMYZ2g%vIlD0VE6*SolkSa=^Q)+FIK@zr3$C+Cg|4Cso$a2 zr2wB}=uMgvxE~n(OADufjBx1x;j}If8a8x2JMhXAb#VW8c#wh5Z#((_dw0reppH3% zCk>$UHAj5{PX9W{=}s5KtdE=ufl+x5tong({-|jq5zjI3Cd`B9F$nPmuL{AZAO-wb zO8AQn_&y3a1EcQyfhjP$bqe+LjGSJl%jug0AJ+vr`GZ%#a?l^|z$*!z{5bH*{e*f7 zY@GkWn`{T)Vs+HJ<_gLNe|Dh11@uA90yf7&2K!=ll{ibEANd<&ij z4z&d~#}X8D`UJepz$GJBL8@2qNERtbj#(oXTA~@Vgx^8T67U>26>O)7>S1(tNf_M? zgJ1XNe*WfXEAm{eR5PENq&$9iD074cSMW;4J$ zyfd7Vflc4T@Fpffp9VJHQox;b_!Td~^8svTLvzgqhc3cnpa+M#d<~}&sCymC!$VaS z9yaufZOJ;6w}f;ebsg)V`x#<+K}I`tBviUPw+A!JiWof!Q?p5N4y^1)@f+ zmy>QAcmQk+Ryj37tvj*cnET>DTb!~x59A=S~l`-?xMJ&;q`naK&^g=yCU2Ei{pydMQFiaUyycc(W6I zXc>A?qcsXzgxO*lYTMT&1zErsV-9LqzJjhlM?JJxl4ncIc9;jInEr>!Mfj-3LT^7f z4F2qOVYFgz7~O(bv_CNMeHn(i7=8|T*1Fq;lNT_#!7!r%msaRAZVSTUH9&t^3!eaR z3Bha-j(S!<5+1}I;gqot^$eVObO!g?Q1>o^UpK)i;L`IB>KOQv^9FvZ&v@Nmz~*l_ zb+t$Sf-m)J!mEa!R9qJ^0%wxj$Y~&ONp=I5z?(wgG7k0a5<~yMY%ydK`qB)gz z1}sq5E{DQjwH-6M8gn=}6CVkjfX&z#a3U7iVV+PWqF(`<6Ts#OI1_YJPA>~k^PbD8 zDme4{vz#(e&$3G}Qv#c&_K16J@S-7bK;Jpg5r3newf9v}{Xv)mfXSWd-~>3+V1a_Z z0GsRJOnkV4?iu0l-mjo`nB{6+$L#T5LI1#&sb#n}z)?w+QNNPbZl`qAuZw@-n;H{F z!Arv^0v?7ws9!%XqlVoJqg~+1G+@%S5_}qU;XP;_PN(~Z)BFMOJq-d^riW7;>R1MF zc@+vzHRiUe;7ot?nHC3u<1zTyz?C-e?0y7S$}+*V>!^RZm=Es4n}Zo)@+*7~zMMua zEB70ZEe)rm)zE8z$7k@RToc3zxTJwE``aV_9Wg(216O*Yx5!YFz?Ti+%a-wS>Npo1 zSb+H(wQR~tcoD&whMO_F0~4942J`BAc zy{1|)=7A90zZJ8$9%m=?nG5JI#h9~;GZf^2b5t4n{_Q`Q<=~OXg)ip|{3(%6VN}-( z-n8YYS@4!*CBj>M8otg;VZ_i^2En7Ypc>|2M|jTA^H+C-7sLZ`_d{=hkI;S^yio9Z zURs9whFYcz3nvx&ObYno4s5C%2&bjs%3Ac6Iq3g;t^vzz@C*IL37GsUgx3UpX6I}8 zQNH21m;=6|&onEKSq<1cc0g>LP%Dc0l2vK#(jN(g%9onCPRn9=Q|O8DR3ni z{l$9;yu9GcP0Rwd*2!rEW`U}}rWiG?P8cv#;cROHZ=%pU;?a8!05{aLGH@p8G-@EQ zSp#fFT*Y&MPjI1}MmtSRaTNCu3^dd*-mre5t`ou{`ao9 zc3}~&N6^Bb8;KeQzt0k2k_)fX&sSlz`78WRRdIH4MecV&ANPb08eHi%52648+%d-a@Kwo(&!s#IT%17Wb3A0(d+u`)(KK!4+B^;>n)4HI>J?^?cQ{WK;eElInWexT-1!UcL_47G)I-f`jhU`mEPz7hm8pu`fz!C9I zaly4es8hSZh2YNU>t2XIM|~%F-@%8|!*JctILx&H$k*vOC(c0+55!qB2>x8eeDHeo zXYk>bb zw_KbH{(vVzYj7snf_V#lWse%Sp|@NCPXbZD4qn08rV92ue%Q~z%eL$=>WhS2N3O1^ zg!=7{yu;^}g7LfIv&9*~(>=&PaQiUwChaov>MpLAc`m2h=zVLEBRQ3ECPLqP(**C= z61}b+_Em1UrUG-xMQ@zNz~j(C@Wl>A{T+qba5U~84?h~tUgOXYPt8K_oAV%A3uUVvOGw*$QqGv(kI1wGz}GxR=qG$8oMqxMpIN zQY{s&cUI9boL>jJsi+}#x;JqB*qwpcv-+#3=13Kd8K)uzymr5_+kK1OuJb}XZn28e z1L4_&*KX@-6@{$9>%nigFBD$7ZP@WbgJ9V4elX(iUDy}zhPQ98ih3PW(dLsXI)?pj z{3X11mWmc$Q_+iT6|Tp`?brkED8PF@!Ry11=kQiV3*M_}F?PRCOH_2l4qmv5@Y`Yk 
zJMF*yud|vSy1-utZ{F&5@Z)t;(;g2s&e&=?HW)kN(P}bI!hN&URJ1@%ld%&%zCukw z*b5(3!h>gkM{ggzd4+1aX$LK<6*LCauwq;zc`*=JX>sk=5#at7yEFI`wSDmUMyY7T zEco)^HLQ+(wwoHCF-Ap=k0M^DRP-}lMH}(CM&$m_TR8RscHA%Uxj(>5_yzlHe2!}P z{afPqx8e8S25+H1cG!h2)uidFrcu4rbOfJa7E{yU0ctus0(#99HTlg`Qy4Ux38DCW z+ts9kX46rnCbbriH$ktlLZ{gQA0jlIZE=XjUNtQ|pr*&rbE1;eWOqhQuhQXBJg=sj zS!#NXui-b;B)bKj=f0X!p5V1VU{C%Tt{N)W7M{fp8p?wHGsRs)-}-83sV^Q6O|UsMpy9)z@l4Q=bsjVvXh8Q@XlT(I z4b_GY^b`KYWcVDd(HeTZ7h2IV4LM%aQ2$&FZGEPpUe&eK)kO>aNDI9feLhD?RXW1Q z*-u3^5Px-)iZBzeD)>NG~h1cF;hccbKuLI zuOYj|8hF?dAHKxM2f}>hG zC}~Lp52#Of9sE_OHA7WI2JjL&?}0p5A)jr?TipcYRgju)Aodb+lkZZ~!DKZ(gQjSH zsiuOjYTEZ394?Q~Qw_NRJ+VIG8P`}t#n2Sz!Ry)z@m$dhpTP@x!8Fu_pe+wXtcM|{ z0UDYx5j4eTN1#0-uR;;K&S~(~rfaA>^u=K} zG_>{(;sqUOFMI`Go@nUmOARRz!!t#QJz_W+F}(gqLj=vSz8&IH1-f=^E!{@^{z7BS z>8vGVZ!Mkjh8GsT+F-;m+fPdY!?pAVI-|>UXo~X@UucY%*Jvpj`gh<~Elq^J*xrQr z!*4qX8e?7pytv1-v@2apQ{lUf%hJ-P+geiPpF@zpy|*K0RT>J`A&!yARbbE;7_3J8w;<+YB@OKdw{x;J^!zTkU8sRJ3eEVP zhTIVAVraW1B^t`80ByB0yvERV-5s^`r7rTfp_Z;SMl9NCX%jTvVV+v*jF@-z(NZEb z-5Dbh!_ivmKOV83q@`Ljwe%MG`v7sjzZSmd5G~!^uBF=W9!NSoA8|i-1Tj4UZTOUy z-rL}RhR@mIu9lkL(^A+&=1 zw;TqJ%tSq3r>1<=^p3mLR0B0h0GmIz)ims#nv#pr15k@^R@P7@VDz(|hHAEfHr^3l z@ID%f=cvuVCVB+!13rnfgiD{s+$kCZB5QkRLia+gOLa5HxrX9XXPY zn)vBx2E69htvcGA3>_Z+?B>P5s2qIZ@Ouo}pr;jol=LPT*BG2oQESX1BMMb?4!wEj zPZfDp0uGJgjp&A+-~~?vF!Dh?OPGtkun;qaUQOGh(PQ_k=^bjC7kWomV08{NgcY@| zJ-iZ&&@-9?vzHaY9|sL>tczOG9JqARP`-zT8ukaD&?C2x(U2N-EFn-sW^gHX9cuSx z)He-kBKp8tE4Z=?_w9y{0`;uQQPj62^qkWg>WE&D3r@vm12y;I`NBV1QEx z%Y!e#Y%c2mo?58o4ZtUOB#ySwQYSYpjp?E#U039MH!aNsulm9#kuXq86~L#^3BVBb zD|dmG+5odFxK+il0)NCC@MkV?%6x-7_y#XU znU+#;jlkE2I(pbjN25pTXy^tVtx~}A7_XyQ**ek#mm}eN$_Y}^@A{bYFlSmZTOHb_ zqHo|$%0AqN{?Z%QLKI?7%tOyl$yZTw0p?9$R<}$=4{PCij0T9e3+6t|fz^7b>AF99 zH@pDLF$1J;L5~kpQyyj}HRgcxn3e1jF!vo-lhbL;OyFDbRW;ccVD7>k^y`(HVoGqG z1~42}0X?dchPt9QPDQ^@Ym5Gl`k3YhZuEwa0JU*6u)Gic^_u{EreZb#rbkdC`!B~_ zv>JW`^rsBq+Fz$3-zZ?d2XoY6;Euk&7(6_C9{!jtTvu>YL!;2sF9OeB;Ns&)@WP-* z?*FKvRMg1K-x`{YUi1;TP5}?gfU7fVS-=^UiEOsNWfVl z^auWZ5S$xY!5iY-;D0_4j`IR%DoX0Fq7_3_&~q^hELKq>&IOJkxDDrkUlA&5gZcl# zUd+t!c=Q0@vM?Xdy``dc!0;sI{9PaMnkDdwzyolkHs%D(!vZ)Kc2yJgQBxK4ug8P*D~1t3H^s3wxJ3xi|*}0=FfYDc7p0 zQV9AdxZM~zumkun0k41T0KV~RayyFoKUqzIC(ug;@+eD9L%{Ju%%HPTuWRNbPcY+e ze5?5 zI_h#0x=|bK7O?AB5eT0Rc9(6jS8lNf`v>eSey8K#`oVspKd|$BfF0;71wFt%BJl_G zv~o&va=>+S*!#D_nSX67_-ugf5}f^Kd*ZC#4|?-(U^fbTh)GJS1gwkZD``2-r-|4H zoZSeVwQ0xL6=wCjqJDrPH5gLBl`ThK`hl;r;m z8W3{9@0XHZfZI30?FThfG{6bh;P(pZrb2B2#(h;3!{ONi=jXup zl{G>|KI3pL-DG@^+C38d-@63Y&Y=cGtyR$h)PeV?br96L3IiW!5-_d6}}(rva0OHUJiS#&e&shN>tE9?64wFKwC|L)`~sW zw+rxzVE5y59ebZ_{JS&AlkM1FEq;Xezz(a$OYHdG;y&yq>ivR8$sS%1?Dayb1J9aD zx`5pAat4O@n(U&acdn?{ZpeksxHc1etZnW}dgY;{oiZi8^-!}1+%yl+nDRmFt)nDz=D2T*xL&UEG;IP^=y2BJ^h}+N?o(rRPS8oG2}M$ zdU~Ig+1{}l&%Uv(mrK~QzyH|36gxg{Tm^nT!k%w&6=-cIKNJbmz}5_2kLl`f~qPo_yMRZ{GB%4_~k9&-FbA@OPgEa?9yK zyw?stUO8tdUomMocL^KC?}m-$zj7w>vK^Co{Prn4J0^f{I5mwIEt$bjrqAZ)^7FWJ z>O!7-K9J|%Ucy5+F5}G>t>j%C*7GG>LinoboA{)W+jz^#Vf>a&&adB7@V+0FykMM$ zyN2la>sn@>KQ)Sf=@Y{pN9^X)SH|&>abM=uD_>Zd+g5(t@IN z#~6q7xuxZ0Q&tX?nKp*VyxJwp+%CM972m4NcD1g{7DT(SP+b>h8Q7bBEAe7Bm+5TT zqM5AGiIV|?aBL7U!F9a z@sV0zo;22v-+VrdyR;v{r@kA{bAL?cl^ajzDz`a&MCg3}-eUoG`?G|nSyuCj&o}bT z8@KSh?{emZm1LBEK|t zb6+N_|166w+M36*qYBuY=FeE;KgBHW_E(m7<~xgZDq)AKm$6oN%JV-js`B)eHF@AJ zXC7Cf0soWHh>v4UxqFRveACH}yh2M)KKQg3k9*?7{hJTw_xBFxMKpmgw9VwDl`Z^c z#9^NI;Tm_=D5TKZ)1;!7-K6wEu99SFC`D3T>DtE{(kahM(x{$x(%@^Q!msH!aq7fN zai{(x5%ckm@NIBIgvMlvX5ED-xt=C!e>fqkT}u#Kn#PDZwZfgN z)-zk`hg_S-g_E{Pb#~gMx;k53Kb7ra?>V-0DgA8$JGGWW z@2ie=d|_Y?CoIf4Igus5KF)qcO02=4Ypnj!n{30oJ1p?#0~XZyGYif8%ZfrO^X0>9 
[GIT binary patch payload (base85-encoded, not human-readable) omitted; the full data is preserved in the original patch file.]
zx2oE$pQE;B`Lp6f6>E{Fu#!@_tmB)5HFdnsN{+u~Ek2PPb4|m^liG16(uY$-WO0h@ zMVu(v$w{UxDdkPN{&cfVlD z_#lX};Z#oM{M%w;y0@W$=fYBjY+fIwRd}XkQc9|vd%LQ{-32Po^T#T08>!kESx>EN z*i5Y(xL7Uj_G3*e>a&(z{aDAL`K;;f1y&mJij|F&aF&@toc&>K&U#SIdF_du_uxRz zIerf3tWwD7U_a+NcapQRH>lqE%6a<*^7^1^ysd5v-hOce&&*2aT|Y8;>BGgmcET>+ zc;q7Q?Dvd!AO6hSGU^k_hh9W}%|t9U77%gqD&ktPj!5<#CCt)~#B#lzApbf+F!bLc zNZ^d%&@>d8w3+`_nCQ5<4J41)C`<0)Xnol_G47w|#~8%7F$z~RB@?zw={;CnC7WkZ zNfI`xm^G*AI2NMz%pR?_kK3+Jto~W;>K@KIZ#H73suVVH%px{%?jF|F_zla9k#LR) zz?sUo;FvpIIN3gmN#tbC@q7iRKW67VRX=maBtM=BlksAU7~a&9%E{M6-uty5@7z0? z7t7`FvJHj2r|TYGd%?-;M}6b%wm_1YQiquDHY19LLy0BOL_FJ<5pmomBAfk~+5{y8 z{X`HXc>@H;t~r9eTcKdvBNrJg{I|lydpNJ5yU~^?MVuwt9X&%K)8xiz4c(RE_e>?z zsk};8s*y@-=%y0CA5bx={%YsJYHIlzxmvzpzuIC5Vl5Tos7~*~I-8`i(hf^m{is6L zRIrbAYJadyr+S=wc{1loH*)UiERMldoaNS8&M@H#XNaO>*4?r^^R*7|=p*uuqa%3v z!aUxhT*_OL%C$8_?X1^LEx%cycJ5OLMja4d?1Rl2g3R;S$5tmRVx=j>OTb591&Jzy|rXf=})8_nkQ`>mYU zdYI~$^PJ^MDPCSQkoI*fok-G+H&q-|~0e#Zjdv~xMHZ?=cG)VRu955D8| z(WR-q)qu#>HYO%xe;0+G@dY#VOeJ zQ527Te|k(5mku^e%xV^`h&ZS4T8mKMF*Ala|0PD-HeG2wQdDI;lBhEOJVwO?tW{ZR zJ5=(oMb-Mf>1y49-D>9XPijlaYOLXbl9k#LSa*?JRuOug{yn2O`EX2dF;4QK0jGZo zob_>E&UtMbCqA9YDURoJ7N0eo@#|hrUgamwd-Dlr{OH3oZGw4e*%~|}P3B#pqjzBS`O!2ar(-ReEY?sz zmu2o)S2BfYK?(buU*EX87p@m&&e*Gwn0 zjxN#9pGWlG9Yp*74v{Hi1aVkv!CJkGpr12YkeRH4qv~V9K0i>DyyPez2mUQE`4yLa zEu&=VbreRcHiilQDMrz_k5XRxj#7IlRVCIus-ikxt$S2YtuH=SExCP5E&lXft=JmE zD#|yb>=KEXpy1YIBO=DV*&WKk<>76I-w4 zESt}8t`CjN}u8^LdNUFue&WJ%-m|! z7g1B+y$Nx3X-_PzjMNsMM-)4E5dERU9Ww(mj}bN``=VOp!5 zseRS16-U%|>L2R6#juwCDXe__B-XRc#!4EVV`ar&vW&JQCo$LL>_IGND3-v<9t`Fb zqq8_|;4Dse+roJ+tl;EJPjd?LiZitJ;l%-wyz^rl-oB#;&tNuhbJ=)9)E?g0`v`CQ z@`Sg@-}3TT#i=|*3rrP&n#1<~4{zWg9 z^T-61ft#h0J-DMX4~S7SOk=g-cDmZ0GgEDxRf?6w@vH(`u!>&uS#i`cDkFDUiA%yM z6!kd89t|h)PvsJ~&EUiV%jnp(g)?QG<7As}athzaoW7hN)d|78`Fjk-qzSJbJebB% zsDHR7kM|tf%6n=bcB!`@>oZl*2jx;few(0Je^t=-s3Im7?I18Vb@ zmJ?gM1Y)|^pJ=%(;uLZTV_!$SsauI%@|+mHy(iM|#RW@#H9=afkszttQLrA_EZD0Z z6P$x?)3NNS;GAAmRAj3~`3V~D3b^;D#iW6^VwByoT%p(3jFG@zrF(uum8qRs<$l^$ zt=}io0!A`tx^ax%NYT-R&m+~}+ z%bS}WVD}=-<4#E8N3-w2ei`IixM9VxubgYaM<&&q0%&a$m zI!s)Of=G#PWra6ul)|dYQ0g=9DjiR@soX)e)vm@?wP)EjH6#6~mK^Y79Vf$C#l7LI zHhmq-yxPr5$Y<7At1|WT8gjbTah#%Idrsu1bBXuobMC}_oca3)PEo!LZz)-m*BS_K z-qD9QI5T+XwWYkxe?2ejbBs6CzsDORKk#~Q2r&lNBks?H*y5TKnV3MBGf7lerx0V9 zktn`o5YOv5gb7$k%q+B|$Vtr;F|oGab9se+qpvIPg`JdtHebJwHyVXxB;Q zI5bP8Sie|h*GH?J@$J;&v0Sz5%NDi%?rF8lQHFIctigJE%w!F<3u#=p6lcVy9Fy9c zvouZP6rnR|-&;6K|6QCS!c1B%VlP`w^!uotW!n66d?wGzXGL{fxzg8L^r;JqL-j z+7rUOqWU8FJsrdR1$$;K!CWp{V2-pD%tbm2I`wctOwJM{ig|***>b^N=Q8z6o(VQr z2{AFPuIMa8G11;q^k{~OwgZbqk6|+%yHx*DOdRr_(Q-5z$PUCX639&%o7s#~44d*fN-?is8tZ#~QOKF4Ye63)$!qJ6)ba~I#qF((dj zx=TNCp3N^f?Ysb9J1vUWzewU0jfe1Z%;BBV0^W3K6>s5=@UoGYdF!=@ycqO`H%{>( z&hrh3seDVK%V{S3#eD87BH`+al>qs~9mlQ)&9qLzT!<*&ruW zl8(jI)+y7}(yQOqvImt|=OCUncFLx{-ZED9yeMaGq~-Ljr*VlmmlKmWa$28DoUQK% zj!7xcTLY@`?sau|&rFWyIkdd~jfwZnp3fVOT6uY`Q&dmipnmrQo+&Iwba`co)?J$z z?<Y8|{$yV})Y8Jxh{!`HI1edlOwy#)2)La4pkk#mph&&f+$IfLlp^pR9g z+spHo>%qLj5yQLu+wzVw-FU^&A-rK<28~bU@Se^VUiy6vZ<%(49@Dr~buaJ4B1A4! 
zAPT5WOzmQbts_fZ!)d2(_HNg;w+I*BvY5uoJS$?YIhTT5htEOTZ%>ni_(+PP&pw}j;sefx4u+BnW~ zat+;{=WKO-d2=J0dkP8R6HkQm?%fJrGD*vu*Y)HTTaCP^O6Qr>Y?_B!$m{4z5)6Bt z#;kmZ`Fm+1D^`cbry3Gw0ZZgXTN8cpWFp_xlUO^AAWYg6s!#HWd*V`JJ->$N+HEF^ zb`FZ`St9>>o9J9miE&$TL9x23pgoi*$ZHLx{)ky{Hd!S&XKWH&!<~Z5_p#uf|Bd=2 zexjv$s3;C-DthFdMMs4U(e)*tVsi9fipk%`4~$=<6qd#%m3r?~rQH5rX=<6Hk~TZ0 zQnU+GD`0|Jw|a)!CR?R8R4vCUR#RTe;VG>1>sFRI_=uI)qVbX7%ABiRIOn~FoW<0h zlOGwt>EBG{Jcm|tF5d&3q0c>z+3V#LTT9ctc`ZJ%6Yy4DOWw;S@`?kUc|+G!YWq&$ zot|7?AHI}l($4X+Xjbgo$iK?Xh^`Ju!%A#cY~q&m%g;HsaZJ zgxC)sC)(FPQCq-6Je$7|@w=4f5u>QSj}@3S>bHNNEZARU3cBu_1cYYR}5vXR_|^RyOhD4c5881htDRac*xEXKx)xIS*qw z=OCJ|YBQ75_E^O^KklWrvWv6K^KiC?zPz_yHJYp9d0l85I>x2*(hwR;={Sc^JYu1F z#PvM0klJYXF7n0&A9yd8A(jeKVrgE781HnUd699%eArCwv1NqmbdZP_PZMv}Pc*Ms zR1ojh5S-h>1>-g?jc>IP+*@V~@|*MMx&2hnT>UlHWwa8}UQI7Dk+1)>nEX9P|6Rw= zdkzgda6%2#JS?pRhT5)NzTHis=8VlRgzi}*+I)_dC z8qQh!Wl^5Rdd~ix@{X=ZsSnhKH{P7Xd$lEq=aYu|paY0;hJ`2^TWJj7GSTPUq_N6| zg7Zuh!8*n$*w0QC#P7H0%3N*!!sH(!@*gLU&)ik_&ESM+XJG@%A&XLaW^bijrDv2s z7N)j`^rv~VcWS0e0nKB6VzpvVYNu0vUJ~%~cguO}$4k8PE5)R1IK`nS<=f3C62Dc% zwB#vG3zPro{{OVMgi(i`kK)2bNp7@s)DZ=9 zt)$XaP^h$h$yYhbgs2m>W7W+0;;dxSO4c~&J?nTsn9~~ebC$I=dGDBcH2+nS7>mY` z#1e$YURo1-@&clry^^?#*oeV-o;WX;6QujYY2LS`;4C^>aBSZvFn5Ojg~@+H;{Je6qjCd?w%uf>y*R1HZY91KC~kG?}LeH>SP+bSx$7r&l7VwUqLUSJPDmjFiV(x;i(pOR6 zc9vS(^@`eA*n%~erDNCPdYpCE63(#b0cYVNXpTioV{GSn$CX!<`(1-DACrjrVh&Nf zT}7C%J;d<7sNmFAp?*uaU=$b@#^hr;~6FE9N&xe7M^2`+iG*>{rx$YVm9Sa z9;I_AG`wZe4Blki!0UVZ5X+}t#N?Pl47U~#ss0S{9=uKS^lvD?s5p(eloebB>jdLR z%I}&*~@4<{X4OB|8ALpr}?jX)Q>Ot z3zL6<$*Y-f!*T~khTmV17p@&KD?&HCevGU?KBeh1?tZakUa~nSl4K>2!TMmq^Rm_0uRf=?Q14U6z+0X~c_?`F!Gn!@TXK zhu1HzN+hc_B(Yx$>OWF$WZ(5fAN!doR@4-jz1;-kT)QB)ZbbQA%m2dUKOw>}No}Nc zJACXJKJ;R#2>aRS7)OtnN^6=)#VjnT*18v{ZJ}jY``JjA`6{J(&PJSkQ#R*X;ouZ? zi}HqVEqVRf9Ny4p6>qL1A*S^~#Jh$gOuy+wa%nc@u&g2WA`j>oRzYySYAnbH^$~Ra zX9)ULR|UN-nT}nyzcBd+m=x6b9M-UuHr#aSkwO})Rx%}ysubBx)lAZTnrCaviq)R8 z`eUs))B0f?bAA&iT}(MJZ~b}g+7^^YoXlk~0b)WhrpQhrN$UM$?-obf#)Z|Stmh$c?ulwC6 z-cdD(=vxmZ#wOWBQTHry_P$TtTgnK|cu|n4L4}X+bwIRMgM!D>CuZ zMdskKzc8Vgc%L5)yKzhsKGbrtp~826jQp`xsn6+4=c*Q}>^Ua2W8O-&>v(aN2@YXh zj$4!;(2(l#Hk<+m(75nU$}PUlDM|^vymStoBV^&7BPx=_^Nor7U^?XpAEq)=N^q90 zCCHqKg6q<7L9WXZ9Pif&mSxukd6N>Najskx$8{BTtEs=3_UE7L@OO{#KmE{Y+Z(pO zY)JU3*)0{OAH7sAMG>_mH(brwhpL$wS7;8}m(EGGu*||tj;V5qlc?`OcmU_PY8)A z4`}??SF}Wx7Okl@MVq;qsPLt_Bq@*T_&-lR{+})Yf7f9C#=yEkwRXx?ew)HOE7wQZ z>&;a-)|FO@pN}Y=mlvzFK8b3pE|S&ONMIES1uS!d&KoK~ znJvh!SOx3Bi-Nbx2f@mT6nsZu8 zuqa%YU0=bZtcdY!AFgE9j8iGPk5tS06k#ocBU#V&6|D1GQBG&>M(1a3;PkQe=zQL0 zymUY1BNaQvyNow^$`YXY^<*M_swdi$Ih40TO}en(|n#3f`M;L2D=?I_H+7a?)6I-svEk?~V}_uDO&W z!TifOcYpU7|F<5lkSgo$oS7=GwWWG^n?|N+$;c3;ca2QttfHo|M1xwj+dDRx)lTP_Yx^HyMl9R+&O5%-dO6Qi0qB-5UL_cRM zu}s@b6urEZLtKjXe?`F{))nmI1Zrnx3HIn&f>XL!kcRIT^v6yKw(gIG#I&L!Gbl)u zRfrO8>ZYRX0gch8ET*>b^Z)U3@(0-S*vzmo{-foG=3NR;+&eVJlRimlES94ZTNu=q zwQAPTV=~L^y-VlSRp6wOWfYU+9AjfBPmFSnbEz1jS-nm^Zvh)lYfNer59!8n>IMY6`@vzanM?& ztWsT-rBb#^lJ`=j?_f~70*kV)6*E}t{iCepLt~CPGMBUOFU{*tH{?yeeR%ic4Bm3G zfY&uHfXe#VG1z^nTc!b2BM2QOB4f(3+9Kxf~hj)d5;<6r-qm~hOSuAO?O)d)A5Z+%AHS@T`P&L~zCPDp z{M8?S+aGr6Z!7Wrxz;)S*U8HQ{;&S{+vj6U%3uBQxBby6^Uv5H+NPRc*A(9LyOHqg z0o~8q`rF<9Gu8q2`?c*qR`UEu*2sUH(}kRgb^25TV;df7`L}>CmpVX~aXn$@*TJyi z=|ot;WvMgSiT{P+P453u3L^X z>aM{ry*Hv}%s%wm9C*CaMI5iVhq<2^jlDq$&HcTVHQ|#YG|RQ;W7`}G0P>~|OlT1c z{P7lGP45IJ&-a8`LkB=p?MP^3HA0`oX;61^2CSPp1wN-th1s+|#A5q&Xznu;cBagR zkc>HSaa}H~mCuK&C-ZcqmZ9|21YMOE*e*=Hau6?mIE4u%JeZsH3A4leG@F8gG!>uK(i|ClIQFrl zICNW75ypi@f!iOTiGLF)aiaz7DB2B*Zs-LmA^jnK!yuSgYB&tPH6A9|)1ZFS$&kHt zGR*gx0xQBYVJ5}sbCYbSrOAO#d1mN#E*Ey!mg6mc-d8Bsnn4NDH2j8RS# 
z?!BLdGF1+K*uMZ*`7B0_pA~nFSdTOAZNZ6i_T$Kc6WG7$Q*?;m@!?lLjiz;Z%}&0i zW?b)Gv70O1kIfqt0B`f;5PUZVcJ5(eK`{|d_HGVWp0$R^2T5=sTn81V_J+dB17J{` z0pc2tgm&G=g4ogsH(N}A9R+FdF=R5FY@Z3b<5{r3Z8o$`%YlNnv!QkT9FThRpzEpy zaF5CkiCzN3U^&c4wZe*PYhZWD^)O_(4crSh!HBI}VB3xz5O8M?Y?U5_njeqB-qB9D ztvU}$i!Q^D_bw={y#eCpTTs{IhSq)W!NYTpX>Fb7u(FyLvOatRqkmES>|YXRHt@#? zF%aK-%VGZpLFl3NG#0O}gaM&d(Y7}fPcm|Bd@cgd&xyhWKMqBUfNNtALsgBj+U};f z=Zh9|l3L<1eFFX{(H2i1PQvwjJ7bIXU2*5>6r6v#Cr;?x2MgB?Mb-4-*y!*`lxxQ0 zM0*BSZDPV@Tc+ZuoE$uonTykB@E{Hjf1BczlNlEXqX0MW( z`8&#J9@UG`oG-gO_T-b}v8A4uh1I(%!>cY4@V-eD6q&7n?22lL-qRSg3*#YE)ed}a zcYx!^bovtcqUnQMak4W`26Vbh^e z@Jx8M&q`LCK5))LdSMH;Y=azG(xkv$+pH4nB&m z5m#~Cnm4#*UNOzm>@u2Xts^zNmu!vIeBKlLePc;DvOW;p1=XQ%R45FrR1eAr#=wt7 zv5;?U0y`$g!HLoB;lk=<2szUg`i)G1>oxlUlROaSPZ$E9kEOzO!!Q{5dIY4(j4-j? z1kj942kF}kSi0W?-t*I;$C(`HvOX7<&z(;T5HEm(;TAZMQve5-EQ6$}R*=Q51$O8< zxF56u?ylSjUEG^MIeZHgDt5xVm%9Kz?}OB*hvEE$qtJiCd3dnz8q7Oz1BS-mhLB;8 zp=DhUXo6pX)cl#&K4CEMr7tFA7RQj{C2{@Rve>s~09sy_$H=%~#OjrBpg9DWq=(|E z)?pYTuZNa4(Fj+Rm{&)QcajBsSeoK8q%jT}9EauCHODm-4AGQD(xkhFK8UVg~eGFar`Ia%erTTqyd5)>8aAA3V;5Q0&}d z=xCz0+N4$RWaL^X6I=+(+HZu;EjPobc3VL=>5Y6o`EUMM!?09;;{VwhvQbgmbT28ZJj7(E<#{AgDFSo@`oAfQV9#;yo zPDtVGRvB!)77CHeBB0c=hA``v8X~#?d`h5te^YCS4s8p|>ve!;(>j5Ckq%mPPl3p@ z{ov%dL9n%g0a}zB3qNX3gy7d{(4&0@RJ$?-?pK@&OK5$&1r26GxA<9bH*Pi@h@1lt z&2wRD)A`V+pa9~hErvu~3ZENVVWi(`kbGYYueRABY|d7g{BRr0E@KD%vt4j-!yeGn zx`JJ|oQ9pWhTs;-1z6bRDioi36L8`~h;8SATkqe(mrh?HWiEsE;YF~>?qc|(cL}_= zMuNr1l*NQ%<+1tB3V5(dCEUb?;F}6EoIj`zej68oNB!#K4NpVdwILe0&PufGBz<`!>FrF;MRu@*{(CHg+|D}z1x#3=PRX>51a(~8v`3zm9ubgJ(?P{8mlV-#Y zNZ%Hl5cDxNGO#q9^pu0a-ik1!Fa%y#l0$~6KHSJq!KKt#NRc*y)gziihoLQDNtXmj z4Co9e-s+&!=5EmPmLAS^>j$%ysgQ#spv$w-kn3-RU60bC{|Z`XbpI5n&SgRJ-f58I zKNGSwW_WUW7EH;T4XUTPP^WVq)YmM6&>lq{ z@D;Y~^TqLpi(%IhCGncJ6po7Y$E$k+v7v7TyymEg%YLei%?4M)d;4mlOkEpW(^_`z zGQ+X#u7bCcy|~M z8Zik?(hPi?GX*n^nfRq(IxfqZh4sQLSYKFlAqTK%#0?Cr^c}0e z^4AbesHSnF>9Kwfx5iEic^;d8uq3=~;}69SNny@}5Gen)Hq^b@09@M?P_Oz*9R*;jpn~@a9H;+&{Vkj!mkB zTZ&Y}`RUcsl2#jERjrHHn#u9GP#@c#Q(=`94(HzoJR73Ldehrr*T-$K@v`YnIsG#J$<4EPAf;l7TO(Y9$SF8Y{_F$?EljnWGdE-k~Xq*YifY#sJ5vlGpu zkD-6uCG2zQJN79SpqWs>Un zu>ZCKI++P0DI5fBuG%eK~Ho1C2-_iZx+L?i% zD>e)QUW^3aPDWTiI~`8QGvJh#`ZEhn@O9i&nB$oSgNx6Eq*ODs+%y|9&J9LaQZ?K0J(@!9p%&01Bnj4Z?*Oy6CWGu}9WZ?=lfC_e+PjFVmq^hbhpcmI)4=%LM-y)1Z#e3|h;2 zCVU%ehEgYTp~?0IFt|(slwPzL20mB`i#Kmp5;kGvWF=GD}gqw_JXr=O+tlBj-%C19W_a9vyyE^k)?B(h{ z@FKbxj1TvR8b^ZR=IxrGIUEMFl#!6yQw0exSopa{EUehw7}g$a2B)h%S%W=8?2U5laJ(q*uD+bWP&SO@Pu*nm{n3PWn` zf?9>U;YD=^?CpFUUSynr9sy@)J<78%ZR!PBe)0Lsb^1BURX5R<@rW6~42m01~ z1Mcx&P{02GZkG>QM;FCwJxbu+!qV7kzCW(KF2y?sE8^*yl`yPVHH;rr1LtfE#YPRo za8N1g!{jUQ1Fy!-Q&_au=P_$dV=R#$hxvUIsc+c@<)gae@YQ{BeXUe<`bD2kRW3i;W@{;ii*o@qX!z*kb1%Y<9zefxBPez?D9lfcJr#V@GRfI)(L* z4L-g;w#d4RvE!>0g%2?$p}sv377Pl4W7)OfXs7zH(^mm|52&DASrKGA8$&>L9IRyH zA)r_rc;})v*t_-s&AY(5GbvDKavyNen$0PV20{1lBOqwvXt>a898^-KL!XD~ka;2l zDlX21RR^cSOYL-c*U=1Po6m;2<7UIdA-OO(WF9;Y$cLclWiV)n719D%L6zCnb1Ar8 zJ{-Hr)9}vf47@xv3zJt($HH?t7(8YkUi4jr+d`~Z_0Tr#d2%1RE}X^p{_k*jl&|KY zB~bG{qmJfQsj0Em^0vfox4UEIImO`2^)k>bwj#K9RDpAqYk}fy16a^m4c?Wpu&I6% zXgpO5x^)SVzPugG?%D~qM(E)1ukdvJb z`2|y8^NmbM>^>d#zM26_#VnAYoCB}V&4(&^dGP#mK4g?#2!mTMh5@gaLld_ZYV}$R za~y>blDZKZC2fa`T>L96v2LrisCL?F&v*M!QlPnaidTH>opI- zl5thA>Zj_sKQtV(maoFWyxS&A>KH5O-u5Qz?&zYI{?#V1n_P5}|SxZnGy&g3sc3{}16R7C& zGY)C@4FemM)XcnAK{G3`h9-GTPVD;Ihhob;cpE#beOV~)2!fz0RbkG>P`H^<7dCC+ zAgzZ84{F8H8u6{bStAh|)@~0S%65grm3o3o*B4q08w~ZI8Q{m35wJiq4yL>w58c+(%XiRs&u19$^*bCWD1vp?6vgvhiecVHKXeT*fo(oZ(7x9n zxjRAFZfqs2QMMY+k=DYx#p~k19khn?;}~4sg2mb_kJfYnYgMH6n&%;6Tq87hX^P&; 
zTCBJ@9(xr}#D~q3u}ssxn3X>Szx|kuCtFX&2<>zn6OoN`cbc(%-*hq*;`l$I^G;w$`awjt0WlPsyp18(hG{$?+eEs4uM7A44`f>5{%o&!>)?y zFz-$Vl+c*q#7~*9`o&Zja(p^;Jz<78PcHP%o(FH)g-~Gxt${vfIXs)U3S!2ug$5tj zf_}XX-b~mGJs)p_uihPi`MaP&#eGnC;s9K^b{Gy89D~TK+LeE3t!1IfBzF`_ytH^ zd=+-T`U(81+=iZ49zf>sClFTSC5(>x0B7odhtw1XlfE-J*yD@6Yn8;RHX45{;g7{< z2jZ6Z`kiR4_gg>iNi>euHlfz4{^<=FZg;~Nln(G(wZm7Wt#1G z)$Qu}X~0yk}uV!e|k4}+c3x_SGgo!05nxr-T6@`Y?^)vH|T^xmUVaa_JMZBc46D)ISMdI4pP4(w0vS*Ol9vHI!Y(HI}s|HIc{0HI+whpxyNxa6FMGPmVt03W&!C=i#c`Z_Ozto1B@dFPZj~uID{s5+BlBQCd1AlS@^A{2_f8FwWA=o}1;@hWprz6B zWo4_p$2eK8blWEfEZQ&6n|)kP+jT+SrCyaM&3+@F{aMS_s8q+cP8~zr``PVmd%v}| zRi9)Ivi#FXTHU(6|VXy?hHnj+cF z_MCik=yka;{hjO``B#4WL&sLHw!UrulkIJ99PMq}wP(hlQ~ivk%FHg(j)peUiyNaQ z>!DMmM$>0Xf1A&h`Yu@@UC8v4I%tBW7lt9y-0NY|CyVt`r>)V_+x45Ifi|0^u;2t~ ztjjiO@{wez-?Hseok6>#V0E|j-acRqx*T&}e4S-y0|>x^W4 zr&wBDeqNd@UyzbMmq{0wU6wkPUXgyxzb2&)t(3Z5zagzIx+9&vRVAIwc_=yUc_t;* zc`u#S{v=(r_$HNhuaW*({E_ni)sbKJ*O9eE19{8}J=DLxoN8krkHC4~3Ehn4>$wljN;SXUci%EO}b_VtM>hU-`|rRr08}0dl~nVEK|? zh`iu@sH}Z{y_|GqqijM79~=d&jp;9DV2VjUz3i% zzah2ncuQ&>e^>haf0sQtndFRvvBGLEihPgS>2JXZc$! z)*VxO$jyVS<-nabGQM6|{$x8y&RH(WSCNJb(>>bodaGB?>ReKFrJow%PZjcR;Q`tkjsq&wq~ly96TwLNoG z@=3^-yhfamS~f3~Jj_o^x%1CRqtebwZ^mAf&ULGhx;DEbRX@Hbxr0Zc`dISp{7ib^ z?WGj^>#gMK_epw0HPR;jDLDlEmPVM@mcPxwTHHWKj=WM|ZkVel`<3d;&EgE@FGG#+ zZxf8=Ur812RsC>E2I=SfW2055xWx2x^*=6r``GL!BxmN#lxmo!M`Prf}Iix|k z{Im3yY}N3+{HE%ke6>wI+x__W5vT9aqfR>~Anjb=xR8zKoF^XJe+-+9K`xo*-#YOO%ctNs^`y-Y#`;-XUGN zxKna5N|nM|;@ro8bV;{omNfT7jzr;yrIde1q%)24CDZZ8B}=E1(!uRT($xON(yGlR zQbFJ)sr1%0X_L4mRqIqqwm+(*p|2iGbuFGt;Z<)X!>W(cnMvOz=g1oA>E>Tj`n%uK z33$Y$zP8-&u#S8yv4K2iK_fX7=ipO%8^}6A&E)yFjO9I1E#+JLTgwOEw2_x(n8|6L z9pz82yU3<$cR8qAFWEYxul#dwKRH8tfIPXYEGLxO%I|2H?77iV{?UGv9805R<1`mJ z@0P1Pz2Q{3-PCFFk71tjmM$Aorsi= zbd8bk*~iO+#CEx`%T9T3c&6N9$5GkTuT;*AEt3`Nn{rCl3wiInTDDt<*RwsfyPfTZ z6IM8<^;jCz{w|9%V0IO)aB>Xt2jx3V#zxU8AGvOp_QIUvD!drEQ_C zWm~E4*ktP8B83K7q|)r--SoP58hHn0(x$uFWDs+Z{2g*A>)By4o}W*b@0_5)1%>2w zsEE#AJVS{i&r_c11-fEhN?C8p$f4pgz0SHuX{{=$;g9Q7d;LxFin&eYzpChS-G`)I z?-5z8c}cb1-_XP7@2L6V4^-QznmS$kO5cBe!*$?lsNiA^WwiW7f5U6DZD<|NpQFvJ zqsy5q>hp|KjkwN`#=LTrKL2cQz@-U>>^jGo_a1A`5Bs;`SN^RzU`RXm@@mgj+>r+z zv0&w37oJ|wjX!*}gDd*zso5 zq1@GO7_WLWf_EMm#hd%Mu&@7kt}kY=c91(yZtuYtwxuT9mWV-Gt~TA(eB`R+nB8OvzhrPcIsS1`TVzLt8%h0~SYadgi* zo)Sy8QvKp>bp6S8@=4oCPyVISp@hAZbs?RS4`)%Ara3gfLoVgEK1SVooTR#k3#rko z)AY3V89Mj!4AqpLqj3=z$vNl}O-Z~=PE{3D?OjQE-EV-+ElN`F(i8lD!Y&>U$b9k> zI=Jr{*=>JKdin3k(f=dW{rriZ9r!|D`+cK3i@wvz>|b=G^IsbBycXw#*X8{hE#9nG zj~#aDu)k>o4ql_jX3<4vXQ z`B9(Fd?LOpx7yN!kL<8wlTvH`UavQ=aqq*PPJQ`MXn!`V*6`JU0bI6H;*xMXF5EhV zSLZvhR>R>eIga2S6P6U_9H6p2D_O(|Cl}3@-WN#=57xIi|%j9-QaL-Le9B zc}Y0e?-k9szpC`x2Jt;9LdzsmE0Q6qmZw@)OJ@0{hb*` zrK`f}P^(y~^xi~YbP{NAGX9_0{%sU`G>Jww-$|vouJ^R0OlpvsO}&=qP*~GEdVAz3 z^?*OjS{6|0mlIU*w1{#}pQ9It&r`*}3-tAA8AUi>rj?_wQOs=cNWD!~ZST>OP51G= zaSy1Z&SPrY?+KNSc}hB7&*`$(D_ZXKnqpGl(($_QX^_oFa*q2%%R7Fh+I@bI-I3qa zV);LM_plC^xoPpr+q(RFctcK#*Jq0z2CQY+lmk9CW$h_O++@5lum5VwS(BUdw974d zYE3JSHZte4ULE;z!_K^IL04`x&XUJ__T&|-dU3F6A3pZmhD&<%D{r^z3*&V z-gQ3JUlBxID?+KX(^~R!UPqn$BWUrGjWlUmES)?VM~?NkP+HoAr3;abvP^J(g@lk_9`G!4-`OE0?>)12KUG%WoB8TY+N z|I*7S{@f*6n|GPMFD$3TvlX;3xRS19-=ty7Z;^HC9lCt{K9xm2qJG*>$id|qWmLSN zDO=ytYx_@>YWkJ(hW?;;zQ1X-PHn#XQ;SoVXtQN%eLiB_kZXJ!@onG6Tx4&+H<~r$ zo|Y!Ojo9Z#`p&->Jl?Caba-|gIu+uyR}W1d!Q)x?^Ol-@iz zwhudhvtf%D{dfXA^YrNee)Ub}S;vNO`Tb$se2gO-=sK~zuQMlXaN(qD<9UE^<@q~j z@Vk~CY-8xfdP^5_?c^XH8WPS8rbY9x<8l1*);{)Jo55^whO1UzXSa>j8t*t0>S#{X zA$KG-#w(XIzWj?l^T% zE}-u-PtorcMYON?8TxqoEd41frq-9w(T=PVinqH!YsQq4cKs_`Ke zG4IAn=9b(%p$DgRx8kXLdvX4U-uz*HKVH{}Il(~Uysa{u8rX7T`CvBe>cB?hM{uCY 
zSe_I&fkU)inHsop#I#xLK7JA3f9cONTL$yz#T)pEM=FmRy^nLIW^wVrBEFhk&L{n< zH9JRI(ApOLNy~8{jq5vtTy-F{QE2(075@)kXj}_d;G=z5^U&khTds(kzCYOG}HG=hv zxO?_h4jcDHlewcc4L#hCzIU;s)D}a@Z>u9&l#ih;&s^!B%TzkOa|SItXYRA&Vip`Y2ErDFp6RDGD zGC3OId;1UVAZ1qyJ)V+E^ZfSE9P2bnU7JbzeGX7`$U%DW@(`IHg|>bn5*#8rZTnuhy!|dFk4`Z-y=p>Y~RPlNxc+@W$LBzX|_MG~fdhnzH)1 z8Jh}Y-rm!cC+=^-1N*k(eqY+~yuW5_V&92Z#aZy}tKIoJ_2h#atl7i~GeB7%KGBEy z%5Is<3+?#HkRfc?!--qgjNwf=z+~(8XS3b1u!eZK5;!8hH1yB?BAZjRwP_bty#diy%Vv7ipheXoE)lp>k zB$_t2jUoOUORs1%RcFQ1G0#M@ez=X?b$8H%Z9B;*V>hj6u#Y}JOrtjaGbnIqCUvUZ zPmzCeC_XxuE*l*si^zPc{EDm#Dx{N-izs98IWpZ}LVs74(yHy3X{>Vv1wFb+FWcXx zoh}dQ$n{4w?B+|dc6diA`@T}c5r3%QdmT;<*5bF(_4s{^Hox1Z%aMKc_|Gjp9$3|g zcT8%+)XR{a&71M!Jw`lsY71UmuQgY?<9iG?nz8KPoe!nU><&hSRvTsU7uIALY$cFC545DVagDL)uBek;~Nt3RO zqGmJ3l4hkVm1Rz&#$%>azmaa#`J@+3mln}|%O&J_%$IDs`IC8zHPlfH*IG9ZqUWpD zkx{J(iWK9l+ zzn)&FK`(Ao;H3N1(&sVJ>^C%U@COPu{zk2i|Dc6Qzvzc&E#8`2hku3CI<)=O)SdAUa_4m2*gr1%}&3PW@+xxKlgC)GeVM+I(!3->JdpPpQEWwc?=cw-b9Pn#8ZZ5D``h1)68DGXj0N{ zn(VNb&d$l8f4ciA_-rVbA+~lEOUuh1T=}}c+3$l*rNCP5zQuRF> z8hdX5P4JcI$WdFmtg$CoOGjM8(3z4Q$I|tYlPLDiWUBZ)gJ!4AB;!|}lzd@x->hD0zYR`R+9si9g;)C>mMP50mtaaz~fXt|0J!{E}}Y( z&(YkeCA2y7BK5;OopkdGJqW2J;~CefeAZ3+H0S}%x&4lEI((!8%~u-b_=B9D*3jj% zwK*|Uix)N1<_1wZJZg9Y)_>7}ZD%#)%f}mW`=9#kV`s=M@{QPejw#Q%*@|EHY0EE7 zI`ZEuT{(8C6&GmrWphcxFHRHNZm{Eb!yNhjvyptv-i5y%9>cb6rg6kfh0mX!&u{D& zvk38H?j6Xsjbm9YPT-{`DSS+4FFVy_vS(y2Z@pZ?YkyYoiwkwhdstT*=!@@2T1E8U zcrZP1bfDtVW2qp{m1eq4rms5FXl-YATB_|y2Y=6_T`d<<;?||qG{T?G{9HqCIt9}E z?LoBhK?p_9Tt~YWhLg5a6n$wKL+kuvXziI;>K(9&qSbhce6@w{z1m9eJ|t1lr0vu> zYzOt+oI=wBQ>nD`9=f?_4^0ivAipWuROOvR&9~*zf-y%ZLoc6ZSQOBVq(Z9K;2d4L zR6@5mUm&aDWpv=#H9Gy~7JYtjmmV&7O!~>ssK)3ORjF^O-1!q(p8853{C-ko_&<8R zt2Qs|jrB`D_Fh)&^5#SJSstjzH+nSY`ZZ0sjcUj-kB#|JwkdbM-JEqZTC&#p*6j1M zBe!~P!Rvgic=*9S-1xMHH7z9OQ-k^9uwnc*Y&ee}G?LeM8p|z5PvBV=cbu(%C*3^1cR9XryHot>$wRS(KPZADM!_9Kf) zGVR$TsM|d!%Jv^iR|ig_fG3kEzC@vwj-GV+;~cV2@uquT3&^s~Qi|N~M+4$l(YNX~ zv=@EHMFi7l*HGH+97bF0){%$L23j{Xik@tZrT%f7Xj6*>ayz$`!agQZ!@fHxAU}nI zN9?7VQR(EDltoT{2T1pPE>#&GrLJvHQa|*WKM}=r?0P8{+gHfi<_0y1zC-EyRphQb zA$89yI$-^lW|e)SVD$?fF8EHO-7nf(@Q-esti=sC*X6&zwRp~CJx&^;&j$<*`TKMu zt{&B#yS{48*S?yuRepPZx7&inO-pv1Xw5nYd$HN)zO3)rpI@6xY`tm-?=m0Gb?1-d zP|U#R{jkrKI)~Fz7jlzzE7xFPH6^+s~W>~cQiZwUu!%*)o98`*P^c$btqwo4t4#gN51)uY39^sRC|Rn z#TB41HJVHPxZdw05=)ssd)>q~m029i}XJGyme2tBcPphZ(hP-?^| zig$OR>y5_Im1dKvgYh)Fq_~m2od+F^pF?>M=TXKGAAE=MVtSail)_&wr?fr+6fif4 z=w&FGyj@S*PDjzu^f>CD8&96C5-8Uj*IZJQD137=eVnqL>bYZ2p<_B-EI3TvEsxR& z*JJn&;N$e1Lh#|Jm*5heb^|||2J@j&1*D2bNPo7{=An5$rH;Gk>~}%#L$2 z*s(B=ZC77r??>x3{)aL(NiKz&(6aNIFmYM)_UR4HCFPlB&57R{uXnXcD%T>Hq4lZd z?FMAl#DMzTY)bdPm{LV~OL{S{H9g8OqY)-u>B>4w`uod@V*2(X#T4tEc%pkjGVMA( zm=3QRLMxUCImk66oEfB+9GULAD7gbm?0v zb@-l6&$eaJpIHZJ_vl6w}`i_#Q#WOXRfW3f=I*UXJf| za!_v3-ICjscl`maHF-=Y+P@&ZGjHkq+iH4z=MS~4sKqC!9&2B%$LB2?@Tj0>+N&D)xB;r5OkG1rm@uj#>+F1^^Vsy`o$!k*Z57ykHgB0K8L=e!k*cuj}p9R4AY z+nI!O$Dy0~(6ltZ8-Iikj=aG2YlUgPZ8@k3y?;hi|NRZkj6U}?Kkq-%ICOih8PcT| z?H`~+!9@+}@a9IeWJnXTGdH5FU{kucwKaKKwWIAl%*kYKd+IZ)Gj-q6gGOj<$izrP zx~&G#mMB~Lkuii?SPrFs-5towVmQs)J(9-yJ5$~W7g{@J0-ZZDiF`F~^g6_y`ZV;U zNcTBZqwP%sd1sfnm@as(pxU?nNq;8|Fm`EKPZ6|}i zJ4yPLLaQh3q4(d?slZ`BjdaW=TfM_%oqUA0*&L-;K?U@q!AVNbJxO0fPEovD5v92o z(yuL*yckWOJai4k)c}xyj@2Ew|Cn_5Ajk-&J>8@)XK3k*9 zx;+~5%mRH5{fE7PA=1N?1-g1WvpFirx z{~W#9x5Ss5_X^@2HKE)_+QJJQ_wlm$L)`Z71rEIxuF0NOsQEtqnr8I5M;h1RZ#4TK zeAR53{8!UlsYf3M>XBb`Q@XgiIqlfiibm9FL(R9hq3{LmXx;b@l%NFo8_kO(w%9)9G}c8@2i} zlTt6uqta#Gv?FXGxnMoI^!+lr;<}plpA4kbCBf9^$y%CtG=eS%MNjWXCt8C!N!se2k-DoUry*;%ymMmDwYe269v$feAeN9j;pJ}p~QKn^{N$o+OP 
zNmtI%nD7hK%jP0kVehkg+%?K|D5rI6Zcyvs+w^AaeKIb7OeY;*(cQpLwD#^-dawPP zqUzP+zwve1a)J&wR`hsxP-FhJz<|deY{e7Lwc*G^?KppJd(NNVg=^{d;M_C)`B=sP z%n^>fw#PW09q7i91q*moQ(v}zu!e62MzG1=1n#qF4+k&I;lr)3^7ltEnrWl1XrAG_ zT2Y%trVXac%Z8Gc%W-3#rRU4-mA$WFMur1g;L_2bu@ME1}a(>MI~)w$ul{Q z%sR!>g@s9EZhSS`FEwMPqio*@Vk;nsKCFbAF-Uj(2qGz^zVM zaK|B*thb>LZ&&O&>d|n{`{v9}pIo`;<3+q}rXR1};LrC$WBGpgHh%bM2cJ5Y%S#5{ z=B%W6O-{}QO+na4&BQ5RG`5CyY12Gi5|NGRZDLdEGr9$h>EDJ%-)>J@-7Tm?es|KZ zvZBBtz3IKe^!(;PI;CYtt9K8kgMmY6*)l<^Wk<5f8BUj~N79>~W60EUJRQ5@N;h0p zI@oVEbvN`Pmz(qG(vJBw`P(8YTd#0+QbK)#OR4dcGO8VPg`OE)C9MS&^xfbV9do)%c~|b!!#@vbwbo-w zX#I><8^5DYPVcGFqA#@S!#C<2^Ovr^sKYK#wRufueNHZD$nh5Xyx78k>tAoi|C~)Z z->fbB>UH4216_DgZ!7-jqT$DNWj-#Be2h&RJ>Vl!f~NiLa?Nh#yQT|x1dP<9IZF*G=8FmS9NvnW6?Pz!+=_Nv zSW}Y&{m5^WhNkTsNRuYoQMX6-w71zX`Z;|#9eyx^4n;eY)wi)!txTk_8Plli8aKKb z?n$-99I~10MOoXtX`}lB(tov-x{BpAw9Xo8k{dwJeudGb`Rl2wbvO;b9zlEcqiEjV zjil3g6P-=jOl-M@Dz0s%F;|nxKuV!?4R+JG^gZ;)FO4R&JV4atF!k@3N6C+l5Y5V` zQFBhvGrv=`tyK}tA9a=nye_7PeM@NjDa>5BwvXPJYqS#QIe%`uMN5=B)ZhIc8GU+4 zZLznU*5f5L-TjKZ2EL_6i$72p&dq!a_)AXv|KZ$DU6yXt_h@Tw^~#(NMRw!;r~2@)cnx1Dv*nP+gZZ1*FfQIXk&WDDv+eh#{OI#4 z4nG#ffs2#aTG`Ka zq;VcVUM$nS^@C}DwxG7&jlSPLaw_eP>TsusKx4Ov@c;grDVHN+MbydFR2vu z)06c5y~*K#4^PL5kchjs>0@}dyZ`4oz4h2ngfSvZZ(iJBSb(>bs}Nq}DmgPAQ=0+}=(#pR{LlbY&ocebu)wPNu zT%S0SWV%Z0!TMJknT|s*-Z$+ztCJu z>+Ls?>yJn}bt#%Qt%{*>rm=MAW*kMSn`v~%cyip3K+WE6rCZp?f7N;i`7BSN^+Boh zq3#~~J9Zy=D{1s>OFC_OkV%hc?kD4O2k}o|4pF}yN9p^LW0V?roHiGoqO5=-8h~?Q zoe!TQ-{u#{HmQsPFI}T``W5u;c_l6QjWx*Gn{@x}U3$@>ijF2dB-c+*h@xImhUt5J z9l-~>((fxZYxR@jihhxm>p!YxUx)XMugk#^I$VEY16~=^kj0NiT>hgm$8|Mi!+w~v z%1zj8i7B60+=A2NTk?n#t+>IxHf)}Z{(jPeAJ6T{3%m5=+>f^0CTl2{w~+t!fTia-iH9!z80LMi1=7!AOlVp7Kqr1dYFZkBH(+fQ+H*m5&PMQx$}&$rT> zkID3)`7U~CkV-8#?x9f|_R;c9X|!=xI@Rr*L0Jd)(}(bb)S@znjKv{3?{ksp26W+WnY}_q?RwFRy7Qa;NF_chtG{2YPep6QvLNP8kuu>5@k+*1K1i zBd%)kwT{~S{#^sUAR6+^=Z)A{+khXwGvuRxnsL`ACY)7b!d|~k*>z(JZuhS>|E=Ga z?<_|TH|)q!w>xrIcMEPaw=1uI(~Sr2vSg>P)@=R3hUZ=yz?(PO^3-)hdBR*LJ~qpl z|7>yPtjZ~@ovv{8{rOy4YbjUfuHnh;*0XNFcJ}+8&23tha1X1yyykk9#!j~bz4>8B z{oT+9u6WShKT9a_VgMN}2%?4QA=GGi7$sRn(5b`?G$}5Q9-GCJm;M&AjNU>Wc5S7T zwUf!xD1}mDchT}}oKI}Ok0N)Z(ax3o>9TZy%nJ{Zy+JNTyvd`-J0DK!s#L0W#VXnpz{`mDUC)8DJfZ9)y{M*O1IjsMZBN45F)DJ}l6x*k`} z)ZyA2b@>tcvst7bzd|pFF>10k=EEl>BUZcY-hBRjl3Xu7xL~fzjV69-Vg6`1I0sp z{o*04%@y%hqlm>$ifB7Y5qYx}(P@<;=7uT4Fh&sv;}ubusEFPvinzKL_hl$z@Igf^ zKcNWav?6AgDB|XIMKr#ph#ggmSo%~E{a@m~*NV`5P{f{UMNIjj2nmRk$}$#q>t1NYPit1tV3AHc^F1D^+Z1ql*4!syJ+p``W8Qr=u!-yQ<kcURSe#virs0dXqTmmoCCP;Fy8lZRb-t}#me)z z?~*ENRjT6ZT~!pnR>hGYs<><9DPCE6iW!NXqD_p4i2mgvW;Itt`+lBrHI$*iukx6_a9M&{&7XPoKeKZ3ySDfu85l3(EXtz{GKRc z{c}b5fXUVOiWvDt5m&w|;wxBOtfh*RbyYD999rn9Vlp@kX{w5z#?Ym?Dpvo;p*8e_ z{*OAT!p{Plc7ayiRdKN=K4ULcIP_IfKhTO*;R6mCgH@4luZkH%p{paF+X;G)#xY}5 zF$@e+Ca7YjD~<u<*Kt*M6Xib&v6Wt5xBl?J2sNdx}(NPcbLcQ=}Anh?pAqq`o5l!7GnO zC_G00aD7Hbr71710{Q4y!XhxB`Csun<9=P!@lfOL<#b1cn*#^tccBd@J*p2 z+`uUFoFeSOsdt4Uf^RBf)jj0cGeyL}JFUQI*(XIv@XnrJikOKU`-L1Ut)q&9da8KR zKox%)BQFe~2X1Yf;;`=-RS^qLKHzlHMipoK z<8cF#jkc=j1|Jm-0S9nuJxUen&Zt+g8VFXYU^N?z_D+Xi!RR@%>s7IT1GsL4Phyap;M5NpHa8WY1^(%uf!E1V#quMn zc$kmRQJ@MZuo(u=1l&-?zt{M)2A>ODNO%gLcu(P#?;(8ddx*v_J;c+`9%6M1Mbzu3 zh;-!GD+fi;L}U@zY+8WY_eY;WkMRwJwrdr!FkBI~@Xrr$>atxCPT+Ju6}<;uipm57 zuo{uC2=h~lkV_OX`ideBA>$f?QOCQAxO-m_>mGs+vd-g$BC1|1qQg6Q3cR$CcLU+6 z#Xl913T`of(5K+9%XL+er>zPd`0EL{eFU==V79>+y~R`&gIePAgWG0sYYtwgJE@`y z{^|^0>4H~LFIANHf$#dN0#^nQU*WC1!Kx?(FL(HBF8sB91ek(ZrwOQGWZoC}N(;Pd z+>kZkby|g|!E57uJU4i)T88_8!9>2kHJv@b>7uc-z5OYs?h+3CC(4#%XlOG;ph@m3(wo}CCZi*N> z068^85q033>8^@+FhdcR?)V*MfXUv9s9vmyEq;o4u^L>!Y9v_oT#tDn5?+e|uSDpY 
zu87rHifELrh)Vb>8-DURu86HCk%O2Ia^R_$^T;{$rO6lJM`Yk8_=~SA!WtQ9cn5!n z&%RYD!T?$5iTPplOY}GJdx-hrCUSAo5A?1YMXUwCOW@~S8+^1-zdFzi{7yH755TSk z*oA}LQFv|`_TfzUB$(7_Ym8&JcP>$4>6$1Lmd0+A$HV3_Jhqgu=&?n5$}4T=E3K+ z1l{cwQ6a#$i8*4$idC{ z`&LEhZc~IA{MB$5{IOdRc6*V5nCISuUw5$Uiaa#Th3CNV02q3nP{goPcpqmJp#$Gd zy{w3z;CH?p^B(vmgP$3={dfppg4_1zir5Brci&+Cc&`Y@kCZ7{moR}m{U z==H3K4TInT_{m`u<^nL%$LtoQg3Bx%?**pb&=;)UEQ3b=@CQ6K7OWPABJaZRyKv-H zlp^9{6p;^ZUE=ZQ7Dcp7gg=w;`YGsbV3w1HS>gcZ3h-)n6f@#6Mf539#Qs8PT?7{A z@czy#VtyICbxjfT;J5S}c+4&IEHG*mtlxkBn^9173p1=Aoa>0jp%N zG7#__Zc*^oo#E(d@YEj#8H@buxCV0{vhD0vFu8_oMPK>t(_(fjxNbOw8cfUuLC~U zz{RzpD&Bz0kY=iIG(p`X3!_@A!leUdEaYE=1+)g6;pkm?@RC_y_y8G5U^H^*|K~t! zaA9Pdyd0hb7n2ywDTguFXn6|R5xV<%2rH~pj_>vmEzt8zkzcy#`ByL_yVXP1gGZVP z)*h{)3%oMG0`qKFyiXfNjPHluVT(1RJ!;Jneb=U<4MQ!Qvq}>;;E83&3m< zeqM_H4;DFo&<`w@g2O&=NQ3?f(dbWLa3U65H^FyX;5)DgMAo@&$4s#gy2C3)S&DcH z7K8GTXUFg!!Qojk)}R;Ai^`zoWjx0fJhmL`sfz#4=o@Y!yTITnJdy>E1c5^oIIISP zr_kT|Gd|B(_~|=p(XvCkK@G6DLp_r>Q#?N3?>ouIiv5Pn8JC9g%jLwAb8^Tgo!kWUSvb;ljZecZHA{}WOb z;RVfWW#Dnyim*ccd*s3^&|U}H|L21Rr}3B)%z@B7|0=Y+u80daq3c~7b06!Y$H>eV z=rzcsOlWOX4NkcIr*-8o_z^X4@((=-dVfS8IjxPFMlbO}AL(v@+D9MpF;c}PcwlgI z^kn3eAF?SQ+DBTUS3~pn(ER>D%n|bcdq_U?o;U*8H4?KNbZC}s(2}TiW^|kY>$URW-iZJv+ZtMHL+6O2(C|3wya@g~i8jXm3#OZnioLx%Wu&;pm|;B{pcHN=m$9e27BnOtp#n6GkMUt2061DIzK>-KZVZS zF=w@Ehq^`|XbPTCq@#<$WQ8iazzaF6v0t+meSaNlU?cpt2Qxt$X1p`V z!4mK<#|(HEuk{+9a`hAr6Ffx484qFi*h365Q$%iOMclSQ1`WmfZzR?Z<50U36cO%$ z+**jNMcq#eMsBS~-avcpZODV2;0De2r7I%-fFf*i(37F}MrbV+Vn#)#WS>W#L*toO zFdJRNY*nF%9yidh@1lo4g3q2{Rz{W#g4XWHlMlEBy#v>eSf6|X8|YpN%@;%O)~M}8 z$P-IwUaW^2gXW*0cN=3>EJaPPMDLvgy{AKO0llwufp*B2ENje0|K&RJM4-m;ZFl02 zJ$fy)?t?nt>4aHo6!t`%F@K@PE61y12Xwcb0cP%aUKR7$EX>lV^~SzNb&gu)}|tgxqu`KbbaXg?J*FF4rGfo5LF5@gY>wU|X>|34FHQ2QaM z`7MVq!yZA6=3{LE{hOiQ!=V3M=7%u{Soty0cw0X^ltwa{Q@=K9a^{j zij0EZ%YG?h9P0eP{mIeL+)oD@G{AldYJILD_B5b-ATlKp+WR2Sr?pbW1Ze*R+Uugu z7j}oXJuzoO`|YUpMyT^V)OaV<`0^phjA2;Q4@ZAMzPxb3{>WJ95AEN(s={_M*4en# zM#jjf`%|7+ucPME7GT}741AZPKY+n4)cwp5)HU><0qv_dW6hUg+5s9gAWYA;~ZF=nSlJBh%5n%6YxPQd|*CH6*{xAU*e5Cf*<}{qvs=o zELUOe5Qu&b9>*i_-r{f^cx>H**W8MAOp+=ZfJZH4QYv_K0*f}su~sa=JOCbXMd&dX z!3aFw+y>W2_&o2?>;Ivq^gV@LTTjs(`@-IHJ;mz<9zuf|%T5b3d=Jba64r>$*e7vC zp3K0{vlVe1nYGOyIgg%kHxzrj(U_xj46qrCjOqoS9EDH1zsEfF1oagE&_$N~=aGh}uSW1X z7`T*oX?th|PTj!B4~%F$@@)co!zAR-H0)EsFKywKQFHO0;FSSB$Q>}+y$syol^Wdk ztj7JpU=<2)hJ$f5{1%VCegU7R;PdJr*dKu}^N~|0@E*V?rwsddmGH`Q>`~YD z6w{k}ib@MlQ9jmFxX;53d>QLTtlgcOf`>KsB{WzA*rCoxVMB-yg=l$x$3rfLY-bo&z5Geim~TJTx7Q;^3ht z;MC;4B3vKBM=#L-UxO7q^c}1wfz_H%;D=l@{f=IaTssPGlfbPbJhiSCSZQH@82q}` zhmLyiLKEnWn?JZEgV$K_a&C>BHbd>UM{ae-JPKxqyP+4uQ;U#s|Ls+dw}D6CtN(h; zJ}{d$7@6aM>~VxgoY2$3EMhcX3%+U%Po+%3UJRIBng0L%%6T*40R=UHoHGKqo=dTJ zzXslcpVGmp7C3GC&rk5usBPHC->Hg(RP=xFiUX^2C$Wcr7c76`em(H$g?=)^L#R0( z;`%)gF$!MsYJuD`#~QIKW}m)T*U4Bb3CvmWPJgfopNTa)ypx7pb6gB9{V~gjVUCJa zL?U{AWjyB5B;3&V{lV$@K6nIuM+Pg0T=)-u@730_41|AHCo zG`N|7TLW-2hrfP5z+8*$i+hT>5Bz?;LS};BK=5mT96a?AYjW^wi98Gf!#3zk#^5#u z+`_pBMWG1x5yw>@BXOjSkS`FOrXI0meC`eDx* zyyAkfrxpWF$Ul29QsA40H?T+Z9OnYCj*sr@A>xxggfI3X*F5tOf5An+G4@WHW6jkT zy|{}aK3Sqh2jH_hV4dQG-tVf2C6jTU3T#@=MQ*`AV|<}G{L^F&GA|e!Mqtg1jB}2~ zF<_Oy4LP<0K1#(}V=vY#8PFfB@(-`*S7_j>ecE8_YRt7WwFPIs@>o+_X z{2TuJhxxG%`~zm`U={|yS%cehee?_D~ z18#QK@Fuv;1~X3$X01W^J(!Ih0v_<2)=2EDfK!J_=!Nd^&>ZlXhxO+Fm^!N{x6YN#*wtQ7-r1O%uJ&(Gsn#A7-EPS&-1PP7w1AtvPRg|Rb6j) zD;W!irolUE)0L(8g|)rwmOn~Q^eKGJ-m~tcnLX>?=lq_8^Wuznc&B9epwyhnGEsJ^Z6OigHyZU)HCoY@{V&37!~@+ zcLJjj7##zn2r!D9ko*OsPSi5~hYNMbz^yy!zzKZX!mk|Qa|~X2!Dm_lv<_^Z!mA4C z;gC}B3w#n*1W)kEjTWv1pIqqR+eY+(TcEk!=mGaeKjF{1(d2g0mf$OQbKfO& 
zEgzUH=~=fAHD%3{o^@NLAXaJl+*zp~`RIp~BA(@_#Z~dH!tXXir>HS?(XsB`sV{xG z3`4Uh0m4kkQ_m-4^5^Z=d=0f(XB zP!f$Bl?TrZ4(0Mwr;DI7;7}9{j=+<%Xxkca$Wn#gF*y8Dlm29FVqC9x-EUy=TT{-d z?cqV^-gQg#0rwGL10MfO0e=^~K*ye~K^wzp*(1)ad3)APxC`&anK4HkdW_l8>)iO= zy!1sZnnhbpZM{0zQC2)d}?a(5rNA@|E~EoXZ&ljj|E@7R0^# zTAsUs_-*E~+xXiK`~dniKNkHU=C$!?$B&WwC&BI%+#~Me&!IiUKHFdLgSdCS1rEe~ z!+m-L4>`x4|sTDeVJIdCe}T_OV2B zfEUO~?qAKw*^;_ak(hgPQm2XgDdN6_xF;p%ZPBLDC5a)qpQ!?8Q{rBOdJ9usr+5(rF0|qJ@C!&A_;occNqPeNt{EeCn>;Usu%yh z03WvsT;a&dbL0f)##Ypm;>Xdhw>|5=0S{jh&depq%Zk+F7UX(+YJE@m-=7*Ws8`*@ zBZ(bc$Zex0gTwRy9%(u{jvnn=hAypRmbjJ2?xIh)pIDOblZpK`a{UW&ze3EwrUe+Z#FMuHU-Euy^sIa43Vq=O z_^GnYfvUiJ3mk7u{{O+*V+iL)xbSTJIVR% zXiHXN|395CoS2;w^nD`w-jKKlGojbSd>1kQOpdo9?v;u8Kw|!i9G{4$97R)JQhVFu zms(cl>_^OJk>e?9kgs*{NqDBO#5_Lw5>4C-68C|`e0f{yFuC4>9C!ADFT}av0M3EK zIGdt78ApOUG0#5czw>>>L~8jIG-n#TGSMjyw*zQaG-r|daC0H=yOh|kAm;0MEpTYD zi{3<-x^;&9xJ}>TEf}TjQ}@}3K6O)#k5sM1Hd#vAw5p~}Wt-ZRs+~=pyV>NOU{jB2 zHhqbe-?3@$Gn=lx z=5;^XRQZQZf5)*aRU*5hGTSvEt6fF2*_9xdU0?Fq^|+i}ajM(pt7X@Y`gWCSV%NpC zcD?It*MaVKIs4kxeT-d`$Jv$JW!HpgyY9}jYt0h7(yg%T*gCsP?6WI$!mjD(?D~G$ zt`s-zO8&sEn@_oXvTJ=ZhknoO(9XOL-ck-7s?B{Z9J<-xp^YOPnmWm$J9dY* zyBzB7bI3izn##n5iBMOf`$^l&!Q=3A#Ep z^DNgzBK7`uq&g!Kb55hr5DD z51XbAu<7*(o05&U$uZfcAz)a@VbeZ0n0R?k(54*IZ3=;BnuRv4UTo7KFf~?#7r4IK zU{lK7Hl5gSQ=CIK^#ND^d7HvlZCZGX&-1{hx=(DX^wOrWUu~M5+^%32yPjsZDZ85RLz@_%Jn!>B|k^1TYQ^hGTM|lhfRwL5T`;m zH7agX0F3fgAm zcv#-9YgW5d*RG{4>^jN5ScmUFpHC z?L)hcyt1qFcX%Gxq1S00+Qt7hvc@2X;)Bs?k3+wO9ICh1p`S4hCB5m;_uCHn|8Z#M zCx-@r*`<77*4t1;hoK&SgH1tGRok12b(o6yIZdN(L~8P@NZtPtseEyXXEH9CZF-W; zrUtoe8lE3cl&~p2xLmJhQ)CEjhw6!S`{hQIjrr*G2 zDV(x*x2aW6n_l(;JGk|72r&eoMR07%Xq%dV%NlSg=!A0~;_8E&V6$zGP5r>fgk#N@ z+EjlXJO!fzo6*`mHk~|d)9zC?jX4AUf7w*!|FG)!H@WqK=c1qUKHK#82YHnQ{Yz=r z;Iww_&S+O5bTnlSyBvA#iYZ-TfnUUV7o3%g>znX zb0(O=x7A>l>O8M|->yyI6Or7ZKG__4SlXfb4ICPaCN6R~v;~Yt&Ua|*Du!QwesOoJ<>!J_+An^J&BT9-`;1H2YoX*F8a?#c10yo8GN}e`{>|1}2Na zW5Z5#70v6sm%NF!>BVt07F_P1wJ9zHIf1 z^~iDzJJ&ts6w0YX(j~3)+uJYh?3SM>VZrAU<>}o#TuGVAi zy5zGfDfMmjJZj};yDIFqs}gms=5=b@dtM9udzIaxa6!0IA3Vl6U&A@xD>in5&eN5RnV=o z@FRXBn{3U9TPt*>jZNj?!mKWE1`M8n!Owow7~+40_=k!A!QnP7C;rLD+w?D&A(P1Q zDb#&%NNK`VJq6U9Y3)b4Gr7$11ugL z<$b`Tz-d0id7Cza#r&&q>^jfC3Ff!C|DH{ms7vR;XYos$7W`w=`!_ay{KRJfpWhQu zTi{TN)Zhbunt;ol5^$(A+`xZqhd=Wyc0~x>@ERh4@6 zV~$-1@g2$bz(?v-?Tct29GdV3E&XBF+8hpD?BdYji4H|?ap)V~q0VdSP!U6~e1>Kf zHI=z-q&9%br4;B!PVx#oE)_=)%22=IMwKdPj|HDq3k`=KZ5p9ZaN|u&8#UA>PdC1M z4|oF(^#;KiaA-Y>oE=9TiTxO2-_HTY#6F9Y$9Zh39_5G_KRlFqW}>RjFfeVPC# zZh=FKRW@CK6N%8M3~=HzIP`@V1;OISes~BLI%Lx>u*h&4?{Lnho8ZwJew;?X`k-G` z@7NUkkk9{=*Zvz$JO}^Rc#Ze`?IT!$#hPz;i0|koJlR71=?Ygyq_Hb;2D>`twre9b zXgj!^tBf~5(>g@hRj}57KB828>Pl0dhqgtaYw_Ss_X&300++pW?P{|GTsPqfkJ~lm znO)~!ayt(E$mq}mbS(~kq26eRx*UctmmC^Ftx+38i4GV_p-46TJ5uq}+cdBcd0ok- zc-4uG)uxIyZ8`!k<~5|&qEi!F@O@g_R0J#zg2TrC=sP?p3I?giqiqw3^E5OklKX7L z*hE*n|7lVD>BMpdj|YRe@L_?Aibx?b6xRFf-bf-L3$*DYJzOKZ*AD(->U9Szhy3FQ!6?JDnUj7{Q>K1kAy z%7VlETpf6lVdcdY$hw#ZqxqwAqaHvK-pMQp& zIg7@@i}Y8B`AxV227f%{{r(S!kI!wo_mZ6W2R?m(N1wsrCwRuS%ZN`NgU3mD5eAEF zY4KWMaq~C3j{k0#6+Fu1v+D(y7GRR9Fg_X1EH24)6}zIrB^n(o(#)=&cV^p~858ibEWVbvkt7phJZ? 
z>!j*#DDYyc-o3yFe~*+4Ocqo{o9d!FP5Dl3ZJcA!us`rD{qR=9(2kKdl^<`@A#~~1 zRL&WZ_$LQ7#6xTY=un8~qDfi6;le`nYzc8(YSVZy=tcb7tw-}V;v3MUl5pYk4sZpB zJ>YOBh6`9UKgfF?fp_3A7F`O%g&)NK%KzdI7slSeKY+tvIFaKnp8+hAfkl{01-#Z# zaA@@wzQT>;{~tFBCg9AHgfj~|bq8*gg%`uXqDuzOqFL<9kd@j1CV8px5AssS3sI-3 zD}Pew4_35mTvh6Lb-S+Dgg3RR{VnW@+XJ2rqV|unYyV`s#yjnLJJYVd;L&^=-kG}6 z_8|2Hu5^5h-=^NDFNy!cJO2eHmD_R#8t71qXoog@btpR?EXnDq%6d9dKR-pvUWB^d ziF0{>bYm!(P!npB|MOgMz>lXQ)|t?s0(0?P^Qj5M`X_N7x(+PJ^{wQ3J951pdb5!D zehpJgi0|k__&H*1JBps4i-{Q5=oYKTM_?YKDwJ8sEpbRno z!6gd)`R$EOZeqRa9a{FD8b$tZBIY@Na61mUN$!6k-W|#RT6o@esj1n-dpue+l34#B z{|BQ-oATg~^4s-{m@h4A*Jt8htc+cYi2X!jKNT)KvU08@_NC!NOdV==BWg4l%%Q&D z>15aNE_RjcNBqZ7ckF1=3|@OVwPYi>@5DpLP}>iI1ss`h9}YdmbK}8^Qsd(kcW7x@ zdIR-1i%xLp^CE`|Qtu~cHZ%q8N|ypnK>Iq>q2^3-sN{6cHF)O)uN=zo%c1kB4DH9S zoq$)L@)#;w+ED9ChKkiPw4j-x)YPSJ^b~6LG<3F)p|L{@t>mm)inD4_o1p_vgYyRG zSI(`Uc)#>Z46RyesMUHyJvZ{&n+?s_VaT_~P$+E3dCXA6IYaZV;z8~hYD`b!=~F|^ zUKo1z25<7gP|oj$e&FRt$2T=Hk*RMfOf5=f>SG#HeKMLloyC+hhpBhDO}#2?YF;r@ zt4f;sqa2>bVrp_NQ#a{tL^m`wps}ghElfRaW2$i%Q?I*nZtG#HUoTVl`mrR|! z!Ta3f_5U%Y52mIkcB*(6@6XGJItlNkuL2Hx>R+AOW~C+72NOA(-SW3 zxZzTh2VCB{RN$jaDdM^{Iip*Syl!1C=2qTHZl$j0)`%u?TPv5lb$G2?z4o|uDTe19cI(ASuFtrY=YkvG;#R@i zZn+-2mGzZd<=?qA>a$x(zPa^_%iFjf9ZT-f<&++3xJRQhc+}}Pk22=)s8Ip_R^Fo# z6+LPykM7s)Rd(?BXNAFFKGEes?JlCTt3p|Rq%%j?C zJi4{Xqv3}=8hqZP!v^@} z53kA$_Ug|_uNtoM>cSVVqL(X!AWTc7zJ*K>PRm(N+EqepjV@cw%} zDtwLiee6-O7ao=Q=+Twmyvk6)s}eQ5THD;KC!M|8*2}BoW4xN=@v6u&uNtiL%C^lb z$5F5PpYiJSRj+2=_3FU`uV#JZ@87*z5XYzPNqriU!Kd#9e5zK$r}kC3uI*EY`aWfD z=+np6KBU&u z0kzE%(AWF{Ehrn%z-9r}Z68q09sw1e8c^-|0sYt-(6O@teYqD<{SrYv+8xxb11?2$ z_Gri~kBndB&jhdROTg=xS3A#vC-FO6*hlX8lx2udrDpq7VTDh>ZS^S;-#hmupHe^b zDdBscj>YpUrl?;pYWt~Qehq8w*Q=g z*S?#69lh^Yi5Gs(+kTz+?APXBekDvC(5*}XZO9&wy?8+WasiF92DGLQ-=jl7PkILw zXGlOnTR_>P1Ddrepd1?mY8(^L>SF=*x){*dR{>pr7trHh0Vz>Xy;BACD??Dd3I>(E zLQsKfK~=E?^{`%0quU3SzH3mWh6nX@Vo)Wg1y$J1<7WgldqYr}_Xbtua8Py52DSQO zP^X^mbgAr*KUQks85nh`%r=Tk)K_rg)?Q7%dut3_#Ry(lei8>LI#qm*e_lHu|*1>tv%DN|79Uexjf_u6?ADgb9 zsb}cPSda2=Lep+|buGJ3dk&))fBE$Di%$#5`CD-LXUMN!3L#s^f3e9ca-7TO+Au|omXyAV*;zXQtmI)HW%r;Nnyx1cr@ z3aS&iY3ml$mi|GtC&ux|1od$$aU?fK`Gfj*R#2_y1=V*sSnUid(LS&_71WKJK|Od9 zRDn-Hy^9l4g)|{O%NEk0!XbULgmj~2NTkR z>$x2p(hg$Z{$fae;=liONFNeL$yz*0OKV1{X_qMUA&Q)eQZX}1GlNn3x-LpbPDJVb z6|V2`?-x<3oibXpGe)aTv1nDYMyr0)Xsssp*CL}8+7PYu$D`Hqe6->{iq@kv)78Aj zbgk(;U7codd(CtWYBobfTr)K9(+u?=<DBe#e#}KLl)Exs#^>6qeD<3uu3-|sBA-m zdOj+sq?3dCVsHrs6`mf{!Z|^C(UU7-wC7k*GtUL(M^Ao$(}e^f{hcJFJMd#qe)v%< zL_Z^>N$o;<*@e9C8Pe@BA!%wz`J+R6vIcH!g$sv5@|+Fn`K6FPzY8gC>L`5xpOuxO zbUY$Tg_}fac#kNh>KCQwQBnE~C;r+PrTGVOkPO$Of&BZJC21-!jM9h(h~OL@-bpf*t_?p_V* z?1P{xz6t8gcU}(;j!7L-WjNToPDmA7hxDpfNOi}Bv|wULwPuI(4z1g_DWsK$LwfjE zNF`r|)Gu3<(&mrSzS2<|jjr_=6{WJyD79M}r8GCA^ea`gniP!Iy8+RP3Pmg8XtXx> zm@Zp*x;k8RX=}i(cd>4*sOr(X0Df_cM{)3U{S)J<@NG$2c=ZL(mTQ1lhoZc?KF6!x zN4zR|%d3b-UVVS=)n7lonv&Qje;%LC7xO8IXPSUl`aBXZWk(BrJ|)HST3YZ= zIM}a^_=o9uqFHeGo55qKcU|Xj|3bgc;EjH+_oMB8&DiPJ%Cq>2*M4RG=vV!C0gXEj6Er!Ry!iYq zpz)sqDgx)Tkq;}&Qq!6QnJZG~`UZ7=P*BI=_Zu+pjxRWS3;zETR95)=t2}kCD?R{@ zu02QHsvMeiIzZuJ@MR;$@=?b+eh zXgoo~zc?4%b?e{1-TKH`pk)fM%=!XAyQ;L*Ms9>uNiQRl|^`+*+yvwM`&57zkl zI7>Vlw9%v7VUMzad*w@b`MVy)zVhfAUOpy1KAl|nn$D}2Iq}ei@yjK>iYepOpz2;F zt?AX&#$Nr?6rbPHt4f``y5AKa-ruWcqwt7R_?zKXd6!o+0$yF2$#d|LDe>dZedHIO z+;+;V@CD8WH@$j@C+~{4vBt$artm3GI-inf!&eqXAFBA&(t@5h@M&awpCbGCba0SQ zYsdQZPbB`+&*NtKv}hrDM1Iw|?bB=W>neHGEgAWd*{@o7?Y7nYs?rhdAcrDG;HA-# z3A6E+oBY~w2_K4oev2Ra0QSL#LB06w(CiYNxeez0+c>X3;M|?v)B(=72fLX{I@DBh z<}6q2T%tL{t~OO=x2c(bnL75(ls`Y`&~i@2ZS2&LmQJPa6=@?tKE_F=l)~;M`O(@~!-00Rrxn-;G)|X~(UF+c1jGpKR z=h>Kvr3bmpQ*vod(~sDM|!$f-vwy!x2hrbv2$hv`X$ 
z=_iGg(Tiq=v#o|*Z|Fn6?8WZLSmp-w*1YsP!wcxE(aTA6l>X{9dPuLBCwya;n$)3Y z%%$h%Wd6)v!4l?b?aI(stKm>0_Cq3@J9O(0ht3RgDA`CJH$?e(X)lx$h)*5WHO#!0{Cxew)WD=p zO~~QY%R)|NtmIV6nogDN=TzEhPWs?ZogybcKXs~iBA1qAcBxo4ml{=Y>0B=~YQ9V9 zZ@Toggh%8>(DNIbnBFtJ?Nju$ zJvr_AML+Kqy}ZKo?wZmweM7&l7X9nPZJ7=Apf5^~?tlI3O!Vl6dUCZKMVrc$rL$^K}noe(R0y=ZGr>S-5%NF`$!)KWKOg}8$Ayb9ve-#72oUNQ1 zKFp~-%bc1W<5WrdSfA)s%^qXd!-ICU9;?VVW?9A}}=0!TRcbP*Cw(`CCKF7%2 zq>q`uykHg+hhAd>1HGYFn2R}baYOZMl8@xAwKX&U_J-09ByR_ktE2cn^q)J^ll;l| zie>ldd%)0;5c^Q{B_Av@lz0`p0`w$*ZbC!pMXowvsKF6l?-V`D3-l!EN1mr2>1V&= z+ds?~zOX0t!%(LrraF`7kLW#4$w=Q2&HaOXFJH*i&f=z?mo=54lF1&IsVH*4aywHg zyPMiWZ?PmfKMq~~KyUFey~P!7Q$^@2{+esbhCY8spDz%$m!rL|kp^qdRPU+zI(9!`ViRH7$bm%Xy~;4&1= zu{*SWA-hb=(U*-5U1e7J__RZFuAwy#99mA!T?C_~V6-Kr?X3V4L zBUd2*Zq%ZOT#sA?x5!3@GB+`F8|<2Q22*k|_aN>YYN*S2;ysz%MT7RyPyU0vJUG|T zie=>ATJYFuDEWTypod%#J-TrMjM1d4Hw`UAm&%c^SKb=3&^zw>le|wzj)GfUa4Y<~ zsRj8=wJ!l)-r+KZNf)!8{YJ z)U@^Vi1(YyMejE*+O_u%m_Fb=KAL*)i{3I=?L^1wfziUUPQ|I?)XEl4b?5>gsIgUN zf!SuK=AU;e&kLt^=X9y;6qn|tajR()=FKaarTs!nQaN;qTCjpT(7rnSXEr&UIoNh^ zNi@@;JoC9;=g@*(4t+f0P_+vVZNAR#%pJ7ku|r9ovmeM@xdu2Lq6TD1YB0kyR5!Ds z_t_0?%|qM^lkebEqAWAciiT!aM+4A{N?^3REqHW*gFWC~AMk~1sp$J(foqe%s`(^C ze}a|EZ>V$>yHN`a{aRvZCRioeVCW=xU1j$$;t-geF%$%=I#&#}0k6~W?B-)bZ_$@c z;1xHCsZQzWv1c;r-JCosdZvD|(^HlH{pXH&8~XQCs3)Jn>*D}Z`G!$r zMwtrYeTKoW`Ct_TR+X2-qqX=Ru=;#}8Nfwy;HIfjPq_xC=b4;ZTo8`~K4*G5b$bwX zXO2^AmpFCcBG}b*DK|a#U@P`BwzALpo;nULuYW_UO8>``ZQapkH0})c3@q$P8 zEeqoIqxQ;Xrx6L7g- z)X>n9hWeH_bkss_*EUoZ{&*Ur&ER6|VrXrDcrw_~hT(8yG>-+7MqqLc?OX3Nv=~fw zQ_D*)MiSDjj0;4XFdHTxIInMXEusC8Q|W7z9+IkYawz6G_i;Rbe;z@pzdt}i=u z@CmbIxY6z%+yIZWKfokDF;BwYEVZ*_HbZZ6P%HD{lZxYaz@a*|^lb%pZL5&SHHaA; zd4!fFZDyz~S~eIwCimqrL-^Y;Lz}_k3s@AJ21oGg4c){w+E5MfXt;n`*h+WF-S=wMz*{Ms#N5GMB@Cfd# zorbpgO^uk#%ngkzzKh2n<@Q}utJ8o#zKd{EYnhi5+e z_bR&@;CTi!FFu>tp{MLsd~C`dAHH?_M2BX19U8TOxg|TXC8#G^!D9(t^}v0HE`UpZ zwCer`uE8X30_sF^cmOv#QU4v_@vf+$;oxx0U}4?MxnaItO+wi@aWizyzWAb zdsF)dqcbDfgPs7_s4YDs(IYz^2;GVZqK9xKJ^oQ8!bDF$CofX&?Drb>Xz2M5n_!6P_hUC3oGc)*i2 z)SEBgm_Md+syUd{DdtpW{J@1_oa;El-QC4Jk#pC+a_qFPXOH$GXAJ86^^DYoDh_3$ z&Zog+W*){~rJFMX9<1nU_WAH(L(#4`XB=A19_)+1*|Wq~uKmtDnOc7@E}kn9wLHC{ zzrdv~b)*EiBrZffC`Ak^qE~PvaSeC_HXRzGS51jMbtHRFd{-YH-=F8fk#(cM1)hvC z;DD3IdJXjs7+N~rP~Q25%*AkNg`pPUas=Iawhg>@{#G&^nJoA3{} z41IhF-`*Jd9nIPV9xs35MG~VyXxF2(Ci|+yH;1X5=vTY^rd$P0y?`qP(XeSXssG^8 z3mx0r0w2&C9|tZc!R0l$k6Q&Q6V-;?!i$Fljow^tu*1V_;GXOq`FI>nC#PNJ(mi)uEo9sreJoEWbnf zLez9>O1kw91;8bD80@Ggt-z%rTgr-uAN3f(lA8biKf2K zV=ljta{`#00F%y}(5Nk(1;J$_xMbeLbHbb%!6YNvwe&I?cLR-kgnx%8?fwNzbgSVH zc$WyT3{O&~AYS0|4qP^8r|#pcyHQtiRDd7Umfkh+$n{KZZA{GolT>Y~D;-Q#>}skU zm=woXAEdS{0+Z$Npd>nHgw$4$FVe`04NH*pvB0!%WEaA?Okhi>D&vVcp+Ih?`QI5cE8 zXRrhRomp9=X=I0#8i`(rqY7R#a`5j-f##n)^$AR8*o`= zgBxzN3|z`AHx<}H%>t7yM@+T4#&vwB>Q{9t3?@$YAYwT0H}g1U&C0F=yEM}|XU{sq z{_+ie+W}9K<>8E5&Y>P)Qfvq{Wi-FJ7*EYG9O_NIU$>GoGQAMTF}yN9I4eGQ?kmoh zV6qafocRW)z-B*~bb=$V(XVy6i3gbMC=Ev{lC#y|LOuL09NF5M7*b2x^`qtt;v78! 
zy&r>Dn*1LYHni)On;iFnr61g8@EB^zsYPJF4BZ2lPAl1QfiIn?EhoUn1tzt@qxDHc zRnOrYE`bjmN%S}SHt5%{x14+6N%HU9M!#w$;JlI&tpbxd)RP?4li4|`0Y%Xvw5tzw zr486zhbv8LQ^&z10rmbwM^n4{;;Z4w*I}mWpj|;Qc{mxmlf4|ywI|Fq(A2F`X`J@r& z#a^7>;YvyyXG3azs)hKmO%7#)Bi%ULd(f_EYJICW+z%$Hzp{G?PwFQp7c)^0vf(?y zBNw{$vJ^RrZWVziK6tXE0Wk%Ol)c$4hZ~ay5=ZoE&Peb9lfR}ITJEIIfJIAihzB<+ zf`xSfwH!P;f=4#6_+zc10${OjBl-s({dVHx;7SnfT6LB)4w_}akxviMGk9_rO#b?a zPXLP*anUL`aw-{nN9fi#IPyL#7x3`r2BZAoQVK4Vp*ELicL)u8i+=T|mLzPCCQ(m5 z^+1>UnVK=ygwtS#Zq4(7PZVBkwyAmxdEQED3%JyVD`^j#dXk89BK_NSc&{f7oD%1i zJX1O2KXR(Xe7kaHa%f^lJg|ea`WE~w=k(juh_~+?DnM*A7bI?+#XmO1H?~BFyAuau zxM477ud&p1@;NR#@;mXVMyIIJWNb@HLl)YhIwOzlTIZkNWlkf*_l#HI?`Pp&#@;Z4zyvYf#$HKj*Q zj8+hvOXO@{a<)3>hP>qM#^Kc1(dhm-Q#Z)p*hqB4fD2xH6*+usI(1_XwS5UXwSrnr zPOsife0TC$@JrQ?az4Z}MVzOeT&BLr*ai=X#w?o zWIF0)0R2A9c_1BLurOW^?Yspyqxc?~;9%Re|DAuT!oTsy_>LFR;78;LpQ#fb;zv5C zj%9XgXilf1^EvezpZAh;(GB$SBmDI8IVSa>=LSbV59RDSi9SOl{d|{Gd+7(&!*jG+ zj~Cg3C*=H;@P9qI!}NuYI~6+T)IanfhF-;&+~mC;@bAY?EqdnE?{DZ&e4y|2nHd4R z-|(F{#G!vg|NkocX?^Iy{*&0HB57T!%q$>v4tlS-=>O++X<$K@J`{CndKnjW+$CQG zy?(1p`RdW*XQwSoE0+$nbEzkDf`;8(I{gQ8fj;!42D$WeluH{XGB+@I9nKWZqFkyp z*QK|MT*|zXS;I#9z5CdOJL}R#`n?P8@;;ATI`)S5{MV)G-(BjS(5(Xn-D+3G&3?RF z=Xuh$-KtU2&6&Zi zLsi|{T!+ukykmJ|`gY9TJ~Q(u--~^`0X&DfM|hZ9sm5}9l3T~8x$!A(J@v2y7-9#I zy}qmL^$nQsR`V5Z{kf4jD>J({yWQHcmtDbwZY^b}FaBj-%3oabGNyv+;>%-_X#t64_&7PIkvOR>*bfq5h|A}f23 z2V1%2?c`?P*{$D*)1+}+TyD)JE-&V~C6+LtgW}N3DN>2l0KI#;YalB__+` zRn2@}?Je$Awg|7f5%)!ZGV7e~)%r#3KWy=;J+se9_8|JQOP7-~*$jGPpG>EYq;si0 z`ToASOLfM%bbA4_xg9QDKF$5NT*~l*nH~G4VS1O}a=BxC^@H3`;L!>4Y%y`|!u)tY zG0mLcBPVm@c||>n&n&rqZI6<*@Tfu?a3I%`Fi*}&JadgA4ih}8#OyAaK^$g~tBX9U zvCg9o%$H|vWyh2l&fMqGih~|KJ@K9Kz|KhvpT-XJDSL=r z%_Hmzy~pD-ul)nfjxLL~3`PfCE|px!eDw@_!05`+IA~rn;#`Baq5%clHpB(-`4wnqDCmZEa_k|wqUExulwH}S! z;L$qrcsBX_^T_e}UiDZ`{_h~y znfI?^cjO%Vqwg*==f4F;@4Pz1y#Hk?pDwVgwKAVi<4gPWyd8TZ%=J^x@hSaIpW?Gi znkGIwAnbxnz_+Xz?NqHB^jbQ(^nh7+8g#$H3YSXXqxL<8H_WXg(4?>B-AYuMShr+1 zqN`iQN4k|h=vJ{2Wy_t&Gro7}1lKenJf9j>4|%&MQGIeo!oE}U45#w^R>(LV** zp(y6j0Or|2=V#F$^56T+qf4*B^$T44?ooF1q)-BK7fo4|&Z}E!%FbL~)yeCX zzYsA5mvm*lDq!`huDt43+pAq*v5x$2GQg|M%*qo?Mt`Qkqo7yA*x&gw%c}uvygInm zt1+?UK6@7z|F^sN1YAFZ1G_x;;uHU5KFwe^YgHMa`mhJRt-numy*{N{%#QR?AG^lv zTCkt9QhqI(=-0v_rY!yOL)7tmc#xgcjt$i07xP^jOf61&oLc^n8Q6Q5PG)s$P;R%@ zRG}`^b}M6Rw}yAaI}CMe8@nw9O}APH*k_sV)&+J6UQ#dr2($Zo)Ggm>b`9>Md2r?t zJMZb@cyuJOM}MMOyRxIrVAQPy`3y!^t8pI~eXI+Hjo4*r4maAONu51v+n>0P@Tl}S zc3vh^3+(K)d)R$}J2U5clwlcKzlMFaE!@Auqw!(t@DX(Jl!u)-Zr}9i=v_GVlwEW% zTksK01hZ~&yc(C@t2kiQ3=U;Qr*@-LaoB&qRn@CMz$r_8uX1+wYIApRU{9w12;x5m z?!X&6ym5ffMfkEZ#;d`{(7Q`snYZEIQ}(QWc=>GsyXowtr)lNWDR!^JQ9ilhNSmwd z&t>%Md4Xz;a_Ub`&)P;YIz*E<$CE>rPppf{0M&Wr~wH*`jpCp&hvW$cvdJc zm{D(@m-fhC!J}qX`8RxvXZ0wemPfbY-24t6xx2!d?&xG655F_>$PV9z!?*A7E!lLm z6#d&n4L=KpNq6$QhJEA*I1W66cRB}mFM4#5+LH(zW8Qi+;RCq;B5$ZWbuxK1p@^5? 
z!lPy7yjp|qHDb?cVizxadGH9#(y@pA-Rsr771VHQ$;XdgRZGhLVqTx7H1VnaV4uus zJ`Fs^P8|A`gWa{!_c;GfGgUN}*=JAqvXK7qR(fK`I8$(r$$Eb-izn8POx89{o@ao4Q_IB{(v+(3)@V49G*Vmu;wG{Z(OnBAoZdJ_V z)=z5F8*nUHjoUTdD%$|h+5`^b(Wa>< z@aO9__7n@yv!L%im7dVu{`8kQ8xGmdo(yNgQeQaxCZP|*IWTU7Q#&~SHD&f6F^Jt5 z&U&#kof^B0IlyZ6w)WFc+_pW1OBK_*bT$V*u7FE_ z6mw}1p5iQh&3pPiMX`gU6$- zX&1Y6f2m9D$%h1R_z50y6?st}pZ*(OaW7||M&!mXe99k>IWxX-X&U)46MkP#k5|d; z)_DA5J$Qbt7&%fB4@u2@Rh8cnRmUH4_Sw^dGf+pk-Iw1Oq0uL(p@Sy6H4fh2n&nmw za^gHSbijUm`U%bgcewtWym*VAqtD~u_G4=3@B(Nj+MKJlM<)luGdNlaPTshHcA}dH z;N0v%__Xt0mHNoeN+pNR(Z|Yim;Pc(`nh=xU8k2)qz?Tc`hrL4JyqqLzl~m#LBGVp zd4C3d66+iKQ~0*@^t-pxGa5mE#7%EICq0lYq6g8M8C)m&Bwd-a^`vJ)PiWRi=GpY= zKH5wzp)b^t{)TIoiFc#Fy^7hy4(_K{Q0NdnaQX!)FYxcn_>imgk?3pOr+0ARDUW}{ zZ0Q|y^Y8Tae*U+Yupl0Nj0E%mlEZQ4dAoimC-OS=kX~QSYUEN4Y9>8}3;b5CRC}i$ zpx0meF@qiGROca1bsgbUCc~-5Zl|WwTj)EV*IMe-j5W*y$+s4q-RfNDH*YVUI*4vm z%*grtcXEUr8_v18Q*D=0qWMKyyR^0=HJ~SZgJ_CL9^J;rS~t1atKfO~+4AH_)%ftZ zHu?9H9fDVap+Ug_Zpgk zo?kwGyGUQRS50QAjoB6HM(=f~p)+>o zmT`yP;A3`Np3yIUZs@~no=@NWEPeA%@xdW6{U-WfxwA0C$Oje$=+A=lwo>dYRG`mO z{l9r%e)@pL8n6e{jCprE`a9j}bqz4}j9ym;dR&J_o7(3zH8x->$xM1<^XPpoGj)!B z)`bo18*O19XdC@rH2WL;gH9zqPmk*%8uEi{FidUbjL`@^ z2fOY4IDf$N4Zqk22A@_-;BdV1u{-IGT5Rlk6+KSgu2YJG}*Yo z^1S5|%eMN0_mM$7%E&6f6ic3Ps$eU?V!!j_{&4_X`pk6SX$IBl8U z_JZYL8varL1eB%UF}embH$~QQmql zPX+6asuit&w60=38e7%+{$_+Vce9$-z3FRPJKU>pT`|0Yb#RwP*5XEEYt=YSt=+xN ztO+}`unryD);c@T$$CD1H>)>KZ|j>agRR&89d7;m*<|a-L=NlYA%?X{Ew?pU!hm(< z_@K3R<+;|!DVJGmUt49plJJ!E^w|_OO17>OYh76=c2nO>v3XX13a?!q8(v;!ZMd8x z5N^D3Sa@mY3gNfcQiTV+dKdG%cXiCNBes}MB{IgOi}!W^$)jiXHybo-|C3Ooy}#e< zTD@R_BM!h#w{2 zL9Tf#<>R)+mdnqVTjEz-WBE{hlO=NLF3aZ5 zF_r--j#!d5IAd9M<-8?u@{;B1kQRHtqKc~fdyHyQq-T|@}Yu?zpeRMl(uk5|7XHE{ZZs+!3G_3cQv^~LL;wRM$Q*0ehpS$EC}TYs#|TI2rQF0ls&RfrvGFCCluQtsGr z)g-Yk>;4GOKJp}-y2$Nt*|R6ZO&)9rpPalRY$S{hr~DZVd*<51uZQ#xCmPZ|{N{ec z@Ud>z@T5nT!`ZS_3=b|*G@M~s&hWMU*}~nEWeK-1(}gFPzhbI-U&Z9V_$cPYUngTa z&EFaG+ls|8&!2c=GMpS0b2Fk<%=4GoVk$R@ALAR^Z2!1I*Q!0JJg$27!dI&2SlTG! 
zw{3kRju#shvEhj$B5j$45zlYziYT`6L`3;~HzE?*|Bgue{8dEKSzjX#OiFACt}kSH z-=VUlAtGY}wPYWKw^NtKC$K{h(pVf7)ZIF(YV+YtFPRNHouqxx*sMz=CTn z=QD4!3?IJBl6GT^WZmkt=?GH8KW9nuU>9$9W%Ry zbx!)W*19=*TjRwUW6f6EW__F5WzGB6YhBo7zO~@jmDXHI4_UvhC|cut`6;pSch`&^ zRHi^|i|u)1&&K&Z_PirQZ0>T&W4EpU5+3#Pb=bS~emGVAYvBPKkA@4}JQV&EwIMvh zw>Z3Y;KJ~Y-=~LjWe9}n4Tcx^jPQn5)50Up_7AsS(<7YS=oGf^Z65xdt6uo|%Ie|U zKPrVk-Y6E%(JF6v)uQy_y1_)@Efs&ooNW3mrt0T2G1ZRmig~@>7nAkx5ixr@6p3l~ z;Oc(Id9MFUvWM|#B5)^h<-oYM>t2jBT{u*9v4HUP`eO1>oGjU7Hi++PG@3W1x%qwcQM0)*}nEBC`8lRR~ zcKox&k}}C|%hLM?EIn%+x7fa)wye)_-IBb`Jxj67Pc0*>{cCAh($|mtxMsaZBB*T z+}#x(v3q;?o3vOO-g7%T++%_}+-;;ioTJIO@SS8s!@sii3VUbQ2p@@4 zCA|H0iEy*sdBepkr483f`7x&5_lGgJUfzf?23?7HyzErWqa(2~_0#W&iP^Iurd)G( zjP=35nAUF-$2eP!+rOjz$!awgwyJ(;T7ihty_!aRT+%yY$g`-3@6WbGc-HKVc((j@ z#GtjwELo0cwfuRZoTXX18kVG!TKzwUuEQb6FABG{w^SM`ElE;R>U;0G3K>}m(UPow zi83>qR0@Sk(h#AYGD3atd5MNd5h|gKP((Yk-}f)vbDuMwbDnd^(IeZ*nqV^hg*zbvvKsDR8mSWZymG5H+%f^0W_OU&+f65EU(V&mIS3N1#-%WXoiyLB9h z&X)m0+8~M7X>}2aeVofViI%)1r6?cytn8Jxqm}>ocHDwG<{FyaO(d zt>C{)mAdXrzceun8&6zz@ksc6XYy;JD_J}2LrVDpgpG+MAxlq_@;ezs zq^p>SPQOEDnBF7l&QHkH&gbOutrl|6y_2{E_K@YD21th2AF``?jQn{d3dMnv&=#l& z{n1KbAFB-Hp*sAC&;-rXI?yPH8^Xf|@X*i%`ijk z$M0l4w_RZu#-C)rBp9#@r5;r}bY%0GcgoDC>K)9u^ux>~$4q9gRV5Rs^O~8E@{4(X zNsLT!Rwi|>6NvItZIV`FLsIJ;h~bU-WbsT7lDsRH{Iont){37a#tB(uZ0$v&(o#$U zcU6)N8*0e3)<)7e)Jy{GTF6%J4Qa^uO!Rj3kms4bq`dAo+0iTvCs7?CXDx=8mxF9~fLBxXaxuzZUcBqxbO;=*yT%R>QT zAF4t2M-BLMSsPxv>4NnkL--_O4BItL;LE;g@N}XTP%|62on9lwEiUT7 z^NU*0a$X&7-jk2_-kn1YhvT@hA{y7mgrK(b2E6OK0xKUbK(>1(zKNTRz8_Rj$B3_35+xIUo50XS4aIWj**pUsEc*_cl1Z2;IO-%~NA0+iYd7+$&;23O_LG zmW?sC+4@9!u_bZN0TQgThA8X&M`n~AAdaaKB>!48Ic0I09Pc_unx^IuQk_Ra*W4k_ zd_LK}gCl`I?h=b9b!3UuQ{wRWC3*GyJ*ltzLOw)&CzXE&$q4;Pb{rLg^D3fX^h^f& zh2>$swE}c@tANyaP1yWf3ltvdf-*B1BCCxdu-g>sn$6(P5_9NGw1j&ZGod)q0hT^u z;9nnwn`LvrHhv+*jB|yHmsi8Jw_f1vu^zsk@rQ!7+hIa@Ft~f~f#bCYK+HEB4yHvw zaA6c|oe>LPKOcv0_9r1gH4R2?or9%H`S4e)1Z;m@gT}Mf;MVm9(oFurKDB$?X_Ej> zV$(G4(-TonqHzclv|eI;WHpw`UBfN)=du6U88rNU1Z~G{MP)S~935DNIVbFJ-W5|E zYEZ!GU0v+=R*pSLa#(vB&-%33vGS>=>?aX@wnFAJ|5Wlu{vO}c4o3S2dG!jE(P%u# zEFQSS+}Zzx`F^I0`IR6``d#!$dbm09IcrPw*32bCr&p8Di@k_xTNp7s5l#Gbj}em( z3FMr6GU;*6CO%toh{%&N;y&{h@w{=DG{0;i4dQP|n)G|Jrmjz|QgT(`O=l?a+n?@w$*!rw8+v8^AJAV<_`81vTSo z(A;SayZ_FF9P8OIhqnZ*8dIl+$ijbsnbwPz=*Yp^0W z{_>yC%;j4g?yZb<2;-&SUC8VXj%Vz57BF%X?=fw$&CG24%VOOFy z)C5j~t&1i@rnwJo$kEw8ep9NGJ&Vs~svtji`dl0q+crbe&*h;%V`e|1I{#HX< z)H--)=?i&6o8XY{RjMBQB@WdxC?4Ik427!h+@Ky((8>pfDT{+B^6vhs=mpwW1f_2VF zXM;0i+2DzbS@(5ntV+XO{yx`i{(l?i^34nXRk#G?I@k`J<%#`Q&wD?p#H4;+$J8Zd zFiDH+7>UiJjAgANAsR+x!y9V?#!JZh0uS`+iIm9HZggD6FB8UAr zax%4=IInq3yn~(+kxBGzup`Uf?;{KadS`KV5>` zBe@U?rJ(ZuHaL`Uu)eGvR{QqC{tzvikt}?dxE67H*(gpue+y?Ty_nNGGlL7>t;=mS zQRn(aCAm&HAx^!k7f0E4e0!k;pH4``yRQyln(!?Aktc)!_nxqNN?Y0Eeog$xM$dRZ zrdczlW{FIDS^?vu+rs?r8DWf+)JUk`ERtb0j}$iqk@J@hlV6*TlcH}Y$ki zdAma3hh$hX{m)%8c(R`OmOdtx?(JlS@@Jx7@PqugA_5ZsWT7KN8(yXxz+p57w?T7A zHM4?hKU=6+#(?Jf`LM%xA()1_fbUyRIC9M!O1G~Esr?&3{)s=tsPBNT$$Q{L;eJSR z4uvz>M?s6kLsnrToDWKd>gHrvn34i+pU=YZ)O2X6&VYjEEYQx#hDS1a5Xt0&l2;Mz z{apegl4THFeho_B-h%A=l~7@WAm4*v=ui#mcWc4*KqFL4Y=*W)?cmh?5%%zaY_4N$@(zxzHt~V*T+DOWgHAH zI|cKXCqriN8DRgMfob#4!r0$55Dm?M6He!0y!}P6^1lRT5xG!Mnh(Y;#n2Xg74G~i zgMfWEK)n4H=!hUhF0TUCsuuqKs)utk9)sfXMv#bn2IO5cbSk#O(ZF^%6Z0Bop6GzV zymv5NrwfeTzQDw#-+_Ok51twf!aRYR%Vp;;*qJZ}tI9;_&q{InMpBACogqy<4P|Lp zlpOWVQKm)zR4I*8r|0JAQ0HrU^zlJMdTO@`y(~4I9yYe8+Fv~B?t#zT{R^?&`ldim zXZ9*?6Y6qrpU83Yl~2)IDg{?seP@G@s`H*%Co>7<21KgWnG7!rC)-4j7%4s^-CCVw zWBF&YJ)@t9u#%84qzt=SCW6QUWBAlN1HN)L&?ktGKgub%{h1G!otA-|p9hFpY=ZFd zL7=?rAS}BW3C@;BfNysUo|~M6$pU{H@1F|mj--Lq?{gsHc^=HaTmYAoxe&$Xfpush 
zbd?oDhEgfmo0P#kqnj{XdmAD=DqyK&6=a-$0Lfo#z+_83eDrw?6>l1#>EjbPVe$+- zN1j8AM=LDd^$L30-omRZ?;$AZBg9Yn1Pd&?VS&vz5Z3$-%iIRw;m2XfnDhs(m;M9! zJ7eHJB1DCkh|m=%N_`KD(LbK!Xnut>jo%?dyOQK6HIb(Y!HSfXRi$e=HM-6~llt(q zs9TX1O_-@mEjJj@t*u71alaW2dtgrWs0Gyuo<(hA>}c)?&;wgts8QNh`l_InYii!i z4R=oE4$M^GR)&2=QJY-c>8XPOSK4`Bef5d^;ZP#~^E%0GctwPK$4F3tAb+VVlp_-A3ymUMVL|02jQzXb8$a$%ls3A8P}25!=~phM&i*p*ho20aAVSNC8z;sJ=M z)C&H49Vjn+4En(h;Ju~^=A=9Yf|AK;@5Y_h=rc>-iX|=W(&A28`m#K}TpQ0pb zeySAh|0hjzvSjGwI$2uQC`X$G38YJ$620r9N^SbpXpNCNowz`Q?z5Ok>0&K9d{CE4 zm+RA2Uk<SySqJ-i*%fHmB1IE$Q$XYkJARMqo>O>hlHYWxY9cuGV6DbG9e_zH1M) zU**t?u5zj$G^CbjDwE&TR1;>xQ z0U@Ke&>;2!)W3IuXWJ(TbN&L=Z@b~J)DPI1*$?yW2fyQ?0y`n>eFlG z2K1=6DOLP4m42CNNk?wXp#Rxe)7Wxb>XbX1W@qqdVvQsH7wJSNq&U+jmzGm5a|;c; zy`TP`a+P~kY0mBOc*)K>*v?33#*m#s^<>Ry5eWUE4EG8q!~KmGAUfa#@*{2_{Mi@! z_&eZPS`v;j~r92Vef6f}~4g3!$0Fr@njq6`1Q{-iN@KUav3yCy_uR*Fz~CQ847 z7|j}xpkEnD>Rlj7HOEO&r>#=7qFtKqRgk43w`FNSh#Vaak*B+Y6sUf&0#zMQq*im4 z==ZD2G-11n;Hr=+T_!S~3YDr+qN7e-1a{cyIFYVY(WbVNI`nC$4wdxQrM7)~)X?65 zZjLdc?P61?+={7`d168DOq@wKKc7YO_s*tEW$o$J)eaQo=g>nx9I3vlGaWHoOfS2* z((V`^Dz12l>fKG_ikhCI>t$=AG3`F-_@xLTE>j`*nhl(ES_JDqd%*UU8=!hzIJC4K zfrgr6u;a^VfbvZE_U0l)d?0$HzWcz@`jfTs}nnEpmM zSM?MMFFgbQBhA2czJOf|TVVHC2b?bW0B(mrfz_gJc(=3%B8tC){gqz$W!(=W%nFHYqRqdAsH&qdJ^!$g$E-xsA{+r?<(3~_pD zhd34K5vQ+@O3;}mCby%-Sf2L~D*ZJDiR6&Q%OVp*- zf%>#Laxy*lz?i0OHl;y7%;*t=Y1E*>g6bH}q~rI^qHW!_)LG4*KImpBgFuU)&7o!P zbE%~GLfRzeN=v%e(IKO7S~Wk83zf=czDPbLLT62&tAYnfsdHh!m>_pt6$B@D2z=5G zK^$;D0j@vK!r!VK=o8I_Cl5=YKB^S@yUO7Yt$@CM1o!24;ogkz>m^LP^%|yrcmt1zIzaDNCmd+|07>Kvh;HkKsy99G=HWLeSNQ?1 z9{o^eIRrs_enOtfZwUYW2ZBfcLhpb7K+9K%{=F$gyS0R=DvHpZlSOIxHc@JDN0d%9 z5u-5?VswVQIL+8CPF<%<(E9?r3<`~-nRVl+!6!*Nuu+QcZA0+pYrNN4FP(dWmMsQol$x>JCa#Y%zHnS4Ck^S{^dJWe_U6Jmvm#g~UJ81NWnc<7AZ6-p*!!{)hA0Q30UYR0yaylc zt6+521DJfO8a5xPgWFp5;BCui>2ITk!Dfgn2*T z!RobLkZtoB77TxZh}AuyEAT-1Ndo^R{vE>P`oQgeKcqGcz}IC%;NUh4=}sfiw)iJJ z9sUK4l7Ari^sugcZSsfedKYcd+_JL3}=lS%0T_Bxy+mq{4O(N6UOhBQ`5_}?TfZOQ^W*b() zd2=tgR=x##4sL_u$GafIEF8j(BHkVr1W#W}gK=1?xY3(kXEK83)HD~8{0CGhWS38X!^ z3gBKQ;F)fK+@YJ05_bzSX5Il!jY{ZDrLVKxT)~xmIX)`&j6t|YgnW?6H*#%V3F2r_%n_N ztxtF$UC#*WIRLL53d1KHAtz`qD3;F!+A|MYPdWqVzW^Nl7Q?5KB_Q9v43;ce0p5~s zpt4{!*uGi~N#X7gQ@Rd%-8>;|kq@{}T@Qz6`hrxSFEr+DfY%58U|Hi2sg^fD9V@12Ja<1RpH<0Y70e;EoJ@?g!1BIvFyhQvpu5HjH= z+}~LVeMu~Ap?5)bLJf3wJOuBu=b+yx$RD#MXyjZCy28VN%XzQPYByeGHg&s_JCSF} znwSEz&-^}Ft@?yay!4X9oO?^Qtmr3(B0ou%#2C@C5r%V%MM0`c4E7xqhqyKgkpC_T z?*v@<*JrZOd{hp+PRPTWhYIkfN)gtMQvusFRrokz0*szkhwW=8f@OdfoLj2{bL}U= zl3IN@<7o(Vo)H+FGzP~c6HxV<0{Sv$u!%nvyaz2HUCR=_Y0ZEwPpn|1cP1omnFV5d zY{6!+ATNK$K-VOI+uJB?N^yiZnYp0mJ`Z|=onfE)LI~q81f5BX!SBHm`0!&HT>iBj z3Y=ZRz0?)X`?$d-vIaDD*MY)(4@hqF1iKz@=v&|m=Bqcsfygaj?jHbls(~>3_I4<* z3WC|0!Eot)FhtqyhDm}x;?YsfcD(-=o!5c^km7rrOKqfQ)Yc|v5 zlE-ZKFJ&CHZ!#wuDww==EK}@xm(gDEfN?!r$0YVWW*(+CGFz>ine^H==3LurhE9FY zeE#`~vF_<+LO=dsG>!iSveDWY|A#r)Q zgzVd~oXq*-M&j15Cfl}qkoDXKvf|n%vUZ6-Nlo2GY@X~Tr)dO{h=?L5NF0fmO(ZVs zE)vsc^<>hsZJcH3c=*h0=7^?Qb$0pm3D^-Wbk)xf;YS`sl-2R=TtM zJ}zSI^UT=Qfs@#`=O(a0MT+d*qvP28k)M2@FF*J@_Sf;RwiofI8y?^*$8X^m-tp!i zbX?AN|4I0+-XAItDuq>ATIy9;m#I6*L?3o&%j|F{|6$HkQ}^dZjAZh@I@Rzr4z==P75E8EzNM_WZAZx1;*XBAICpI_8m>QyeZj}^a&jE*e-VQ( zRz#v^NEk*|@4%0P+wk=F|Im=#imH1zqHwh@o>B6`(p~FtG;$S=MlHcLPZr_ww{y^G zK7-d*@X-0S4NA65M;Q%s-0CtFm-?AtrI8VOmrcfvL)v)rm=+fHYhq)#I&Mr>#m-=5 zELyIJfe!MxvRek!v7fg^Ks0vRh5kKycbM(mOSxb=8zY2{kT~r zI$ZQ(MNR^L>RP?W6>J-(55GXk)y-I&_87x+?_;U%UF7w#==-k{xhHqf zx#Kp*mEOX@wdHtF;|7XKUPZ~5ML0NEfZVDp*j<=|F3s7P)SZC~D^k&yNycQiQ&@B- z0mn@~j+;%7;n|JxILjzbKuyQs1HWjzefltR&XJgv9**^k!;t$Nf~qz_xIZ)i3%2+p 
zI5f!zS@{AmyfLuxQzKm4tf7AXAO!3Sf|k)1(+0G|DvvHW6xR){?)OH~@%?de(O?v* zKLR@kkHeAp35X4jL3(vFQeIn-HHu;TF63U`^hq< z3G#-Woru{pMbvOf5veWH#KV>w_;dS~1^(gm$ccE_~BJuo6I4846!u)l5KX~QAN=pKc| zuScR@_9P5D6^q%4aqw(v|$PPT+mI2hk2w}Z*2J^ork z;CeO`gSewFT&EXoV#DBLgyVOi{@CAf5bn!Jcnll`8$BAAx=ui{XAE9s^7kfon~_{G z0fC8Ak-y<|w5~o2N1o5Yiy;ePpRyP`?=Qu_#O3G}l8OQ@t5EU&YP9&U1{e8xzoN`W z?CO|_dcj%ntiKDlgAQPa)gk!%9R+Hiz|N{?G1&P$f=}kajo0blzq^fZZJ%P{j67Tn z_=~F5Y~<}*1!c3TMP!RvC1l+hj-zVWP)C%IRt(6XIw++ZTx*6+z zx8P~+HblU|K194YU_#;_uSE|k_Tp0^2D*_ z-q^mqCLVg%;dMV>yt`fx`33~w!%4Q5y&B-DR};98ZHB9h0#R*n3#_qfiNj?%=k02X z==<&Q_-PM>{I`R5?*rRk{V;B-2?;$0VoR+kSnVExnlr{=pWiqXO`U|Pi7_yLv*4MS zj+QHDA-!7?mRgc=bMitYA6bM?6P6%(%Szb3U4`fG*1$PzJszCMz>z{*vF!Oa4Edae zTcdX(x882NFS8E^-Lvug6FiS z)5J>JZ?KWER{3Pj%wqC=PX~E*a(*!u8)>CW8-Cswc#q(%A}SxHmTxaCRHTUq?(;JshL+z>PN0g z<$rEc_IW0C>4!-<+v{pbXnoNx{7tz)yvAds^Y1utF?9YRp=^zQ(aXH($$j? zUAc7Am8(ft>j&w|W4NyD#`F2Hx_XqLtJG<_a!=IN`PsUvHCI>u^K|93P*+!$@_ebf z8n;4M&DQHGcdM@6ZRc&fbaii!uB;B|YX1pcEj_ENb?14mi@fiOu9{xg)$CikDsWd< zpLiV4)m7a`x?1^CR}LR^B|qsZ@SCo*pSp6kHdLVkhVn0Lr~^d}m9Mm+{x})R)zwf% zD;a8`hoQb#GgSMUhVt_@)NUS){0!v|Lq#?;RQ@K0df(Jg)0!JN$C=sm{1~>FRDx#sXx~L|6aFBdm+AHuhj_`sm7jpswmgGA1MVn2EY-VAfUW zOfn;jGjn+Rd|esIy5jY9RVzhTGsz=&wXTxVbk#mXSJvD3y8Cryo2{#Uhjq35G|xvC z23f4X%DCUuRo6SZI!O+5$RYcIuHHQ6QuXQ#5jjnP&>#FJxT_yhFdH?bK(eV#;}Z`)|5BYg^Gr{>t?9wO`TMcYiPUCCt;^^(7;p{~_p ztjT4BKUp<2RJLZQ=B*8NCfHDR9m$qVW|PUrZif0mjkF_^mL{H`Iw?yoM~4_{z!-9g z<~b%A>Q}s>&d=fZ$%d+x%5$X~s@G{l&ADc%rXLJ7ux5m6u_9c#+zwYBjZ7+Sf=PKK z@z}$EpQM-3zlzbn9=)SZsFi=;OscdMe@+|b8uc>3MOO!?mn&qG>&YDT*VP7c$!*Sq zY^G2z-+ECSeRP$lQ#1W_#^`Ei4C5Y0&q<*FkdK(9t7YVqOwC-RW)3aYRqs?? z-K1tpQ!idynEyL;^<$T=?vPKBY+da+q^nLxbv6DZ&v}-5C7<-`yzegahMIAI!1F!S zRXOS=;DxSMz17ul@^Sp8s|(bP13heYK121OhyCT@P9Iy#;~BMc$1FvU8>)MC zL$&ZWl#?IxU+}Sw4E2NFH8GIdYGJ69AVWQ(b_!5COUdUZJ?sZ{Q&y)2$Ym$>vV|UY zlpgjUo6H2BbDE(>C(^rC8Y*@=x*drj)xIg_e?-K6HfG^xL@P3rJldfaD|+DJAwg>&(@|oOESA`7TPBsmP=_+cZuEvhlmA6Hw&yrap5Bl2rBwgK~ z$J-X_%8$D7UZJb#HM;7UMt!bl?b)cSv~4`cPU>v0u0RdhlF#{L)D{^vI>+~tqpNr_ z+H*r!H*V@`AX&YCN}ayc)pu&B3HcQGLe|ty${*fN{rIrXWRc6|668ZR|5$50=xf>3 z&Uz1OhJ3Q=Yh$UOyt;US{IIkNGfZjz{(sRIlZNIoX& z=HLiJou!xcjG=GNGgOfbL*?u)j8KSF+Lm5NzX-_^AC+OS3y=sI6O&c6>8`seeNPTnaIidy{_WE()Yg8?|$j(aX~}9rG74v(N{9EBcIyT z&sZnsIypH}N7c!xr6+59O?nqSZe0Ltbu-o=>Zb?!{Aq8fV)VFLofvmAs!m3eP4vq_ zhH5;L-bk%vEuoL@VQrySLVuG>uW;2OEnF==8?KHtGpT_COe%YvNj0BpQtM}#)Pe;j zRe@~AZ8oXQ?Itz(Aa6fzQs2*;)FAc&2OgVL4Ea2yhN9V<8-oj zcO77@J5IJIb;g0Ob6!`zSJ-DI}VY0`=39`U#-76}VzjE2*E457?`*Z$AE$oUC-Uw-9}%7<(`WU2Q7O z-kJWk!-etjVD49AUsjtvgCF%3psT(OboGIpE;ncW4x-kA85?RUxs$HCchObr?u>Cy z)(vXQCW3LHt_BR(RsWGZUo?B%3FJMIoUus@gH>6^9$d{A9`^S_F)dJ@ny-R9BX$4LpiupKdejX?B^cR@7_^Qo7poA zYRvwby+VgD-Z!0$Rv4-ywNkHAxN18%TpiyRt~L~;Pc=2E@hv&8bT_F}eHq(mlUhqZ z>onb@!e*P4PG7r4E;Xr}<>a#F03Sy-?Jv^HZkkkK_G`n~KVK)CFZ8ylWb?fM{ojr~ z5&6UvXMLx~RVmAU$(gz9MIWondPEIn_|eo?R@4RSQhHnV3uF{Y zJFQ{ztK=8buExqhn-Kmpa;?z&U3c*~}o<+01k5NlVsMJ9^*e zRqU(NbajG!(&>FgxA7di$dk3H5&3jI!W=lso`709cb@Mzhwq7u&fcKzZ!rhRsoYb( z7Wuq*#U7NpTJw<_W$%zh-%BT-!R+Zql`_;f^4VLFp2dD{E7{zp*VUzVx{^yK`+~<~ zdHXDS+y>50WD-7!TA9?=P?K7yo7B5eCbh$CQri;AW3frirdHh7 zo7AgqCgsTfY!H3yEaw@GwWio@lPdAhq{fm*P4;Pjf1A|w{PYN0=9L|5KABjTqR%@q z&&lIZ6M){I{l&Ko`0ON8lal!<+V z!Tb8N)(oP*P&XS!b7mcxbs>@K5Iy8x%G?RT6S-c{L!qi1Ka#);gQoT9DbYo44 zyT-?mMGMxG-yc|esEzX0x?0WnpL1Z%X8Z#y(3jm=Lp+#w)%cYhdeza@TYo;L5qqp= z=o$)D{twVlWJL(gjZmA(28dLeajjhZOUKH$IkbDOK7 zelq?y*`J+cU1`t$tPSh`I~b|~dw^!FD^9HWE9hH;SX2C_vOevgM%aIK>=Le;Cx@%T zdEx4HJ=X7jCe?1ZNu{!ej99`Q%NCO=MGl9Lnp6$ej@^uZWBSt-*7f&qO)Bf7N$vV! 
zQcrD|Z`8wn$N%qJKGk4dWZrLL-Y=H)UB=#-UR0iWU#}&5t9H!i4)hJi{BkeGB#gI5 zFwY0+YQYeC+erF0^Zt1><2;cy-b@b6dFw>(Wo9#u^Qa5v{P-015R7^A^~_o3`;1Kb zU>0@9oS)2`-+6$K&!!d``<;xt?FHsB;~sE>?}srrG3KF9sL$toEynuaJNgjg?edu% z81J>ebk*lC`#c-gVdj6+Li8beQgzn!1bR|i){P#l8Ozy!oueoD)02L(R?KK%s3VOG zHJ|Z6zGzEJROb28aOV5~)_lf1VJPDsMIDTw7clNuCXh=EcL0oei<$JP zB<@)l?@#oY+*F=t6(6&XF-_;|Y+&whV$I&l+PZ_iG-Lm2Kbaq9pKzY%V7%X4p`MuQ zdCc`9jQQk8e4o#F|6AtGd(M`h*gt=#CVuM5ll{}d{G1PM>HGAXyY!n##`>M3p`J3{ zevEe)H$xSs?<6wsGuij%XPs`#UaB+mehFv$j)9yP=|MAF(UaO5Y6pAY0*rrY#=nY* zv5#QPIX7+`&ADeBH5AJ_l0-iv3;z@{$uv}7*7lvx_|+ytZJQ9TY;(i8^D(J@Q%tJ+ zYLmiF=KnFy@y|FzvNnAF$N8}cXT9Q#0U3l;*4gLL6Ubm&0R4vfAIkhs=)fMU3*%{G zufq7x8^S)5J`=z^kBes=rN<1J#{JU_U1iN;jxzSw<}*K;>ozOsag4i}abHJ&nU~3) zY&W07T%X5W|3;s2V6Kln#mAjvoG z{yQJ@lkv{Sp0ze(K8>?R3}bHVLLX8)_(X{!6%_4)y13F@#K`$ZQmSiW+zyO)e8TcP4OOHjDGi zJihKyz8Cga>$Y&-*~R(gn4w&*8tTSd?pX>(s0SVq%6@LR>h>vIm8f7+w>gXa+DaCz z*=0C;+-99NsEIKpxxXTd|8~K$Iz6=>y_77bHe=of)0^5d=gDGZcg8e~b-O=vk~%mw zmRcIm$HmgC$l>H1T@72v@0YO`rq92n=agE{$7PVqHnPiNE!fE(iC%w;9L^u%^N!Pd z*!OIv22PU0^vkRZH<$yDnCtYL>#sRee4suU|G@9;3FtMG=rzy&aVJR@t?4x(^!Vg5 z%t!V<8O-}ZJoh z`%ik2Lm0mfS-0&C)IOtgX};`E1)oFWblS9i)!vP4WbNl3bo% zqW@DP>JE2b59w*F2UlOPj=y3ZphkL9Bez(0my^#QEBZg#TqsCCDQqau;;gYHSzF6; zu68z5tBQ;d`IxG62j#_{hkTAwA47zpy0Io?QzNnDGfrb|XhEN%H-&}pzHZ#p_u(EZ zoO&?~^^!U{Gnlgl8JUOE3rEuP$YxcXp-xdJjmf9@0`9mLb0N1RVsM|N>HyoY>R z=3}iWL*F8sJ#OqO|G$oMcq98(vf4scf_3B#IUUa? z3$m(8Erp%oIda%fU*~JyW-oM)Ju^M+1@+XM%qEjrnK#_Eyyt6vWq(X&ZT^rWnXSmr z*x0ddlh>wFjI|TJsXXUx@=B>h&u8rpB{Ne^>ZdL_P)jA7Ft$7jvVZDKJ>753`b=J* z$?F64lt4Y*>c>8qwWP)n>XeK&QA;ipsGWGe7QL+fe9kCo^uI0C**W&lpAEGvUxZrU zHC*{0;Jo;hy;nPGg>$%LlwHJ{TIP?OJ^rip*4#hX za&EGtR_y5~<(S80RjUSPLvPmQx@1w0-b9~!-jqETIklwM)#}LprYq}7FWwi%xzI#^ zieOHYS>>VZuSRknj%KYVuTV2JGnF$Tz3;^=?jp%-**x}T^uHge+%3}oqNuU;>shBZ zu#ecx{$VF~BYVl6NBjZmh3tx)q)yK=FQ~D_)Y!>e)NC$ikNf;L>(DRqdh?ckNR9D# zB~|cm_QwCHNgi%C%=vt*CF}$K+oebc?%&ICN6&in*MqUG%KakikPY>@tQ}d56M=M`$m zrZ5i&_PJ%avvA@(;>=i9WY1cejHs#gbvT>&)BBq+o*LszO+6s1yf*azw)C@3WJg{D z$*C|o%^g7B9LyR|ecd6eGSt^D@`|U%=2KsD66kwVsK1%q8I#%SBzoa|a!jTsmT=Bk zP7W*i{B-inV$RSHwLR3}A@+2K`8s5^|2%WyGWB(pz2j}3;~pPFjdiBR<~*hjU-7lc zY!x+@MP?`#ddCS>-@>%Y{u2NICI5Uz-`e&0$q;`tZ-&R@cs(Jy&*Pc6| zlH7ASlS_H_Q0`eB=1b*~#zSAq06@_F8pv1?C%WNn&AHh%rsuaeJa zgRvgSIeQ4bZWz6CIP3W+G9JzIky^_zPp=qA2n2&Y)nTv`<9f6yzbCp&7$k+te8`?^o8RV(R#eaWZ_wd6<-e9T_rF1;_vm%SGCGlcp{qjvhx z=SK9TR)+FEdRmVp)|lhm3%3YYRj8AD?1v}#nbb?pjHM@Xe?=avQaQV?s9)}31MxM@;NxOcqr-lQVF@?g(AIX~;PovwD1$1Uo`u`KspWHQi=c}pJC zy}4Ja%iF1yOXRVvG5cidrAQEG&0y{e$>TU#H0#M8tuN0*9v8@?NCeppVyvT>vt*J% z9(fZO4|2Fj4*%`6&d;DOsg+(yyf(j(wP*=_bp_|WHS7!5GPY@a?grMaOzvE=8P8+9 zW_Ff3C5Iy9ka3B72eNo_o#(twedhA}$LwFo4Fm%)(wE zW&!(;qnwR;hpRs)d3@&Hs|oX-zSWpBeC0G=hbN0$^em^tj5qt^k@T(MtSNENP0EgQ zd>VHtm3ZyZfxb1uir2A<@j6i%=AR4q5LKvoG6=85pT8dKvS8mV*)Nkpk!JtjEv8Zv zDedSFWMJyfoF<3GyPs+Dbi?BZC^$LHXOfUc%V_xzGNY zUe$;3ufq5TOpQ=PXWEEmhRg86+e$|P3uwsurpL!@lzq-PHU`18- z24rxg7JZ1GwU-Q18}d2qulFg|`;KHs&)U_S9K*?lb-x;YYxq(_MQmj4zsvrMeV5Dg za8;J`$+gzJ?*3ojwUNCrz3Ml;>g{=6&!Ru=eQZ({*&EyBQAcF3BOm)-Tjmrsaf~e5 zl;Tc<3>0-x&xc-BpPmlpVk^!aZONn)f7UL{$!_e^dNIcs`=4R-0OtSkL5vw0)Sw1F zGyjvuvj)UcCuVAa8W=F0@t(t;i?NSb#++SA?`KVKoX%dG@n5o)-nWe!*-8J}$AfWq zJi=>OC+UZ0=$(vzxD;JwLC{(~F|% zMGu+(*Gn^|tQB_7oH^)4V;KK8?wmn9Ip44syHMLucj|Gbm;Bud#=jK3X+j|DIvM+534M`&NcSadE5^#30J*;v+t_Ic|Dld zKUSEO4|z18F7hs!RA4T5k@TiSYN9pwS~vchR4f@pJ2F4ZaYn4bqauCBllwigm@gPp z>fmB?&KA_c&QSU}c|shQC{nEyu^|JaF)6?1>vEY9hSeH-TgF2=tV z^FDe7wYHkC!T29y{MT)!4`s2p+RvJk&G;XsUXJrQr@7ZTOMheR^RDo{+@RiW(kmJN zFU)=YHP7{qzV(sk`$}*7$)4^ndo^q3IdlKN-IX+bDiN 
zp7lJQv(g;yVUo!_<^MdEpJ9#v%l@rogi2l&r#X4XYi}pUYvYc@Yp%cIwLdOqt!qoO zR+@!<{6(`?@0wZLaKo(qx@Fdi+&63LiCHWC%B;C*=W-aiiS=;s3 zti3N_(cJ7T+WsOI&DGwb1(&dBXB;eAL|KdGSl*%ycd=-nDq6G`9u}>2HH-GKxqXwf}+uHF`{LSKs( zXtHQA5f-gOltueH+@g&iY0<`wwP=15Et)CDqIsJw+B%Cxt3Ab{eVAd<9?r37*-I_j zs1+9N&nkz2Y zK^y3jpxJpQXvuXFw4VM6+O(hqt!ldjEx}08qT&*?j;9l}=&uQyzfYVNHzQ8FpFduc zRpYfKt>d+l)8e(&`{T88SL3xxh0NN&wq|W!FS9nCT*BhbTKffNEpw4stG3dtZAvq1 zN6F^+HpZB23LY?P6%LuTxyQ_!?P;?Xe1TjpljC)>R`Ir3Ynw}EkIdSDXJ*aqjal={ zGi%L1o3*On%$f_?c>XqPb*(JgQSvETh(8Y*9VDCQWMfsvqAevKPiKqPo?NQeuxQ6> zl2aYV#@C{Gk;$9@iYJxWAV}kZAJyui2;gkr zX4Xb+GHWd|&075(X6*?X)jna?ep5q-&Y87HYUc$Rjk#&oT<`F452!aX8cs%WpUm2@ z?`AE4d~WSO_}@6sjU_kZ46ljcBCFUG1sY|pm2+JLAPi- z23WL8!!6nmYG-epMQf74bIu^2`4+ABGU|+4=|DD_Yxw*OYIg^3KVi{c=UB9rH!Yg) zeT!E5Ewx21YpoNszJ(GrL9H})OVBP?P0&XACTN=*C1|TVCusf1BseKS3)`QdHTjdE zefEvj_O6fBtnS8YcPqtdjXK0>GAT~$yfseq+Z(4H{2r&xa*fxvkBQeNEsfX0_QY$$ z565da&cti2Z^dgp-o$Gw3!AlfWvLzV8eYe&rMKYMc4lp853`m+UUu}pSc_RJJI$;$ znrqhnQe%6Tn6(kB&D!b=>Wln_QD4_jQhR628qS)vV>xEcm+S_ST^`wKWLNHqS<85C z)(+G6?owYP$gexu4g5zhrKXA(W8A1I2kNPilSR8yfqCv~(FRtsXqT&5w6~rXZ638{ zN55N6Z8fH@7LZYUMePJxH0NN8){K0LceQBdJ{IjzKeCA+Yij5PHB>s9+M8_AJf>1N zi54wup+zgT)}keCrRPyAKggum1&g+jI`MtLe0f5~?=9M@JbwTCzkEv2ij+*y%2i3w zGV3L1_1Yw8wR_XQhVtuDGC4|*D>FrFay~{|+dfts+b>r0JRGY%ej2O&kG;2!s&aq7 zMZxaw0J{}i40s=yScoW!EsEXU2`k@N0b;k<-Pi(lcXumxcb(~WfA1LgjB|eD{BdrK zJFa7}7p!8f&wS>5<`auuDx(?pW@c=QG$VbK8L5|f3`>7JMFN zL7Iscyj3kYA7{ak)fSXJXaR`T#S{y)uNLe~V?~1;R)l$zJ3dwvuSs6`TQLJX(=aQJ zk!y_yTCr@H75m3h=S)^qA%0m6D_XnBG2%CMg%$b8xeME=fy8ddL1Ohs6Ny*Hn^s)A zXT_YSeEdr*>Jz6)#HlKIl{JkGmC36!)W1u)nVZO`fy_q}iPHh%l$V@3Mx1I8qaWnd zoZ2?5s%t}tpAAdIhK9sxL$D2=t;tF1U7HBz8uBQ90CO4fIW^gag|Xzvd~#(i`MKGK zwp(o2z1@cSNj9uKV#BR-d=2-huTS|LpP6HbO~!o0fw*k0XGbu#tH}U6s*bT^RI(i_ zORIP|I2tV;M`P~WXcUbwq2xFd`X4tT?u7|=vY8QB%#2N~%!nLd#++$p#OyYs~jzorG5+gb3XuLY0B5-*Dd6Q^2GevSoMms)Tl!Gc{!nWJt{ z7k^k#hImy;&+{f`^YSoXl_Ov3TG1qsJR{di1k(#Nv!ZKjD?X8PTl!cra;O#8$5892 zll_R>`6<*&a&8Yf*MF52eYROKp12h_V#U#OR@5Qa4iU4|#B43Kam*inF@LrHWX0pJ zyf3lqM(omMv_YdLhEoeqkYlw<@@y+JKh$FW_hoLO-VJDKLuT@*A9*yeJF)9$L+3%{ z&j|8?_;fRqE96lAIO-mC?e`|?<3Z-Sv*g+(V)~eQg}iCY9J4mH9T#%gvDL?pRm8=w zj~x?N+2O6Iu;eFC%+a{JC>ph&MU&qqr0Qit`>7@z-9wCSm=OHc1kaLY*vOs8o@P9s zWX5;98GY&5Hf}OQO*G>f^H9`9Gr}I3k@V7xA*n1Fna_fA)V)8xu8FS&p~1{k8_BJ2)WbW87F;7<+b&peSe0gRwOO3;wN!)lW#%0tcXeE z?T7fh$H`&(5bG7@KYGI}ulQWV>lm>bOO8Ed?wU`4a>9G(VAYY z6mf~4Z%3C#D*UHM;dq5;oaz{j2{F-dzl0-u1dW3mn%s3WJe3*|eE;QqAf*GzO#PF;c{V$u5@wpil z($jMnx8Qmu3#O7+$^I5}pcb~F7S<=nJO^6PcBBQ7)WnQd3sxEyY;{{OYaa33V8QJq z3sMeQ@bNgWKWRa_>(of%SAls={YgB@y`$-vn=;XVkbB#UlVcUF2(C}v46-6a8!Hxe zVQ%V2EgVLSh+B>*Y9aNqrpt=(xmGM%ZpFK;R`eoP1&P(lYvl3+D-s`C;dn+L@|ii0 zc#R^r3XxmBIqAQcySfsuUUlfj=@&NA*9{(G!{rG!tTr*nQRB)@x8eSBUhb#2zGj1k zn4C#tN7>?bRHp|UFw~ApWmI^*n2i4GqmZ^>G=2m|W8Dm5b2u7#zedBH&V-c>Oz;_I zLV!k1TVX{{6@xa6{+7P(i5I$47JyS(0ls(Y!QlyI-mh=K$Y$(v1-eZyt z9b;_x%p7!u+O~|GDMKwA@RQodT%XljMWu+z$eTL~lQu_T!tW?-A}-yDi)C>%T3_M! 
z8ORlH6GAFe)BH`i)YF6$6HJ)uGNIc%6Si+Qq0nIy>QLh%pP6uoI+y0R37b-x(Yd@C zJ?oee8)U|?P&2Z&G2>Q*8M*tI(R830&BvN?n>u)o-0H89Z%#9&GiUXVH)G67{^b;6 zmyOsxzG24VhveoHV)@35{lCnpN4^!$X~9YA<5VvTMpUpMe{Bo)Q6ra$1vf%0$QRDr zBP$RKiSrb-V&FETIQini3lZ1npLn7@|2rlxw*JCI9O z>Rw23YFZ%keG7W`*7R=Fw*$;QV`tmY;tX+nYD4=lI||Tu<#|5|VUs5#SK}xw93KVS zhbVk+7>ypI$eC5tvE$Jwdm|c;A4a3k>uBU5cj{L)!Ml+OOFEixj~wzKhmvNSU|Vj& zwe=?G^lxp5(R1ou&Uf?*pG`PGoMva{S>`a~EIAeKWybC@^bgg{I7Mz9;{V;`PKX&2 zZbq68Ov-^dfC*T97Kpf|$k@m^u-M!OUqBEO3%*o>MHSJfHeV ztW3Ks7;}V}o+J0jt%>g~c=D52QWuxxwW4of>Kr`@|1W6B9YKBT!y0V_y`qzKFYB{i zYpJ1|*v}w`irr_9`@uSz8fPuWdXo9*mmhr{{X%786JephJ3$}coL)JHisa-d9LpGu zZT+K>c11L@+>b_=0wz>#Y{Hs8JbO3ef0haHOHG)_yfX8&37x*1;Fr#f`6bPmTbDVa zu^HdGn^9>n^Ss%N6D~8VG2ZhoH>1Wn`e^2rGdw@p$Lo%ovFrr-aNUfGjQt|aF-1R{ zah{%_XBx(K77J$NWX)5AXTrR*pd@omEelRD?*udFWbQy+>P$b-ja=w&!6b4Z-zW=u zv3}alvn^z`pohbPY1FYEv+2uMS@4)=?Am5Qv1AJxkrRb)Tkzr&bI@-K#;3KSMRqGr zdeO_3x8k1vU(eUG75f)$Ssyd^tRpW@kQcA0W7%Tp>ld+K#4|5@f||x$)1A2{_&Klp zMbAj@y^p*oKwh*9Vh@6Pm79L-gF)}L#)kSEZ0Nu|fAu!M-caAN+p&l_e#Hno7N2GB zrK^h9jD-iF$6#LbV-8CsmX*reu##S)2^1t#>#`ytQLD zYu?bbDvZo3CTCYsEvJf)g;eaXprTG~6-WJ5a!i2imz0WcC(6!+g0RBRFUtjih9>n%)F-}?vV=L z_l#)|4QDcFC|FR#(qbC+mDRAWv4)5a8anmVP-dux{9`mcH)#l*qQP^ihW%?bbWhaq z<%x#j8FiGZu47z`jtu8?tgCMz{E8jNC#(2+T!rdHFa2kJP`SVxKWIxcq9(V@GJ`NMUj8lz)FD=Uhug!z=gbD&ISe!`Xdqir1EtFuI9J)gkD3N%`x(gCh_^R0Ful2fwc!RHMHooi z-N3v7296IiFm?>@6J_9p$v{ihfNO?j4x}sPz%(BRnur5anmSXm56*Odk!G19TK~=yO?>pCrXreqF7ZYZr10|flmAkaUyG5C;U4( zaiEJ6<9j)AdXN)Y#yIgL%86jji6bs2CQNa%es!YLd?)rUccS=KC%pGM(f^ne8%{fM z{XFk~(TQxgok+Rog!0r0`v)g_e{}_eUJ+if?YV=&4s4@ zT*x@lh2|Ds7vsXLr7rOQ-GVv$T{w5b#k$RfN^e|voXU*{1>G24*^Q;m+$h=BO)YXG zzsU{%?QX1j;6}}pG4NwtKZAji%MG+SZJ?sJ1B*Km<3SD#BF2q2I#BSq1FLzCeeOG8 z`Np5Uoan~$=`K#JlV`MmXSKwMG8>)vw3CgB@0(k`T_>_Tu=7e3T-p)}7y1K)F~ z3wJuZFfG!BPCZ<3_jX~*pEDWeLa9+MJfGmg89QTb0ncrv3$dGA_?qCt{X`cMc|PZP zK4ovakUhnP@$dQgpDx_>bhEB;L(9SQDdHyfZZxao#uh&}dIY&)3U%XCM>iICbF;6` zbDQAC2ZtM$Id1e?>qgZbZVWoe@6WrD^{N{+p16_WyBoJWV-S@u21m-oU}=LG)M_1r z7yV-Jct#BFtd7C<w0IYbanysA`wUE|;9$?dfy@qGGNx^e z=c6eO?0iRDia9Z~k`wnkII(Al6AdPlXHF;T&vhc_DktXfyeC{>{5^DH>1!t(Kb$D- z;lidgE)>b;!s)y&>Wm9c##pS63t1ZQjCj6rEqI3QT*%KeHuHS13~}MX2*%z>7cxwC zVT|TNKR0im#`B-!!j>gG%QY?xU+co=4aAIRJ!TK%l=0|s+J#)_U9jJB;lM{1`${hS z%;-kn>~7@9%eXA$#_qCil&j%J9M8E22V6Q99X8U*g8ab8ZyB#K&H7qx*B-m*>5;Kn%VYi^1^nF<4nW1|dyj(5i0?LPy0Q zXmSj^Y%y4)@$47IVE48dqxP8g4=8`Of4)Pl{#E9SQoQJGvwmEDE6C3(IT z$ZPW946!;MK-`+Uup*2ciEzP>e5lsTg#qM4HgX|mvT`=}e6w~{wUT$pr>*xqzOdE&yZ=Psmv>%t6jBP9dlnmD~H<_1c;ajKjf0~@&E z*T{|A!Q?>;H!cyYY<=8VFv^V){LUB@I8MG?jKPysv3OQ27Ts&c!WABi zLOo(JLyg5rV+#5%*U_z%fwR#Dwld#c-(bMWe0Kk{!Cs65>6tG!wQyinHwPXLabSFu z1NO}hRNd}Ccjkv_`y6<6mO1XR10SCfJLZBmd7U^`lKd&_gpgl9YdSHwu9N5O#Gz)y zq`4DUS`okYPW+! z7?^4hyB0A>7)Tx&F^Jw8i|zDgyYlK78mYrUzjS=7j;A+u6rt~_`$fmbJO=xc2K<}AjgEHTh&m4Vzl=~WIG>`xnbL9dcPKd|z(!9ToXAkf2s zJ!u`tk;#D~Ir){3KQniID$blF0ffl~>CiNZgQpndv4)kvB!1LDhER2U*y&do! 
z?7-Mz4!oeJDL&ePp%aD)^P4l-Bf`autpk=mQn2_NRnp_QGO;p;?4#YrFI z#IhDnyzfQKhco7wKUdO29HoCa|0j2@b7FXc6HN{}k?yz?Kdw13i+*7hb7zMf)YL-M zZR+>8n)CxrT$tR}g)c*0$TP!*h8wBz2VB_p(T((_RGeI|BJdQw_-7SuyfmP$#+jRj zWn(o=aA+vDM1!_fLp^%pGRNqxPiy#oNki>N8oXaB#*^$J`V?*IOM|Khg8k zhc{!+U7nG#nbp8C#;CKffg?p2hb0WWt6-pjuK}S?JsoJ^UPA+WS{NwE_+3EXTCEGU zrx(5YKzjGF25M7}n&^zzI0KayGLO@Px7|TseuS6T4K!!|?#VhoAM3&(_FckR7ZzY& z?AuQEGC7lRAF`q1N%oXkcTT#-+45cXpC7Ovze)`-(enaIh)`tPhpMPZwdS2tWis@V4b^$&$Zr;+x*JBna|Cd zb@49F%J}Zui+8rPfzVA`ofM~uk2XziLdXA9i4vIu`HE} z->hMGXH^kaKt)FO6W^9l@wu#ubQM(;^5JC-70+1D_F_Gory1+PFcqg+%X*Jw%{oiP zV^7wDmo2DS-HKcRR%~D&dq@W>Dt2S9U;^jo>|u}8IKyKPyZt@ar^t zuG@q?Be1vH(1sVBUwjH-&yPL6I_#CFZ^!%b>qbZ3&Yo%+L~GLrMgF*Xz($GPq#8+uzgV`m>^XbgKKu{IQDfBne<&VCm$23OmVW&`_&TiM^; z%h?xu?jKHb_RE;9cbl_b#%b>_Hmu>iYI$yPKyV8xL`R%~NWXd1Ec zy2O2=>)df+PdnWc&W>KNCr5mO-g4gek$v&6ocaA^Jg}EqkoYvuV#8r#v@;L)7xHqZ zl;4JuMLAb0K|Yt_?noJ)b2*-c59jGM+1sl_uGiwD z^J33$L^v_#oG^@7C3F7tj{T@6ec3-JULT0p?XjGR5S#na#MwklRATL*Moh6Gj`(Do z#~v>6NnXa;$2!ijwy+<+$A%fnoF^XVjF}k4ULw{vxEn)FS^J**9PB;m#AP#QB)$!( z5!8h}4eco2g1EG^VtJAaBMQ&W*#{L^)`6=IH55~d#o<%-PJIS17gS9(5&vIuf&jU#skxLk%g+e%IhlcC_8jx!WFU$9_A`9KW4D^<#nF>P>iPo*X)S8!?>cBJmFWNC*0GamiT<;b<`1mPZh?nMSEwt4sVn*I zd_GM@jW`u!X7e#iRE$`sB5$&a1IPH-b1GKfA%_xQ$H zXfS*JjFs&(*xz4D9XP;vy276SJsVmxpUfdo-!q?V{>6SJ@fq#SUSml+PFCQozoH#s z%oE#)P3g9F^zFOz^=Ti|-gy&6c?3GjusIKCLpNjGUDi$|Z@rKwWv}8_b zucAvA75xXYPs!e7I%@vadAy8Qp)6OCeU*w$n~1?KV!W5nM{Js1bf;dp!cC3>hYgEiRP>|t(TU-F;^^EnNrf6+U=Fk{LT3sOJf+;jl@ z&5Nyg!1>u^>O-3_?ypg6SH^LFfI2#Mrww)4=l7<5HYGnNus{6d1NQ;b+3_qp&nk}{ z?~8DjS&sWmwV5;P@vIwAKO1ugusP2)j5#x$yDA>}m$`)U~1oIZAlh7Th(M2@G2)ie~J&pt9e?U8pHtn{qEIn&56)PiH2 zmkzGVoX&hQ_AK?k0Cl<^b80j0Xbd19GGCz;&oMb311 z%)b6}daJj@;S=}S$d?Snq&azVFCFOYa^6~8MF;Zb5Vfo{aY+jm z55mb6ViFa}zCU?#X@rUm?CFnis8~V1R3KOO5|dFIRP5NTqT&G+%ZSP5+r<7gImaG; zf#MpHeW+2QAt!a~V_Tj(`}Y~6SYJ%lpw7`yVk5t9YDoW^ef}ys=5g-SZUkpVb_+Iq zvB2M#-imp5U}hU;tf6POF#!P+!3Izbx%dDp(j3_iyoO6ZKJ;RDZ%rr%6&ibXQSBBpc%ELwH>EA(4%(y>v>;} zw8O>R(}%g|rj35n#rU5_uFbV$GI5%*kl!z7URlLGTjDihD|xqz93xJth|iF7)Wu8G z$D7o<`}984v>mUx2l}3xNQ^2FpZ?@aEoxXdVp4^kbpSQ&QE3%}%B$SRQ*o{yzYm~J zkuQfrsCg|I`(1heVbmj=O0TUV(_H45r7EtiQQ_K14(*}8JX!hH=}SvztOqrv3RJ3pRpBRe93sd2<< zZ4YW!Kk6DWsx;1y@5HFuB-Rzt#M8o@LcIJ`a?(xRBvz%C{LM*rYTmOQoNe!BE=wjx z*R^quEFloaA%c<1qU=_`Jv%aOC8(BaVMw-^GO%ZspncSo%w!0z3z=; zR^;Ve^UGt_E}qCet)eR{;x`2 z8$CceU*_-TDy~z{KKJ9?k(?QLIsAFCidI{g}`^chIntGs7{HHS}hmWHmMH$0F+4V-2@E=_onK48164*W4SZ%$auD-4@s< z^K%NU2g7#IAG6lz!I^No&)gH`ytxuNGoUzm;X~hEkKAZTFVL14kw2g4_bb!yFC<3G zCez5uEF}h9yQ3H`ADIb1@SiGHk=xGybblNGd)9B&WC%GYvk9V z$@CN&`4meHok}h-PeoG$edEc|71TTCsYXf6gXGfnJJie<+(&$G$B7@r-jlh7`R7n} z6+MYlZ~Ewu70H)cD#`|{Xi47GByYa-SMhQb^M_eQcpQBLHSO?P6~n1zc~4TqQdqC2 zq2J2NUQ`7QdFqfe%qxW>>AyHr++)`8bv0+iN11onTRPv4dpqeZn9p6DhTOL~eAa?> zk=&gbPmavy=R?+T7mfAb##5|YUU7Gx`xgs{OEcDQ&q{I!x-9cfU7l+Lo;m$n-AwOfFU2NnPAa9v)=PM%{CfPwR+} z?HzgfmHOzx7|uYgBA0IEU=GSh9u(x~G`z{7Qsf4C)r?q`@}*Z0=KiL{r4`Sad|KMY zj)wi{3kL9UBkWj14XkXkqZV<>>!cqbHrdIe;;Z<43Dn60^dihj8R_R1kwZ-$@^=%b zwIAqNe$zKl=Mqb(NU5wM3$dBpSVeRz<|F2sH^WtgjAIRCQn8-c93XFI(wmhy!PgCKB`^Ai3RV=vK-NGGHV!}Kh z!kvv#{(o&T)^4HF%fQ)SE2aThBvp?8S4Y513SgpW7gBD*JISgWRdn zoc*tG=Aw4|tO4=K-iznnm%X$>+({eF`eqXQ9x6Y>VX#K0U+A}l{fsr-f8Wg7cZZEL zYkIzm>~GxW&iVuDA#0)v)VnpG*gqjoEt#8&dNOwtr>QxKLtg3*G0H+dwXViYNek|2PoxFNz!*}A7;W?j={%#X7di|ZgFf~7OlbJZ=rk5sP9da*Vg#ri*reFdF;f>`1^gE~mgd|6K4Ky2o1r#B#P zN}poik+|f)&P(#<(^Gmx`U6u2)-}W>RXKiGkGx31K*zoMzTi6%mxu+&4B4^ofg!$zH&;J;6%)+(oFKy*#c9OV5dyx6%G;t#aXK%6& zdQ89lg1PBEartxhpV{{#KML~vr%=mMXQFqemWAYJFQ6#3i+N{!d7d*d7)O3=haKla z*qd%cZ$%AT7Rmn3Kx)(&YMGrq;wkj#^N0oWN)zV)w&X|oeXNZR(yJY%cPAEIh(*LB 
z_MhMJ_c6!dH?@#C<`l8mNbl@dowa-;6;C>=xY$$02j-L+273eZSyQegX1i77Jk5IN zJZsOZtV^k1JDKlumthVePrN$ui#erloQ5V#HI&nJEVyGv0{8h|Y$O(M__?iW>=>sys@z6 zQyEM2sXG{h#TbJF`Tp-LV(c*%Gf}?_G8UgFFn1^N9QRWjcn)Vz^8V*}26xyudBzxh z$Eyygv)Q|d z=lQJQ9F9G{r`zZ^cWbD?Jlgz-hG?G88lKPK%NlxI*HHeh2Geu)1>b4t`ddSN_5#=B z(XpopdmW{8%&DrQOdTDrhB`jB&=J6S+mI1Dx|(!!;=Y{MT+YHcORK+0M?=of#vjo! z{JzdPla5-PjWtSbU@_-nfyE7sEN{Tnz(5bqZqG(?Zrj(us!0a2OgB(smIK$W*^rZY zZ^018;ZFAR7?a~0a>l}#dhKHD9cA6aI9d9edw=<*F738vIcr~NW-GzJUhPUsW&v-yQQJfLk*kXYIyOLb!%!JUo-K&XVc+V zkn_S4oI97-QLmzoMXYC2)zVRw@v~ZVtZKq|;%stoTODOP^1fYld>f+U67{!Tw2ol2 zj$@oZ*5mn{oT|e*gOB0tu`%b4nK*NN#hLcSo%}tCyzhRUI~+P^mW#~AP$v~dG2Cf!0kh-RUGGgFnWA68He%Pjifkk}`EFI1><^Em)n}KAffv0l~ zOiM7Z-9C ztQuqUy9P@d9b0*(UYzOAEY36b(b3eGvC&vZi%|9oc!u{l(_O*1x;>a@I!ecsaXLRE z$8)mkh*9asr|7uFIIGC>o4!iN@(ntAGv12r;I1X-w1W=oxWHL$SI%iCUgYh!h~rZo z@{zOO@0>S#aE$6Wo?z-LLPUJeyWY6t0@c5X4Q0~6<=T2m%2b=@HG~mzu zf>zusSY3#F1jQWaRM7!YhMHsV157B3wppG(T%iYzHcuz<8BhKodb8q!6xzD)G@tgRj zF%ZU`)ZGQRlUc}sH#MM2N$#j}9)7Q~fu24FhScX7a~__Nv+&jJsR=y$8jR`mLk)Br z%XzrPz#E4FPwwYro@ZdwG6Nnw_en_xo}A=<=VgPQ*MRcKz{989|9E5I;Cln(Q#(*C zs{`e@i`t#Lp8;hYc*OH>#JKOxou2ev94Iw__nE*w43`7j;~c2Hk=X3#Ue+V-^?Y?8 zXL|1AlNhGMsk8W;^4kQ*6I$YNj_@wiSsDO;GD;bp)~OgzYgwd6rx;?cV*udmKM1MVs;ZDL?d2=VB} z?*|&FJI+9Kl!3uk14oI;*SXZA)dpH^;x6$)UY;^g^Q?gn7Y$sxLu?)s@3+*g4}2ct zk({1q%YB6 z$LdE`?zE8`26w1#OyrKDmHTotocI{e9hfanynMx7m;5f2=MLNbb@V!>)Bpn^6aKB_a_dw<_cP?ZeT*e*mwK_g*qfYJCx$nb$Zfe!l zW5kg-&AzPT#0?!|nJdpxt2Pp=ub;X1^ox(DUTsZH-Vn3X%$?hbStxf(QgMf{Ci%0J zd30A@11rcIAL4VQ4R@Zp8F=5{p!XtAM)Cf{Cu+KZU*wGUD)MH7fvme31LV#A0|pKp zC9dZU1d~6J_Y7os!Z@IA?aRphr`*(~vJR9aK1tk{PH95i!no5!-6}xNcyh<71b3X$ z%%Q%KFVncIH2brI@#Dl+?zuheNWJ1-Tb0q=ZHsmy;V}1+UOO?pzY85&aQ?%7P7pn0 zj{fwPvFsbrLv8#vBMQpnC;I7a(4QuIZ`cKi|BzKyvp)aDAb-TzsewFumNt`|s zuT;#DIdvQ@Kus&g{rD2h85OBx)pU&T)3KL%rVjUq&a`13ZqJ?Zu7CIOZ&UL+6TibF zn2(rma#QnW+POCrM?TKfv4mW!PYg#9!+V>_qXg#o?c^!*_SPis_LFm!sDq`LcdA?> zhPSyh^n~{zj%|K2_oO8@*$jNj$vw1uJa^*eB5n_>(0dZQO~J&VwSih44U}iD+1rmh zKO+pxXCAL)p%yv~hP{l?lho} z5S~ADfY@IP){&1MZ4~+Xh5ll~FzRCz_de;*`qQ6Pou*?Y@g6--M<{dMIBIG|ay5ip zEp&p|pP`2%u9@EH$oy4@JhI6;;S|xJQm%c&abwLKEv@p;#+`xC{vHJ85 zS#9(TvklB#VqgvPR<a*ul6^yA4HrMA7MY@6Z>+RS&J0mte_0*sOsz;1hTGb!d?P>T0DC^k*v!y zFefFle?4+IdqAW3IUUw6dFkQA&Dw1W=b`K|Ok-_3Y5_SI&wlrE6~WA3`POpIz&b5E z``f#AurI_~?F(zQ(FfVDIHsc9Db_S+RgAkp{?e0sQCo}OWlxr|(&nLxMXcq1F=lqL zo}2fYzw13;A8R1LuPSQ);A>(mDH+J)d>X10Wt~@y9-K8$WBQoctbHaiw(^L^&s6h% ztbM+4#<4e)oM#R6A(FG%A@r8?{7WY=*64S93=I!h19hIx8R2r)0rdXsw(v7M$&5M1 zSim*LAmeMzea;wJ>$tu#FXzy)BQHHp9rD+o@fD`yZ4d4gs*Ixr%)iv?kSjWRJf^RV zq0bn?o+5iDdFpdVg?++TGuR7R%^t})_Huu*-{i%>;&f?cR~K zdjxx>J-C;^p6j~d?47dbTGYlqYYgXtGdT~O%fDa2y8Y#HJDeN2+qQ>ujJ@1vN@6el z5c{?#IR7}s+W!oDPORPU({EJ1#(lS&oYS*b4SB*D2{F?!+4;VMgw=$j>C+`pNcbp{l=h=h2&F4ws z?bP_YS%_m^a=s$#cGhm)iSZ)h`kOWFmEP2QV(Gbzv#gc$NUYs_kFvhKK#Zve-sFEM zIscjbZba>_J(v8wz|RYI=I-1|D{^v1@`Ak;C$U*OnZ4ZY?Dw)S8^fMT^Hl7GWaNA= zC(o%MXF$dO&NLcTXRn-im9J0Sf;o@i>?wb1J3jX0UJtQyapn{}%8vG9*e_wf#oNVM z)O2!@Slwr@z;_+5+ra&Y1omaOv&TZ*s(7N%ly}swhyId9w!Rgq$57 zVee&J5PR$7`_mBav9zP_WdE>uclK`wb52E^ZcSnz-pYL>*3J{i^Q0y8r>n^6&73{5 zk5%djZznd@UUBE>BQ=1$T$@_sT#mi?609T0#f}R56anlJv?G5evewpFTa#l?=4*(e zo~L6@o_v$M%D_4~|KHl!(<7aSrw9N2^PfK#<^KHS;hF016$SqJ$-|@9znB00AM z{PWP9#k@Vz{BxgT9{DqPKXp|?r!OSF8=*b zF5dsozdyhIM#W;09v*%F{P=&{zyIgXqd@!rH=f6T4n91r=|A6whlTum+4i5yu(s8z zHs^OOTmSQ;NBDm(|6c9U=HJVI@AeO_f9@Ii_wN7O|JypWMUbaQT8|w6KC?*r|MGYI zC!Sjd{MYxZ`tP$u2mROglYj5$Tj4L7|Fd7-^=hdgNgaY1yhpVC(Q!;s1F7Ig2U(W)|_N znc1V}@6>;9`8VzT=WZVVhPbH2hzFo(Xx@H@-LC%IB&2@B2!} z*OtK}v~~DDPm+Ug`|mrKe_s24-p7CPbdG<|Y31U-S-J23@ecpb-`w6)GP#)3NIEga 
zH_88ba^jbAv56}xvUKl|{^zkrff>E~HNIX|zP z(x<@?Wv+FylDV2q8S}=iESfS$8Bk)0f(fgX5uFp172A#}oyuKPmX^P#bUg4%(IpFT?>~>3)+a6-c}Z=?ZwrnlN>;# zB$e$U*W(6B=ljE?NWYOXxaVl;Ic2;Q+&W2~42zPfkD{gVO{)wmt%{mMmreV0Srh4! z9f%QQ!4xTTH%?X>(`4ZM>Eg;ZQxY%Cl#n8`Wnj%Y;#GF8oQjz*=~EWUt5b`l_l#BeMb=AIpG^|^akI>uxlLv~*)GeX5~cm|{ZeVbahdOZR^syBlN;+E%gIM? zWl)|B0q;NO4DicWHXwgo(c}*Ci<3_7wk6eSIv{CnyAnxV&!tbwRsC^d$At?LhpCkl zH~c)hcUNxHo`^S%{0y%M|Cn#<{6msY`p17xIbl{M?vW;QGdsn%f=jv-ijf&xQRMJ&%{j+1$%z$I_Kj@zWYw**uye4fH+?A9o&m^eEN7?M1IiPG{?tsvZB?2~W_DVjbCnUv; zS(g+dU6L+z_f68Cl}kF4=4)d6KzHJo`2LCBZPF)BFy->=cWS%ewDK|j4L2|I@A>Gk ze{8vQO33zH%FD(+O6`yUWwn1pW#X@<%GvR~l=4|7E4Mmn%I*tul@?VND>cHlDDBo9 zR?0=3R+>j$Q9P$UR2Kh!q4e7INvY!XQ>lG2l{nvIlw*6cNn@{k;`OnJSoakd&+Db+ z&6x6%vAmDGPN^w%=hTrwpX0JF};)-FiX~sxN^qL?kVUy&HceDhKH;Lahv(zbMlU^RW zWT@|u7f)TXs8XDm{HIF9xM^Y(nIS8_%#gx+XUX`Lb7ktgc~bAzLTPpQ!c4Qq^W1lTIHgoiuk#o}}@EcP4JC zmL@T@=fgd>tA6s!pLvY`k=aN6Lw5f54?2)naSbb_WNcnd327OmEK)-i^;Lw@YWfJJ zWigBL=#-&+E4)x?cXqWhI%JR1>iJmi$zJ`xeyU%ssvAg2comMT+5O1;73WRgsfK4DRk zIod2I<1DgtuvLB?vdNo)s(58~$hKN8DVY=_Rc6IX+g(#-dc$cFFm;AB_%ut5OLOEz zrUlYv_F{1+Efb&HD`Y{n)zW|F8fi6sooq2}lB=-^QaRN&N!qhRwhr1Y`=0I<|I&xV zdGomJkGvpJ&#p`R>v!eB*vHb%|G5+qCKKWPNnpSyLub8skKUf*n zxU*8qHbBwBY|8p`Q`z-mG{INm3rZIQtUVL$L~s#w?Q3wU(#P9BSlsR z1;q&~@KA9q8!EHI zM$3zcadLm$M9GpPTIMx1$&zIjY2#~?QiC+fqUthWrb7yybBc&t4oa*; z@+=u$VYWSYAC^Du;@%kkRAU%BCV4WP9CB@}^CK~(q_#baMQ~>;BM`*0ae5ECjT7rHtBWX*reBot0#3#yMAx+g(3cvdgN6`wyUi~ znA$37A9YofLc^3*i6&)K23^V0J62ipZn5Iqb)7Q%#a^ZSl>eo z9x7?~zErvl{-hjSm0D`e$sj3XvPkN6*(9k*Zpl)-h~z3(QuNSrqCTxGqWDPAQy*E} zw5rtoUR%bl^^+n>fIPm|NX`rkmi9xNiF-hE`SCnVc3o^M59fE5S_2}btw>bPHts^93@Y(PEieVwwEzwOj%!tQ5~T zYve@G28npHK~k^TDtpgvlgXoY%H*tjq<7n7aemz=Lk1m@>(M7AZtVrBGw6~WTX{yC=W%&us3d3>@ZCg09CZXSdE&8fBfY^lhF9NANyp+ zuETP;;W0^Ra9mC-J1PAyU6e%wu8EJ=P4QlEUzCw4QuEad`P%ZGG_Ckq)UZ?m&(>xQ za0GY4D7mwYSA5^jRD!TTspq>) zDcNDYQe|AS;<@RZGXLcr<$Hq%%H_Ea6<@qj=Fj`AEZg8IP3NVNA}!KOxILrXO3oq~ z6LZL`Re2@z>_Sqol(&@gD<^3VmY2g$-y)<4BAZdkO8%4(7yV@&d?mPNe6t#UQBO_HwLrB*s!0`}9)h%A4hF z)h#kFahuGmyG!Ce?UA|7_RG@ehxqa$#F;=55f@ghN)HubpT_;x|Dce<&3o%B*^`|7oFXUlsf zsPk8))HDyNGd`W1T9rX&-_0gp)8v)cv+_%iJB1~9p10iHQ$}u0FE3lxR*~=1s>#Gs zH6`PXI`Y}oKzgkAmsM{AWLQd|+z4(c9fmfRLGi)TCUbMCxj0nJ&sxgWQ?2Cc+%{4^ zO?%0yb(5rry(Kp5AUS_)~ms zIqSS69J(wkF5i*LdGE`tn@^;9r`J+<$WKXKh#bk7DInA5ECE?=77pl$mjL?&|JC5%SsTl2lO26x0G1YH>ho!J`C~v6JBB-0vs=rm4e|zchFjA_-_LMK@`-peBelk19V5zrY zgj^mqTHbk%lZE@ESNLk<(ZU)>$M~J>uo#g~f8W{8DM-y-W_3SRuRDtr1(6b>h=ugM^mbBq1fYig9_H zTu9j|GyHc;|Jes6$a+|6R6Q#W?!e@V2gHzX+Pmej{%8ByoC4FB?8iquUN zFsXjpfUmW(2Lz7H6L7}2Qb6;1U6bR6h`Wgq-6!pR)4!zODX$p6DqnZ{ zS2*_4fB(UJ%BN!0lw6a#D9=s0a->0=qO^@y-j!adoWHbHxgUQ(d9m@BQoZmc#dq~x zB`P6BdH?LWl9=ng;(GF1Y1Ju}3>u$C&KAfZgF~}Ooq>5|RPh2*{Ay9@S-*rNekm!| zxuxa3vz&N1%S-bc6(q7@6EThgr5kQFbB+nS>Ju?oKyF`U+%YC=iK_AZ)@*bwf9u_O!w1I&ph)j+z-ml17PEX zK)CWP7)+lng<~`Xq;gim%fhwrDSZ?49f*VoL<~3sAT?zk zOiEnyT;J0yYy`p~AWo zuFLeohpYWC`|3Mzdh!X>T_i~KYALcTM2?8!G>Jp4DN)utKp)?fqCd3nqHoJQR2Nv| z`_U4(a`$acImeExIebMhlre*u@7&BBF_2>+$Ao<#Az*W4z1dr#QS8Bc$?Szm>1^M_ zd{(BpmUa2oz`9FaXX6fZu;Z8AW7Y0HXA|Atu{Bfwvg)_xA}MC4O)c9VC=4RC?1~y zzk)O2#pjbyx$G2}n-s$3!$snCRtjI^&%no~GSI541ZG1uOiQYTvYw0Z?9C;(wWSVn zuXFG!io*Jm2GCKz2FuKEK#OAs#C_|6`t)w-59@`AA+UNW6*o>81{rd z2f0`8pfhj~F8Kceg%yeflSh!7-5P{*(<9k-hNLvxgnVnaBQd$-NMUIu{Wdg_X7!xK zpapysOi{+<17o=|CXxAQBh5C{__7kKqF5%noaOa(vVv`o*|jjhPMazNNj}=Z+hzz? 
z`c2_!gbT!65km1P4`5eL0l~MqaF#BBzbAqqNq?mnH*SQ@Iy*pKoNvp;H%&&4*$;oj zwVLI)qtIfS4xRp)V5(FE6Dm%_hVLb?&8iFOmw3y@)Y5x)6df{t_%EV+3F zO3WK!*S71RefB0uwX{RemQK(de;3Nv-vgQYhfq1x4 z2HwN+luvN&*B4L?AB6YnLon*@4@h1vLD&dMGUCiI;V7DUKw`l$k~#!i8AmQc0-_aFPNWx1PcS6f!36l(5Uww8e%^~!_^@ua`_Gu zEB?UBMZ<`RrWCn9QJSp(Dno3G<%#xiMbh+Ek<1e+k&Y26WZZNWqMWNjLMEw_K}L-j z{8l5{3r2`#Hq?pWod!9|X%eqbT4Y+ZHpy9|L)y3Ml1Gd_F+6NYl%|g)J-tRmCES>t zX)+^ar!2^c+R^0Q0!#AM#G1?(jv*7QY)M{$ElF5sM~Y|J6L(!la)CFFyxinOj`g^Z z#7;LdMuksi$qC8!EQZuO1JOQ8$kAX=G9q>osUDn6L_#WL~@xiX%U!^edA3@X1FPlu{0w;6)Z?j&uCJ=*@|raXiX-2+K}zZ zwj_RnJqi8pK!VqdBQvi$5z|BC$?-|9hjR!R0m0Fh->f z{02MWpXqmCPk%4$H+u*(EuMkfrVn6q;xqJmeS=$;f8pC}Nit!cG&#RRh6v5%NKfZ* zVz)$z_{FM{k`Jnc8KzDYZ8b=ei54ko(k2P>b;+QtKKW2&KwjkB;ZOBtuJCdDbM<$ur6N`LD zawc*dDH-QPtV5kiM3W1dcGZ>Gn7NZN%LPQ}C?pp5g(T}ML#hLT`0EieD{%sOX)=*q zALmJYBE3kQ(1+*=CXp#6lZk=!bh2~r3}UcxHmS>=L#|KsC*LQ|Cksd*d0!Gldi{gR z@DD4^@j?^$}FgdLqWB&*1dXOIX_U4!WAZz_yRyLGr~fh!jYY>x0r{(kwaB zaYmjv|5hZEmMD|Fn<}JWxjI?3PlIf@r%76)w8+dxZE~jEIWbnX9Vsd5_ zX_GZ3h4W0vHbYY)_+v_rrJ51_*B0c!6iZShXGNYbwjyS(V@PYD4bfU+OSs{7L}uPt zGSA78yq9z)fx_`5;^TNCT;f7{EL_Q_>2Boo4R_KOz$a<#e6pfnNR0L~U{A zAX*v2i1T_G(sW6V9EliCN~M%Yi?_^EJsM)FO0=xQr$Sf-dRoyCBx2YQT z?Y{s|o?QlpA)ySJK>O|qO2C=NwBx<|0${Fu{0vfrNzEdHzBKb zn36?`W@MC#IqBJCPVQ@tCg=M{lbgksM8evN$mm;>;~%UE6Jta2#kkb_l0C69aUeaX z9f_aVXA7^6BO{BP$l%)X}Qqk7US(K$e)gh}ROIkaY8j zWMQ}$=}7V=Ehi?E=DSnKlRGoWiJh~_pVk1PAQebX{|+WoFRUcKRU65%cacPSO1v0% z@v+Rxl+~9}0!<}TSS{wC+`Zi3h2-rS<8>pS#4k@g- zE{NO%eVIvc@c9vFZA^n-b2DIYYbHcjWI@8}Y}lq&2sg}1L2Y*xsGU0x`;J@$=hVw! zqFWEx*bF1zwLpJDE2IqT0O8gi*navk+>w0_tF;E;liFKoSpN<#PWlMvw|xfY@}PLb z#V^=p{ugeF>t5plNusfD7-Z%*lujdOI#K%^d_dio@*#DK`3 zG$iwrN0H6Xj7W{03HkcWl+4&*M)owDk?*nQMDm0MiL@R~!fdRFXoofFpJGD>4DCqQ z277XYI1mGYBPlO;CS$Z+$;y-NB<(Mc2=@xfX$eAZhIx}`zLSW_jVUDK$1Gx@u#~+2 z9YP|$MG`G87hl@2?9|E6*sK+U?9X?@z+$F644hSf;LZ_{G+GOO9MOf|KZbB(fhl}s zEa0`4B{co8g3;f`Kt!z_{P1*y-G`iDONu*iNB9sv$im8KBIZ#i!Yh?YF#hZ`@EL)*i741(U{lP2vM@`(X^?eIEOW%Qi+DABW;|myW{05^k zenE}+7SDs3lH}$(X|nH_Eb)>aPQETuB3WrFq_A9rbol8Kmp_I?G1ZieHLxJV7L6ed z!H%RsASAr`oB+YRYpBFKcU1t-BEFAJt!&4zUQT#(p) z3f3IVgKv-W;mwBvC=zqQzD=jW+`Jg7+{Juwa4C$*I}4Z6%D__G=iPdv3Q8x|fUV3$ zNbJ7^%NB^>S2n`?b_&vhdLTiK@VWB_ywGchz^Aw1-sjt3G2=dbl^lSq&`%IHVGtr@ zB#D-uEYUx#Le4d6k{_j`$&DF6DkbfxlAZx72%7~Lg3_30!BXtdQe*a1v49<|GlQLd zcPZN(vWsn*9nW$yN7?LvZ1&mV0#?{p$~xv%u?}sG>|N_-_K!~+yVCRyYvc2R-E#Xa z>#zHn9nk*CV)h?)ZGjXl7*vEMpau%Lda#We!UQp|((u6wCakp)*V|4IXXgxK?~3;f zt#N~J86KQPA%rNi5bO`|+}#uMMotE!rpcgnavEGQoCT6sXT#ji0FW6G2&E|Qrwm&T z_K#OW!^+h#l3fo`t2e-%jBqHpu?d##M|PlroyGhkSLCLCFO5;nijf{nMc;qr)F*co#Q?n&gs zyPJN>S=JCT_WzCmO|jKGcYu!90qi%U~^eDNZ4I~A@z$ODduGw=_dohhSoJEsV zr_jlw2{dzs10~Y7bi!kETJzSJj=rcv(+d@;$0iwiWBN~AzvmrVX+Oe&p-#Mz+J=4I zP58{G4)5n&!0YlgXtbaTtzQ-4G1FX(c%O-eC(^O2`Y6t~O~qjzDL4}okuS9um!!wx z(8*mmB0U;ImPO+2UlAC5YBLHBg=41VIt;tH5_{VhW0!vbYB@~D%{#nt;TI2#UMNKE zw|q3)=7zT8$7A9uCtM|R!2C!Xd>dkg{YyvVne*oOJirY5e;8xSenWhHPZy(~YvJz@ zP3&Bxgoia1a7(2e=KLLoG19-emaIW;rpJ4(I_DWTcJ2dif#Y3nvBW=I+3sp~*@;hOAkSo*mEPz?N_9!!Oq>kxUQ)JnR3s{Tut#pcCE`4C0 zN}1}twDjU`s&s5UZS9&%LnqFnH3~ozeBG&j@HkrWVKgmRV@AuX3~0Z+Hno#hquSRM z>D-etRPxprTsr$P&cAUF{cZ1|%DG;2ebt2#-M28!qzzk~Z(x>36K*=vfNBODe%fAx z&wHzIO^qf5?P)b1K8?k5~6p4kW!*NREI&|By2In&&DCfKk zTQr04MfN=G@0yE4@-xvab2?UPPQy8#zG$pE5&L}kXuaPBtuBnm$q`Q2^2!eD-ddty zf(7Q?F~E=av~lHQHGKF;89O6~qsuTkOsSB>z|)^OU3P#wFs+vpHgs@{el&62tIlxQ z#o3%bFO_>eDV{TXx|BQmWd>LG-kGz>x+^O4zAWlUJt7)$?Uo=t(vI0N`~c(C?#!OM z-bkzN<>2$CjU)Te{ckTWUU3%# z)$X9J)ot8ceiOZ7ZlZ2+3!eCV4U0xLV(x=_)GMMGo6BMCggUflFXQ9A7sWk<^LWOk 
z0)>(1P-J}$|C*k~Yl@}V=UR*|2a2#>vH*Xd%tJ5x96Yf;3w1h=qlbPP_VuJPPvshd3hl0q&A}fZZ8AxH6_2-NkXQu(b_;Yqp5j%r)E{--t#@S5UlsaCHfX z`$pE`GMP*0x8?$#tUQlKu~is7t`Z;4DaV~_&)^;ZQY=|ng5KK2sHu{VTHj9L9MdfH zug$>ktJ0A@eGKm|J&NhukD#MjDz?QPLhm03vB)wBrCkqTna@64Q?&;RzQtkp$la(> zzZ0KGZN=-}8&Grm3Y?J|jBYIfIOoE2R15aS+)6JDv-ZS7af|=FwgBIEIpap#u{f5o z!{8mp6k@`e>^-V1Gw%$P(#jU3K7ei?I_er!XML>UC zcBT%`$I|^0wlw#-1=Z3vrV^73sQi#Rjc`|@dm0qzlPhvmI#Y(Ooj;7KWd6ll?|-3a z=o{wvy+=KzXXqi>k6j*pDDBaMUG3diYTt#bA3Jd5=vL&%-oTkH*U>eg2~Ec|;Hcnw zthBs}!K<%eeWnNt`!1o0{6*Y4@d9>;&SRHy724O8W`4TV@d z@)X`VnT-QaGjaL7<5)NOI2M&3#?t6R;yjj&HPwmeYP=8UDeXnIPjToVxf>0?#^N94 z7?k=Ej_<k{cU^R&h2!k0tJ0L|3UtU_nl94& zi^uYY@Y%7?*cSaB1N7cuaP$k@arQCt-}T}`tGl?2?Z)2T4(#-6N9i{=@$HBkIQ397 z-kR2gGiEj5HgOCy_|BoG(`8KSynr6;d5nBeg|*Yq;rGt7=w@*i*PSTA#}7~AW8*^H zA}l~z%Tw6#DF<_}oItz0<5;~o4OQI^qpsIMbnx4U&%^fMnwofVJc+~VZ!zdSY$yJ@ zABDXn0uQ{~j0Hwvm@sY~F4td+ar0K=w!&o?jPp^n&<~d!nvSn3rr>soNvLP-f$qy$ zyxQfCWs#P6GIkVJFuGWHMFrEA$z!zC5AILb3vRhn2WPI$akn#4xR}x?QdkiJ8aw-ilhnDSwv;+dbo$gB#NrhVf=20&Y{!jsm(UHl6AmPN1rSSlT>n z1HCzN1%2*6g-+?=(=DrPshf=@y{>LTV|7PS#?g=(IqK5Ys_Jw_ybAr=sZ4*3Q=n&~ z|X4#GQd;Y$;q1&MnEEFO_p1kD*{uNdd_ETs+04N+dQ<&G0=%?~XlA z?L8CezHR%cLHRm5)_N|Lc<4p=Q=0zeh)h7dUX_0Y-nghtG}g;x>&A3>tkCC!05; z)U_teU0;u+_X;YeAlAfR#7|}yFz{=&I4@LU-S%?Sy>t#=RF z0&H#1$EiW5kd??rOaDx~Fed{ax29umTN+M&a~L!B9>h~ylMsLI!@v!D@!I?Z^zzz` znj2$rO!`)=+r1GpJU8H@J?k*Ad=*ZrT#jK2m*F;^zt}gt^Wo`M)J;{E=X>q+A8P&vftvbPl zJl@8otkC1$B;ONl3_2ysr;QgYYb=DWOSTDBLrR5FYgCxvOEa0-Uw1JzWA8A$xy#v7 z*)|&3nMZ>vk5kvMgEVyWF8V=pJ)Il2gua>NOQ{Esdeu1475){E zF`!H4>(YN7E7P82Ioj}1hH9q$#l=@KS6ezFpFQ#bYVz=U&2m-HX^CUyG@67x0ATd3-C*9r16@ z;kltRcxGQI8j5qd^zj0WZ9Ij%;2hi}lZ9!{$FZ~EFrJuw2nY0&QQadEXS*H1iTeBS z`lbW~iFgc**o7(mG3Xx|jVGT(qPgx?Y?s)Cw|=d~W6zf&+*ycldmbL*&qa&msitw;qUExfJM#xOaqSZ>;$1V> zzw{EjU7<6Jwq51XtS$$dx73Q>NH(Xt zWR0l*as#?0O}zIzN`tO%P^Co=l<0{Hd1{*_OVDp`33|!HODxa^Q)lUwmo~c8hvP(Eu zjD<~mYjDQ-YOMcSi7P`YaPHc3C{#a#d2&SDlR{O&G+`=J>@uj$lr;w@1xN8)K<)Wy&2c`hGV$-T5LC2gVWSj>H3o&~dT{pp#Q=j?;GODAAwfDqmHyW{P=aX5I?292C8Q9aQVckUmB zZhpF0Z~KR9(frQEJn!Px_@3dMBVxEe%L2Hk60^7|ie8*`wI%mMMVtF{|E|bt$|TV& z*&71Mluluk%6jHF`UpCm#?O{grdb%N1?$@Wo+%@U!Pa4#7%m}(1ROzH^%2ZlbfhPPTN8j%oMsJ?| zi*vU9L>rGG^w#)-kqYmy^zs|b`0yOt#y!JnK98|b^AWynx`#)6?_$QH+eoY0QES;v zEIiSS4;Ed=+`=a0`8J^a0uKKyufyqCm+_?GW$gF4fWdOr7%x|eA0^6hi8!vcsFomq zS0Vb&&POwwZ2Wd36Z1x;V@9M{iQvUyG-^DABNry4h3Y|kYJUL5`UV(d6OZ+)cjM19 zJ8?s56qekNK>2qYv4O6^b#qqWiMLB}|ASz3>|KC)e*$n^xF1&M%|I1#EW1%C#DfAJ zCKQgtMb1_@($^GiPU+*U7F8@>tAMNHe{ggAo^Z2PT;o=ei`=MRl^lQT0Zzluo%7|7 z%j#0eMFwa^GnP;V?tVe4b-EUG#(-lro53f`@aZEBDEQqDM zdbiQD@{8yUnF*9Wa;M%~Z0Y?aqv>S%k@R_qKJ~t(O$VqJt^BJ_S9ht=KaW)C!Ntn- zRi6?K>XWBMnzFPiQHoa0m!R`6en;NVPvSZB9{=umgFyzbaZK7vlx}*0Pp3XYU%iKz zw&6YoX!oJVKX>ubtJ_#GxdUGhwxL(w4cxY;1(R)>@q+F(yj0PEqfa6Z*j~nu=W20+ zTn!4ZS7Y3=a-@=Fc)z9;W0g;%w7wXNY2>5xtX$mCl#TbZGV#Qm<9POTIwl@HiceDx zVc@A`9A2A*^>_E7_zo%De1A9gnD4^iw>xlU`gTm^N8k$E^=RI<8uu?=j2iz0;Zn;v zc-Ptw_oht2q?cYeX1)NQh@7!VD<1_EMfu)zZ zSz*PT*`dYU;=E z_si*+KSeY`GmTbD9HF_B66k>~yJ_<8g;f2PKW%jKq9<1Xt@aYoiW%;7Ri`7RJUhC@ zatsw*GZ*t^#xx{jB=w9kpqJL^QR&$_GF0_k|MZx!-$o!5%?cZCF5r(1KzO~|U zF2NVvJUsE-56xs|;Am+tv`ZR~#tKf@w08`4#aZL|W(&M_zzA1K=wMCg2-Li)f};Zd za`zGjxZ$swxP`AyaGQ26=LSo%L>^Dn1v4u52uC_uGe!5~n8QoIFj-lF>@@Lt(5`tB zSFY8)qaVlIrfTOoy0iQOO^_<5i{lFE&*QnYJ}8ZrH5{b*6$fb9$2i&`A4k^ExG4If67jvb4&%)B;S?lTyvnG;>Xgh_iX8Fc^hig zV?~WjM^jOlF%3OnK-a5l(9*7-A3x7(`!HM5-gxqJ04tS3z z=Do!&VK30GzYn)x?nTp;-8dv@#q)O8F?4eiCM7f?Ay;uMC&G&RmvOB{EzTQu9^=(2 za9w8^*6uCF&Jjfz{N)rrd7p#7luqKl&zY#=e-vGhrr@sGiP)I2AHO!n;uN(j@gyXF3kITJ z2%pwiFmIP9GaZ|H7+330%xybG_QEm^cHOa&Y~5^6cAH>1d$DUb>+^6A`~FZiD|&y1 
zeOCLJeZl|3D*8G?POAKs2jzqGnE5}{SCP_-Obz`tSV5lEqmTm>38L!PRtOVhkv!)Odof|_~$ zM43^87;@}2-k&Lw4h@^qr_7u^nO5CM@<}i~71Nv3A!?)V7$27JXxJfrAxFfHn%ID&p#~KRF+x zKJKJ`K4)H*z}+Zb%~h`YAXM3@a~Q?BpXZyr>T(;cl9E4AC$SGuzryCrgzW}&Xl?;RMX(qr>Wk;d@8M) zL#NNkq^qwSr6!Y;=&S2{DJL68pKgnx3fs2Rkj>kuX-7D9DO^qKrv%auALr4BwKJ(t zgBOjC6ws%Wo#`L*vGi!EC2fy2r>}mEqW$W6w9Z_R@5OmjvAq z_!SxR&**UaCH6!<#bD(g{CTtsUtDg(-kf^uF}Q+@hgGAgpbWKB&fu+q(`Z$45?5SJ z$61c4Smn1L{bYAyTKi@^Q5=d7zWJkLod@>2jl&5~%rUV|7xPV&@LJ{o=X;jprtgm9 zj{223alZ2924m*FQOTM94Ek zmLnxJc1jL4cgUivxOfu6(XrbHq;|J@ir#Z(Ja>9h$~CIPYdk zo~AISEjdhwBr=|*FPY3Iifn|_NOqaIB`Z2Pj$Ix!ft|Q%A?v(&FRTBeh<$MTEIY%m zid~_1nH{0n#2zedV;4TW&*~lc!d5fVko-p(^gbKG9CrsulOwR<&15h(T`WE$vlSer zvcYJf8}$I~tA9jVfJd-^KHh9(d*$|~s73=d68vQ%ki zj{-F-k)~d464db55SllS~?1e?$k)9o3FsE;XQ(@G?ru*5LZoGNen2 zaCT`XzP+4?F4N*L!)S+C9c%+$II|pIo|%WYDyCuY3xa`LoiXK<9cn%pjcx)%tQ{|h zR-68Cio<%j3!^x0pD>U6;gHM?yp7|=EnCNZ9P7z_x!);D7+qWUWrL0I>FyoEH6m%| z=`c4&x?ltIV!|QDAg+SBp54Y=(|W+1a#Lb;3-#Cq4@a>DNjB``=WguIv%c&L@Aa(w z^d0QsU%S|x8+%x3Q6@WKZ3%nn*I9P;nDcBvS_>=6zR5b=?Pp8vKeH7IG7ws*11jxS z;`4;I@Y{F-Oy%and9zUH5})TgRV#hPq4Wja^QDn?*A`F-?M&LJn@Ue>CeiiV_fW~m zNc!>Kdio({1^vfw8FhZNm3PwPB>AkF)LX3L-BmpM-{wD=$Ve76;+XH&GhQ-`O@Ytd^) zHCB($N9E#dY-~D=VY~L==cBP0VJfa264&DR%fUGP$UgWHBdLfMY7hq0>$a zB=_}D9Px35t_0q&dBKhMxWirATE~r)zsRYr%i!{^?dG)Qyf}X|6V7W#w`h@PnP}ex zRiS)IxiCaUgBg?0GI`h6G9z9eWp>UgVv0-387cJ!<_J7wyqtHYP8jm)-=@hp7wb?fHH*bK_Kcew-)mJKU`7H?N{q(7(rvgbB@}hv1Y^xge=O{E!_J+yILX)& z&8Hh<#aeX?Gg8JAf*;(ezPH?tJNG!7^%dNdrXo%)KZCn}bqzOvG|MgBqRHj;NpfYk z4~srFj}RKUW(u|HI)xGJFh;S_l)3FXhH<+#f%&^Lk}3O^$E@7l$Y|EzW->=TXPSz> zFzsK4v)?>NvSVCaSP8L|X3oQz?78_1*sm^|+0%{DtX<1)_TGI zx52p4Uuj_C6`E{PM0b`ZQ4q$_Cs~oS^iU|3RbEPOM+Z@}$Nuzm)nvNxxcGX93O+p^ z=}bFv9O#r*OX^=~My1V7s8*3Kt*le0uM3pvhje*bD8^8C-ibMu%^&d0$`=S?jy}8K zHtK6N<36t|xKgeT&7N0a;+SIGw75Xbcje&s9jSP)ARd3oY{L9Oah+zj5Vx65!uo1A zoYOi6o6GfZ=hzW=@~#BBzJ0?TiGRo`rg2<^PafCnn9L>LU&}o|=*vAgIiAaX^-9!g zP%Qd!C|Go7fvl)E)mgZ1(=XwMNDC%5*^x1Gn8&;?+{Qc!*vq8nCo&7x6fq$ST9_3M zU5xXGdyLN8L1uf69Ge!P$Znmd%ohGJVE279VZ$vP*?_}5_P|#GD{t+|4!TTdgBQ(a zJ?<=Fr)>^p7eC&{-q{k%&X}=>BXjjIcv^uqvQ>Jp_p4DT0Nm_jGCvGbLfVxLs;O@{~{NdM(wwhNkihlve+g0Ma zv;xfYJ%Kr8NAca%WNhh(!=T?Wn0abD&N~){{i_yWmVvmhn(u`(#CseJ{ax^%8@4!Y z%qYCNW&~y^eC8B3-QhMpZQ|}%p5y$Eo!|yAle5w~#zo8w=gw&?=j3w)+{{_}++J3S zJ98UF%*H&?)8o@c+1AFQ*poN-JG$!yAr_v(j?g0E%8~)0|3-Ia%eaXQ(s|6{?)A*7 z8QYo2qFqeks07Ap&I#seW&G%rnlYH1}?*;7N z4I%9Mq?K&6N;vyGV+Z@qK8Cf~5YL+KJkHk7EMn`tF0db7wzHoUhQsrhRrG^aDs2=k zrPph`DAR3E8xD-7y4MYEp9xw6stpmaD(;sKF^jjvohntb(OfPqCwGXrcm@q+FRtXYozE* z$Bnv?J2L8E@aW~So8tv@LZyXIm-`5dRU?E!kIRMr-uHzHM>HAB0()l5eU{03Fqi3X zSi$JDMl#>`9AG@JBr*9x8BAPV5mRGc%{0hzOlB-)J_R%|p%dGgaYp|zUK{(Eo~C~0 zlH60j@4wEBP_y=Lf5hfm?rhzGW`z08EF>*&*t5>@(by8^x2 zBSj}KlcW>=e#h0Hd&NDWRty_oC+4{?;!e|Yv>IE4wh4Lo_VGzne|;2_XC`5&U_b8o zoq%!Ty$R{!KDWn-ZMaQ!HM-6WME8xev2Cg^=1EP!(>sNDVwyYt4YkMB-j?XO-53q| zqtHm8htowGc=EXlE-4(24vlg+`J1E|>;B@jf(AL|;qSTNgg2a{Ngt=OvW=5C+r*vt zM!C737r0YHC0u`dA!qs_kK4C6lk1a8 z%)H42M|@|rTIJb`EhenW)UoWPHg9%}(Gu2Q@jUxqr&swc{@?Y-{(I?n{=5F8^na+B zN=U@){`XP-@2Y42>yz<+JZ{+ae}2#HKi+d&^?%m4`_Etbw&Opa{?~u~zx4E9H?QuN z{=4P=d(Er=I(yW1{Et2UP3!PKb%WEt{*OKW|MrL&`R@_wf7Z*t#aAiGc^>~A?eYIo z?EmjN*6uG~?K7r&I}PmMIhS24aSlm2GG5`>1(y)@-!4AF6xV@Q-L6AEZ(WCG&vwfo zrEUWjZ`?w(_1&F91@1ok4!BFK%yAEK8Fa6YmE&0-na=AEKgE+UuI5Rsyv9o&GsGJp z@_Z*Ld%n{XKHs`-Er002LB6o}4!`zxKfl#jQV?~^Owb?cC-j<@Bikwl1+o0h6FLrAqtu<8DcAEd>rX2@p6f)(O|x7 z@`S&xf#=NJoH9$@YD3iByPf8_2Zru**PQ#>U1GK@FYLY6p3Sa;7yPwz!gzAhS zd02oTWOa<#AE^SL_&kBN;Tu8y?~!7E%>9@0iDtBqy;C0NB(Z6Nv#@3P_>gfLE>3v` zE>0^nU5$R{xOTr$bqh%t@0RgcL_~-Ui};)&-(gQUX=7&o~C{jFXVY1Pt&V|*Pr^17w;s`4{O!oXDqkn 
z8#PYjI~`ui4{=H4D^#82OZaenVfbzFy7|gC3ep!0&9E1=t|S8Md;S6=_jtiTbEzQT z`KLev?EWQu64-v&G5OLYC%=>oXD5xq@jk~iT!w13T+2$MTx2d?KY5yx zAWYd@ATcmOAZ(c-P~h(nM46Why3Ga!5*u9pC4BO8Rfj{j)GDX?Sn2Ujo!&0Vt3SKc zC+WE+tHirz^z^wpeU);H(lm2Rez4PRNIumqO!7@p3{pO?qWe@S`yiJiP;{b773#nJruDo=jMk6?an zSvbG-^gh0XTQxr<^&vk#^e4YNN>N~~WGD#HcNAn;≤&Z4`9B$rU)M$_OQ9O#7Gc zNz|7e_5+6mPClEhoLd9!Tr|0JE@kfrTr&E>HKVxQwe_ZkTkRG*xBA~1Zc+K~-F%kI zxre;dclVpZa}Ufu?yj&?ASzc|LD=%an;b|UR%5&1#$qO_)AjVur zc$y(sd9@F^c@lN6dHKJj`3g}w{DH91{MxnzzSGZh{6MD`zMs`&ewouCzkB0IL53$0 zgxM_?WXwzuM4d?y42@G2O5{ZTOZep9FkD$^m!sypWlkB^Y2ybD=(+UodgJ1E&(77) zC&kriXPv8$=Ww^OS#ECi=Y!lN_8xN!`E%bbx$>`DchzwB3|}Yrj8$Rc*p=WO6<6Rs zF!ipx^_pS4z{8Hb3~L`=E8QUWM+&b#zLw{-@eZ$b!v~(xeJOsjmp0$8mhj6A=J7*@ zcJrMU9p{JC)bW!?zvh>*ih_`Eb3sVLG(lkZ8bSZA9fFKS+F~qR`!C}Y1^-K9`zM(@ zOQ@wg*Z;$e4{P*wF|sRn$)7ROH6tj&HB8v!`v0-_=210%fBX0;&7w4#OK8%hQ=N0~ z`#$^H2q9CHDYG&b88T!{BdL(eDHSCpk`kGRkZ>qdGABdGOyNy^@6YpFpS7NUf6w#& z>+`(NTCIKlIrnPa*V_B_dR^D`+OM$m;qtJt+vH74_sAt#x8)&oo8^%Y#e8@`53aG= znv=-Kag7~Ta|+j8Tu#S>66?GS#!MGKoYXH_?igs}`il%Z#$+ zdNz6T!1xMzp+S?FkJ)eu{TWwhl*lReq;WNe4s()cWn7b`xNl61iQYI$Lb`1tk}4&s zGdf2sxTmDCLksbj_T^Pe7xETdB44LG!+TvS7qR)yt4y>6ySP3=-L|np!YZZUKcS04 z;-W_X^}qgK_|%;^+s97-l~mfZ&P~;;(cSL1gGc$xV2{FzTo0-Fb&s%59b`=|L>88_ zTjnpVkf|(}#=1z*Io zOgz7S5g{ZXTu_V`>v%)S{~1h7a-O*aTHTdOTwc2Qha|e!w7Kmb`PIQgv44_>YQ-}T zJ#BMY`RH-75XVGWzLZQZZKUbv|CM+y~fJh(v!2DcqMQ#+PeEp==)Sa&zv`WMvp7)Uy z@nPOqc*%iBe8M(kA!mh$VBxq=2vO7vlC0|g6K9uR%`4ZMl96r}A#2^0m98HCUyD4X zQ`^g=V>ZhqiN|D;D|cm0SwCcj&35vHTb}Zo*;C|!XZFY=-<^>w?IfIq!j_ACDCZ<> zDre`vk(0RW;=+<{a6aSTae>Jl#8_`ZLQFXla@w2JY+pGp2--zGUg`81Vf@1Lk zq3m9n;Qv>%AW59|AB~Ch6DNhda7ltZ z#Iam1ecB)o^k~aj{B-03{YG)hHIq1H{t_$J8by!HO{8DeB-6QrTV>*pw_w*wZA-88uU0xNx)FYwKyb#J^6ixX_sk>^zVwd$NuzKN!QQ z($hKrV<$Pg19!Qw70p}|(XJIpZLhM_CkotO(@qKAlSM23KC(xAlcmpiYQ*JN*(2kt*9_mOlKFKb=#5zYD^M-btRG` zgoJDvO7!YOiDG{o(HnePtQp^tkT*X_!i;WwxuzrEI9)ua(xZ62vm*CckR;ae2YIg@ z??tY@jo|g&T9B4QuxPhH2pJeHNU!Y>%0^rlq{p5LD#20_mNQ!+vHZ`%MA|;hxrTFb z^Pk_&y|8A2yX3_M_j1z(9wLV%v#{GNv#7l#Qx54V*X!vga*dnijcbbKChm=we8ww-ItqcWAm|m$6ZC4L1j(#8 z!9O8ikRB=)l)6=d#OtHr|Jp)P;~b`tBxe8T?34bp+!gy}9ws#2qh{m{56QHyvcNOj zWhPUaWPxiv?))E^F(SQ?}mSzs(PKX?d)NbfKkK15TF(S}m5d9Rux@?<-OwADhP_gbb1WT}tk` zRF&7sU9V4f4{4V>9(MZLvhpA*)7uv(Ycjba)`-32KHsRQkp#&T;?v}Mvb%C+r?2v| z_V%1Ib_7>gGmUFpnZZf3^SQF27r2_4Z@G}iM#O(P#9DneQ7wojQj1I?$+|_F4tyZe zaw8EBD?Z0|Dz9*g<(sY~^Qwq@e0lE|yj?F{!KBmJw@j&PBro^%6}iXl^18R8H{zGN$j{q~d2s-zc#y;; zJjvip3eRzps@vlFjLUh}i4@udh`H8YqMZAK6rwxdBp<-noteN#{2Pdw1=Sl=`MuSjT3UF1qdN~mk0?ykwW0*9b#UbE9AVW5qu1LC<>R$6q0Fk z|D!SaH#YxXr%!iul_dU_>Sc$yg{??+w@csS(d2d4Lt;B#)-*FDh}ITRUZQ8|UH+m`rW9tr`S~LDnVe)Tm{?1j|ZPS=Y!bS9%D_^Q(lK>}(;S>6`dnUt#xK)U>9p_y?1}0ic?_ z?rtXSyUG;z6tYM@Q5HxNajq;QO=2QTWthaDNhD|O@evdcxHhoy@k zt&R}h15-)&MPOE@#XF&g0yvv=#fYjRQC@H2}A1yi%9Y8s!I9? zlfMCze?4M$?@*~~gv2BA;cE|-;X#?#=I$=`&@zaQiO^ocaEEZHN>@}Mkoqe9fg zcFOBQKXB5MhQ!NG)VeDA^Lo)o_>e7Mc$JQ`&@^Y0P?%6CdhTuuWqVr0I%fVqnEd-? 
z{!MFkc(qjWd$NaOqLs{KPIu9pq{(^R9UyYfYDBtIUGz+N@{J=Sd4Gt!>5`Ig&*^Tum$IZx*Lt6H+925nEXwcDBfO|T1+_Y zp3uLihqPq7hblEq=B4N+&#AgCw=n&}NfxakffLUVX-{`PXT@VaC+iz8&89+A$9Y28 z(Kx}UVYg7&O;5~)H~oXj--OA3e~j|C;9efI)?@v{dvcX9g=^F`Ad*otNX@#vBG><( z1cpZNddu4g%AK}?qO-4Hm!2pjyxk*6?yHIVN%%jQ{P&mnH+{dU-{9uoZ0nKmYo@4Y z8gOA(f;m-^9+9lxO_~~>6N_3uzUFqN*rU=>XzbuEdwt zBv%d!%DXj!M6&cBO#b`J{F}ZjM;?)ewIsTiJ34xV?CL2iuWXP>e)Q(bRy`Ma&0^BH z!H-uxD&r-qO$6l|CM2u~7kuQILc-cgQD+JL2a~@Ehkrjtx$~toaL`fr$S%$vg(E9G z^v1c$6N2~1rD18J2fR0tOe+)ps;+!s;0`gycNPiK`o!&w0ep`z<*8x6kskRVkHCTw86fKDQ zfDrabtmCJ>6ZMpTV*K9-#eY4@r{SiXvd1=$u%$J!#!J`5I$rd_j2|WXy5op+@GCLr zGT@bpalG=gnjn2TN$~0`YQZB)gq*xe!As3h0qJ%A2T1@*!)k@se}ng}P1gLYVh)p>W-6A)#|uF&EDL z=j{3$K_QodY!s_Hc}TL1WXj_`YQFBWT)mLr0Hikd=ies59JI`$7H|4U4y^+#R9)-Uu38R#kt*;yp>+PYpYoiBPX zqP@7L#%-KM>3z;bd6-Dsz9Bg=YxuxHXLz3r9(gsVv+AAEri)A-7lFEbMFED$+iag3g1{Ch#=t*=Nh61}O5#2fJc`TURG zfNH^T_a?E~R9fOH_6VDZw^0OgVKEJ2??ZQ@68uQvp#UOzc9~f0)!`NKvqZmeCT}tD z5??p*o!B$4C8+fK2$GoHLfDZC!RNlZ!sncULRmXd^miTn2b2F1A`(f?r;%=rAG17E zjoR|C@weo54pv-ESh47BdBLeHgNW)*DUrT@L~4v@@yf{Uyd*1+58QN&Z#>+<`@dEb zdrG?rfrFL`b@OV4Iv;IC&Uhn{ryr`2eELWG{U1Ui2^^8>Cf&Y5?By<$C0x*wTLgH? z3rC)lCoIko`%b%%ro4th3Xv!O2o}5a6_GaV zA71!>fycjo+T=M!s*KI>2%PxK!{^FGS>d~0a{rGbIO(LrT;%2s#OK8o(PM1Rmkk=n zOKhi$_uoYSS5J`tF4K_t|@3m7jVD@=h-u^m$kXJ>```)FIC+^YUkB{lx<#qHxz-PL<>rZ;xPXfEb zHb|PGg}^2~oaEafx!wSV5;Fv@Fvo9=POuB<0@ILgh#cAjMOM90a?cu1+VsKFDhJG3 z?u3fpQnVV&abv6;qdsz|zoo!jcVL;SADk7QP$dk8doOQ9-5(CK#L+nWbPN)nj79TI zKm1sUCh z+>D3k<8Y+&HiTbF#>}u(#I4;0SymQOQnN82YA^1s-jAQ|g;2Sc;B(a(+ z(niWU&K$ZO66XBw3JW)DWQOm8skOsOI%j4Q zT`*@4ZJ(V_d*&9?l)iGEA|nz#Jn^bVBQg&L}w44N*Ckm>*$xyYk?r=(x;qE04*9QymO`w>V4!pVTiC4S5u_Apmeir$n zoBcR!&Yy%;Su@Z#cov51&c~#H1sLYB2xZwzk$gE6PdBf?qK&Iid2Kz6#Cy%B9*e*Q z^(fr2jDh~5&G74-h_vu+7sc^?kfA7E(v2KZKe!1n83;UD<}jf2!!ajhn+x}w8chZwRS#Dp2VwP3fJ zY}r}^j$I2K%%-m!%c37FX5`HVR@`kn+bPbuR~xa9&3u2DHKt!<81Rad;b?8mW!XWAtEor#(`;bwF;BG4d20G52H_?5XI6<*M$u zptQv1-94}~yC;HrS>r_qTjZX%$6-rnJnPsO;K}3h2G*k6uwjp{eHs zTpWvr>T%e&Vj`kT{9#6>VzDd$g9gpPkk|9E)+88n*DS_}`%7`qZ5?9fZotCn;fOl5 z5n7Fr82%&*lY7MASlK50(2K(yk8QaBb32v|Pk~9mP8>U%foiM05K0RWHnap=IvhpI zlaolhdI_8DZeqvr+lc7>5Hrs_!SPEC`1t!1w8S|yYYf#`rwk27hHEpaIFsgiq9J>E z)S8`BIxr_CVeP*RXZ?kb<+Hc;%TElbLBb&oaG2{~4bm$Sg{%sJ| zS+={0!*nfwga)~sqEUKh=`Q&t>JodEUR-;V?vdQ3I>#PR zU7go-T1_K;|Kt5hh8a=475*gM1%nf(SK zFJKtl42A>Nqe15S!a#EZG`mm2nVKm`e=-|Vv$@##V?O3g3r5`cr5N{q1=^XfL(=UH zSSQ&C?n?v)zYUAEn{T!fb39lLw!uV#t$@ z!EW;zoOZc{WyfwJA>uatHr<1F&w6yOdx?pQ-=lHzFI;>02dca3%zudnJ3mIBwWhRZ zt*dNUozRyZQh2b&ok`H@v+YzfN~n-K4!w z-=mEqYN@mR6RM?rMXQZJ(y6>X(-V+!!XF8h)6s5O0E(;UK;uCWZU-&GrR1duc^?Xm z1FKQ(ydK?RHbDP!1m3QV!KuERkXW%9i`ORLL}U_bccwzGdm3(a*o9A?S@>qMABzo3 zVcB{dYqc++--gS0nq37t=TYYLP^*Oi8E3y18`ry8;?>VTJ=(taLwaIS9Zgu&NGIuhq|pQaP~{6X2TUC34CiQ9aN|5MFI0{M zUV*AZK&Wg0M!gyY({ICIadtS`%^Zn=8osFLF%dD2{^(mg9k)NvL00yBJU+VsdwqhD zlClVThZdv5qm^*r%#E-&PvQOOGky~ZVaf&Mp$Su zO6127{NWU8-<0FWyely8dK1QTtI=!eJrt@QV|o8qII{8$Mkv1_A^aC6o{+F2@%-{~ zX~#5s7_v2OJF!)rU04$nSZv?^?BtSRY}Ti-tjTO4+xsG%Rfw2;_1w#DUCv{!7Ymtt zRUK=*Z%Vg67)vk3tfJfG(UdRWOxHyysYzBIJ@NT4t(kR{j-Gm)Hn*Oqz%lfIUC0@g1v0gs{(c>@hB_k z^NOXP(xj#*N7FDNgpTVUPQ!EK==pA`bhRvp&S^ME-GyTMVNfYmX`d6ZxJdWEy+Q}x zx<)%y-lZ?-18TqfF)ch=M>DRyqt+|G&?iHF(-$4upjkr`N%yr7?$j21SUUtH8)EPh zV>tXW#e7?HoSWDQpI&yx)gxU|_m`N9HTA@yPqqlju}5yI18%%>#Fn|vFb#8s?PDqC ze{{#gBsuQp5Cm2VICPZa@XbN+@*RUmQR5K$d^{fI`6GVTbo6*M6D!)zM*pAlvEfND zoKJ+{aOrZid9ezmuQ%ZM^hkJ4k3wr=3yqGTDS9 z?9Jm>%&SSA?wC4^zBLP>w{-)#=Hcb&lQb;Xu)3v3PSiJ^UaBk7$r^xN2DOlcp4tam^|vm?f? 
z?2EQpZg^uZLs)k?e2)W?^nOU0Iv8giM~>Ra)8W)dtHrlPcc8YDKEXydX6 zvatOa+^!h28joYl;j{QzUyiafml1um5_Owy;>M}FNOOFIK;Z?HRqx?a&{1r=@{Y{}2%4p2?U^-;W8oJawRs_QYjf5#d6 z6_xaDUlp|2m5Ga)@dDRIOsz9 zr!5Y?(8sjihNyrMmMcw>D9*0B-LDIZjk{yncuPDUYK4Be)^NCCk5wc4fHpW{zyude z9^Du7E=!^0-EroX49nXIaGSueta1QW^&gB+OGlt{`~=+XI0b)|&cI;xKrFj78;?fL z2feZY{iF-A@>vKx4=zJ`!3r@4ScM_n26TP65pf-(u@9TEk;Wm?cMDQ3Z-?XFU0D2b zH)_uoU{%Or40v?}Wc6v}Zaj-{^KzK`SEAt8b>P-*^gMD87uP;RC#MhCtonwqy%JV2 zNR#FL(r3qY4VavEVosr5*xFocHY82P+HC2^0_-QS((U2|<##KYP_T(*Uddo<7Vcvc z%u1NujaO{OrWcBnW8J8m_X7H1_EABMWRrh|;)YUEY8&gB@84bMrt%(Vm+8DG> z2UlL`p;EUUrf+PIlFo+ct!)e)g$Yczb%e&fF8H&=663SxO~mQ`BQ~R_AN!%u_s|F*7L%sb4cA%j+;{}u(*3Qj%D7#)J+c%`1>USkA27Q zDmAv)S(lmD>anec2FxwngxP=X#8PQbrW)YF&N%jGdRzRMe!p4lW%v@7q8-H^?pCq~ z`Z>(F-4S*=;5EBtdtdP}w=WHN5I|#HBk0%aI9m5Gks3_fNvG&%P>14ubn}9I+AHJ~ zl})-xjcO}t@XVW3sdI;({qlsm@2aN*^4`*Jvp><*2H$DtK0oLXH3`~2SI6*uZ4ec$ zfj^Eqh#uA!%f{;CrnDW(ej6Z0-w6BqbV4UdXFT-j3a9lw;C{a+*6i+$C5LTcLF^DV z-X6O*I$+^8M>uP^K+nh>!S6ZD3ldKVuzvW*9cf#^+RaXco@x_ z3Pt5K*v$yUrEzo7a%3)Y{+f@@ISa)+U?n~kuS4Hg>tV1d9HS1zVD*$lr{xSvKb^;@x0f(Z^D1u7sD|GB=O}5@fTF^$ znA@)vOUG-o=mI^avNK>tosC%NjZVz}OAl7$Z_7?zXUuc@5cbJ%26GGwW_vus*>1aJ z7PxvZlj;<+DKp+O((k5X#uiXaok6P>gwx+OTj;m1Tj>cdl}^=5qq(|SG(0Jf`mquk zm3*9fnVzB<#n)*1tZJHY`XP;;{*>O)d_xC|wd1@iAL-ogU+MVmO*G?M3+?wv9kV^P zP%G6%{dGMYcW8%cE{3pA>3|U$=Ex}Qh@4xUuukM$7EH82#DgBtK4t~C`d&yoZ;jD@ zws`Wh58CZ@MAp&1sEv@K(@J;ji{jAqNx+8?hUOvt5wvdrhKCLUxi}aP(!F80U@WE# z8Heb*labti1}={ZfbIP`uq?1X9j##G;X1^p95>O&XXcZ3?~I4vgZoSei()>0c-HQU_E>{L}1OyD9Dz?!Y4Wo zj;@Jl$lHk?Bn_Ch3&-E>gWsZqFcLYI>&jA`dT|OJho3{~%$Dk6PDvMjP-IE$1YEs!P=WH zXE~o@SO?vGtZHNtGpqZ+-ul%kB6H?Yo$VWGamQF1wskx0-n^5ZGfAhPbhBy7hkRb?(>AUb}^pU|!I$Hl7tx5Yr2S+#2c?Q4fUyItH&Qk+d zFKOY&9c}z-uLtupZSgH#ADqxZoW*8@C!I{O|8^JH26uz@SqsE1w8BY~-snEZ8g(aZ z5T|d8n&I~N(7O)?q&uR+rM~cQaYvLrhoe~nhUPQOiy44N4nttyFdR%e8qckK(YkXo z23O3)*UbUgT{Rm%P7C1rCK!>!mZ0a`WiT#Sh47VO&^KKN>(&i8<`oScAr^aP#Ub7^ z5r;qTz>tmNJ~2+i80Sp*S?0j9`yn_#FTqCjV+b%kg%Rt{L7sjQQ^wwam+^gZ-#x*A zCm+zV<~J5jZNSa7~KTOo2RGs)DBH)0_jW(dQ8R*7%WabW@|Dz8mQ<;{>V~x|NRbRMLwNchQZld+7S0z0}yM zm{wgmLMIJANf)VKrKhy3s9%l94;$Cf`Gq&SPPlr|1qXFp@lzp#N3DR3?i9;U_Q#;@195t}7Y02XiMA)l<5%@0EPOW^ z_v)u(UQGb{9G{0X&*#H^$5M>%zZ~f%YtW{3jmVXSWB136a7mBE-9ynhJ|YfEmv~fR zE6SsHptdp`?%@92 z=cv5@9o7R|AXnF7_cL{v-;cITcH4|OS9WIfO;4u3)1E!-fQw2psf77m^0kZs#29iBwxAC=T=kQhHE?4^kd^J#qFBC4n< zrCS!Apug@_P~*|J=na>9)b!9(8eCRSr`ElreiuH`@tI#~%Y!ER{FWMKiLpJXt2PW~ z>*3`leW;lkVDx1}Offe``v<1*N-#%=Wfv56>4rY@Eikg62keZjv8BH)_8ql{(MSip z9pr>Nu`UR+a>e|&?#Of}c;A-d_Obpba`Z%9doNrUyfNR(2eI9Jv0T?57eyVV%pw2@ zE9Sv$%|b*pEXC{kMSqxTO*o@`riSRzW9lg#Z!6_;Q zBr^>uyVBv^CIfdi?8e*6d8od25Hk;zVDZ+YXybDVU$V~O>#VCt4Zev6mpkY(>k-!d zcniZl-%&qHo%NZi#Y!Ud*pXTTW_qjxD>m!N-Wm61(g-J3^i0OchyiTm{1MEA1hVT7 z*D&LvI2M|g!HkUa*o5IH*oKRr*sDBu`o%kuh6bfk*{d8n>_sl^wxo#mQk7Esn&Y&A zo}rG(=V`iq6Kqkg=?KjRa zv613IZ#kB$bEtg6#Mmh}dfN~rdV3?O;s4{fELM%fAFGKN`(d)ksm;cZkqgi< zIvDj&LZN;w46%LJ<3OJcD6@$~c49Q%oR7u1e(`8|6c5{7TXDfD3H@@C(W4*@%Qo!7 z#!(rlTeKS+f8}6TX&%NF6`;bo2(tG_@uKhq9*Z2y-l=CH&AW_ymuXZ?LoT zcSN}Sg$>`;*s^L zSu5V+wvBz>kj-As%VY0xinV$2o=u!JgB~zSq;+=LG_YVFJ>GDL`q`Gz5i8HoDJ2(Z z>`)cm(Z8Dd^u9|!PI*dO54@no!biH&yqWIr{hPLkd9T`U34R^cK#+khOlsTW_W%Rr zwHNF42PXKOWriL5I>KU?CHh|J3Hj#Uh?!vxveq880S-9!+8LUfQXJXnj&#T|pclbg zHvy45DBfu>JTn`FSfincdhd-73rFJErE$<5Jwenm{KZ*^(^0q}0QHhN2%8v;yWxxQ zc;s>D`(xV~-^f_&oOH$M?cn%mK`FBwm#r@~^?PQ0j3hoW;9 z+B9Zkzh5pUcF4yY(*k@~6heLSVeHH}h7#@^YNuVmiGoVJ$f`zo(=(iY^A>MMe8$R# zA8?=i2NU!qEGwW5lYVZ?p1m?)KU}&nnM-fx+hEV6=RMej<~W9K7Xbp6p54%-aS=CdJg`x+so+!&ef zMJ&WReQ;f8>`S+R?YkaW(62X2TkK)|)ET)$Tu~P1hJ!cVu{BwamZ=ID8Zn%I&>!zp zJaKaNKz!Fgyl3&z1sAy}^;iisnZ 
[GIT binary patch payload omitted: base85-encoded zlib delta data, not human-readable]
zbjIfk<^~4@$0bLEMPX-!Z4WL9D_U;~d(YMjqjx+Ow%mIuu&-YPqpd%LcDG-`h?L*L znYl{j)<=b|^j4+ozcs1fd~MouP?vs|=+l;2eaL?8cyia7L47r=ErsMJ?GCh?>WdkMjd4`m3o|Ms@&~)mQoJESCS#&L713kOFnSLDH zLVpHtrNsf;>3HT2dZ5@%HH-3Sto1%JbvaD);*L_mpyRam!#Vnvc9B*dP|)UkSIEw! zhURp+P50)%B<(Z5XyFSjR$dvwa-#>9tIEzPQi}#C#QGMBBPME!c+aG=y3WJOvhsDy z#sp4uAKY@*{p->q4{3RmN9b@3LB6lIAZzR=cs~+^v=!5YS<4m(xd&GYN4F#jU53bn zDPG$Id%jCJvwN>_sjfiyv*Co`q<>1#zj;AO8d)nWKT#*N6gCKp{ay+dQ{M_#Lq7;P zv%d)b<}HGw@lT;k@85#`tPbJUdnLMjPm^-H=#lA9eTwunCOZcc+U0CU86G{U)3#o8 zyV;7Khg*{!l%g8j{?y6_(%!Z~q|hHs_Ul||{^+5!eWNRBo^YqrS3Rh{*C^7y=SgR; zj;8WGW9j}ie+tVDqUAmz)ZJ$$#f_RppS#SVUQQ9z9m(3cwVKrSuA#NdlIUD( zDjB&-=ud})nzCio>xG;`RWhmg$a-q|w27`4Zlz8`c2Xy`Jj(l+Pf6MPsBCT_#a=l^ zdfSR=to$rRwOynHML8*tyG*-Z*OJ5720E7UkGyWEvO3QQR&afEd1>cYitks-6dm(+ zDLM`=QrP>ARp_>KSL|G7VeGj2eo}(}}nuNTwzCz&n`9fjyS|QFtBy>xa2#(p=f^))F!OD8C@b+b~ zFg@s^aHW5_aOmhQ!S!~%pfdfTFi!QUaBl2#VQumoA^hDJ;rPflA$j&M;o;W5!XK+n z^!~UiJ+;;$?=U@z=w?6(+=xzIHKwE57If}JZ#wkbh87gsk!NZ@(%n9Ql(r2dlS|H| zx^NgNRNSa%CDDsfUNqcwEP1^ePbQBi(w_iaOfN|TN+M@W((c2=%VMXbrIy0LZV>VK--*(EGv4hrq+(k3y ziy2JBkxNrHV048x&t8A&LXiK8mu@x{5ywo|ZY3?<=bcG%R~? zGsk^J!xRsPQHdT-9hDxs&%b+gDKHjd-OPoCtNn!!1@40W(}_Z=>2x9B#Vo<%>sq1k zhfG-EvPme@%@fW#?-#-)hlSL#5}|xml@Qf@LkK9Z7arL>6y_d$A~;lj5Imf}2`hfK z3WGg5gaG|cWVl*`W(4R^QU5Nq%fyH-{V<~#ryivJ&Vp{)TT$p|YqGMiC9?=e+JDlS z-XssD#eZCBv%Wipg>mZCGL|;kO`s#>OKsHw^w)4IUGfX0h+jd})F*^G%EQRTcs|Xr zilW58MRZUlmi|szMp`4|$!YFtG8&smjsugZH7$)U6iF#gSx%R(W>Ni-4HWS*hYA;L zp_nNWf(}x~rr!|DQvsOa4*Y-lyiHSmd&H}+)zFe^M0C)9E z5%dD(!spzL0>8UWcyS|Izq%+-IW<9?^xosF@lvCb}Zda^5Z z+TcNt8;J_{kEX8f-sIXami!h?q>+>SsGxfwiHfGt9mNcC7lqQXeKTq4vbmI#IiL1@ zSxD=xVo9%I33VB@oI+bylIo~9+TyjAcz7bsU6o41Z>7=a{bJ%Vax$t|PtxVtWFNJW za!+ohoIl%1eeW(x4%|y8i}q7M$04eg9HsbCCn?SC4CUUvK+CJj$k@4pzO-DXc$e!G zIOYab%Wl#0kM)%E@+rwZ-jbc^C+hg|n|7P1v6XQqY~j}>%(s7B`2pi*g?RCPgT=T^LNg zE)1bK4L6!o>`op-IXyizhWhOrM-zMdQU6U-=;p2<>J}DE^X`VwALrS0qAZ+-td682 z%Z0S-+d?{65k)%fi^;-f3H^AuoVFj2C$Ic9bWk&q`tlS~m8R0`>1p(Mh?pY!OK4HL zloGY&l;e;=n`f;jiC;Ep9^F6|d7Ef?<2F)k*iIKN?j+wX`Lx+&FFh8cXS5emU-x5F zQh%20R$ipQpCxoyD5p2_O1kY|LrM|#H26ycH90<_hP{ncc>6i+o&Abz9X?WUL>u)T zqQv%})?m^gL-urg6mxr=TArBxSFtR3onm9Ap~9kaTiN>7@1>KRdU{-nDD*ID`ROtK zQ*WX6+Hi1vw2+u779L*MD%?JqFRTqJ5XNr2D2y6cE*y8fCiu<1BU~_P5Q^V63i)&2 z3!aNxg`55V31M-XbhlcE`c&zWS)(Dzesv|wlD}PUdm_Y*$t*3dF8z^weMtb~Y6O~ovQbf-kwCwjz`hxzU7F9sg z^AC~H-NW=C<~Y%nljNRuhH{)s$TOvkIyqO7>C+qZ>)<`ode%TX=NjqppBEG=yrQQc zUeolopXmGPR=Q>UkDMNBvF>-fvPkC$7W*}&eCS!@@`{C16dPXjQ1q77l`V@rQ~Ksz zSC2B;HID^5%!K?VYr%QTRAJNeWx{C#iBP|Ai!i8UpKyCtp^%+=LU^ZFB3#&5A^g+0 zB~%qW5KjDkAvD*17II$y5au>^2qR{6qNHGT8f@EzG~XD}H*Hf|^306dJ5`?`_j_e`L(3Zm&wqbN7co2uT8rQAM~sOz*T zG_p02<`)D}OZYT$Rt=`Pd=~Y{4x>8{=F@e}C|dks5j`6lL;mWh6U(L4I%x$>c1omI zVM!G1mP{6{DYRmB8d+};k)2pVdDmpL|7_ymt-_O=$h7Ac>Ug?^?9SxU z%aWa>bw7{x=;UaeIYgn2$0&2^3A#M!JQaGE(8NP!lOUpFpHFGpyO-3>`3+@``#?G~Td3bJHP%T?Oyv%i+ZCO*i++)X)dmhbaEQQDCT!p@4xe)%!PZ;niM(8yuRd~HRU5Iqg z5*nxP7m})rh4-7z2@SGJp>6FQK{e*3(46s3$nD-Lc#cq__wCB`eUB=6Xlj$rP<`?n zWk?UzO{l-9J4LDXrvEhhP;Rmvy{+m?oBBG?_!kb;uf>VtUk;+|=AopPKb+hH+^Ec$ zXjS|u($e#y`+L1<`+sfQ)M5N_1NjDU72TW7<=rP zRUTu&%k{qfr?{<1Qh5LKR($UGQ&#^;T6VhC)BV)D`5yYGr5;N!=X#i~Y48|0sLjKA zn3*8%J3!cI$py;+zCvf)g@R+OSeU(LyYOZA9^uun0>Q5CoN(*S6+ztdrZ9N(U184P z=R)bv_rhVnKZ5!MRnn`}q}d8x62I?ElY1J_RaYbO{?U!zNB1Cp)tpxB?L{L#+fdFk zdwNwkfLsR*rs}F8*hNOruMH!~J5wP2af~E~IBDddc3zL8^NGHcs+deIZvtq_*Qu0n zWE$1!2a|7PD6Ky^iyC{)rD-`4RCZ_qg_bO&bwVuJe~zW*Ys;xu&?@3{*U*73$&_7^ zLUBW}zh4(o!C?ul)yg0|XHEasX42~i>*?0VY#Ly>i41hM)0%O4^s;6z?XTQVi7yUO z!heTpvF{1mpmu?BO)t_V*D@O2R!RPOSLxl?Tcp)=pNwN3)5cZ6CFvFIqR&K!l-aUO 
z9ky+d5!?A_t`XI^F-ex3#5u3|zO_V_A1)A!n|p@NZooLAQOhu=qi=&}f+~n4gje%}X{2 z8{2jXGsd4143 zz5g0Szq-tzpGhIq!(`0l8!XR(-E&VG|4=X zW?oCAK7Z5b^>rx~?T}MqO*$=b%%HmH^)$sKi&S=Pp;PB_sl#d;$%pMAo!~rDi724S z{|-{qfx|SNis&}>!eKSXNtSeiK4hGwV?HIc)w-OFS5(kOQ57A$QAY+QPiSW8b2`5M zHBG(!h92mBp&lW>$fSk86f#*{`u9P?2Kz1Jb+yi=onPWtq`yA|C&U`H=a2Gha8!^w5mNILPJ=;Fcg zwDIX=D!(|DEFVp$AFo1b_})2`{CpnW+`o`syj)CmXX0qzk+rmKcM@$*PNVE8Qi?y9 zO{XJss9UEkRHDC?I*#w8k1e}s&FVc=m$9Fc<`mGY>_e!_XDjO@`>90zfh;FR*KR4Nx#~E(|S!M7JpKO1$n8n znj6|Iw!1DnUZlrDcI&fSFAUgbo38A7a8I_Ui#Dw#+RRt8e<&vQxIE1FJT#y<9U9c}4ICL7N*>V8N@=SwzHg5OSx zJ-m-5cRfPE1CP;>F2!V-aFTw{Jxu|aB~^Scl5U)W4xYP2vmRAZ<&K*+gR!^%3Jf@OyPbpINoIb|BCJ&#FWR>=rdVK#z)hm9Gs{JoI{^mEyg8z_v z+h1DHq{Jr0C^H>j6}G%emGv%AV+otpS+CC;>_Vg#TVtWazRb~M!W%udcY0?w^lN7} zeM1+PTBFZ=(+t?p&4%pu9wT;hr7?R`ZOr!GH({OTnX!E1p6sWxB~$Y5#T<6_X6bF# z?A7}|Y;wIFlWO#19hdvF__>ZOUv&T*YB7X8v>wLh^&83d*bD5^zA?lhY&tPZT z(F87U^dX1D$@I!|1|>Vpr7ZbUI_#fJalK{qsBbo1e7})=p5@XN@lHzCDxj{&)53(~ z)c)x#m3fs>e0&x8X5XNE|Nkf}sE&4}+#}!K59n-{#}q!`DXHteptfgk=tSBl3hwoV zu1#&GeKu{BThUHa>^sO?^oQoh{-X_E%53m0Wwy7s3fn1HW9rw{SzEpqvk1{)7d`cu z?cC1nLW({c_s@XY{WD?(kGrxlt|m+x-;I?tc4IOfGd8tLcUFC=J1es>X9McZndZ@+ zOmBh(Tf5Sd{b5#Y^P1l5Yn3$%DYao^18tdZx*fB~?#s@u>&J>dIWmt816Y-GAoHso z%!bQ`ujnxoMF)h6g4aUJHC;NIX++NJ zY%s?TBh|%0H0RGk%BznjUY|<#zvOhTFq@tb|xDI9*& z6&)q^QLe(SJy2yLb#>;nTZ3&Jsm1E_wb|1VI?U8qmrdWL$I=#dW)%~=u#99-@b1e*%KccFg9CeJJAjp|IJ5hg2Q%*pLs)6Y5Vp2{7)$YV zWd~l3V0zr0jsEA(k~9RiHJq`PqdAk0^1HzFuqicrRJ$K-(W5FDMOp-YwIzan$B!ebQku=%YfBe7&5oZMl9T?EBiFY zm>q02W;wYg%yd>acBQi^Oa5ubDxY;{`}KP=>zh5(``WS_ zz3o`Pwf0Pn`m&a`zHG>jek}06{!C`-$SzMBz{Ji2*?;#2v1JPevn{nQ>{j_u7Sl0| zwOP2b;Q?-J>D!TPQ?@%xG$8gNcNDX^Glu2pdb3PTAGTo7Sk~Ta0{fdXk=gg2%yzd1 zupQs0v9Z6VvxU2ZS%pz3J6sdSUay$X&Tm=(3>LABvSsXD&(%!LN6dyd6|m|j;pHQ% zl1t7wKKB?nN14WGJ5fg11UeQwoBHb{()@2?ax>1N&vsksV|*TsPA?|UTW6?J?>xPL zexT92nm*3FNhj-XQMzqCZA*SYLeOK%UiO;CP5nqWLcfqS{u_;Z*G6oAJGD*yOU>5G zthG>?9s1XaMXXn0vwNtqh@KkETU(2{25PfQXLQ)g2YO7!wF`62?ZVPR_1UllL$>^Y z5&M$~EIxN-nF+@1RfY)*HZo;*-kGwCqs-W`Y2Df2bv@YdrRJ>eUr+X7ss)=c*oxil zW5be0^kJK`?AZ4fd)B+6FWWSxA6sGGpOw}0XR)UoSl~b>hW{vr-G4TK&5jz(!Y{fY ze}}Nv)kB%`{$b3*-Idk0yRkJ3+*!^O#wPl3R=d@c-3b`O3JQJL?1FLZxiEn>51+*B z=K8X)rhaVo_{nVf%_;2s_UY{F{Tb|(S_qpIK9l`AJcrfF=d*_+7P8tQF>I~IDn{Xn zY<)orbNN`n>RN`ESDiOgXe|6Cc#U%;=K~?Mpe2&}VYeM?n?<{)=2Jk@9y&Mi2)+4q zg5uTB(bOL0G^l$u+3dMZbH3D3L+5*>cdUUX2#+Xl-&^Jy>ZcHg@PU{nAn-!4%26u ziw#%|8L}zGhRo-i5qr0!D;uO{%(fmdW*LJ_*p4z2_F{WCW>9C!yk%x=hDHxo{KcGo zZtTe>ezss82d$W8ZEyBH%bKmsvSCx!fUAP+*rPmqR(!56^X=Y``J47<`uiN%%CnB_ zm+=6$pkn}w+BuNzDRX8ryP<5#(BaH#yBk}(XatM1a%YhP84DW6nR>b>^Ufc`mJao1 z7b<*M#Nu%*BYy%*Y@Nu?U-o6GEB#r`{s6Z1;S?5~7sy@`>g9Y0n{ay;yLN0Y`*A#y z{kXiC^)Ot)0-Y0B=EY>@F<}?GGS8y?L`)xHyM+tgoH>J@jb2KxK8VOhIfwQh&ZQrT zd+2NTqcp+mEcLN3q30vY$WQAMy}egMn-2a*Cgbl>ku6PFmrtp)GvOL+ z%z>g7_c?KXnd>@`*y{M?O5KGwN5l< zTbhlT$6XVqxw;$6^)O==THRUCq#n$2g*of9p(oR}vtR|gEg4Jb#lmG)OxD|)HIKJp zpYr>#MujbVJl~!@Uep&{)sLxs>(BlLI5E3-PVBF#Gc%4G!n$b@pnk7T}N-D z8*poXNdB81Q*h)H8hyTzs&+o72T`xcyZjA#`oANSO-)qFKhWumpXj!vnJ##Jr%zdJ zw0KoJ{qyahg_(aS;K^V5s|USwqY`r(tIVD}Rc6^WotW(>6?UdXm6esLvyeIsmSL^M zl#;brk6zmBZN4^pXsE+>4%1~bD)rcVvo361a~F31us+N3He{!F8?yON4A}y4S5_Ql z!V>p(1814Cjs3c_Ik7!hRE#-$vbZN35@EqY3@n*QVa0}vty!L_EjzTxjyXlxvyC76 zvY-q7Syj9v6aEcgs>cSg&EUD*MMIcN&!Mc}-C=CWKUa3iWdtkTI+FDq=)pED7g%KU zD7GVPH0#pYi|sG)X3j<9Sdp49Yf78U(yV4N_uzSKLR$n&Ze7nL*x%zP-WIaXtCEsN zSGrPUO{ob^bZwIhb!{I`4O84{%KOpu#&JA-FP%(T>Ved4axjTs&8F3Jqe*4UGD>q< zNsb@lDdyK&>Nk5GSw$pKmklX&M^8#NKd|2!rc>X%Ogb;mqV&7a^Gb3lXv}t6nz)Um1}bCDA3O6ZVZ8C^Vbi8LlwQS8=gYP))kv?kTk*Cn?}<$67-${J{v>O)$a`iMSW 
zdqVfWHIfJR8vn_!=!p3{x-qhelJq~&l`S91<=tm`f4P~?PHdq!U4BuW^KTk==r3J= zs>BX;sIa(bHP&5KoyAVjU{lPrSfA%Q%(_mGjsK<3o{cbKh2>q@M(=Jc+}Mo0F)(M| zO&08?jTP%P-I^VnWW$23?bv}mec69vNA}>sKz2A~2rHS)*`s@2>~!!rCW{s`%C;<@ zx$dN(`lU|rocLX+|Ivx`6ZOcyzZnUedQsIF8`7L(Po1?K>C*f`G%b4wg+>jhziZsc zHr1UR^oSxgM$sJ`P6b;%scw}Q?f*QMoI6jX-vtxNHEj|#75mdnVJeN(52VRFh&qX; zQ{9*u^!?rpY6=LUCAOh7CO(uJR?ejRQ^P1jErNVDN74n2DEb+@2p)!L3jPsGFHS6> z$&#gHnYx0u*sY?&4hghGSVN6viPS43iGIyXp>*#wIyzTGCc`B3VWpI`HRa?zE}c%T z%pl{>nH0V-i{`dwQD|HaW&GGkg9dD+K1SOq_|SH$&D}|5R(UjU$XhgSL3k@=?kWPa=sw7VztAMiha=>ys8wvdOb5{q1>&HDNovVs44FnIxRvRc73 zclIrR<3H3x>F<1@J7o!rv_iCt=!!$sXX$5U-_8DPUYv*EXw<5bS=-Z)Goh1U9CKnE0z0w{-o$- z@=Re>`bZJ|{jS3GRIQ@wY=xq9OQ|BZ<$~hwz0(TY_TviOfkzcH*5xZEY|c^4?~$eW z?3}6Cd|#}nEfpz3Ly{D>GnOmvThCU+SI$t(5dsxi@d1j2zY`SOl}0J9d%7x~%^jjJ zH0iH+tFTunZM0E*xoxG0JZ!00^c?%scoW6NnMR5cD*B2Ihcy(Rd#EXTcquDZ!dXr^kg8)4h}-)mYdLkU=D5c6qJy01$d@m zDn=K>@st@7{2;9t{=Ciw^%vvH8Bo`YGR7cBe z2x;LV00lu1{dx>64j%=j14qEyjl1GYWp;_4rAHODJBtM0|z zm%*pGk4rRM-J1;VWW`3V-|=;vHJ!lOEe+%H4g_!!xB7DD2lwR6Q>?h9B?erF?N5tu zj?)%DsNGV$cjRp^sOB> zzioq)F1Nxh54FIaF9^J=@e5q){~5*?HNz73CV1G;2-=8x@R@xdPSsXJ^SqmIyWlDu zyl@eQ-aZdc#-4*^?aSd!K?!ViDu#t$PQiin6L99nVK_YI0EBPLhlb7jV0h_nSh^?& z94dCg3CVUKRNMIKpA5KfycI4S*bLhYH$h-{D!hNW0g!$zq%K$k$={+u-mnU?zAS@L z0~bNT_IdDHHxssCB|P(;1am4Tz+<0aaNIM7$1DJLwHN^n1%99%>I*}(1EI??5*7%( zVW)d@U|0*|)|E!$yzSvR`)vqF1iQB{xHg@e#Wu{GFPQ+2~+=^s^rh=}Y5~)_w5#F|K%2l{3E7 z&l$h!(;a8Ebi^Zl?C|jfYrG??H5R_Jz!m3A@T#6hIJXOdYukK=rD-1^f6qH`di@&K zWj=t|syYaHN1ay3l4s(f(gY)|W@Y|OUUmov)zQ3s9DZ^vcE*s{2*a4wOGNHRcCb;-yKyhvw zw2$5ZwSCt@-q{2&632n>+8DTaKMDlyE1^Z!a%iJi3Tal0z#%vSMDBB8$+)Qyzhe^2 z92N@3j$`5B`_T}3a3pl3N5B@fAIzLJ1jgLy3wN|E#J8nkuay`sTlE3X&>e7t3rySD z4aO0jz*EryE^loOmi4A^&71&c+-EMIYvSfQG;oPe^<0mhH@S=VRGjhjHC%y{FSnpg ze{OpxckWEfYsE|3ZYe^Mj%?6861jL7mJE$Pj%}Nx@b~DMc;et7yu*Gl{@BRi>0Kqb z;+Pk9mU`e_D_yZyZFjt|-X2r4?QoA?Hn^&fHGWaw21maz#a6PGc!6aLyvp4Odzk-# znpGdbxXoL59@zxy*Dpc(z5zC8K8Fqup2Ac?Jv`}B3pJhY!C1H3;4$SUSX5krk?xnE zdr<{s3@wKp&oGRyDuPM%$KjI2F^Fw*1nyK8K>6f6!1ijGYo7z#y>>$28h+KW4=M;> zngJC_>EJebGi0CN1PvckAhjeJHqT1}`>X^QT^$Ee_Hoc}WDLw|7X`Z`BVpQ`MPMPD z2g6>@gp$hXFmu5)2((nf^^W1d9t?szHe*1o9tA%NhruoFAV?q34{S|YC<&H=yh9(b zlDfm!&CW1i)dhx_b%GKf;~revmXoJPy!a~rD(H$PPI!;E`<=ky3uAC!G!1Wc8;j%c z09-GnvFMu=&sxw250B<~#m^Zh7<9ooEjr>}*Q~KwfhBet-5M`=BfvM`nqY6T1vc(z zfYZ190ROqYP^{sfesEd;dlFev;rFz~>B$gInQ?-u)DqNy6@kKP5@ z@3(_;b2_LFZG>OWtc7QlNgxhPf(3zzaJ765RC%s}qb+0M&YNhcJ{bjlUoC~>9~Q#& z&NIPug%U<=o(#fulVDSea9A2J9(u6BU^Zbiggp0$@lC!^8$1N68wP;CS$}Z2tbp(4 zB*Y|$;pyByu;8dCTxi`BZjb2#pX+Tw{oWcbR9M1|{jH#9TQhLqK)@55kGwp4!o4_D z%ROy>lY6=46c=BAkQ>)Ik8}AJ%ViS*+`Jjh#f$x+`0l>l?7{m!$n)A$?58Zm$5yY# z&mK+3UWbO@14kKrGgyX~dG^A&e(rcvTo?SZvjZ*(?SM}u+2ZRi?Qp2T5*M8`$MLKg z?vZ1HJ548WPRuvRoBtX5k86g!p>M%p@++w5*9e~a=WyrKGq___3w_4lh6?H?xGua7 zHf~p8&x*@X`}sV4zfl1bv&-PKTPZx+h+*NlVmLhQ6m+&Y3JN<7^yFoiYi>SxZrlgY zpY4H)bvuFB&&#aND#)nZ3O>g+!$O}lFj$uYMFSHdZ(sr(-Vp`L@D*_J;xdSMzXV2~ zSqO(GFMz6`IZ*#%2Ec*I(8C}c%x;IlV59M{P52A6$Qc8E4a2~k@Pm+SABf*P5Vl7R zfRmp6VbBu<3n>ax+k3(Xwi_7OcLn(B1U=8&L)2YcK&LIi>_uxBbXou-2AP1{svn#* z=rd<~>n$g%f5si1sOQcezs8we%jNv!aa`KOp}pv|=Z$l4pUhNha8~-v%=eq{H}Eo1uSfDx80^0q$n3gLqjYRQFy3 z+T0jOTC)P|hD5+r)okb*Jqz|T(;<1L5+3iK4EHL-;Gx?D@V)#CoR}~crrV5!%l-Xf zmDO+r#ykTaq-r&E@9S-d74tftqI62r3T31;?f_Ga8DQE@n z$5=pEZ3`GO`YWd~f5)whd(D-Oy}?O(p5Y$uz}%1QByO`}J{P^eA9sDZ8Rs_p7HzA! 
zDG%*`hz;9211($o9?ug3HtV($FCIAsf2bIRiMtnkyT78pjF;H5oVU`a0{93E(hJvJC%@2KzaZQmz&$i9I-%bTD8zXa6t33xZu z!PT_;&=6D&&xzY$@}mmer&hx2$yb1FaRFv$RKP9Aa#*((L(qvL@ECgxtoIdyN%>(2 zeW!uS>HFbhXg=6xfE4A{G*r(Gcdh8Z>K{!>7yz5T=cQnGsXDyY z_I65=Q{r~4d2RtKbqztw`hLdq1|7l=qt@bk4GZzZhA^zU#A1U5QcOnn!Ur2%u>OW4 z-XCg@Z9+~| z>D6}#^ZyK|nPvzKcmuiJUqMoK1E}UagHYxnjC*n)nmg1$seLtM488?OaTB_r8xWXz z9n=r6z|7AVfS;d%;Wv4G|GgCU4lRMS?HrtUJq>qPoq%tT3t`pwL(tk>19)yebT!O_ z>h`%%Gk7ngM(>7Mx!GW5vlH&JJHV=S7CaxY4K&@hz{AlgaLzUv4!l|m&OegiZo4Fy zy*dG!4dUVS{AdX19R&t=S3(c81m1mG2=9_-L)Q7ppnWrm_g5!Ck~jo9O&SMP?g4P` z>oCaK*B6FFF>tw13=?klfR(p!SC5C1=9U;OxY`IMp^CH!P3G zgRhRm8|U}OXGbA?A-E3~FLcFozIVsSvI|~qMet~+hy>%V@g028pdI?VH&%?qy6(B1<3;DmC0X-kj*4I1c@$90UEcBapY^FkCuz0A4Edp?u#yIPbO>dVkpsw_EIn_M>+KUdH>KqcY*Z zvkY*{Nr&%yHiM+&CU|>p1MH4o3(`{w(5qiOST)4J$%oNUmJkJ<;+MhYw+q4J@LX`8 zGZTU`Cxc>V5Omrd0Hef1V8fh#V9*Xh%nuT__me{2OBXON>;hpDdpNt$3O2QE1ye?v zz@;T0xh}Wga5tuwbA1goT(Qq~u3c>sH_>4#XAszvdp5b0i8@)szRN8`PSZP-kh%5v zLs~iZGd+rj-ce!yb1ArWyI8!UeH2c=upGxl&BRGfQ?PDh7(QA$4*Rm1lI^i$Q_P9Z4gFE%L!jlhL;;PGS z@$GMI@Y|Huc*$0C>^#f_Hwk(C$_?9}I_zUFB`3R0?AHZ#&y zFphf!-X%PrUAzlX$#)=BtA{5=H{ji!tI&SP6$s4JLGas)@F}+f+zyw)q?e_zU{Vo$ z{;Gvt{f_dnvpjIPuorAKd%!zmH*|Wt6V`ms0zJ=5$7EZ;?o%pUKAH^kF0Fyo%vjiW zXa#(}y%Y%31+bl)1ywGg&%Uwd;D?Cp&@ewmM4 zTdh?17_=|x(d`3nk#`ff8lb@f=l!_Hpj_-wvJ)3KrsJf!oAIEzDR|-Z^|*FuBHq0* z1`nLH1m_4M@GqHjaLt{W_`#bgxNT4nE_EJ*y}l2}URlHNZQK{T#UdP?$KW4KPh67i zjxDZs$DiyR@s@}7_=}YtF0-)4BTU-jyFnJX$kG%)_-uryUH<`#W8OpLyO%J2-!phF ztcPm<2e3$bA0|Jk2BGK<%+I?4V=r9>i`)xPEG>oQD^J5=`3ZQOdIa>d3&8GqE*$05 zpz+!P((@TG<4F?4@G-MJaZ6!b(`-;XguxPWD2!Ss18qky80^#yJX1PA-;u4s+Rzm0 zmVe;{t8Z~5zntRs$LDgVinel>`y_M0$}nzsb1!bT_v7NJ?y6#)@nYtUR>CeQT*+>0 zwU>Rj>H_PN@rJd*M(Bo@C9(|ON@>(n}O2h|J;_xqVtMU6KOYsBQ0^D)m3_LI(9FJ?5fQz4v$4_g5ap8_J zcw5kLytc3}&QhpJtE8x3I%|&$Y)#ms;ZL?-uymd{dl|YKYBr1fC)J z0%jra;TrV<_U(8G%bIIoyY(F?G`k58L#{%{+h@V7bt&B7v+NJR5h&=rAD-)SA$9RC zFs;u5?Zj;WpEg12*yKO3((z&>tU%NqAILHo_R zqZwU#Bb#@FP~M%<$SZ6%N(_lY9>e31*Pb*qY4%p6w#-I@yY53TmvX3i>>2cJaWy(H z`W+fI-B7XigPB6~sjZ^7p@U*TD|baH-B00hV1$C55~&b+mMB(RbS&{oxQCrDoWm<` z72+%3_F;?NJMrp-tvDhg6`NgNkH04-;S;WF@O8@-_|lFA`0ebu_~EwMc+yoR9(sQQ z9v>cp^Y)L$)_Vf*L)XFh;0XmD5-i87*Y(6xs$KB65svu7^^O>a+TfBe?J(}p8tXiG zEXs_rv%^PtHtRJkzx)c`I6Q;4+FA%}x(gRy--N1wtH8|C!GlvJ;6LyXm_Nvc(>{B_ z-fItB-n1LG|HuXlBNgNeGN2Wc24@o1!=ku&I9eYA{ft+^_{8~ev+FNVJfuHZ=eR)O zEk|&DCx9TsCT`BNJKSR7MQ)cw!-*cKxOJupT&E@}*PS)td{$j5nsuas@$|7{9$*6t~raMujcA3CDgR!->k=01Eq;288HVi6iET8W8QFe8_l&ZK;tDR z(bBXt=#uCzx-zO3eQ5O@oj?B(ZCYfiaQe_%QIgR?5tG+lG37xY#RQc?;mp^W>G2Z9 z$}L(&q`Xau!{J-_)x48 z1y~+#fLla-g_T!6LzkKFVVJT34i-L!>w$H!;6*jmth@$WmtTP^i8|QywG0MIi(q2V zQ5YdFfTMT!!sRG6bhSx`HG|Wjzeh5h^NfYmie(V+bP;5+(;>Df42mNHVbuzMh}bd& z!n@Mo73T>LQe5Hn%=WPJ&Sh?R6Xx=J9pmDLWN~NJtGOJrV6I_-HFr;UzSz+>ws=9q z&{HSFDEayxcI=J}1zX=KialkqovmJSk=#&IDi%ttx zVbhxn@V7@(u^vyxH~WQPkBD(NCdLnsdNB~YX34SJNpJl5mIuCkq$_SU!x6uZ>x{c< zh4_Y{1zwXNz~cReIHlbiXnXl7tm#?{lNOIAbM&*g}fPO{t^ch{)@$J0W%!QlTAGQ0~utCM_yOO7D%rRr+I^71v zzwt(g_Y6Q*!~9UMZK3FbbSlcMTZ6{kSdU81Y(z<2ccID`8st8v6kYYYgw(I@A>GX9 zsA5Mms`4ciZ!1j{&9;1<#|)u@{K3~p&2m+=+B85Rxj$L4blNt>i5(6l?e`Snt*dro zVR{C3nw*L&BV#dH7Ksm!osPkH3O?W+j0apDhN<^M@R0Wk%v6c+mkVxq|1u|hr;h`+ zS!IiTItp>_;TAY}m;sKn{RHu36YO$rgl(IiK%n{#v@N{})?LrRM&3t%Pjkq;xWbz= zU0~p%_AtHC3f@f z>89cf2Q7*hPPoG?s~#$^ycs2bHTkmqTfLOcS?0@HZXeB-o}IzY^v_{$UoK#c<^wxo za}8T-_kj&M+74||yP(f?Vzjk$e>D7n4{~(~LXyV0Xtv=}blp4=jmb_$9a^TNoO?S_ z*!{hzX6gwv)AkCAGrx)ESKmg9V(QTz>LYqT$564Q*+j8F-CWTsrH!JNbyKYFCRNnd z_$j7Tu2+1S=2UXeCI@#*+JQIiO2(dp;&Gpx)p%9GT>MMocx>Q&J%3)p}ST5)aySG2qm=4E)y52T9B{7?$b7@oq8t;C~g3Exn6&_J53sUQNiS{tIex 
z$w1L*0G}N@*;)}Ztg|BD+e6XtLZn#re6fO>+NtFB;!ND;P7-!K9fi-=&Bg_yY548t zaahoK7^V-gc*#pKzGKh>7ln7l%%YAsd$%=Son(e{JX&Dl*jwl-d<u<0?o$pNVhZz>4&UCTksaN%VZl;ZP|(FF8k1qiag{ntr)d&Dnb1QSD?n= zN_6pT6?!w{A+n}lqT9`1QF5q}Vzr~OVta^*!uO1gLNm34V*OBiMZ^q+qW|OeB{w5B z;+oIPv3K7AxYB^Z_g9K>&ka4WxsM|jFzs>UbaR|B%n)~<{TYsV*Ft}XYfy+T!o{wq zp!sYr{0QF!lX8=_Og>w_S*$PdCVDqw`06gu4YhMI?N5P#hfj?U`{dvyYM zJK6}^=znnEYoBqo=kIe(=TC7x?T>PK#}9DM{nNSDjvKiLfvdUiW#hOmO+MV=-XczW zxf8ebfskXry(_*Kf3P@iwsEog?2@7nYFI)Qoal|lT1(I|FBw`>AV)`@^+$bUha>I!Ff=J>7CKx!A8kFo9Ibeo zh;kxQk#Bq&vKpO^Hgw29mxg4c^Mi6x&9;2h=IKGyeZVo~6M6zQm!Cq$L@_$$e-6zv zxQ?bYzC|tGeL*ixS}0B=w=K~t;&|WN5AW1E;f}-1@Wc)TzAt+Xn^)b0nf=PacUTeZ zVY49U`&RhyG6n9LCc>k$%i!MHnGk(`JXAdy4o4CcU>(y3R4QjMnP>&hqXe*KuMvES z`o?`5^MPyirk0x&b(ib-@fNqP?kab%3UgPk9Or1uom_512FJABz$NaA=T@9s%C*lA z;T)`exzh%o-2Alf#rb8|ifiwlF7DDdx_I#cWpPkDs<@@Dw8%w57ilNj(gPew`A5%K zd1Z5sJS*ygJYj&4y{h$OQP;ujv%6DSe}k2*dTcCPHZqxoq%EwUrYoscZsd9sbYH$zQwNFR>L;rJYaw1JZGyfy=N^pd}mExwm?JD&CrE(OJv<(jf&Db zqwLv^DDt{9y4BPZy|*M$#lU_@ZZ-(@>^ln8SA?Lcu``j2@k(TWHx^lDB%ryEQ&C=U z8XCJ}3%ayu8!F1rL`~dIRNiYhS}{Hk$-@t!52`X0?krIB+R?gXcH{=$R#jrp8SZ%0 zNC!N&#tJXaH^lqO>fzG0S_qfkf(fgSL2m!UQ1dbe?gwN-gM1r2tJ(-HqvBxk{$=3g zy8v!an+tRMOoB7=AjlMr1S_XOaQ=V@T#9?Z*!1qu>KPx;es2To_qK*l9s=-eY5|9C zf8#3WzU6kFe#Qm7yTOgRd6nB^P|DeFFXGJGoZyV76mq$C1zh%zJ)FY=5hEu zxiV?D{8arF`PPt+a)h3B<-%huH2JGse^BMud#c?X!w6#9KYsmRemrXUhye!tNPedR zL1bk8{*?dL4?F(xMdJ2~T7@8+kcyp@B-ikmaWNCIxe zOX9!Wk~9mOC8GX!rFGmOnQp~7nP6&zOjSvc#AG??JUR;z`zht(ixly(HSKWEjSguuj7~{kN-G~H(3+0Lw65_H9Wln35svp{ z2+t_SAzs6TkcRS<2QKoEj#+X-QT8v5$v*=tYSYGB-8NGk5Ozr->NrlCvUrcQ?v$IX zZn3{i1Mg(SpZeM&ab!F#II)G+EzG91H}hzREtlxJ;RcLSf*9TABqp9d#1K){jBa6Xc?7wZ$K&R| zJSOqg#l4h$Ec)n2Z1Rp6<1LZMpM~;q=6H)j6r7_XXvJ^Fc?Ch=l z+)Es}y+j;;ajhieRhdL{Vx5%8t(K|=N@c>%sj`SXjjS%*iX?6fAp=a;kj;(TNX@G} zQn>ausUm-njR&l#kbRx0l+Q}aq5FDDIeaS>|FDX3>tRA`JG;*SxnTz$-Un$N$iu-FW+0W$X@Kz zeXY2zb6ZJ(Y?Q>O(om{o$4VowmPwm^{AGeC=Vd~TF^@@CG9@64RL_efeJXj`W&4CA z4jE9IiguLVswt}>!6DNE_+LTY!6Bi)K}$i_NM5>1t4S@LtT z(MwE;POPSg)Kp4!<^&b;q>fUnKT!^Q9C^D-Oe>EE(rU#5I&yRr?elFnU3Tmut!)32 zE<0e(>y=TA-aVE{IqNJZ{MG-`nEZ;1zH1vVW#e0sP%>Yv8C)g~80RHPDV-!yP24Nd z&M}q>HFBvQWl1B8E=Y-aon-MHrLwv{qh#@?@5vfpD@a1Il8h8)kb*1aWW30nDmxQI zDcdcfM8Bj{Arp>Mf_-l(H$_)kcUMB|awpQd73=7__L+2Z>ONW*cAZXn@S0W?n=+a! zq5MCVy7T(vr;h&>7oDuuOaIFNk&iswTa{Gq9hqSv(fR2lDFIH>vMD2^f)gL4jkg_T z#DL*4ZOsPWK0YgpoLnyxTxm;+uJzXQ(h_j~xWW6bepkb7Q^+ZbG5>Kft4^g^CJ*AoY zm?FORrWNUf5_|#8Kcsz855nciIn?;ef%_2?0J8$KT zt76sbL6XQ>XLx>jE>TK`N!|R`ND2EKX~5H#va-BIGX4HjGM_UaWU8drq=OqthD?}G zM%3*kBZ@EZzI`p(xXhRmF7~8|w!V}$W+FwH&Zh#FCQ=${J|$e!j}EEO(1i9XE!g^= z*E1cMl<*}?WUv`8yLi9x=SC17dM`nRQsi*PLF{AjR@^vnfJ9VS!`sAW(uf0trK*S& zsX%{9N;tli%I&(!bdUUGK8NdM%D^_HGQBey`N)H8bfZXB-cmBeVIwJexR2BhDk1gm zZFn7TPnGTRrIejlQKGU8O8=18{df|sI+;!rUyEt|^?P*4!FEjK);c$c8#huCVU3YA!mPbixyDUOObeAp$fKnpg|DTJWPcgq zGgnrpN|y=BuE`=g)ywqm&b&SeBOQ_!k@A4eWXRAeGG%@vshZf5YK)Lk+Py=m$Q!FD zb=%F9@@yfc+kBnUY_*}4Z+g-4=No9z!*aUJ`T_0s&Vv!9?PTI@y7B(Wzoso5nUvip zqE)GP%JY?C-KDo;rQKplzi4!TXk&$&5hLaJ=ybc+XLyBINlA4{b$+}qvRKT<@6fs#wgED9x3F zlwj~>N^{~V6;UkYV=CR~<{}?jS1^v2*JRLo^OLj)b!0-Wh!{cF#Z2?lCZ@4&2=6n# z``csEJlU_0Xyzu7aL#3K{r;6=)euTj*Evt(@Qsy<#!i!lpj0X0e^pvXeUXMp9b|4E ztjz7uESV}oCsW_GB30A-lFiqmNi|Wx$Lf!g8mXR)fAW?TxL8q*FYGA7Q%))S4X5h5 z1W^IPBuX8np+rl{D8cIclveega+}qbR%+a7-3=c)qSahFz%-tYxOa*cB-k>lJetv7 z9>Ro}EM|ntb4&mp%FC`Fe?LsF3cTezE_xI5g2lwUAW5UsR*Al_LgEuKO4|5styCF! 
zL0Y!WT2{8Qk1W1=v`m>EB_k9!WsN}wy!;u+>w_p#wQ&=V#eULlz-6-T&I7Xfk`*OP zcch40Jt?IzMa54TMn&F@p#o~tDdNyUinw%lo)H8Ae`vSUKcB=)e?s>&Qj6t9a7!bb~2)chfLLBm`rGxE(GuT!^vS=EQ$r?%-m`{}rc}I!N+wnf1C#{<`l`fkSO_wd& zMhlJi(c15)X;DK3T{iFm-JJN8jtKohr%VZc@{{A15Kpra3_jr z>_Mr_22qVIr%-CwO%zeIi&AGFp+xY6s#96es&95YCKRnU9ZlC=TS*HI`T58(48Vptgm-S%0RKGGFB2#*GL5YJ4t=&CQ1XA9+2uh9!TqY zg~;UdQ)GgMv$C=+pJc?X4!rK>?OWrSWXh)1q|fxNq^?^o8F06Xw{<&HKKB%q0QvJa z?g~mcK8v4wKS?z&y-k%(G2>;I6)oJ`k>{1}v~b5rT2PTiE5DzmQ)axOo4W~^kO^Ix zkOj^R;i_QD&J1Dn4;L|VWdaj_aTB8|ImigMzF>5zz2#-^XUWyt!#pOB|5lhd%x=}E ztg2q5DQxm?PJAKOZC)S|&Uq!#Uh|b|m1$CKyF*gd^mkI#E_YeV^WieWZvn4g;$^D3 zjWX?ctt|4~C7FJ=fK;v;LyCqbklN+P`8Y`>Deqy(+qPB|k>NmTK8&H;4nQvCu#R+cqD=Js`-jF^=olgIG#4AtrAWq4yHq1ZJ{-K4IOg7jMl!a=jVRk&;emaOx>McOu*z3ygo@~bWPhC)$Q}Vy){fu zf_G%`dX(<#=9x4yj`F)e0VuWj`_b3o#kX zjC6I|o-PtiDixQ7?vQG{;`w=ES5n(7*_Q1zE~M^P@>sc zFKv9r$27w)^7(n!DV0+L{jR~BuSxr%0|iS>ni475|8RPEo1ij2#r zROA^-ztfhFRr>NeB$ZZeJ4zGY*XcT6-u`;kn~zIIF@m>zOw#dJJ^yq2t0|yc8zEM? zNTd-14@#AT@<`EGK2~O%Ny%$&Q@TrSXrD#BXo1NvTIUl-M|3(%3kDL5Jluwc`G_jmZmBR3IEi|aaFkv5LZkqHi3^0_Q+DdOEaO4&Y>iZDAv2__J6d7+998R5Z%$j>oE?Jqwu`7gjE`|cyxkcKlN9~Tfen~s#$wXc-vEW$}aN-C)f z8A3Ts*-5#*`$Vad2Gc3QakSg|dRp0wkGE7k<@L%>9sgHCa%$)eH-|lwy$K(IxY<5U zA}Hx474CQ_Rc-mgm*TGE^YC){+Pg+R|7ba1uVTR0(eq*ltUK3PB6!j$ty`Qd)9CfGfQ5Vcn7$Ue z(LO1`(}+}hOEoy>x9KDrP_9wQa@@($ItYnMIFOvec?P>WxbZq zE&7G=>AQtdFI6+T^^Y0iYT{2!{(oSipa0xd^(jiE^3sZZEYC{<%3Wk`D?(+u#15pe zhR+>#IYR0Rx>4F*<0%5);d708(ILIVX?;Tqtxi2c>q468h;RLwX74Pws{z-Bj}M;u6s-gc!#R!b=j|5s0S&k5S$ z%s85Ox`M7dyO!20DWvrk*Jy`DZ5d^sK8#zzQbu=mFJJ3$o)OL<_*|l&G06W6B*8~$ zH~mGaw{rY;9+M>!<5k4WA}Pvqmin|c3Z2d&(3o7U_3Jg-kqjIdV_ zqisKf(J3}FZhm=82s+98@2?nbkB)qfgz_gQ{{c*#=C$k7`1*y&q4=P;obnKBQf5eW z9_^%pSBqt3-Ji%(G?tXl$jOvy;|VGt*Ok@`RnV#qOK4RnUyl{olF>bF$LK5^nG|1= zNog0&$GB1$hwjH2wa;@VAfu%`l>%?g);zIj)KN) zs1qqSen!>_JSm-pme1cdp&Q>1r9-aFq_vB8(z*es=*Xqt`Mlw7Oyp>a(Kn1`bhFm+ zvMQGmrQBuWBU{V0t)2MwR(@jgk97Wz9BY@K6-BP!Dc1Sjln9*-NzwjgWWeTYWMdw| z=NAv91%b~fwWlYoRp!z}ZZWN{y~5kR=8Vq)Cnj?9Xhx@=&%~eJ$)pTF#?&2Y zB^NyD!Ph#3{^XPY2oV3&R+CvP=Bws+I&OA*AP|82uCrQVrWWf+i#^*L(WqA-d*_mhh?;~^dPNq zxkyLM7BGT+u8i_QAkQZY7~xtzPc<4C&09}ipUjf~_pRHuboc+nb=&^?=rQj6PkQ{i zM&|D;&>0!XUj3xUKhne2`uCM{|7Wdp_}|AbAE}@8_{aC-R@hH^{3AWIYyKzF<~$!Nj-DM+?s8p>m5qV)N*QRTOJXqxF_6fk-j(hQ73 zOC6)p28S55(l-{}yB3EQr>;TI4<;h{=OpwdWgR*(JsGW;oPumxq@m*jwxBa}(^0Ne zg|ZWOqqfO=(S4_V=-l2sTZkH=IJ9rkqA$r#Uq8cnQj_ zID=j`o=24)rdpT#4({LF(V9d-V;%m zwc+TqacJHPK?(S?!aC^hdK$~$x!wLMsguI{~!mf^eT zyU8PTa@S*&QT`Hu8N*{ z9t!U(B88hftvEk)fMQ$aFhz%hqZQ{mO;)^EIa85+EJE>N%_2pw$mNQunm9$IMVg}D z`~!-B4#yP}?p;!NH$G7uciY8&3p~V*n_Iz#t8TI*-!`x%vzqz;tTjMOMjIot)&gBQ z*9LWM79!JUHpumr9jakFqh3@ObWYL@d6afTo%g#SucIEw{HzzE%S7n(Q88K;$e@u^ z`l5v~{n7m6gV4NYKeW(gBq}HfK>o8wBk!w$XkvaaYD$}cn5v1Wu4yuA+i4owb9p+d zo;wR&;O3%fbLXL$#KoxK;ZoF&T7_ZA$4 zhcU${z78YwzZg2xxE#A6jJFrq^QV}6p8dc-}>A@wJHF!SyVDqKDL~@_1nKG?5h`?Fg37TnH1(@I)#u6`;~So8t|D;hH#h% ze7S5|V=a5<^b_{`rd~Fn=p!rT^_xA{%7ayN1hCvh7@e)fP@_cx4^Bv-!SpN)vyj8O ztqOS3O$Bf1tD#!u92BqDL_DU0rKJndBF_}#9SBzcV^Ktm;^w`UD0F2BPOPxOKUuao zCchkKXRpFVee1B)dIN4X-H4af+>l4w9VI%pp-Q+XhP~Q>2RJp7edvvPQ@+^W7=ZWt zLh+FIL3~{ni4l8a&_FN_CFPHx7GEN!|47DU!F1ep6+j=GGqZdT!_&vh(pxs6kacktb-2E0FfAD7CtV&$PWJXX_=VuxPh z9^O9G&iQ}?abNM2+7C<=;-ab30<_Iagz9{grYCO7P&pNO8dsu16^S~1_g#Zd$?4F` zYYpiJQ&Qxa84*EAgke+!TK`VmesLrQk`Z4qjm7aBh_8z%P|AXgLGWZL9 zM19!EXD8XlvI^F-=q_7q*2bnt4X|4zM%jIfrrGr=+~~WO7gc5o;lw{7%Uip|>)TX*0+k6l<9 z>yPi10}=d#aHT>B=07-q`(8xiWMdQ-xWwSmt~mTXHyJxPeeIz0am-nB0uQxhp|p86 zu5c^BRYJwsn^=nX+{^K(NhKEDufh<8YE+A>#%t5pFne1qZqK=eClwnpov#U-{af*j 
zYzI0T{)aWyz4*lI1JdOqxc>eG&J*ULT^>SIe4i+F(3hggnR4{Bkpq>D!h~RN%!P8g?L*#z)7~8l`l4jpos?;Szda*>(EkS{n`X z|4s#Uma_f2+3eDNrEH(#Z8n1YK6|I6hqc1*Y{Iu+tcv0k`+@M_g53h>;U$QZ5i`&> zRv1Ov#L+!n8YL`bF~dX-J9jIh!BQpsHbWJ){I${UfF3UYI3M+F7GOu=Lew8KL5s(x zC=yQaKpKNH|67cmw<$JWvF0$6mSSX~J?^Spi=DOWa7&dlW=U^AUwc=a73hX016%P) zn+KNs+JVOhcVpohKfHG-5M8(J$D+P)JehPD2Tny{uW&pzHzwfLyGgiNAdS=4j$-$f zQ>ajK9>bUA;fjKMJSkO(jcbchyY~W`9V*4cJ(sXRq5>z~uHr;&H8yU(jz07j2BBVnEvz;wTSQGZvt(4T7|8ix_RGlA-&jWvSgxb($-u zM}NIHpufU^nr*P6dmU}5{bxH0hc?pa+8xyCN-zyckEbIuPS9nu&(oY=7ioG>EltjD zqrV>gqAP#tu~m4Ul?*IlJJ;8;I+73Bm&qOM+UWOe!-6qZhj)^FA34SHJ>Wv`6n+dO zf*4aPgr!y@D54;VN4TYNi?$5zsFcGerxh^bh%ydsoP*4oxwtf38#8|B;9#&GcIO(P zO!Y$aTx*KKJ?2>cAHgS4i?QoJ%IWJic>ks?y5UmXaAP@gJJ_RSfFnK#TaVY&T~SfhDkKnq>1Wde>j6PzinE&l4 zI$b`Fn#@TItIfimnA7+w?i`lyK94KE=3~hGBBU3K(N6yoZhKjdZ%Y(6&A*AC zUf;ouUd`z2^9JWw^dh*w$Kz5%_~Pmih6In{^g=GWG=rB0Jr$(J`Qr5IOIgaxqd@z< ztJ1JmUF!2-0gZ|6^orG~xXU>V9!OZ64T01FZMa9_a*n6;h~Q{u%lvqL`Kp z+@J>p9?_%R6LjRKB6~aaESv0knZsSZ#>(;BVY}Y7vV{`;?5OcqHtYQ;tHkx29X0yP zI%!R_hq(EWG3CdtyM?eLR2cOwB(Nh*65VWPq2?($G`CVh)we3RcH1009yJdmcsX38 z25s!;osUjU1{m*Rh%u!Lu_V|8?~0oukzI_fiHlK8ZwYE@TBBB;Eq1K5Lya5uIOnn> zhOgOx=d@f=Z>byZ+qMmF^LXLWp2U0O%UmDKWOvlRMquB8_1B*AGM)_4|QTF>e%X;~j${4|I+%ZBle^H*HoGKH3(xv3o&Kb;Mt^ta&0T#y$_MVw)vu*;fmi*oXICv)oE=+1X-4Y;^o6 zJErxU^ObEXz|sk61jiw^6+dUi7Bl ze)?xlEFHR-MnAaa(8WAO^u?naH1GX$`fR-zJ5Y0$owL1&RlQxoj>+6)gKpQe#RV;_ zwZRKkzPXRBzV@B{@-i6Xbe@Nb?3 zzIY&oX^yhEIaC4DPAX&KGc_CwQODMV?`%)1O+Rh5Y)-%er0m$S&a6yS%? z#kiNllau>gf=44OP*wOE`X0N1lizQnTJ=K=KKvLJ3fr*kY8O^Fzrp6OeF!Iqa4_Q= zsw7Wf;NO2ZV^oN`T@#@VZzSoI#%#_p33=-IWDYevV?Z|so6xFMq{NCMo2h@5y%t#wsm&N$!Tw_J6i%^`KAaho#)m=?0vLBg1X~M4 z@$FkNteuv`Kz(U6V&w5(nj)TEu8cClYWT)<4wk=H#}OG#+&8L&A42ufVzD6xj~e2k zS`%zdFvaWEi%~O!VyLS%o_DatC$pC0jRPyN&So{H1+PUf>Wur&u19UfEy#Pv9Sni?XtmiF@8t%dxkCsBemaPa%8~f;B}b2~j>lyoN$7ha8K;L*@xZ1`%!xjQ^8e1_ z%C1~gTU3bdn~E`ZX(_rdx`fjSC50qY9JLykELeQn@v}#kY)=#nf7yWy;Ks1qnFLzSzW+J zMpm(Mw`dHeq%*!daQ!S&d(|M@4SsOD?>R|gi zU393_M|C+v++A#ppqg{L_?bpRGQ7{VQds$SrLn_fr(f>GZo+29L3_{6R7TS z5<_!O;fjc}ShM{+HVbh$ak+)aq;c|@*mC@sR*9ExUB&f-*Rl6NE!q^-r6+fEK&N(;ot9G*IUnee|r2{&pH$y!KK88}>Jy zUE!U}I^8H`6%A|Iz1JVH&-q%|OV8U`-GwjNur;sQ?)X0T(YG(`XwnEftv<%SeaMAd z7+$nLB7iK%zXW=VpisUT9+#BFYFR0iaF~UQw#%U6M|n(aRzij6$~ftzf*o7cQNKZ?D3Xrt5es;ug;AzKef4 zpWrZ8H=3^NL5)FXg9|gThp6Y!OAG=-p#0Xtv#IYCW-q=G*S1i!}mhhI|UO%1@`M{x#IZq@C8!5MW!! 
zqFI5aqwKS^^K9y$GWJEnRd)LH9X9?}3wwCYbN1NT|5&m5*X*lpeeA+N{j7=8M|Pvj zAGUfPH||}+%khi?xWZBh9}0?~Xukw%^hjddY-v=nnT_k_%43d}BKGAf<8DE9oDkN) z`!#bhH$@Yz9d&S>x*lFOH^5pGLtK1)A$rU2d($>j!%H{qPIHuTbejVJZrVFdRunuhlLQ@w*>o^T zj@m3!qsO{*=}mECIy7cURRruf{?VD{6>XvpsqVDH%ZsKR458sxNi;JzlP1orr4=4e z>9;~9Hq0rJl{j{WRnWY|T5l?2t3#{Uxohj$MI+6u)WK)0x8G~_smVLmR&ju>`992k z{_&k18JJ|h228Wj!2%fnN{HhfMbN%n0@cSQQCC6+4_V3M24_XwoUe+1{pR5Lg&KIF zWFB7JsD)u%+Boj1i)jxGP|C^>vn!48-6>;S7_I@v4YnMZ*m%P zllW3(y6y1#NeBFN!4XG8o$+-1X7roC6}fkNVDK+5ytdB=)sFb#)h2(m2;GO7IfrnE zLNscb#i8qX0-B9y;>{1)n5B1~lb__GN<#s52^XO{XU6-LUqpZJGK^NKK*!o^m~f#M zU6bmt+~7XCE^We7&aLR8+le{ZZ?RZ-2tU++#gU9Zc=;kX-IB>qPaYDYH}*)k*L61R^G){Gmd9-R_%l{a^Cj!7)yE2UePkcX zePJIakFh(XezA@G+*shjgG<(nV8>T6)Z&uj@O5Wlft?(BUFXz8N*PZXt6`U%Ixc!J z7sZRT(7#3-({=T5Rx4+IOc`J~YmCZ^%(49;gCBn{#>BOV>s2^iddF9^$DhoGKyI41WW!m1aC z@w7`ct~QLrzq=B#c|{uXI~~LKN6uhGbS?(`DMX%s7cjf26#c)JV`y+CO1V{`^qOjP z5vxVj{Caf1d>_Y*o?=znGo0jmiL$5OE zjx^n>r9j30sMGMC`P5i@5ncOuF}+-FMUx+_qOadM)5VeQ)J4~ej=tPOH!lmOHE$#7 zW%ER;H<&{MBWh^0c?(s);=(=|Im>>v$z!cEuCZ6+8rg8KN9?%PGd5}eEB0*c5ZjHz ztQ`A|)erm5&Yk+hzRl*r?wf-6C0-nlU6I62y;*3@C5@vAGU#BSjAG-e_|aYi8-(Yf ztF9Kd$mn6zPko%MG2nOxL-cGj!-Pt}h;0^V70Y7f6-(6QvBpIaHYllQho8T$!3#O- zuye!-H&$&xr-!b1QNs<*eBIIDxd(Q4?7$8oAKY~z04@Iop@x4j4!4G(NmUe@JdMR` z7vu5!7EZqsOhj@y8H4(c;lqC?@Q3qh%wKdKf3)Ufv0M?l9=?c|^eS*rx)MWmI5~0M zZR|f%hgY2%arD?Dj4^J*6FogxG1!Lz9v@KZ)F%{j8^(?O-_V=qC+6hwQu8z+Dk3jR zRi4hGX782gd=(Aa+OJ9J9|L+!grP5_kUpxlr>|G6rE8^JXhFv|I(~g8%{{S?W=0&M zHjz>ELvSVyw=1L{&R5Y#Ce2hg;voCTB##x!yv#27d7G8G_mFKr{*ui#>to5WVRm!h zH#Y6%Pj>FuKi1fd8#jydq3N+1xQmLTsG>9mhsmPbJvp2eu7EjzRB-E(dHCXk7Jf0E zk0}KUP;6)+%KtG!?KO+=a=$6A+Ha2i;fwKxA4U7!Huywh8NPqB0)=W;qM!U4TtCN! z!=c`U@uk~vew8OGaPs@6rrqdP<%@5t{BfaRATpcw;R)*y^wbN--Ia%N%r+WFKg45@ zUJAbbo{AHW>Bx8VD4q{Efj`%r#9-%a{KT9?>QjieqD6RV^a38}DZ}%BDmlNKYnXbb z7VT~B;6&Ix{Kw&2tJk$)%ckc@qk3@Cw-;CF_u&V=K@2G#MyEStxG4BH-c8`5N9Xa- zPhUi+`uSP({FDlPDWySsgLP=iwFPwDIH8o^f+m<)(Ruq<&{fiFX<*>f3@Mxbk#=f@2<@@chaCimQYdhe%Lu*jwha+}ZIN|UX z7u>L9E0$UAz~&4uyfd^1x0DCs?pynDrN}}2RvUrf6N$fSqA>MB9C~&o;Gkg=THB_e zm}44_+&YfppHE6P7|)qdx9&sw&A+MZag>jiu0ylBiY!8FZ4bkz&9jZW0-3*fpb4_(Wh6q zsqPpb{c=>2a+@j8v({=qgOwzrEM}>>AhDT zbi>7c^sht|{dMXH<&#RMf!xPw?8h?tvHd39=<%5DYAj}u|fmdeo3|#oa0H`0w0S z^ql318S=aEbkJ^`TjzuC{tLw4Z}(wO!9kqLh{DRCXyj^)#czg3FyUS@{)$UM4>k?? 
zw2q^az)2jf&%#QpQ~31nS!`L5kLPz3U?^u7Y@1VtU8z@)L|(;%`>tcoa4p_et>^gn z2dKQi89TPO;Ipr-cx+t<8h!4<=xwiX&h^)v9J?1Crw4Jwb(oXcjo{PmKXET`QA18f zAJD}^P3#4!@=FnFStUtZmdet|aaDRHK#OXc>C&xx=2MG+g>*S%O7~=N;(ZHSx>wwh zO1E#KqGA5@@tqKA0CCj*eiBt(mri%KpQKN-uF~#`T55Eqn`S(&W8;;(*#0N4*_gyn z?72lFtk#NYR;-2_%cpr!Peu^8KNi7CTXDQEFM|cG%DB>B9ajfyV@a|ewiYkIc{>(j zLy-v@Z6qB3V1e_0Q8aa1hN6E~p{M*>lr3zH2%96$6g1cbwL>RYac|{8sNg8*mCYxSqv-M{;phT0ZvhT)@znQdCtg!yc;&T*BFr?2x{Jd^xrFY0X{a7rBql zK~1Rl`!T*KXvNiGZ78^-10z4Zz|q8BTt3i`w)a2aGxs4>{P!JiZX83AOA{!vd>RF( zxvBCX51owUr)%|u=$Rr>>YFV?Wd;=Jojq#QQ&62MR?ee=270t4$C&OPT|@^H7Sjh- zHgx-%mDJ{$GredVM2$5LQ(5mgTCbl-pS7k@wX__%Q{pN$ynBN>?(U;LX3yCD>wDRO z(E)bH_$M}Ra*P#B=El5_0(kVcD89Crz&~$iW8Lj*YX8E~G${hrIo|u`n@G$kj>hztv1pkXkJ}z4;KY_h^m0zYm+|T7{p}e3 zY|X%ipC@tR_8E*6Ifp&{g?Lo<0>;>vqDI>#ycKW-eW$C@z?75k?!AeXPw$|!^IaVB zYD9g-M;KSqgy#gFV2|Td?C9;pe>yL4@aiixi+hXdVf|=dJAkc!zH|0sKQX~#5*@B_ z(Zhy3^zZ+t+2#V`F)AxVD0p$~Dr{8ohL` z%~Q5z^*i=P`4D@>bA>r!f#D^QN3uDL$NsQPm%h8@nxKKbF=P%dA^nv;4Rb+s= zatpCq&lGzIgYF+#w6yI3qM3Ju?!}B{Ufi^>Z@!`{}r7 z)^VI+b^_1tK82c%IT*StA1%5H@f?2%-uv$oo@pq@?8FM(pLYe52e09Yf?70myN!7_ z8_+1>KAv9x5Qn{+Foly}aoude!nds$e!C5w-*#e?a1Vyh>E(PoGl<^DKVdiTH;lLc zfn?PbPMJ^RiE?hLqQOr;CkW8|D>LcN6C%_BB({iZ_fj(qTFSG`K~;lof}JZb`6`RDhtbD&TLU4ITepBDR5fncA zTfsM_rSS8J9keZUfMRDyc(K71{ylO7zj<3Bzsv)c9M}PmoOVLW(_O&lN3|={I^e||k@jQuwj5(l8OM{>(*~;P@vBHeak^ zrTa1n+OYzD?QwwV&ud{>jSI}p+6+q%yThSi5Aa&I6Rth<2D3B1Aos-|HW=*(OP&L; zS?n-uG>QWJ76sBZF(6$W3*LJ1FupbcwmeD%xsD{*38|p{JQWUnPlsT$<4|Cf2~M%6 z;h*t2Se<_kK33#{&-)_CXexzQ)n%}zt{m!_N|@YN4P|!MLE_a7sI9*RHUf8`YefUZ zNf_!uTr+hC?>CmenK67D3vgLwrXpljz4ybk{c{R_sRNni?op5$Wg z>vJ3CF@s@@B$!i0GE8*cZ04x&9Ok~a2DAK=E;Gn$#OQ~c zF?SU>_txiGGP`XZnU}R*4A+7^%*4BJMtx-_lT{o;v`^*`RhueO)pVDfTGvY2NgJ89 zx|_s}_ml1$pU9fw-(>N~6j6;A2LB8xFuFM#Xt4r(pQ!=I-)Og;q>E5h_kN-7q|)C z+iyYrcmt$}+=tBU`>-~?2{v$bfWYJDkoBe$3VyzVDDT&h$81qu;h;_ z%xKVp@@_*&&^3V~6@Xp37GUGE7_zom!J@fKA*6U2)TlbbQDbL_y1X8;pKk&SaEGcS zPiWri1x?+%p>4h&9LNrU@u?uF{I?&1qQhWt*X!sHm<$oJY4C8?QP2y|fGa;wfl^a8Xze=(!_Md7LP9QdH5GugW-;6?ErCq2 zOE5Hc32Jwh!{AO%|5CUPUp(sI&+-PC^S%*$Ufkz!nVMn8XbWhqc?wSZo`J^G|G=~6 z4NUkBK=br4j46-8;O<`#qCEv?7w|A6+xZ!-P5~x-mk85WD#~bdN-#n@W;2^K<(S>) z<}iFUI?QCpBBuHi!?3ql=4BIQK0IE*6rA*B0@A{mZ<5)Ji*5l^DC$Zg^N)}-qiN)C z%~8T?7LlInTjYXvBRQ$mO5`27NR8zilDA}lbo}~4^!H8>C1Wm#l;MM8GD5&RB@Vf3 zXG4ysEL6%X!g4be$kv$y7uL@O&mK*vCwj2J#{jmbEQERs6A*_baAv|9+|Js;Ios9H zB)ATuwbz5!fD2g5y21D7TVYr9b_mh%fQy+sA^WR0i2w8lIJ6hM{_TUCpkfKHioY@M8 zrrKc<{|h*MAl> z@;E7`;)@cKBdW`MKf8b#b~9(jT^UBa3Yn-sPRz66VCHvGI0Ip)8H0@%nKegk2yeL` zv0QkNIF2Wgvpy%ti^x2}ZCgc-?7dBXpJ^cLi#kbd*-P@~=WEj8)JLS-eh~AWzeyGM zU(!2|8-x=107_@Tle;4DLP-p6PR|1KWJRbSRDsP4)Sx<39g?oi1?5Ri*qNXYcIOEQ zmH-GST0k*}6Xy2F8hjgsZtScm}afhV++u&532mF`s z34X&~(7_uBjr>7yDkU7IDi4BzWF%}Ih=Mtd(ZFXF3;N3OkR_i88Y_}uwPgyNjOWZA z|8!U!dK_{Noq%J)C&5zoEPNZxg)3YIaJJ_Hr~ajIW65PWt#}10^R7a>+I4txx)z+A z8(@0%LvUIB7%Kg`pzr2uxT81-wKKlJI^*x4e*Fii@0$dREmIJDjf-*K$(B$;Jj1LnPpDRb_&C1dqsE30iJt}!*ah)}(+oj)BRmsKaz)|JEGd}&G#m1A)xhe9 zCVUCggOC~eps8vIU&9xITapQk*a4Vx>eHvj!aK1gaPhY-NOLsW60?=C@IMEzOjrZj zOPzqebAhs^jbP2S6~3+B0UqynK`log#7z0YnV=vTvw9l8O9o^^2lz$4gc-wuB#yTFUL2PXFR!twrocwPMw#LGUzht4q&p2x*l{^e#| zba)x9GknZopCB{$(@e(ltR&OInfD5J6d1qjs!WF8JVrJ_kNMiTl(~?~+03^GF@o=p zGNl&9jO5#kj9i)s$$I5Tg6uqqdis7Mzc8BcS|*TPBdLUaah8PXR}u)nLG}jTBEN+1 zkR4-f#k3>H)z zZVX96u%#?uiW-c#sKe5hdB9d`f`#mSsNTB(WDhKalujeyYcqvxDGLxyppZ_jApEWk zJEDZrKlY_96 zI|goh#zBO`5m@j%5t>&fgVxFvu(C^sxHrc@@_7av`FH|G>rTVm@mvUvDS&~X3-C(o z5;&+RHpPo5o2Ch#d!G$6H5;xvUQa!$(0EpQ~goIQ#zjPu1+9z73ripESE(6tRS@b z8u{8&NAmI?kjP!F&U%wO`{X^I4Y;f5b4 z1mHlI5F9uy46siW(vl^BSjfQPUM0Bhs0QH*8gRc{6HHfVgY_3(s6NE;H^d06vQ1!K 
zq&aBLuzx&eG!i*l zKt2yrx{4sitqhb`SHR(am7E*3)$lp37V0V+!F1#aY>MoFX3Z|RKJP8;*vZkz;zJ-o zKEas>pW%7M4{%qTfQK8o7!OwgMu2|?^EOR{d39EX`7ukL87Wp_*hqcmi6v#mmpL$g zrQ4aj%K?mfZxT}$UdUM3TxP!S6(ZI17LbWWHsnvbKe;*_ON`3nN$kT!^5#GW`8{=# zyk?7utlkxpD0z$2U%yQZXaf-#ctFngJtm{F&&c|=SLC)?9|`asAn`Xok^uJ~q$XmV zWZ83Ojs!O-I`D$Js1WRsm+gyoLWF#a$W*61gLNOme@3LXW%wBw-6p9!OX zvfzPS4g`KZ5Bu`-K~AFxj6KT0ySE$^MXDesrUpj7HbVID18|!681`ni!&c)iXk7LZ zzK{I}zn*=7cg~+-jqXoavvLahN4c4tA|ZzPEy85fi8B{pOEJIn6_~S=T8#B|j`m7H zMpVp>*_GkOynPkUj6(|3JDJVMj~6m_!=2{(zjX-zEiMz$YRA`zi!rUiS>REg%ZMUqm6)R}v;1W#C7i9L#P} zf_Gi2An!OAs`|8nIlKS{{f*%5$RfDvW)2hI7Q?i`63+k77P4k8hqR`Z5Zt*6b{%y9 z7;=Ow+c!W9hwZMsb1Te{-2wG!yFkd>4{jUpgSg%MVJs&Mt}+qOpBf2o??r*uo&?Bp zJqCfCxAOWirzWX6&@Fc!hA!m6^TYzsNGXE<>dIirtt-%0QVT7hLWh{3@Hj>+@D%gh<2;j_US>YlWZqBQL74!7y^$|=-aXc zYzJ*%;W9hu805U0WC!5xbB2n`u3&7i1p?FEVg17GaIAAXjPKn6c{bi~Z;d~+KMMlK zl@S0kQJ@%-2nwdDu#09u_ohtn&&>kv{W;Lpkqc`yi$F8D1PWs-VSC^;fXy6_x#SkK z$KQjr+*Ys~dJa#eyTO`M_@2kmKgX_fSM;#fu+(=Hx-zNb>k4RKSJ4u@J zA0dn1k_(I9lghX+A@*nLF_URP;A<~J=+`l$rMnFr zSzrfG2OJ=wdp$f0=hSWa4oFf9fGZi{(ARYsw8c3bc<&={@?;VyEl+_3S5v{|90%Nd z<0N3>8JIJt7$Q_lL8iP63QMlS=)^7fWmFGE){U@5`zg%tcm_h(-@u`u&!F?}2i%hW z4Pv|gK{w1`3O9%_mDMs#+5Nf9bdV8a)o#uRy|ZToLtU84NKd9uCz$#CH;MW4BFkLK z?Weg-i#&1NGKVNX)*x%|Eh5v~my;))ue*;db0IV8w-bp`FEVeL5BZl7Kw=IDbNIvu zNMg}p@gJN-Q~6}|&;>FoaFI-gT_Pg;E)!AT zO7eE~Rid%EmQ-}tlh{oU$T7*s!6nQ`-R zXEHDDV(hMzn^!KIFqditx|Ilp!exh8H+MBmtvb!Mx{sM&fl;kKDr@$Dot zhIf%goBc?fV<;KVJ4D_XN0XoWv4rnnJo%=cM2y^zk$UG$@^k7GxvPAZ_*>+Xw!8wO zl6RRXcV8uc-qw)2X0=2}{x)$_xkG|f?~+G#4WwrEeNwXK2^o%iPF&`_Agj3Fl8ZO` zi0J1bvhnp7V&yPGK5QN%QqmKILw1GHjl3}4!Vmk;&4dykQD{3X4hg0baCr4B5Hg(& zqndIM#IFecvz1_M_8ici=7VsmA-MiFh6BE4ptXN7yc=UdH^3S;{#Qp7afJG>yATDS{A%jJ`E4e^WgSiA*6a& zz>VG4pg83gEH!S1zrs&p`-5i?{_-s-|M~F331-u|*-Y9wGyC7Ae50i%Ou)(b}y?G{L!E3&Je*;Cjz|P;FcQi+&nIgPb|Y*CABDw}JQN zE1>s+19-}8hFw})VciUGm@E%~=#gO9)Da23b}^tocm&qOC4xkLD(qG~4s`~Z@ZoG0 z?6BZuH1Zd~{c{<_##h1R@_J~5`(RMr1eHF|fcwaQU>Gw9Vy{Ml*Nm4joyE@_Mq%b@ zfH?F1yd0CXXC9+0vVd{uVwpv0PE62=!;Jm1*XBZ(jpX0eCjI&btBARTs=IAryBjoK=BFQ&7N_0-2B-d-RNrTK; zQp$CX@C~0MVb-~1)wx3Q`S?Y$_}^u+x$Y*p<$9Z}D7Z_OKD|eh>RZUjuIFU$?iZwf zaSthc{f2Zr>LcRfLu4%EBe{A03o+j|LbS6-iP_WyDcQpXTDy6mM2Yi0?1ey~S{Pzq zi$UrpNho+B15(fAq2HUM5B{h@%JR7&Yo`foHtK=y@A+WRX$X!rMiBJN7}63n+)D4+~KVjhL zqs5@wxdehkp}t}#7{>*IX!ijiEQgETo&a+j5@C6LI&>6f zfZgUyFjdV0-DTMjoSg#!+PR>z?jmgPz6RpoZo>?R2KdlAginej`lwXsXcE% z{lF*aDjkEYeS%D8jySWGl=<~)1-g% zS#n@SFC-n!!iHuNI{Jh}!!x3-^Mce} zd`)uiz9qMZ`-raA0Fm_gLRuD%kw8v%w$h0U_-nY~rWX%1Zr}&!0zp_FEDX|5#o&*s z1eiSL=w@LV&fY{8*4$HpOD<~AJgEUmmoyk5P|4i{E}hLF z*53}6ChuWQ>=$sDHx3IP^E2}zgqYpm#hDLP3d}|kWoG8zxs1P;5u;kWk>L{9&*Xbt zH}CN`CV3t}R?J&P6nr+5sr8;@htw|e&^?q4c^oFcw#AYg%?U(cNiyk4P9^8nP7}w( z9P;{p8IhE#Br88vktaDdm>;GnZoe8Qyj*HXF=Df z3{?GF!#m(7JnZf$tJQU^BN(}T24GvbgjY8 z&lZ+Fwu6d)tKj5&2N3_^1mQBAEc4SA2vOVx|8n-g$ig6)Gj$lq-j)QK`BTzAt z00;l3fOl0oygQQxGsjOs%Zan_Q70D?vx-2Bv%{>*tAL@ND)?h}3;et9!mp`DNP)*7 z9q<%#lG{Pew-XY3UV(AoFx)Er2XC{c;lfYOT|8l7W@7bhCO1Qa*}hwg>Dpk#bT&CN zCf56yl6$w!H>&(k(piQ_ku_}?cXtS`i@Qt*FW}(r4w-J;-683*j7M-MxVu|$cXxMp z-S6J-{n1yFk)EEq>v^h9onE$qrIFUw(!F#KOM_?qEY3VbErXv-v<&}iS~9M(S$aN> zwCp!MmY7(dxUwm!6E&iu-fbJROafytjO4+nm?q*%hgsrW3B*58p#JNPrXw&r~b zt$DH3>>;Ps+-1{gl01{v;6N5_d}L0o$B+=#8U?h6B?@T|R}|IOO)sHUtXNk2x}=xkhT4uZ-6YKGG`Rf2G}Ej(=%lqR>wMWT8tYr4HToC4K1VPPzH_@ydrLm|G=u=;sch z?~V@--7x-fwW3XWSw6W3T0)Oavz%TXX<2yKZ#ms0%F?CxEX#$1b1f&9F0j0Mxx%vi z%R0-Al^ZR?K5n&SUA51W^5Y3hp{Exu*4|evsTba~G=6u_QpWn&GXBaN%gx52|KwDVGsW=jRV@@>4&uk1CMI!Hl5Ti>*usuT`y|i$6V5e%zL0EU-wGu-|)Tm zy6O+@+wugVONt~34I7s-wC~n*p<51S2;JT%duZ881w)r5s1#akY=clENsrJ?U8ja7 
zofBeN+QMbYel^xIVB}m&ogIrTzTPV=wYIFbG#XSw>xa;x7DOZ$3%Exke$X_sFl*COaq%Uw&Ojk}*oTe&j3R(3%i zZD_xI+K!|JwPk+_YB_DiwCV{q$As5w$~(Jr;@sx`UNP3w@Qx0d9Ru4OtqKx;XD zh!)>_xaQJFYTxfo)RPd=a5Hb1+p6`yxgE4%l;mL$Pb?c&UrTIh*4+ynDPtNHtf)^oxi?L?bop|RIe zhyMAPF7#OSETLc0hlDQAo+q@#?4qHCx>pHp)1Y4HmEvtfyDYJWzL@I_?YpPFrTxL6 z<#UrcmJK0`EmLxBuq3Il+0tO{UQ65Ehb&(wpSSEUd&Sbd`ZLRba~~|NAAPe_yZ^7{ zte!-hcs;q6d}B)Oyq;FO7Lrjj3TDyf=-fXsA-8s+MnNsEa#1ZZw3KE~Tvl`4sG!}t zTSfakM$?w8tFG;2@9J=a`r2)q(zY~jrp;O3O1qY%t+qHxd+lbH4%+hV9kn&TJ8Qq( zUA6qPdT0k1TD9M~`fKys4%ha3MrpHqj?+%goJjso(SrM@X`g4=v=qsm+OF$PZKo$v zTRqRKT}~CyhK-2P+R!WcFV57`E}g3#Sh+xJTVH1QwYtS{j?R^TDlJr9C^esGGaA9%&A0_x2po>YqNX)&F)*8=mg6_Ind+j?8y8E%j6F z>dlv0r}FQ$(6Eo%{dNhC1(3e+}hEA!GJao^8RH03#rU`u>$PzkWLGI9A zck+aKeMLi`J}wn{zFhgxr}=A!7X8~kbn1lOq2Ipu4c$A@7aDaZOigNHRp+}}Ri=4X zHR!xmU3_R&g}z!pRiy2@TzS7&PK>bKI>+IG4+){Doix_V&f z>dzour5~rOuxYxg6QQex4qd(V=&Ei&SKgqm+QjH8-3(m~ox}4BbroEut6i&fm3f`6 zUT@ab@a?)Xcj;>TZe6uJpsUh{bam;dt|lDU)t%G2N^@RUz6-jlcu7|iujp#VbzK#| zsjHCNy6W>#SBszMD&o1WZoJ{MKIrPhXRiH+u1fvZRfYtHvL!Oql_ZABm&{Pvk{haO z3PUAHYp7o740St;p_+vlYI)=