Merged
Changes from all commits
37 changes: 19 additions & 18 deletions piff/basis_interp.py
@@ -24,6 +24,7 @@
 from .interp import Interp
 from .star import Star
 from . import _piff
+from .config import LoggerWrapper

 try:
     import jax
@@ -154,7 +155,7 @@ def solve(self, stars, logger=None):
         :param stars: A list of Star instances to interpolate between
         :param logger: A logger object for logging debug info. [default: None]
         """
-        logger = galsim.config.LoggerWrapper(logger)
+        logger = LoggerWrapper(logger)
         if self.q is None:
             raise RuntimeError("Attempt to solve() before initialize() of BasisInterp")

@@ -210,7 +211,7 @@ def solve(self, stars, logger=None):
         # QR method often has fewer numerical problems than the direct method for large matrices.
         # It just requires a lot of memory.

-        logger = galsim.config.LoggerWrapper(logger)
+        logger = LoggerWrapper(logger)
         if self.solver == "qr":
             self._solve_qr(stars, logger)
         else:
@@ -260,15 +261,15 @@ def _solve_qr(self, stars, logger):
         if cond < 1.e-12:
             # Note: this calculation is much slower, but it is safe to use even for
             # singular inputs, so it will always produce a valid answer.
-            logger.info('Nominal condition is %s (min, max = %s, %s)', cond,
-                        np.min(abs_Rdiag), np.max(abs_Rdiag))
-            logger.info('Switching to QRP solution')
+            logger.verbose('Nominal condition is %s (min, max = %s, %s)', cond,
+                           np.min(abs_Rdiag), np.max(abs_Rdiag))
+            logger.verbose('Switching to QRP solution')
             QR, P, tau, work, info = scipy.linalg.lapack.dgeqp3(A, overwrite_a=True)
             P[:] -= 1  # Switch to python 0-based indexing.
             abs_Rdiag = np.abs(np.diag(QR))
             cond = np.min(abs_Rdiag) / np.max(abs_Rdiag)
-            logger.info('Condition for QRP is %s (min, max = %s, %s)', cond,
-                        np.min(abs_Rdiag), np.max(abs_Rdiag))
+            logger.verbose('Condition for QRP is %s (min, max = %s, %s)', cond,
+                           np.min(abs_Rdiag), np.max(abs_Rdiag))
             # Skip any rows of R that have essentially 0 on the diagonal.
             k = np.sum(abs_Rdiag > 1.e-15 * np.max(abs_Rdiag))
             logger.debug('k = %d, m = %d',k,m)
@@ -363,7 +364,7 @@ def _solve_direct_python(self, stars, logger):
             ATb += A1.T.dot(s.fit.b)
             ATA += A1.T.dot(A1)

-        logger.info('Beginning solution of matrix size %s',ATA.shape)
+        logger.verbose('Beginning solution of matrix size %s',ATA.shape)
         try:
             with warnings.catch_warnings(record=True) as w:
                 warnings.simplefilter("always")
@@ -383,25 +384,25 @@
                 # size residuals. We don't have a unit test that would catch this, so be careful
                 # about changing the behavior of this part of the code! For now, we just go to
                 # the svd solution when ATA is fully singular.
-                logger.info(w[0].message)
+                logger.verbose(w[0].message)
                 logger.debug('norm(ATA dq - ATb) = %s',scipy.linalg.norm(ATA.dot(dq) - ATb))
                 logger.debug('norm(dq) = %s',scipy.linalg.norm(dq))

         except (np.linalg.LinAlgError, scipy.linalg.LinAlgError) as e:
-            logger.info('Caught %s',str(e))
-            logger.info('Switching to svd solution')
+            logger.verbose('Caught %s',str(e))
+            logger.verbose('Switching to svd solution')
             Sd,U = scipy.linalg.eigh(ATA)
             nsvd = np.sum(np.abs(Sd) > 1.e-15 * np.abs(Sd[-1]))
-            logger.info('2-condition is %e',np.abs(Sd[-1]/Sd[0]))
-            logger.info('nsvd = %d of %d',nsvd,len(Sd))
+            logger.verbose('2-condition is %e',np.abs(Sd[-1]/Sd[0]))
+            logger.verbose('nsvd = %d of %d',nsvd,len(Sd))
             # Note: unlike scipy.linalg.svd, the Sd here is in *ascending* order, not descending.
             Sd[-nsvd:] = 1./Sd[-nsvd:]
             Sd[:-nsvd] = 0.
             S = np.diag(Sd)
             dq = U.dot(S.dot(U.T.dot(ATb)))
-            logger.info('norm(ATA dq - ATb) = %s',scipy.linalg.norm(ATA.dot(dq) - ATb))
-            logger.info('norm(dq) = %s',scipy.linalg.norm(dq))
-            logger.info('norm(q) = %s',scipy.linalg.norm(self.q))
+            logger.verbose('norm(ATA dq - ATb) = %s',scipy.linalg.norm(ATA.dot(dq) - ATb))
+            logger.verbose('norm(dq) = %s',scipy.linalg.norm(dq))
+            logger.verbose('norm(q) = %s',scipy.linalg.norm(self.q))

         logger.debug('...finished solution')
         # Reshape dq back into a 2d array and add it to the current solution.
@@ -483,7 +484,7 @@ def __init__(
     ):
         super(BasisPolynomial, self).__init__()

-        logger = galsim.config.LoggerWrapper(logger)
+        logger = LoggerWrapper(logger)

         self._keys = keys
         if hasattr(order,'__len__'):
@@ -515,7 +516,7 @@ def __init__(
             self.solver = "qr"

         if not CAN_USE_JAX and self.solver == "jax":
-            logger.warning("JAX not installed. Reverting to numpy/scipy.")
+            logger.info("JAX not installed. Reverting to numpy/scipy.")
             self.solver = "scipy"

         if self._max_order<0 or np.any(np.array(self._orders) < 0):
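Aside: the eigh-based fallback in _solve_direct_python above is worth reading outside the diff. The following is a minimal standalone sketch of that idea; the function name and rcond threshold are illustrative, not Piff's API. It solves ATA dq = ATb for a possibly singular ATA by inverting only the significant eigenvalues, mirroring the scipy.linalg.eigh branch in the hunk (including its assumption that at least one eigenvalue is significant).

import numpy as np
import scipy.linalg

def solve_possibly_singular(ATA, ATb, rcond=1.e-15):
    # eigh returns eigenvalues in ascending order, so Sd[-1] has the largest magnitude
    # for a positive semi-definite ATA.
    Sd, U = scipy.linalg.eigh(ATA)
    nsvd = np.sum(np.abs(Sd) > rcond * np.abs(Sd[-1]))
    Sd[-nsvd:] = 1. / Sd[-nsvd:]   # invert only the significant eigenvalues
    Sd[:-nsvd] = 0.                # project out the (near-)null space
    return U.dot(np.diag(Sd).dot(U.T.dot(ATb)))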
49 changes: 37 additions & 12 deletions piff/config.py
@@ -19,6 +19,30 @@
 import yaml
 import os
 import galsim
+import logging
+
+
+# Add a logging level between INFO and DEBUG
+VERBOSE = (logging.INFO + logging.DEBUG) // 2
+logging.addLevelName(VERBOSE, "VERBOSE")
+
+class PiffLogger(logging.Logger):
Review comment (Collaborator):
I see that LSST's variant of this inherits from logging.LoggerAdapter instead, but I'm not familiar with the pros and cons, and since I see that you've looked at our code I'm guessing you've already weighed them.

Reply (Owner, Author):
I didn't try too hard, but LoggerAdapter doesn't have an addHandler method, which I use. Maybe there is a different syntax that would work for that. But just inheriting from Logger worked, so I went with that.

[A minimal sketch of both options follows this hunk.]


+    def verbose(self, fmt, *args, **kwargs):
+        """Issue a VERBOSE level log message.
+
+        Arguments are as for `logging.info`.
+        ``VERBOSE`` is between ``DEBUG`` and ``INFO``.
+        """
+        # Based on the LSST logger at
+        # https://github.com/lsst/utils/blob/main/python/lsst/utils/logging.py
+        self.log(VERBOSE, fmt, *args, **kwargs)
+
+class LoggerWrapper(galsim.config.LoggerWrapper):
+    # Extend GalSim's wrapper to include the verbose level
+    def verbose(self, *args, **kwargs):
+        if self.logger and self.isEnabledFor(VERBOSE):
+            self.logger.verbose(*args, **kwargs)
+

 def setup_logger(verbose=1, log_file=None):
     """Build a logger object to use for logging progress
@@ -32,16 +56,18 @@ def setup_logger(verbose=1, log_file=None):

     :returns: a logging.Logger instance
     """
-    import logging
+    # Verbose logging is midway between INFO and DEBUG.
+
     # Parse the integer verbosity level from the command line args into a logging_level string
-    logging_levels = { 0: logging.CRITICAL,
-                       1: logging.WARNING,
-                       2: logging.INFO,
+    logging_levels = { 0: logging.ERROR,
+                       0.5: logging.WARNING,
+                       1: logging.INFO,
+                       2: VERBOSE,
                        3: logging.DEBUG }
     logging_level = logging_levels[verbose]

     # Setup logging to go to sys.stdout or (if requested) to an output file
-    logger = logging.getLogger('piff')
+    logger = PiffLogger(logging.getLogger('piff'))
     logger.handlers = [] # Remove any existing handlers
     if log_file is None:
         handle = logging.StreamHandler()
@@ -54,7 +80,6 @@ def setup_logger(verbose=1, log_file=None):

     return logger

-
 def parse_variables(config, variables, logger):
     """Parse configuration variables and add them to the config dict

@@ -78,8 +103,8 @@ def parse_variables(config, variables, logger):
             # Use YAML parser to evaluate the string in case it is a list for instance.
             value = yaml.safe_load(value)
         except yaml.YAMLError as e: # pragma: no cover
-            logger.warning('Caught %r',e)
-            logger.warning('Unable to parse %s. Treating it as a string.',value)
+            logger.info('Caught %r',e)
+            logger.info('Unable to parse %s. Treating it as a string.',value)
         new_params[key] = value
     galsim.config.UpdateConfig(config, new_params)

@@ -201,7 +226,7 @@ def plotify(config, logger=None):
     file_name = config['output']['file_name']
     if 'dir' in config['output']:
         file_name = os.path.join(config['output']['dir'], file_name)
-    logger.info("Looking for PSF at %s", file_name)
+    logger.verbose("Looking for PSF at %s", file_name)
     psf = PSF.read(file_name, logger=logger)

     # The stars we care about for plotify are psf.stars, not what would be made from scratch by
@@ -296,7 +321,7 @@ def meanify(config, logger=None):
         config['output']['file_name'] = psf_list

     file_name_in = config['output']['file_name']
-    logger.info("Looking for PSF at %s", file_name_in)
+    logger.verbose("Looking for PSF at %s", file_name_in)

     file_name_out = config['hyper']['file_name']
     if 'dir' in config['hyper']:
@@ -316,7 +341,7 @@ def meanify(config, logger=None):

     params = np.concatenate(params, axis=0)
     coords = np.concatenate(coords, axis=0)
-    logger.info('Computing average for {0} params with {1} stars'.format(len(params[0]), len(coords)))
+    logger.verbose('Computing average for {0} params with {1} stars'.format(len(params[0]), len(coords)))

     if params_fitted is None:
         params_fitted = range(len(params[0]))
@@ -361,6 +386,6 @@ def meanify(config, logger=None):
     data['COORDS0'] = coords0
     data['PARAMS0'] = params0

-    logger.info('Writing average solution to {0}'.format(file_name_out))
+    logger.verbose('Writing average solution to {0}'.format(file_name_out))
     with fitsio.FITS(file_name_out,'rw',clobber=True) as f:
         f.write_table(data, extname='average_solution')
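A quick usage sketch of the new verbosity mapping above (assuming setup_logger remains exposed at the package level as piff.setup_logger): the integer now selects ERROR / WARNING / INFO / VERBOSE / DEBUG for 0 / 0.5 / 1 / 2 / 3, so verbose=2 enables the new logger.verbose() messages without the full DEBUG output.

import piff

logger = piff.setup_logger(verbose=2)   # logging level is the new VERBOSE
logger.info("shown at verbose >= 1")
logger.verbose("shown at verbose >= 2")
logger.debug("shown only at verbose = 3")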
5 changes: 3 additions & 2 deletions piff/convolvepsf.py
@@ -173,7 +173,7 @@ def single_iteration(self, stars, logger, convert_funcs, draw_method):

         # Fit each component in order
         for k, comp in enumerate(self.components):
-            logger.info("Starting work on component %d (%s)", k, comp._type_name)
+            logger.verbose("Starting work on component %d (%s)", k, comp._type_name)

             # Update the convert_funcs to add a convolution by the other components.
             new_convert_funcs = []
@@ -276,7 +276,8 @@ def _finish_write(self, writer, logger):
         :param writer: A writer object that encapsulates the serialization format.
         :param logger: A logger object for logging debug info.
         """
-        logger = galsim.config.LoggerWrapper(logger)
+        from .config import LoggerWrapper
+        logger = LoggerWrapper(logger)
         chisq_dict = {
             'chisq' : self.chisq,
             'last_delta_chisq' : self.last_delta_chisq,
3 changes: 2 additions & 1 deletion piff/gp_interp.py
@@ -371,6 +371,7 @@ class GPInterpDepr(GPInterp):
     _type_name = 'GPInterp'

     def __init__(self, *args, logger=None, **kwargs):
-        logger = galsim.config.LoggerWrapper(logger)
+        from .config import LoggerWrapper
+        logger = LoggerWrapper(logger)
         logger.error("WARNING: The name GPInterp is deprecated. Use GP or GaussianProcess instead.")
         super().__init__(*args, logger=logger, **kwargs)
25 changes: 16 additions & 9 deletions piff/gsobject_model.py
@@ -260,7 +260,9 @@ def least_squares_fit(self, star, logger=None, convert_func=None, draw_method=No

         :returns: (flux, dx, dy, scale, g1, g2, flag)
         """
-        logger = galsim.config.LoggerWrapper(logger)
+        import warnings
+        from .config import LoggerWrapper
+        logger = LoggerWrapper(logger)
         logger.debug("Start least_squares")
         params = self._get_params(star)

@@ -270,9 +272,11 @@ def least_squares_fit(self, star, logger=None, convert_func=None, draw_method=No
         if params[3]**2 < star.data.local_wcs.pixelArea():
             params[3] = star.data.local_wcs.pixelArea()**0.5

-        results = scipy.optimize.least_squares(self._resid, params,
-                                               args=(star,convert_func,draw_method),
-                                               **self._scipy_kwargs)
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            results = scipy.optimize.least_squares(self._resid, params,
+                                                   args=(star,convert_func,draw_method),
+                                                   **self._scipy_kwargs)
         #logger.debug(results)
         logger.debug("results = %s",results.x)
         if not results.success:
@@ -306,7 +310,8 @@ def fit(self, star, fastfit=None, logger=None, convert_func=None, draw_method=No

         :returns: a new Star with the fitted parameters in star.fit
         """
-        logger = galsim.config.LoggerWrapper(logger)
+        from .config import LoggerWrapper
+        logger = LoggerWrapper(logger)
         if fastfit is None:
             fastfit = self._fastfit
         if convert_func is not None:
@@ -370,7 +375,8 @@ def initialize(self, star, logger=None, default_init=None):

         :returns: a new initialized Star.
         """
-        logger = galsim.config.LoggerWrapper(logger)
+        from .config import LoggerWrapper
+        logger = LoggerWrapper(logger)
         init = self._init if self._init is not None else default_init
         if init is None: init = 'hsm'
         logger.debug("initializing GSObject with method %s",init)
@@ -385,7 +391,7 @@ def initialize(self, star, logger=None, default_init=None):
             flux_scaling = 1.e-6
             # Also, make sure fit_flux=True. Otherwise, this won't work properly.
             if not self._fit_flux:
-                logger.info('Setting fit_flux=True, since init=zero')
+                logger.verbose('Setting fit_flux=True, since init=zero')
                 self._fit_flux = True
                 self.kwargs['fit_flux'] = True
         elif init == 'delta':
@@ -394,7 +400,7 @@ def initialize(self, star, logger=None, default_init=None):
             flux_scaling, size_scaling = init
             size *= size_scaling
             if not self._fit_flux:
-                logger.info(f'Setting fit_flux=True, since init={init}')
+                logger.verbose(f'Setting fit_flux=True, since init={init}')
                 self._fit_flux = True
                 self.kwargs['fit_flux'] = True
         elif init.startswith('(') and init.endswith(')'):
@@ -520,6 +526,7 @@ class GSObjectModelDepr(GSObjectModel):
     _type_name = 'GSObjectModel'

     def __init__(self, *args, logger=None, **kwargs):
-        logger = galsim.config.LoggerWrapper(logger)
+        from .config import LoggerWrapper
+        logger = LoggerWrapper(logger)
         logger.error("WARNING: The name GSObjectModel is deprecated. Use GSObject instead.")
         super().__init__(*args, logger=logger, **kwargs)
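Taken together, the module-level edits above all apply the same pattern: wrap an optional logger argument in Piff's own LoggerWrapper so that .verbose() (like the other levels) is safe to call even when no logger was passed in. A minimal sketch of that pattern in a hypothetical function, not taken from the diff:

from piff.config import LoggerWrapper

def some_step(stars, logger=None):
    logger = LoggerWrapper(logger)   # harmless no-op wrapper when logger is None
    logger.verbose("Starting work on %d stars", len(stars))
    return stars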