@@ -962,11 +962,16 @@ def log_(x):
     output_footprint = np.zeros(shape_opt)
     mask_opt = np.zeros(shape_opt[1:])
 
+
     # check that the beams are deconvolvable
     if commonbeam is not None:
-        for cube in cubes:
+        # assemble beams
+        beams = [cube.beam if hasattr(cube, 'beam') else cube.beams.common_beam()
+                 for cube in cubes]
+
+        for beam in beams:
             # this will raise an exception if any of the cubes have bad beams
-            commonbeam.deconvolve(cube.beam)
+            commonbeam.deconvolve(beam)
 
     if verbose:
         class tqdm(std_tqdm):
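
For reference, the deconvolvability check in the hunk above uses radio_beam: commonbeam.deconvolve(beam) raises a ValueError when an input beam cannot be deconvolved from the common beam (e.g. it is larger), and multi-beam cubes are first reduced to a single beam with cube.beams.common_beam(). A minimal sketch with made-up beam sizes:

    from radio_beam import Beam
    import astropy.units as u

    commonbeam = Beam(major=2.0*u.arcsec, minor=2.0*u.arcsec, pa=0*u.deg)
    good = Beam(major=1.5*u.arcsec, minor=1.5*u.arcsec, pa=0*u.deg)
    bad = Beam(major=2.5*u.arcsec, minor=2.5*u.arcsec, pa=0*u.deg)

    commonbeam.deconvolve(good)      # succeeds: returns the deconvolved Beam
    try:
        commonbeam.deconvolve(bad)   # larger than the common beam
    except ValueError:
        print("input beam cannot be deconvolved from the common beam")
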
@@ -976,15 +981,18 @@ def update(self, n=1):
 
     if method == 'cube':
         log_("Using Cube method")
+        # Cube method: Regrid the whole cube in one operation.
+        # Let reproject_and_coadd handle any iterations.
 
-        cubes = [cube.convolve_to(commonbeam, save_to_tmp_dir=save_to_tmp_dir)
-                 for cube in std_tqdm(cubes, desc="Convolve:")]
+        if commonbeam is not None:
+            cubes = [cube.convolve_to(commonbeam, save_to_tmp_dir=save_to_tmp_dir)
+                     for cube in std_tqdm(cubes, desc="Convolve:")]
 
         try:
             output_array, output_footprint = reproject_and_coadd(
                 [cube.hdu for cube in cubes],
                 target_header,
-                weights_in=[cube.hdu for cube in weightcubes] if weightcubes is None else None,
+                input_weights=[cube.hdu for cube in weightcubes] if weightcubes is not None else None,
                 output_array=output_array,
                 output_footprint=output_footprint,
                 reproject_function=reproject_interp,
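
The keyword change in the hunk above (weights_in -> input_weights) matches the parameter name accepted by reproject.mosaicking.reproject_and_coadd. As a rough, self-contained sketch of that call pattern with synthetic 2D fields and weights passed as plain arrays (all values below are invented placeholders):

    import numpy as np
    from astropy.wcs import WCS
    from reproject import reproject_interp
    from reproject.mosaicking import reproject_and_coadd

    def make_wcs(crval1):
        # simple TAN projection centered near (crval1, 0) deg
        w = WCS(naxis=2)
        w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
        w.wcs.crval = [crval1, 0.0]
        w.wcs.cdelt = [-0.01, 0.01]
        w.wcs.crpix = [32, 32]
        return w

    # two overlapping fields and matching per-pixel weights (uniform here)
    fields = [(np.random.rand(64, 64), make_wcs(10.0)),
              (np.random.rand(64, 64), make_wcs(10.3))]
    weights = [np.ones((64, 64)), np.ones((64, 64))]

    array, footprint = reproject_and_coadd(fields,
                                           make_wcs(10.15),
                                           shape_out=(64, 64),
                                           input_weights=weights,
                                           reproject_function=reproject_interp)
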
@@ -1000,14 +1008,21 @@ def update(self, n=1):
             output_array, output_footprint = reproject_and_coadd(
                 [cube.hdu for cube in cubes],
                 target_header,
-                weights_in=[cube.hdu for cube in weightcubes] if weightcubes is None else None,
+                input_weights=[cube.hdu for cube in weightcubes] if weightcubes is not None else None,
                 output_array=output_array,
                 output_footprint=output_footprint,
                 reproject_function=reproject_interp,
                 progressbar=tqdm if verbose else False,
             )
     elif method == 'channel':
         log_("Using Channel method")
+        # Channel method: manually downselect to go channel-by-channel in the
+        # input cubes before handing off material to reproject_and_coadd.  This
+        # approach gives us more direct & granular control over memory use and
+        # is likely better for large-area cubes.
+        # (Ideally we'd let Dask handle all the memory allocation choices under
+        # the hood, but as of early 2023, we do not yet have that capability.)
+
         outwcs = WCS(target_header)
         channels = outwcs.spectral.pixel_to_world(np.arange(target_header['NAXIS3']))
         dx = outwcs.spectral.proj_plane_pixel_scales()[0]
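
For orientation, the two lines just above drive the channel loop: the .spectral sub-WCS of the target WCS converts output channel indices to world coordinates, and the channel width dx is presumably used to select the matching channel range in each input cube (that step is not shown in this diff). A small standalone sketch with an invented frequency axis:

    import numpy as np
    from astropy.wcs import WCS

    # invented 3-axis WCS: two celestial axes plus a frequency axis
    outwcs = WCS(naxis=3)
    outwcs.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
    outwcs.wcs.crval = [266.5, -28.9, 115.271e9]
    outwcs.wcs.cdelt = [-0.001, 0.001, 5.0e5]
    outwcs.wcs.crpix = [50, 50, 1]
    outwcs.wcs.cunit = ['deg', 'deg', 'Hz']

    nchan = 50
    channels = outwcs.spectral.pixel_to_world(np.arange(nchan))
    dx = np.abs(channels[1] - channels[0])   # channel width; equivalent in spirit to
                                             # the proj_plane_pixel_scales() call above
    print(channels[0], dx)
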
@@ -1016,7 +1031,7 @@ def update(self, n=1):
         mincube_slices = [cube[cube.shape[0]//2:cube.shape[0]//2+1]
                           .subcube_slices_from_mask(cube[cube.shape[0]//2:cube.shape[0]//2+1].mask,
                                                     spatial_only=True)
-                          for cube in std_tqdm(cubes, desc='MinSubSlices:')]
+                          for cube in std_tqdm(cubes, desc='MinSubSlices:', delay=5)]
 
         pbar = tqdm(enumerate(channels), desc="Channels")
         for ii, channel in pbar:
@@ -1041,26 +1056,39 @@ def update(self, n=1):
                       for (ch1, ch2), slices, cube in std_tqdm(zip(chans, mincube_slices, cubes),
                                                                delay=5, desc='Subcubes')]
 
+            if weightcubes is not None:
+                sweightcubes = [cube[ch1:ch2, slices[1], slices[2]]
+                                for (ch1, ch2), slices, cube
+                                in std_tqdm(zip(chans, mincube_slices, weightcubes),
+                                            delay=5, desc='Subweight')]
+
 
             # reproject_and_coadd requires the actual arrays, so this is the convolution step
+
+            # commented out approach here: just let spectral-cube handle the convolution etc.
             #hdus = [(cube._get_filled_data(), cube.wcs)
             #        for cube in std_tqdm(scubes, delay=5, desc='Data/conv')]
 
+            # somewhat faster (?) version - ask the dask client to handle
+            # gathering the data
+            # (this version is capable of parallelizing over many cubes, in
+            # theory; the previous would treat each cube in serial)
             datas = [cube._get_filled_data() for cube in scubes]
             wcses = [cube.wcs for cube in scubes]
             with Client() as client:
                 datas = client.gather(datas)
             hdus = list(zip(datas, wcses))
 
             # project into array w/"dummy" third dimension
+            # (outputs are not used; data is written directly into the output array chunks)
             output_array_, output_footprint_ = reproject_and_coadd(
                 hdus,
                 outwcs[ii:ii+1, :, :],
                 shape_out=(1,) + output_array.shape[1:],
                 output_array=output_array[ii:ii+1, :, :],
                 output_footprint=output_footprint[ii:ii+1, :, :],
                 reproject_function=reproject_interp,
-                weights_in=weightcubes[ii:ii+1].hdu if weightcubes is not None else None,
+                input_weights=[cube.hdu for cube in sweightcubes] if weightcubes is not None else None,
                 progressbar=partial(tqdm, desc='coadd') if verbose else False,
             )
 
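
To summarize the pattern in the hunk above: the mosaic and footprint arrays are preallocated once, and each channel is reprojected through a one-channel slice of the output WCS directly into the corresponding slice of those arrays, so the values returned by reproject_and_coadd can be ignored. A rough, self-contained sketch of that pattern, assuming a reproject version that accepts N-dimensional inputs and the output_array/output_footprint keywords (as the code above evidently does); the data and WCS here are synthetic:

    import numpy as np
    from astropy.wcs import WCS
    from reproject import reproject_interp
    from reproject.mosaicking import reproject_and_coadd

    def make_wcs3d():
        w = WCS(naxis=3)
        w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
        w.wcs.crval = [266.5, -28.9, 115.271e9]
        w.wcs.cdelt = [-0.001, 0.001, 5.0e5]
        w.wcs.crpix = [16, 16, 1]
        w.wcs.cunit = ['deg', 'deg', 'Hz']
        return w

    nchan, ny, nx = 4, 32, 32
    outwcs = make_wcs3d()
    output_array = np.zeros((nchan, ny, nx))
    output_footprint = np.zeros((nchan, ny, nx))

    # one synthetic "input cube" on the same grid, just to exercise the call
    indata = np.random.rand(nchan, ny, nx)
    inwcs = make_wcs3d()

    for ii in range(nchan):
        hdus = [(indata[ii:ii+1], inwcs[ii:ii+1, :, :])]
        # return values discarded: the results land in the output array slices
        reproject_and_coadd(hdus,
                            outwcs[ii:ii+1, :, :],
                            shape_out=(1, ny, nx),
                            output_array=output_array[ii:ii+1, :, :],
                            output_footprint=output_footprint[ii:ii+1, :, :],
                            reproject_function=reproject_interp)
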