From 3f3773d9379872f293f316a5862c019d80a4f0da Mon Sep 17 00:00:00 2001 From: rcooke Date: Fri, 8 Sep 2023 11:00:27 +0100 Subject: [PATCH 01/81] SlicerIFU --- pypeit/find_objects.py | 18 +++++++++--------- pypeit/flatfield.py | 6 +++--- pypeit/par/pypeitpar.py | 4 ++-- pypeit/pypeit.py | 2 +- pypeit/scripts/show_2dspec.py | 2 +- pypeit/slittrace.py | 12 ++++++------ pypeit/specobj.py | 14 +++++++------- pypeit/specobjs.py | 20 ++++++++++---------- pypeit/spectrographs/gemini_gnirs.py | 2 +- pypeit/spectrographs/gtc_osiris.py | 2 +- pypeit/spectrographs/keck_kcwi.py | 2 +- 11 files changed, 42 insertions(+), 42 deletions(-) diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index 93ab69dfdf..3d5d82be94 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -42,7 +42,7 @@ class FindObjects: Specifies object being reduced. Should be 'science', 'standard', or 'science_coadd2d'. wv_calib (:class:`~pypeit.wavecalib.WaveCalib`, optional): - This is only used for the IFU child when a joint sky subtraction + This is only used for the SlicerIFU child when a joint sky subtraction is requested. waveTilts (:class:`~pypeit.wavetilts.WaveTilts`, optional): Calibration frame with arc/sky line tracing of the wavelength @@ -449,7 +449,7 @@ def find_objects(self, image, ivar, std_trace=None, # For nobj we take only the positive objects return sobjs_obj_single, nobj_single - # TODO maybe we don't need parent and children for this method. But IFU has a bunch of extra methods. + # TODO maybe we don't need parent and children for this method. But SlicerIFU has a bunch of extra methods. 
def find_objects_pypeline(self, image, ivar, std_trace=None, show_peaks=False, show_fits=False, show_trace=False, show=False, save_objfindQA=False, neg=False, debug=False, @@ -471,7 +471,7 @@ def get_platescale(self, slitord_id=None): Args: slitord_id (:obj:`int`, optional): - slit spat_id (MultiSlit, IFU) or ech_order (Echelle) value + slit spat_id (MultiSlit, SlicerIFU) or ech_order (Echelle) value Returns: :obj:`float`: plate scale in binned pixels @@ -678,7 +678,7 @@ def get_platescale(self, slitord_id=None): Args: slitord_id (:obj:`int`, optional): - slit spat_id (MultiSlit, IFU) or ech_order (Echelle) value + slit spat_id (MultiSlit, SlicerIFU) or ech_order (Echelle) value Returns: :obj:`float`: plate scale in binned pixels @@ -828,7 +828,7 @@ def get_platescale(self, slitord_id=None): Args: slitord_id (:obj:`int`, optional): - slit spat_id (MultiSlit, IFU) or ech_order (Echelle) value + slit spat_id (MultiSlit, SlicerIFU) or ech_order (Echelle) value Returns: :obj:`float`: plate scale in binned pixels @@ -941,9 +941,9 @@ def find_objects_pypeline(self, image, ivar, std_trace=None, return sobjs_ech, len(sobjs_ech) -class IFUFindObjects(MultiSlitFindObjects): +class SlicerIFUFindObjects(MultiSlitFindObjects): """ - Child of Reduce for IFU reductions + Child of Reduce for SlicerIFU reductions See parent doc string for Args and Attributes @@ -957,7 +957,7 @@ def find_objects_pypeline(self, image, ivar, std_trace=None, show=False, save_objfindQA=False, neg=False, debug=False, manual_extract_dict=None): """ - See MultiSlitReduce for slit-based IFU reductions + See MultiSlitReduce for SlicerIFU reductions """ if self.par['reduce']['cube']['slit_spec']: return super().find_objects_pypeline(image, ivar, std_trace=std_trace, @@ -1239,7 +1239,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), def global_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), previous_sky=None, show_fit=False, show=False, show_objs=False, 
objs_not_masked=False): """ - Perform global sky subtraction. This IFU-specific routine ensures that the + Perform global sky subtraction. This SlicerIFU-specific routine ensures that the edges of the slits are not trimmed, and performs a spatial and spectral correction using the sky spectrum, if requested. See Reduce.global_skysub() for parameter definitions. diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index 75096d4e72..ea13b2c3c3 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -1370,7 +1370,7 @@ def spatial_fit(self, norm_spec, spat_coo, median_slit_width, spat_gpm, gpm, deb def spatial_fit_finecorr(self, spat_illum, onslit_tweak, slit_idx, slit_spat, gpm, slit_trim=3, doqa=False): """ - Generate a relative scaling image for a slit-based IFU. All + Generate a relative scaling image for a slicer IFU. All slits are scaled relative to a reference slit, specified in the spectrograph settings file. @@ -1478,7 +1478,7 @@ def spatial_fit_finecorr(self, spat_illum, onslit_tweak, slit_idx, slit_spat, gp def extract_structure(self, rawflat_orig, slit_trim=3): """ - Generate a relative scaling image for a slit-based IFU. All + Generate a relative scaling image for a slicer IFU. All slits are scaled relative to a reference slit, specified in the spectrograph settings file. @@ -1550,7 +1550,7 @@ def extract_structure(self, rawflat_orig, slit_trim=3): def spectral_illumination(self, gpm=None, debug=False): """ - Generate a relative scaling image for a slit-based IFU. All + Generate a relative scaling image for a slicer IFU. All slits are scaled relative to a reference slit, specified in the spectrograph settings file. 
diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index 6108edcaae..58269d3876 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -330,7 +330,7 @@ def __init__(self, trim=None, apply_gain=None, orient=None, dtypes['use_specillum'] = bool descr['use_specillum'] = 'Use the relative spectral illumination profiles to correct ' \ 'the spectral illumination profile of each slit. This is ' \ - 'primarily used for IFUs. To use this, you must set ' \ + 'primarily used for slicer IFUs. To use this, you must set ' \ '``slit_illum_relative=True`` in the ``flatfield`` parameter set!' # Flexure @@ -673,7 +673,7 @@ def __init__(self, method=None, pixelflat_file=None, spec_samp_fine=None, 'for a multi-slit setup. If you set ``use_slitillum = ' \ 'True`` for any of the frames that use the flatfield ' \ 'model, this *must* be set to True. Currently, this is ' \ - 'only used for IFU reductions.' + 'only used for SlicerIFU reductions.' defaults['illum_iter'] = 0 dtypes['illum_iter'] = int diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index c733d0b1e1..5676abc0ad 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -785,7 +785,7 @@ def objfind_one(self, frames, det, bg_frames=None, std_outfile=None): (self.objtype == 'standard' and self.par['calibrations']['standardframe']['process']['spat_flexure_correct']): spat_flexure = sciImg.spat_flexure # Build the initial sky mask - initial_skymask = self.load_skyregions(initial_slits=self.spectrograph.pypeline != 'IFU', + initial_skymask = self.load_skyregions(initial_slits=self.spectrograph.pypeline != 'SlicerIFU', scifile=sciImg.files[0], frame=frames[0], spat_flexure=spat_flexure) # Deal with manual extraction diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index c2c0e67363..fdfe2f5b83 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -200,7 +200,7 @@ def main(args): msgs.info(f'Offseting slits by {sci_spat_flexure} pixels.') pypeline = 
hdu[f'{detname}-SCIIMG'].header['PYPELINE'] \ if 'PYPELINE' in hdu[f'{detname}-SCIIMG'].header else None - if pypeline in ['MultiSlit', 'IFU']: + if pypeline in ['MultiSlit', 'SlicerIFU']: slit_slid_IDs = slit_spat_id elif pypeline == 'Echelle': slit_slid_IDs = hdu[_ext].data['ech_order'] \ diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py index 7ff941178c..aaa266cd70 100644 --- a/pypeit/slittrace.py +++ b/pypeit/slittrace.py @@ -330,13 +330,13 @@ def slit_info(self): @property def slitord_id(self): """ - Return array of slit_spatId (MultiSlit, IFU) or ech_order (Echelle) values + Return array of slit_spatId (MultiSlit, SlicerIFU) or ech_order (Echelle) values Returns: `numpy.ndarray`_: """ - if self.pypeline in ['MultiSlit', 'IFU']: + if self.pypeline in ['MultiSlit', 'SlicerIFU']: return self.spat_id if self.pypeline == 'Echelle': return self.ech_order @@ -345,13 +345,13 @@ def slitord_id(self): @property def slitord_txt(self): """ - Return string indicating if the logs/QA should use "slit" (MultiSlit, IFU) or "order" (Echelle) + Return string indicating if the logs/QA should use "slit" (MultiSlit, SlicerIFU) or "order" (Echelle) Returns: str: Either 'slit' or 'order' """ - if self.pypeline in ['MultiSlit', 'IFU']: + if self.pypeline in ['MultiSlit', 'SlicerIFU']: return 'slit' if self.pypeline == 'Echelle': return 'order' @@ -381,7 +381,7 @@ def slitord_to_zero(self, slitord): int: zero-based index of the input spat_id """ - if self.pypeline in ['MultiSlit', 'IFU']: + if self.pypeline in ['MultiSlit', 'SlicerIFU']: return np.where(self.spat_id == slitord)[0][0] elif self.pypeline in ['Echelle']: return np.where(self.ech_order == slitord)[0][0] @@ -418,7 +418,7 @@ def get_slitlengths(self, initial=False, median=False): def get_radec_image(self, wcs, alignSplines, tilts, initial=True, flexure=None): """Generate an RA and DEC image for every pixel in the frame - NOTE: This function is currently only used for IFU reductions. 
+ NOTE: This function is currently only used for SlicerIFU reductions. Parameters ---------- diff --git a/pypeit/specobj.py b/pypeit/specobj.py index a7b428aa4c..530dfef9a1 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -40,14 +40,14 @@ class SpecObj(datamodel.DataContainer): Args: PYPELINE (:obj:`str`): Name of the ``PypeIt`` pipeline method. Allowed options are - MultiSlit, Echelle, or IFU. + MultiSlit, Echelle, or SlicerIFU. DET (:obj:`str`): The name of the detector or mosaic from which the spectrum was extracted. For example, DET01. OBJTYPE (:obj:`str`, optional): Object type. For example: 'unknown', 'standard', 'science'. SLITID (:obj:`int`, optional): - For multislit and IFU reductions, this is an identifier for the slit + For multislit and SlicerIFU reductions, this is an identifier for the slit (max=9999). ECH_ORDER (:obj:`int`, optional): Physical order number. @@ -265,7 +265,7 @@ def _validate(self): """ Validate the object. """ - pypelines = ['MultiSlit', 'IFU', 'Echelle'] + pypelines = ['MultiSlit', 'SlicerIFU', 'Echelle'] if self.PYPELINE not in pypelines: msgs.error(f'{self.PYPELINE} is not a known pipeline procedure. 
Options are: ' f"{', '.join(pypelines)}") @@ -310,7 +310,7 @@ def slit_order(self): return self.ECH_ORDER elif self.PYPELINE == 'MultiSlit': return self.SLITID - elif self.PYPELINE == 'IFU': + elif self.PYPELINE == 'SlicerIFU': return self.SLITID else: msgs.error("Bad PYPELINE") @@ -322,7 +322,7 @@ def slit_orderindx(self): return self.ECH_ORDERINDX elif self.PYPELINE == 'MultiSlit': return self.SLITID - elif self.PYPELINE == 'IFU': + elif self.PYPELINE == 'SlicerIFU': return self.SLITID else: msgs.error("Bad PYPELINE") @@ -377,7 +377,7 @@ def set_name(self): The ``PypeIt`` name depends on the type of data being processed: - - For multislit and IFU data, the name is + - For multislit and SlicerIFU data, the name is ``SPATnnnn-SLITmmmm-{DET}``, where ``nnnn`` is the nearest integer pixel in the spatial direction (at the spectral midpoint) where the object was extracted, ``mmmm`` is the slit identification @@ -414,7 +414,7 @@ def set_name(self): name += '{:04d}'.format(self.ECH_ORDER) self.ECH_NAME = ech_name self.NAME = name - elif self.PYPELINE in ['MultiSlit', 'IFU']: + elif self.PYPELINE in ['MultiSlit', 'SlicerIFU']: # Spat name = naming_model['spat'] if self['SPAT_PIXPOS'] is None: diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 7d72fb81d5..e45c155203 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -257,7 +257,7 @@ def unpack_object(self, ret_flam=False, extract_type='OPT'): meta_spec['DET'] = np.array(detector) meta_spec['DISPNAME'] = self.header['DISPNAME'] # Return - if self[0].PYPELINE in ['MultiSlit', 'IFU'] and self.nobj == 1: + if self[0].PYPELINE in ['MultiSlit', 'SlicerIFU'] and self.nobj == 1: meta_spec['ECH_ORDERS'] = None return wave.reshape(nspec), flux.reshape(nspec), flux_ivar.reshape(nspec), \ flux_gpm.reshape(nspec), trace_spec.reshape(nspec), trace_spat.reshape(nspec), meta_spec, self.header @@ -282,7 +282,7 @@ def get_std(self, multi_spec_det=None): """ # Is this MultiSlit or Echelle pypeline = (self.PYPELINE)[0] - if 
'MultiSlit' in pypeline or 'IFU' in pypeline: + if 'MultiSlit' in pypeline or 'SlicerIFU' in pypeline: # Have to do a loop to extract the counts for all objects if self.OPT_COUNTS[0] is not None: SNR = np.median(self.OPT_COUNTS * np.sqrt(self.OPT_COUNTS_IVAR), axis=1) @@ -364,7 +364,7 @@ def append_neg(self, sobjs_neg): sobjs_neg.OBJID = -sobjs_neg.OBJID elif sobjs_neg[0].PYPELINE == 'MultiSlit': sobjs_neg.OBJID = -sobjs_neg.OBJID - elif sobjs_neg[0].PYPELINE == 'IFU': + elif sobjs_neg[0].PYPELINE == 'SlicerIFU': sobjs_neg.OBJID = -sobjs_neg.OBJID else: msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) @@ -385,7 +385,7 @@ def purge_neg(self): index = self.ECH_OBJID < 0 elif self[0].PYPELINE == 'MultiSlit': index = self.OBJID < 0 - elif self[0].PYPELINE == 'IFU': + elif self[0].PYPELINE == 'SlicerIFU': index = self.OBJID < 0 else: msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) @@ -403,7 +403,7 @@ def make_neg_pos(self): index = self.ECH_OBJID < 0 elif self[0].PYPELINE == 'MultiSlit': index = self.OBJID < 0 - elif self[0].PYPELINE == 'IFU': + elif self[0].PYPELINE == 'SlicerIFU': index = self.OBJID < 0 else: msgs.error("Should not get here") @@ -429,7 +429,7 @@ def slitorder_indices(self, slitorder): indx = self.ECH_ORDER == slitorder elif self[0].PYPELINE == 'MultiSlit': indx = self.SLITID == slitorder - elif self[0].PYPELINE == 'IFU': + elif self[0].PYPELINE == 'SlicerIFU': indx = self.SLITID == slitorder else: msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) @@ -451,7 +451,7 @@ def name_indices(self, name): indx = self.ECH_NAME == name elif self[0].PYPELINE == 'MultiSlit': indx = self.NAME == name - elif self[0].PYPELINE == 'IFU': + elif self[0].PYPELINE == 'SlicerIFU': indx = self.NAME == name else: msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) @@ -481,7 +481,7 @@ def slitorder_objid_indices(self, slitorder, objid, toler=5): indx = (self.ECH_ORDER == 
slitorder) & (self.ECH_OBJID == objid) elif self[0].PYPELINE == 'MultiSlit': indx = (np.abs(self.SLITID - slitorder) <= toler) & (self.OBJID == objid) - elif self[0].PYPELINE == 'IFU': + elif self[0].PYPELINE == 'SlicerIFU': indx = (self.SLITID == slitorder) & (self.OBJID == objid) else: msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) @@ -863,7 +863,7 @@ def write_info(self, outfile, pypeline): spat_fracpos.append(specobj.SPAT_FRACPOS) slits.append(specobj.SLITID) names.append(specobj.NAME) - elif pypeline == 'IFU': + elif pypeline == 'SlicerIFU': spat_fracpos.append(specobj.SPAT_FRACPOS) slits.append(specobj.SLITID) names.append(specobj.NAME) @@ -909,7 +909,7 @@ def write_info(self, outfile, pypeline): if pypeline == 'MultiSlit': obj_tbl['slit'] = slits obj_tbl['slit'].format = 'd' - elif pypeline == 'IFU': + elif pypeline == 'SlicerIFU': obj_tbl['slit'] = slits obj_tbl['slit'].format = 'd' elif pypeline == 'Echelle': diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index eca3e77cc9..54f07c3f16 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -577,7 +577,7 @@ class GNIRSIFUSpectrograph(GeminiGNIRSSpectrograph): # * Have a high threshold for detecting slit edges (par['calibrations']['slitedges']['edge_thresh'] = 100.), and have an option when inserting new traces to be the median of all other slit lengths (or a fit to the slit lengths). 
# * Need to store a wavelength solution for different grating options (Note, the Holy Grail algorithm works pretty well, most of the time) name = 'gemini_gnirs_ifu' - pypeline = 'IFU' + pypeline = 'SlicerIFU' def init_meta(self): super().init_meta() diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 9c8777746d..3c6ca8dedf 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -430,7 +430,7 @@ def bpm(self, filename, det, shape=None, msbias=None): class GTCMAATSpectrograph(GTCOSIRISPlusSpectrograph): - pypeline = 'IFU' + pypeline = 'SlicerIFU' name = 'gtc_maat' def init_meta(self): diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 9a7f46f4cc..36fdfaee3f 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -40,7 +40,7 @@ class KeckKCWIKCRMSpectrograph(spectrograph.Spectrograph): """ ndet = 1 telescope = telescopes.KeckTelescopePar() - pypeline = 'IFU' + pypeline = 'SlicerIFU' supported = True def __init__(self): From a60473f94a5e891ac4353f9153204f60f2c43b75 Mon Sep 17 00:00:00 2001 From: rcooke Date: Fri, 8 Sep 2023 21:47:38 +0100 Subject: [PATCH 02/81] rm coadd --- pypeit/core/datacube.py | 878 +--------------------------------------- 1 file changed, 1 insertion(+), 877 deletions(-) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 2e0115c10a..2e0d6b1c11 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -31,166 +31,6 @@ from IPython import embed -class DataCube(datamodel.DataContainer): - """ - DataContainer to hold the products of a datacube - - The datamodel attributes are: - - .. 
include:: ../include/class_datamodel_datacube.rst - - Args: - flux (`numpy.ndarray`_): - The science datacube (nwave, nspaxel_y, nspaxel_x) - sig (`numpy.ndarray`_): - The error datacube (nwave, nspaxel_y, nspaxel_x) - bpm (`numpy.ndarray`_): - The bad pixel mask of the datacube (nwave, nspaxel_y, nspaxel_x). - True values indicate a bad pixel - blaze_wave (`numpy.ndarray`_): - Wavelength array of the spectral blaze function - blaze_spec (`numpy.ndarray`_): - The spectral blaze function - sensfunc (`numpy.ndarray`_, None): - Sensitivity function (nwave,). Only saved if the data are fluxed. - PYP_SPEC (str): - Name of the PypeIt Spectrograph - fluxed (bool): - If the cube has been flux calibrated, this will be set to "True" - - Attributes: - head0 (`astropy.io.fits.Header`_): - Primary header - filename (str): - Filename to use when loading from file - spect_meta (:obj:`dict`): - Parsed meta from the header - spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): - Build from PYP_SPEC - - """ - version = '1.1.0' - - datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating, - descr='Flux datacube in units of counts/s/Ang/arcsec^2 or ' - '10^-17 erg/s/cm^2/Ang/arcsec^2'), - 'sig': dict(otype=np.ndarray, atype=np.floating, - descr='Error datacube (matches units of flux)'), - 'bpm': dict(otype=np.ndarray, atype=np.uint8, - descr='Bad pixel mask of the datacube (0=good, 1=bad)'), - 'blaze_wave': dict(otype=np.ndarray, atype=np.floating, - descr='Wavelength array of the spectral blaze function'), - 'blaze_spec': dict(otype=np.ndarray, atype=np.floating, - descr='The spectral blaze function'), - 'sensfunc': dict(otype=np.ndarray, atype=np.floating, - descr='Sensitivity function 10^-17 erg/(counts/cm^2)'), - 'PYP_SPEC': dict(otype=str, descr='PypeIt: Spectrograph name'), - 'fluxed': dict(otype=bool, descr='Boolean indicating if the datacube is fluxed.')} - - internals = ['head0', - 'filename', - 'spectrograph', - 'spect_meta' - ] - - def 
__init__(self, flux, sig, bpm, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, - fluxed=None): - - args, _, _, values = inspect.getargvalues(inspect.currentframe()) - _d = dict([(k, values[k]) for k in args[1:]]) - # Setup the DataContainer - datamodel.DataContainer.__init__(self, d=_d) - - def _bundle(self): - """ - Over-write default _bundle() method to separate the DetectorContainer - into its own HDU - - Returns: - :obj:`list`: A list of dictionaries, each list element is - written to its own fits extension. See the description - above. - """ - d = [] - # Rest of the datamodel - for key in self.keys(): - # Skip Nones - if self[key] is None: - continue - # Array? - if self.datamodel[key]['otype'] == np.ndarray: - tmp = {} - if self.datamodel[key]['atype'] == np.floating: - tmp[key] = self[key].astype(np.float32) - else: - tmp[key] = self[key] - d.append(tmp) - else: - # Add to header of the primary image - d[0][key] = self[key] - # Return - return d - - def to_file(self, ofile, primary_hdr=None, hdr=None, **kwargs): - """ - Over-load :func:`~pypeit.datamodel.DataContainer.to_file` - to deal with the header - - Args: - ofile (:obj:`str`): - Filename - primary_hdr (`astropy.io.fits.Header`_, optional): - Base primary header. Updated with new subheader keywords. If - None, initialized using :func:`~pypeit.io.initialize_header`. - wcs (`astropy.io.fits.Header`_, optional): - The World Coordinate System, represented by a fits header - kwargs (dict): - Keywords passed directly to parent ``to_file`` function. 
- - """ - if primary_hdr is None: - primary_hdr = io.initialize_header() - # Build the header - if self.head0 is not None and self.PYP_SPEC is not None: - spectrograph = load_spectrograph(self.PYP_SPEC) - subheader = spectrograph.subheader_for_spec(self.head0, self.head0) - else: - subheader = {} - # Add em in - for key in subheader: - primary_hdr[key] = subheader[key] - # Do it - super(DataCube, self).to_file(ofile, primary_hdr=primary_hdr, hdr=hdr, **kwargs) - - @classmethod - def from_file(cls, ifile): - """ - Over-load :func:`~pypeit.datamodel.DataContainer.from_file` - to deal with the header - - Args: - ifile (str): Filename holding the object - """ - with io.fits_open(ifile) as hdu: - # Read using the base class - self = super().from_hdu(hdu) - # Internals - self.filename = ifile - self.head0 = hdu[1].header # Actually use the first extension here, since it contains the WCS - # Meta - self.spectrograph = load_spectrograph(self.PYP_SPEC) - self.spect_meta = self.spectrograph.parse_spec_header(hdu[0].header) - return self - - @property - def ivar(self): - return utils.inverse(self.sig**2) - - @property - def wcs(self): - return wcs.WCS(self.head0) - - def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pressure, temperature, rel_humidity): """ @@ -610,7 +450,7 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): skyimg = np.zeros((numwave, numim)) # Just a dummy array - not needed thismask = np.ones((numwave, numim)) # Just a dummy array - not needed oprof = model.reshape((numim, numwave)).T - sobj = specobj.SpecObj('IFU', 'DET01', SLITID=0) + sobj = specobj.SpecObj('SlicerIFU', 'DET01', SLITID=0) extract.extract_optimal(sciimg, ivar, optmask, waveimg, skyimg, thismask, oprof, sobj) opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG**2, sobj.OPT_MASK # Setup the return values @@ -1661,719 +1501,3 @@ def get_output_whitelight_filename(outfile): out_wl_filename = os.path.splitext(outfile)[0] + 
"_whitelight.fits" return out_wl_filename - -def coadd_cube(files, opts, spectrograph=None, parset=None, overwrite=False): - """ Main routine to coadd spec2D files into a 3D datacube - - Args: - files (:obj:`list`): - List of all spec2D files - opts (:obj:`dict`): - coadd2d options associated with each spec2d file - spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional): - The name or instance of the spectrograph used to obtain the data. - If None, this is pulled from the file header. - parset (:class:`~pypeit.par.pypeitpar.PypeItPar`, optional): - An instance of the parameter set. If None, assumes that detector 1 - is the one reduced and uses the default reduction parameters for the - spectrograph (see - :func:`~pypeit.spectrographs.spectrograph.Spectrograph.default_pypeit_par` - for the relevant spectrograph class). - overwrite (:obj:`bool`, optional): - Overwrite the output file, if it exists? - """ - if spectrograph is None: - with fits.open(files[0]) as hdu: - spectrograph = hdu[0].header['PYP_SPEC'] - - if isinstance(spectrograph, str): - spec = load_spectrograph(spectrograph) - specname = spectrograph - else: - # Assume it's a Spectrograph instance - spec = spectrograph - specname = spectrograph.name - - # Grab the parset, if not provided - if parset is None: - # TODO :: Use config_specific_par instead? - parset = spec.default_pypeit_par() - cubepar = parset['reduce']['cube'] - flatpar = parset['calibrations']['flatfield'] - senspar = parset['sensfunc'] - - # prep - numfiles = len(files) - method = cubepar['method'].lower() - combine = cubepar['combine'] - align = cubepar['align'] - # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. 
- if numfiles == 1 and cubepar["reference_image"] is None: - if not align: - msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image") - msgs.info("Setting 'align' to False") - align = False - if opts['ra_offset'] is not None: - if not align: - msgs.warn("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.") - msgs.info("Setting 'align' to True") - align = True - # TODO :: The default behaviour (combine=False, align=False) produces a datacube that uses the instrument WCS - # It should be possible (and perhaps desirable) to do a spatial alignment (i.e. align=True), apply this to the - # RA,Dec values of each pixel, and then use the instrument WCS to save the output (or, just adjust the crval). - # At the moment, if the user wishes to spatially align the frames, a different WCS is generated. - if histogramdd is None: - msgs.warn("Generating a datacube is faster if you install fast-histogram:"+msgs.newline()+ - "https://pypi.org/project/fast-histogram/") - if method != 'ngp': - msgs.warn("Forcing NGP algorithm, because fast-histogram is not installed") - method = 'ngp' - - # Determine what method is requested - spec_subpixel, spat_subpixel = 1, 1 - if method == "subpixel": - msgs.info("Adopting the subpixel algorithm to generate the datacube.") - spec_subpixel, spat_subpixel = cubepar['spec_subpixel'], cubepar['spat_subpixel'] - elif method == "ngp": - msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.") - else: - msgs.error(f"The following datacube method is not allowed: {method}") - - # Get the detector number and string representation - det = 1 if parset['rdx']['detnum'] is None else parset['rdx']['detnum'] - detname = spec.get_det_name(det) - - # Check if the output file exists - if combine: - outfile = get_output_filename("", cubepar['output_filename'], combine) - out_whitelight = get_output_whitelight_filename(outfile) - if os.path.exists(outfile) and not overwrite: - 
msgs.error("Output filename already exists:"+msgs.newline()+outfile) - if os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite: - msgs.error("Output filename already exists:"+msgs.newline()+out_whitelight) - else: - # Finally, if there's just one file, check if the output filename is given - if numfiles == 1 and cubepar['output_filename'] != "": - outfile = get_output_filename("", cubepar['output_filename'], True, -1) - out_whitelight = get_output_whitelight_filename(outfile) - if os.path.exists(outfile) and not overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + outfile) - if os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) - else: - for ff in range(numfiles): - outfile = get_output_filename(files[ff], cubepar['output_filename'], combine, ff+1) - out_whitelight = get_output_whitelight_filename(outfile) - if os.path.exists(outfile) and not overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + outfile) - if os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) - - # Check the reference cube and image exist, if requested - fluxcal = False - blaze_wave, blaze_spec = None, None - blaze_spline, flux_spline = None, None - if cubepar['standard_cube'] is not None: - fluxcal = True - ss_file = cubepar['standard_cube'] - if not os.path.exists(ss_file): - msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file) - msgs.info(f"Loading standard star cube: {ss_file:s}") - # Load the standard star cube and retrieve its RA + DEC - stdcube = fits.open(ss_file) - star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2'] - - # Extract a spectrum of the standard star - wave, Nlam_star, Nlam_ivar_star, gpm_star = extract_standard_spec(stdcube) - - # Extract the 
information about the blaze - if cubepar['grating_corr']: - blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data - blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, - kind='linear', bounds_error=False, fill_value="extrapolate") - # The first standard star cube is used as the reference blaze spline - if blaze_spline is None: - blaze_wave, blaze_spec = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data - blaze_spline = interp1d(blaze_wave, blaze_spec, - kind='linear', bounds_error=False, fill_value="extrapolate") - # Perform a grating correction - grat_corr = correct_grating_shift(wave.value, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline) - # Apply the grating correction to the standard star spectrum - Nlam_star /= grat_corr - Nlam_ivar_star *= grat_corr**2 - - # Read in some information above the standard star - std_dict = flux_calib.get_standard_spectrum(star_type=senspar['star_type'], - star_mag=senspar['star_mag'], - ra=star_ra, dec=star_dec) - # Calculate the sensitivity curve - # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines. - msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... 
this will be deprecated soon") - zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm =\ - flux_calib.fit_zeropoint(wave.value, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, - mask_hydrogen_lines=senspar['mask_hydrogen_lines'], - mask_helium_lines=senspar['mask_helium_lines'], - hydrogen_mask_wid=senspar['hydrogen_mask_wid'], - nresln=senspar['UVIS']['nresln'], resolution=senspar['UVIS']['resolution'], - trans_thresh=senspar['UVIS']['trans_thresh'], polyorder=senspar['polyorder'], - polycorrect=senspar['UVIS']['polycorrect'], polyfunc=senspar['UVIS']['polyfunc']) - wgd = np.where(zeropoint_fit_gpm) - sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) - flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") - - # If a reference image has been set, check that it exists - if cubepar['reference_image'] is not None: - if not os.path.exists(cubepar['reference_image']): - msgs.error("Reference image does not exist:" + msgs.newline() + cubepar['reference_image']) - - # Initialise arrays for storage - ifu_ra, ifu_dec = np.array([]), np.array([]) # The RA and Dec at the centre of the IFU, as stored in the header - all_ra, all_dec, all_wave = np.array([]), np.array([]), np.array([]) - all_sci, all_ivar, all_idx, all_wghts = np.array([]), np.array([]), np.array([]), np.array([]) - all_spatpos, all_specpos, all_spatid = np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int) - all_tilts, all_slits, all_align = [], [], [] - all_wcs = [] - dspat = None if cubepar['spatial_delta'] is None else cubepar['spatial_delta']/3600.0 # binning size on the sky (/3600 to convert to degrees) - dwv = cubepar['wave_delta'] # binning size in wavelength direction (in Angstroms) - wave_ref = None - mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. 
- weights = np.ones(numfiles) # Weights to use when combining cubes - flat_splines = dict() # A dictionary containing the splines of the flatfield - # Load the default scaleimg frame for the scale correction - scalecorr_default = "none" - relScaleImgDef = np.array([1]) - if cubepar['scale_corr'] is not None: - if cubepar['scale_corr'] == "image": - msgs.info("The default relative spectral illumination correction will use the science image") - scalecorr_default = "image" - else: - msgs.info("Loading default scale image for relative spectral illumination correction:" + - msgs.newline() + cubepar['scale_corr']) - try: - spec2DObj = spec2dobj.Spec2DObj.from_file(cubepar['scale_corr'], detname) - relScaleImgDef = spec2DObj.scaleimg - scalecorr_default = cubepar['scale_corr'] - except: - msgs.warn("Could not load scaleimg from spec2d file:" + msgs.newline() + - cubepar['scale_corr'] + msgs.newline() + - "scale correction will not be performed unless you have specified the correct" + msgs.newline() + - "scale_corr file in the spec2d block") - cubepar['scale_corr'] = None - scalecorr_default = "none" - - # Load the default sky frame to be used for sky subtraction - skysub_default = "image" - skyImgDef, skySclDef = None, None # This is the default behaviour (i.e. 
to use the "image" for the sky subtraction) - if cubepar['skysub_frame'] in [None, 'none', '', 'None']: - skysub_default = "none" - skyImgDef = np.array([0.0]) # Do not perform sky subtraction - skySclDef = np.array([0.0]) # Do not perform sky subtraction - elif cubepar['skysub_frame'].lower() == "image": - msgs.info("The sky model in the spec2d science frames will be used for sky subtraction" +msgs.newline() + - "(unless specific skysub frames have been specified)") - skysub_default = "image" - else: - msgs.info("Loading default image for sky subtraction:" + - msgs.newline() + cubepar['skysub_frame']) - try: - spec2DObj = spec2dobj.Spec2DObj.from_file(cubepar['skysub_frame'], detname) - skysub_exptime = fits.open(cubepar['skysub_frame'])[0].header['EXPTIME'] - except: - msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + cubepar['skysub_frame']) - skysub_default = cubepar['skysub_frame'] - skyImgDef = spec2DObj.skymodel/skysub_exptime # Sky counts/second - skySclDef = spec2DObj.scaleimg - - # Load all spec2d files and prepare the data for making a datacube - for ff, fil in enumerate(files): - # Load it up - msgs.info("Loading PypeIt spec2d frame:" + msgs.newline() + fil) - spec2DObj = spec2dobj.Spec2DObj.from_file(fil, detname) - detector = spec2DObj.detector - spat_flexure = None #spec2DObj.sci_spat_flexure - - # Load the header - hdr = spec2DObj.head0 - ifu_ra = np.append(ifu_ra, spec.compound_meta([hdr], 'ra')) - ifu_dec = np.append(ifu_dec, spec.compound_meta([hdr], 'dec')) - - # Get the exposure time - exptime = hdr['EXPTIME'] - - # Setup for PypeIt imports - msgs.reset(verbosity=2) - - # TODO :: Consider loading all calibrations into a single variable. 
- - # Initialise the slit edges - msgs.info("Constructing slit image") - slits = spec2DObj.slits - slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spat_flexure) - slits_left, slits_right, _ = slits.select_edges(initial=True, flexure=spat_flexure) - - # The order of operations below proceeds as follows: - # (1) Get science image - # (2) Subtract sky (note, if a joint fit has been performed, the relative scale correction is applied in the reduction!) - # (3) Apply relative scale correction to both science and ivar - - # Set the default behaviour if a global skysub frame has been specified - this_skysub = skysub_default - if skysub_default == "image": - skyImg = spec2DObj.skymodel - skyScl = spec2DObj.scaleimg - else: - skyImg = skyImgDef.copy() * exptime - skyScl = skySclDef.copy() - # See if there's any changes from the default behaviour - if opts['skysub_frame'][ff] is not None: - if opts['skysub_frame'][ff].lower() == 'default': - if skysub_default == "image": - skyImg = spec2DObj.skymodel - skyScl = spec2DObj.scaleimg - this_skysub = "image" # Use the current spec2d for sky subtraction - else: - skyImg = skyImgDef.copy() * exptime - skyScl = skySclDef.copy() * exptime - this_skysub = skysub_default # Use the global value for sky subtraction - elif opts['skysub_frame'][ff].lower() == 'image': - skyImg = spec2DObj.skymodel - skyScl = spec2DObj.scaleimg - this_skysub = "image" # Use the current spec2d for sky subtraction - elif opts['skysub_frame'][ff].lower() == 'none': - skyImg = np.array([0.0]) - skyScl = np.array([1.0]) - this_skysub = "none" # Don't do sky subtraction - else: - # Load a user specified frame for sky subtraction - msgs.info("Loading skysub frame:" + msgs.newline() + opts['skysub_frame'][ff]) - try: - spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts['skysub_frame'][ff], detname) - skysub_exptime = fits.open(opts['skysub_frame'][ff])[0].header['EXPTIME'] - except: - msgs.error("Could not load skysub image from spec2d file:" + 
msgs.newline() + opts['skysub_frame'][ff]) - # TODO :: Consider allowing the actual frame (instead of the skymodel) to be used as the skysub image - make sure the BPM is carried over. - # :: Allow sky data fitting (i.e. scale the flux of a skysub frame to the science frame data) - skyImg = spec2DObj_sky.skymodel * exptime / skysub_exptime # Sky counts - skyScl = spec2DObj_sky.scaleimg - this_skysub = opts['skysub_frame'][ff] # User specified spec2d for sky subtraction - if this_skysub == "none": - msgs.info("Sky subtraction will not be performed.") - else: - msgs.info("Using the following frame for sky subtraction:"+msgs.newline()+this_skysub) - - # Load the relative scale image, if something other than the default has been provided - this_scalecorr = scalecorr_default - relScaleImg = relScaleImgDef.copy() - if opts['scale_corr'][ff] is not None: - if opts['scale_corr'][ff].lower() == 'default': - if scalecorr_default == "image": - relScaleImg = spec2DObj.scaleimg - this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling - else: - this_scalecorr = scalecorr_default # Use the default value for the scale correction - elif opts['scale_corr'][ff].lower() == 'image': - relScaleImg = spec2DObj.scaleimg - this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling - elif opts['scale_corr'][ff].lower() == 'none': - relScaleImg = np.array([1]) - this_scalecorr = "none" # Don't do relative spectral illumination scaling - else: - # Load a user specified frame for sky subtraction - msgs.info("Loading the following frame for the relative spectral illumination correction:" + - msgs.newline() + opts['scale_corr'][ff]) - try: - spec2DObj_scl = spec2dobj.Spec2DObj.from_file(opts['scale_corr'][ff], detname) - except: - msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts['skysub_frame'][ff]) - relScaleImg = spec2DObj_scl.scaleimg - this_scalecorr = opts['scale_corr'][ff] - if 
this_scalecorr == "none": - msgs.info("Relative spectral illumination correction will not be performed.") - else: - msgs.info("Using the following frame for the relative spectral illumination correction:" + - msgs.newline()+this_scalecorr) - - # Prepare the relative scaling factors - relSclSky = skyScl/spec2DObj.scaleimg # This factor ensures the sky has the same relative scaling as the science frame - relScale = spec2DObj.scaleimg/relScaleImg # This factor is applied to the sky subtracted science frame - - # Extract the relevant information from the spec2d file - sciImg = (spec2DObj.sciimg - skyImg*relSclSky)*relScale # Subtract sky and apply relative illumination - ivar = spec2DObj.ivarraw / relScale**2 - waveimg = spec2DObj.waveimg - bpmmask = spec2DObj.bpmmask - - # TODO :: Really need to write some detailed information in the docs about all of the various corrections that can optionally be applied - - # TODO :: Include a flexure correction from the sky frame? Note, you cannot use the waveimg from a sky frame, - # since the heliocentric correction may have been applied to the sky frame. Need to recalculate waveimg using - # the slitshifts from a skyimage, and then apply the vel_corr from the science image. 
- - wnonzero = (waveimg != 0.0) - if not np.any(wnonzero): - msgs.error("The wavelength image contains only zeros - You need to check the data reduction.") - wave0 = waveimg[wnonzero].min() - # Calculate the delta wave in every pixel on the slit - waveimp = np.roll(waveimg, 1, axis=0) - waveimn = np.roll(waveimg, -1, axis=0) - dwaveimg = np.zeros_like(waveimg) - # All good pixels - wnz = np.where((waveimg!=0) & (waveimp!=0)) - dwaveimg[wnz] = np.abs(waveimg[wnz]-waveimp[wnz]) - # All bad pixels - wnz = np.where((waveimg!=0) & (waveimp==0)) - dwaveimg[wnz] = np.abs(waveimg[wnz]-waveimn[wnz]) - # All endpoint pixels - dwaveimg[0, :] = np.abs(waveimg[0, :] - waveimn[0, :]) - dwaveimg[-1, :] = np.abs(waveimg[-1, :] - waveimp[-1, :]) - dwv = np.median(dwaveimg[dwaveimg != 0.0]) if cubepar['wave_delta'] is None else cubepar['wave_delta'] - - msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv)) - - # Obtain the minimum and maximum wavelength of all slits - if mnmx_wv is None: - mnmx_wv = np.zeros((len(files), slits.nslits, 2)) - for slit_idx, slit_spat in enumerate(slits.spat_id): - onslit_init = (slitid_img_init == slit_spat) - mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init]) - mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init]) - - # Remove edges of the spectrum where the sky model is bad - sky_is_good = make_good_skymask(slitid_img_init, spec2DObj.tilts) - - # Construct a good pixel mask - # TODO: This should use the mask function to figure out which elements are masked. 
- onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good - - # Grab the WCS of this frame - frame_wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv) - all_wcs.append(copy.deepcopy(frame_wcs)) - - # Find the largest spatial scale of all images being combined - # TODO :: probably need to put this in the DetectorContainer - pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This should be degrees/pixel - slscl = spec.get_meta_value([spec2DObj.head0], 'slitwid') - if dspat is None: - dspat = max(pxscl, slscl) - if pxscl > dspat: - msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format(3600.0*dspat, 3600.0*pxscl)) - if slscl > dspat: - msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format(3600.0*dspat, 3600.0*slscl)) - - # Loading the alignments frame for these data - alignments = None - if cubepar['astrometric']: - key = alignframe.Alignments.calib_type.upper() - if key in spec2DObj.calibs: - alignfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) - if os.path.exists(alignfile) and cubepar['astrometric']: - msgs.info("Loading alignments") - alignments = alignframe.Alignments.from_file(alignfile) - else: - msgs.warn(f'Processed alignment frame not recorded or not found!') - msgs.info("Using slit edges for astrometric transform") - else: - msgs.info("Using slit edges for astrometric transform") - # If nothing better was provided, use the slit edges - if alignments is None: - left, right, _ = slits.select_edges(initial=True, flexure=spat_flexure) - locations = [0.0, 1.0] - traces = np.append(left[:,None,:], right[:,None,:], axis=1) - else: - locations = parset['calibrations']['alignment']['locations'] - traces = alignments.traces - # Generate an RA/DEC image - msgs.info("Generating RA/DEC image") - alignSplines = alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts) - raimg, 
decimg, minmax = slits.get_radec_image(frame_wcs, alignSplines, spec2DObj.tilts, - initial=True, flexure=spat_flexure) - # Perform the DAR correction - if wave_ref is None: - wave_ref = 0.5*(np.min(waveimg[onslit_gpm]) + np.max(waveimg[onslit_gpm])) - # Get DAR parameters - raval = spec.get_meta_value([spec2DObj.head0], 'ra') - decval = spec.get_meta_value([spec2DObj.head0], 'dec') - obstime = spec.get_meta_value([spec2DObj.head0], 'obstime') - pressure = spec.get_meta_value([spec2DObj.head0], 'pressure') - temperature = spec.get_meta_value([spec2DObj.head0], 'temperature') - rel_humidity = spec.get_meta_value([spec2DObj.head0], 'humidity') - coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) - location = spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) - if pressure == 0.0: - msgs.warn("Pressure is set to zero - DAR correction will not be performed") - else: - msgs.info("DAR correction parameters:"+msgs.newline() + - " Pressure = {0:f} bar".format(pressure) + msgs.newline() + - " Temperature = {0:f} deg C".format(temperature) + msgs.newline() + - " Humidity = {0:f}".format(rel_humidity)) - ra_corr, dec_corr = correct_dar(waveimg[onslit_gpm], coord, obstime, location, - pressure * units.bar, temperature * units.deg_C, rel_humidity, wave_ref=wave_ref) - raimg[onslit_gpm] += ra_corr*np.cos(np.mean(decimg[onslit_gpm]) * np.pi / 180.0) - decimg[onslit_gpm] += dec_corr - - # Get copies of arrays to be saved - wave_ext = waveimg[onslit_gpm].copy() - flux_ext = sciImg[onslit_gpm].copy() - ivar_ext = ivar[onslit_gpm].copy() - dwav_ext = dwaveimg[onslit_gpm].copy() - - # Correct for sensitivity as a function of grating angle - # (this assumes the spectrum of the flatfield lamp has the same shape for all setups) - key = flatfield.FlatImages.calib_type.upper() - if key not in spec2DObj.calibs: - msgs.error('Processed flat calibration file not recorded by spec2d file!') - flatfile = 
os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) - if cubepar['grating_corr'] and flatfile not in flat_splines.keys(): - msgs.info("Calculating relative sensitivity for grating correction") - # Check if the Flat file exists - if not os.path.exists(flatfile): - msgs.error("Grating correction requested, but the following file does not exist:" + - msgs.newline() + flatfile) - # Load the Flat file - flatimages = flatfield.FlatImages.from_file(flatfile) - total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, spat_flexure=spat_flexure) * \ - flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, spat_flexure=spat_flexure) - flatframe = flatimages.pixelflat_raw / total_illum - if flatimages.pixelflat_spec_illum is None: - # Calculate the relative scale - scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits, - slit_illum_ref_idx=flatpar['slit_illum_ref_idx'], model=None, - skymask=None, trim=flatpar['slit_trim'], flexure=spat_flexure, - smooth_npix=flatpar['slit_illum_smooth_npix']) - else: - msgs.info("Using relative spectral illumination from FlatImages") - scale_model = flatimages.pixelflat_spec_illum - # Apply the relative scale and generate a 1D "spectrum" - onslit = waveimg != 0 - wavebins = np.linspace(np.min(waveimg[onslit]), np.max(waveimg[onslit]), slits.nspec) - hist, edge = np.histogram(waveimg[onslit], bins=wavebins, weights=flatframe[onslit]/scale_model[onslit]) - cntr, edge = np.histogram(waveimg[onslit], bins=wavebins) - cntr = cntr.astype(float) - norm = (cntr != 0) / (cntr + (cntr == 0)) - spec_spl = hist * norm - wave_spl = 0.5 * (wavebins[1:] + wavebins[:-1]) - flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', - bounds_error=False, fill_value="extrapolate") - flat_splines[flatfile+"_wave"] = wave_spl.copy() - # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous - # exposure in this for 
loop) - if blaze_spline is None: - blaze_wave, blaze_spec = wave_spl, spec_spl - blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', - bounds_error=False, fill_value="extrapolate") - - # Perform extinction correction - msgs.info("Applying extinction correction") - longitude = spec.telescope['longitude'] - latitude = spec.telescope['latitude'] - airmass = spec2DObj.head0[spec.meta['airmass']['card']] - extinct = flux_calib.load_extinction_data(longitude, latitude, senspar['UVIS']['extinct_file']) - # extinction_correction requires the wavelength is sorted - wvsrt = np.argsort(wave_ext) - ext_corr = flux_calib.extinction_correction(wave_ext[wvsrt] * units.AA, airmass, extinct) - # Grating correction - grat_corr = 1.0 - if cubepar['grating_corr']: - grat_corr = correct_grating_shift(wave_ext[wvsrt], flat_splines[flatfile + "_wave"], flat_splines[flatfile], - blaze_wave, blaze_spline) - # Sensitivity function - sens_func = 1.0 - if fluxcal: - msgs.info("Calculating the sensitivity function") - sens_func = flux_spline(wave_ext[wvsrt]) - # Convert the flux_sav to counts/s, correct for the relative sensitivity of different setups - ext_corr *= sens_func / (exptime * grat_corr) - # Correct for extinction - flux_sav = flux_ext[wvsrt] * ext_corr - ivar_sav = ivar_ext[wvsrt] / ext_corr ** 2 - - # Convert units to Counts/s/Ang/arcsec2 - # Slicer sampling * spatial pixel sampling - sl_deg = np.sqrt(frame_wcs.wcs.cd[0, 0] ** 2 + frame_wcs.wcs.cd[1, 0] ** 2) - px_deg = np.sqrt(frame_wcs.wcs.cd[1, 1] ** 2 + frame_wcs.wcs.cd[0, 1] ** 2) - scl_units = dwav_ext[wvsrt] * (3600.0 * sl_deg) * (3600.0 * px_deg) - flux_sav /= scl_units - ivar_sav *= scl_units ** 2 - - # sort back to the original ordering - resrt = np.argsort(wvsrt) - numpix = raimg[onslit_gpm].size - - # Calculate the weights relative to the zeroth cube - weights[ff] = 1.0#exptime #np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2 - - # Get the slit image and then unset pixels in the slit image that are bad - 
this_specpos, this_spatpos = np.where(onslit_gpm) - this_spatid = slitid_img_init[onslit_gpm] - - # If individual frames are to be output without aligning them, - # there's no need to store information, just make the cubes now - if not combine and not align: - # Get the output filename - if numfiles == 1 and cubepar['output_filename'] != "": - outfile = get_output_filename("", cubepar['output_filename'], True, -1) - else: - outfile = get_output_filename(fil, cubepar['output_filename'], combine, ff+1) - # Get the coordinate bounds - slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) - numwav = int((np.max(waveimg) - wave0) / dwv) - bins = spec.get_datacube_bins(slitlength, minmax, numwav) - # Generate the output WCS for the datacube - crval_wv = cubepar['wave_min'] if cubepar['wave_min'] is not None else 1.0E10 * frame_wcs.wcs.crval[2] - cd_wv = cubepar['wave_delta'] if cubepar['wave_delta'] is not None else 1.0E10 * frame_wcs.wcs.cd[2, 2] - output_wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv) - # Set the wavelength range of the white light image. 
- wl_wvrng = None - if cubepar['save_whitelight']: - wl_wvrng = get_whitelight_range(np.max(mnmx_wv[ff, :, 0]), - np.min(mnmx_wv[ff, :, 1]), - cubepar['whitelight_range']) - # Make the datacube - if method in ['subpixel', 'ngp']: - # Generate the datacube - generate_cube_subpixel(outfile, output_wcs, raimg[onslit_gpm], decimg[onslit_gpm], wave_ext, - flux_sav[resrt], ivar_sav[resrt], np.ones(numpix), - this_spatpos, this_specpos, this_spatid, - spec2DObj.tilts, slits, alignSplines, bins, - all_idx=None, overwrite=overwrite, blaze_wave=blaze_wave, blaze_spec=blaze_spec, - fluxcal=fluxcal, specname=specname, whitelight_range=wl_wvrng, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) - continue - - # Store the information if we are combining multiple frames - all_ra = np.append(all_ra, raimg[onslit_gpm].copy()) - all_dec = np.append(all_dec, decimg[onslit_gpm].copy()) - all_wave = np.append(all_wave, wave_ext.copy()) - all_sci = np.append(all_sci, flux_sav[resrt].copy()) - all_ivar = np.append(all_ivar, ivar_sav[resrt].copy()) - all_idx = np.append(all_idx, ff*np.ones(numpix)) - all_wghts = np.append(all_wghts, weights[ff]*np.ones(numpix)/weights[0]) - all_spatpos = np.append(all_spatpos, this_spatpos) - all_specpos = np.append(all_specpos, this_specpos) - all_spatid = np.append(all_spatid, this_spatid) - all_tilts.append(spec2DObj.tilts) - all_slits.append(slits) - all_align.append(alignSplines) - - # No need to continue if we are not combining nor aligning frames - if not combine and not align: - return - - # Grab cos(dec) for convenience - cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) - - # Register spatial offsets between all frames - if align: - if opts['ra_offset'] is not None: - # First, translate all coordinates to the coordinates of the first frame - # Note :: Don't need cosdec here, this just overrides the IFU coordinate centre of each frame - ref_shift_ra = ifu_ra[0] - ifu_ra - ref_shift_dec = ifu_dec[0] - ifu_dec - for ff in range(numfiles): 
- # Apply the shift - all_ra[all_idx == ff] += ref_shift_ra[ff] + opts['ra_offset'][ff]/3600.0 - all_dec[all_idx == ff] += ref_shift_dec[ff] + opts['dec_offset'][ff]/3600.0 - msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, opts['ra_offset'][ff], opts['dec_offset'][ff])) - else: - # Find the wavelength range where all frames overlap - min_wl, max_wl = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength - np.min(mnmx_wv[:, :, 1]), # The min red wavelength - cubepar['whitelight_range']) # The user-specified values (if any) - # Get the good whitelight pixels - ww, wavediff = get_whitelight_pixels(all_wave, min_wl, max_wl) - # Iterate over white light image generation and spatial shifting - numiter = 2 - for dd in range(numiter): - msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") - # Setup the WCS to use for all white light images - ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied - image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra[ww], all_dec[ww], all_wave[ww], - dspat, wavediff, collapse=True) - if voxedge[2].size != 2: - msgs.error("Spectral range for WCS is incorrect for white light image") - - wl_imgs = generate_image_subpixel(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], - all_sci[ww], all_ivar[ww], all_wghts[ww], - all_spatpos[ww], all_specpos[ww], all_spatid[ww], - all_tilts, all_slits, all_align, voxedge, all_idx=all_idx[ww], - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) - if reference_image is None: - # ref_idx will be the index of the cube with the highest S/N - ref_idx = np.argmax(weights) - reference_image = wl_imgs[:, :, ref_idx].copy() - msgs.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1)) - else: - msgs.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'") - - # Calculate the 
image offsets relative to the reference image - for ff in range(numfiles): - # Calculate the shift - ra_shift, dec_shift = calculate_image_phase(reference_image.copy(), wl_imgs[:, :, ff], maskval=0.0) - # Convert pixel shift to degrees shift - ra_shift *= dspat/cosdec - dec_shift *= dspat - msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff+1, ra_shift*3600.0, dec_shift*3600.0)) - # Apply the shift - all_ra[all_idx == ff] += ra_shift - all_dec[all_idx == ff] += dec_shift - - # Calculate the relative spectral weights of all pixels - if numfiles == 1: - # No need to calculate weights if there's just one frame - all_wghts = np.ones_like(all_sci) - else: - # Find the wavelength range where all frames overlap - min_wl, max_wl = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength - np.min(mnmx_wv[:, :, 1]), # The min red wavelength - cubepar['whitelight_range']) # The user-specified values (if any) - # Get the good white light pixels - ww, wavediff = get_whitelight_pixels(all_wave, min_wl, max_wl) - # Get a suitable WCS - image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, wavediff, collapse=True) - # Generate the white light image (note: hard-coding subpixel=1 in both directions, and combining into a single image) - wl_full = generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, - all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, - all_tilts, all_slits, all_align, voxedge, all_idx=all_idx, - spec_subpixel=1, spat_subpixel=1, combine=True) - # Compute the weights - all_wghts = compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, wl_full[:, :, 0], - dspat, dwv, relative_weights=cubepar['relative_weights']) - - # Generate the WCS, and the voxel edges - cube_wcs, vox_edges, _ = create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv) - - sensfunc = None - if flux_spline is not None: - # Get wavelength of each pixel, and 
note that the WCS gives this in m, so convert to Angstroms (x 1E10) - numwav = vox_edges[2].size-1 - senswave = cube_wcs.spectral.wcs_pix2world(np.arange(numwav), 0)[0] * 1.0E10 - sensfunc = flux_spline(senswave) - - # Generate a datacube - outfile = get_output_filename("", cubepar['output_filename'], True, -1) - if method in ['subpixel', 'ngp']: - # Generate the datacube - wl_wvrng = None - if cubepar['save_whitelight']: - wl_wvrng = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), - np.min(mnmx_wv[:, :, 1]), - cubepar['whitelight_range']) - if combine: - generate_cube_subpixel(outfile, cube_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, - np.ones(all_wghts.size), # all_wghts, - all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, vox_edges, - all_idx=all_idx, overwrite=overwrite, blaze_wave=blaze_wave, blaze_spec=blaze_spec, - fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, whitelight_range=wl_wvrng, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) - else: - for ff in range(numfiles): - outfile = get_output_filename("", cubepar['output_filename'], False, ff) - ww = np.where(all_idx == ff) - generate_cube_subpixel(outfile, cube_wcs, all_ra[ww], all_dec[ww], all_wave[ww], all_sci[ww], all_ivar[ww], np.ones(all_wghts[ww].size), - all_spatpos[ww], all_specpos[ww], all_spatid[ww], all_tilts[ff], all_slits[ff], all_align[ff], vox_edges, - all_idx=all_idx[ww], overwrite=overwrite, blaze_wave=blaze_wave, blaze_spec=blaze_spec, - fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, whitelight_range=wl_wvrng, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) - - From 6b33349c686856f6f193d7f77d838e419b02d2e5 Mon Sep 17 00:00:00 2001 From: rcooke Date: Fri, 8 Sep 2023 21:48:14 +0100 Subject: [PATCH 03/81] refactor to coadd3d --- pypeit/scripts/coadd_datacube.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 
789c071112..18a07ceeb9 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -1,6 +1,6 @@ """ This script enables the user to convert spec2D FITS files -from IFU instruments into a 3D cube with a defined WCS. +from SlicerIFU instruments into a 3D cube with a defined WCS. .. include common links, assuming primary doc root is up one directory .. include:: ../include/links.rst @@ -10,7 +10,7 @@ from pypeit import par from pypeit import inputfiles from pypeit import utils -from pypeit.core.datacube import coadd_cube +from pypeit.coadd3d import CoAdd3D from pypeit.spectrographs.util import load_spectrograph from pypeit.scripts import scriptbase @@ -52,7 +52,11 @@ def main(args): msgs.info("Restricting to detector={}".format(args.det)) parset['rdx']['detnum'] = int(args.det) + # Instantiate Coadd2d + coadd = CoAdd3D.get_instance(coadd3dfile.filenames, coadd3dfile.options, spectrograph=spectrograph, par=parset, + det=args.det, overwrite=args.overwrite) + # Coadd the files tstart = time.time() - coadd_cube(coadd3dfile.filenames, coadd3dfile.options, parset=parset, overwrite=args.overwrite) + coadd.coadd() msgs.info(utils.get_time_string(time.time()-tstart)) From 79a9cb43d945f92e20623a601509acc97856e5ae Mon Sep 17 00:00:00 2001 From: rcooke Date: Fri, 8 Sep 2023 21:48:36 +0100 Subject: [PATCH 04/81] refactor step 1 --- pypeit/coadd3d.py | 1091 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1091 insertions(+) create mode 100644 pypeit/coadd3d.py diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py new file mode 100644 index 0000000000..68b7398fd9 --- /dev/null +++ b/pypeit/coadd3d.py @@ -0,0 +1,1091 @@ +""" +Module containing routines used by 3D datacubes. + +.. 
include:: ../include/links.rst +""" + +import os +import copy +import inspect + +from astropy import wcs, units +from astropy.coordinates import AltAz, SkyCoord +from astropy.io import fits +import scipy.optimize as opt +from scipy.interpolate import interp1d +import numpy as np + +from pypeit import msgs +from pypeit import alignframe, datamodel, flatfield, io, specobj, spec2dobj, utils +from pypeit.core.flexure import calculate_image_phase +from pypeit.core import coadd, datacube, extract, findobj_skymask, flux_calib, parse, skysub +from pypeit.core.procimg import grow_mask +from pypeit.spectrographs.util import load_spectrograph + +# Use a fast histogram for speed! +try: + from fast_histogram import histogramdd +except ImportError: + histogramdd = None + +from IPython import embed + + +class DataCube(datamodel.DataContainer): + """ + DataContainer to hold the products of a datacube + + The datamodel attributes are: + + .. include:: ../include/class_datamodel_datacube.rst + + Args: + flux (`numpy.ndarray`_): + The science datacube (nwave, nspaxel_y, nspaxel_x) + sig (`numpy.ndarray`_): + The error datacube (nwave, nspaxel_y, nspaxel_x) + bpm (`numpy.ndarray`_): + The bad pixel mask of the datacube (nwave, nspaxel_y, nspaxel_x). + True values indicate a bad pixel + blaze_wave (`numpy.ndarray`_): + Wavelength array of the spectral blaze function + blaze_spec (`numpy.ndarray`_): + The spectral blaze function + sensfunc (`numpy.ndarray`_, None): + Sensitivity function (nwave,). Only saved if the data are fluxed. 
+ PYP_SPEC (str): + Name of the PypeIt Spectrograph + fluxed (bool): + If the cube has been flux calibrated, this will be set to "True" + + Attributes: + head0 (`astropy.io.fits.Header`_): + Primary header + filename (str): + Filename to use when loading from file + spect_meta (:obj:`dict`): + Parsed meta from the header + spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): + Build from PYP_SPEC + + """ + version = '1.1.0' + + datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating, + descr='Flux datacube in units of counts/s/Ang/arcsec^2 or ' + '10^-17 erg/s/cm^2/Ang/arcsec^2'), + 'sig': dict(otype=np.ndarray, atype=np.floating, + descr='Error datacube (matches units of flux)'), + 'bpm': dict(otype=np.ndarray, atype=np.uint8, + descr='Bad pixel mask of the datacube (0=good, 1=bad)'), + 'blaze_wave': dict(otype=np.ndarray, atype=np.floating, + descr='Wavelength array of the spectral blaze function'), + 'blaze_spec': dict(otype=np.ndarray, atype=np.floating, + descr='The spectral blaze function'), + 'sensfunc': dict(otype=np.ndarray, atype=np.floating, + descr='Sensitivity function 10^-17 erg/(counts/cm^2)'), + 'PYP_SPEC': dict(otype=str, descr='PypeIt: Spectrograph name'), + 'fluxed': dict(otype=bool, descr='Boolean indicating if the datacube is fluxed.')} + + internals = ['head0', + 'filename', + 'spectrograph', + 'spect_meta' + ] + + def __init__(self, flux, sig, bpm, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, + fluxed=None): + + args, _, _, values = inspect.getargvalues(inspect.currentframe()) + _d = dict([(k, values[k]) for k in args[1:]]) + # Setup the DataContainer + datamodel.DataContainer.__init__(self, d=_d) + + def _bundle(self): + """ + Over-write default _bundle() method to separate the DetectorContainer + into its own HDU + + Returns: + :obj:`list`: A list of dictionaries, each list element is + written to its own fits extension. See the description + above. 
+ """ + d = [] + # Rest of the datamodel + for key in self.keys(): + # Skip Nones + if self[key] is None: + continue + # Array? + if self.datamodel[key]['otype'] == np.ndarray: + tmp = {} + if self.datamodel[key]['atype'] == np.floating: + tmp[key] = self[key].astype(np.float32) + else: + tmp[key] = self[key] + d.append(tmp) + else: + # Add to header of the primary image + d[0][key] = self[key] + # Return + return d + + def to_file(self, ofile, primary_hdr=None, hdr=None, **kwargs): + """ + Over-load :func:`~pypeit.datamodel.DataContainer.to_file` + to deal with the header + + Args: + ofile (:obj:`str`): + Filename + primary_hdr (`astropy.io.fits.Header`_, optional): + Base primary header. Updated with new subheader keywords. If + None, initialized using :func:`~pypeit.io.initialize_header`. + wcs (`astropy.io.fits.Header`_, optional): + The World Coordinate System, represented by a fits header + kwargs (dict): + Keywords passed directly to parent ``to_file`` function. + + """ + if primary_hdr is None: + primary_hdr = io.initialize_header() + # Build the header + if self.head0 is not None and self.PYP_SPEC is not None: + spectrograph = load_spectrograph(self.PYP_SPEC) + subheader = spectrograph.subheader_for_spec(self.head0, self.head0) + else: + subheader = {} + # Add em in + for key in subheader: + primary_hdr[key] = subheader[key] + # Do it + super(DataCube, self).to_file(ofile, primary_hdr=primary_hdr, hdr=hdr, **kwargs) + + @classmethod + def from_file(cls, ifile): + """ + Over-load :func:`~pypeit.datamodel.DataContainer.from_file` + to deal with the header + + Args: + ifile (str): Filename holding the object + """ + with io.fits_open(ifile) as hdu: + # Read using the base class + self = super().from_hdu(hdu) + # Internals + self.filename = ifile + self.head0 = hdu[1].header # Actually use the first extension here, since it contains the WCS + # Meta + self.spectrograph = load_spectrograph(self.PYP_SPEC) + self.spect_meta = 
self.spectrograph.parse_spec_header(hdu[0].header)
+            return self
+
+    @property
+    def ivar(self):
+        return utils.inverse(self.sig**2)
+
+    @property
+    def wcs(self):
+        return wcs.WCS(self.head0)
+
+
+class CoAdd3D:
+    """
+    Main routine to convert processed PypeIt spec2d frames into
+    DataCube (spec3d) files. This routine is only used for IFU
+    data reduction.
+
+    Algorithm steps are as follows:
+        - TODO :: Fill this in.
+
+    """
+    # Superclass factory method generates the subclass instance
+    @classmethod
+    def get_instance(cls, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False,
+                     show=False, debug=False):
+        """
+        Instantiate the subclass appropriate for the provided spectrograph.
+
+        The class to instantiate must match the ``pypeline``
+        attribute of the provided ``spectrograph``, and must be a
+        subclass of :class:`CoAdd3D`; see the parent class
+        instantiation for parameter descriptions.
+
+        Returns:
+            :class:`CoAdd3D`: One of the subclasses with
+            :class:`CoAdd3D` as its base.
+        """
+
+        return next(c for c in cls.__subclasses__()
+                    if c.__name__ == (spectrograph.pypeline + 'CoAdd3D'))(
+            spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite,
+            show=show, debug=debug)
+
+    def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=False,
+                 show=False, debug=False):
+        """
+
+        Args:
+            files (:obj:`list`):
+                List of all spec2D files
+            opts (:obj:`dict`):
+                Options associated with each spec2d file
+            spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional):
+                The name or instance of the spectrograph used to obtain the data.
+                If None, this is pulled from the file header.
+            par (:class:`~pypeit.par.pypeitpar.PypeItPar`, optional):
+                An instance of the parameter set.
If None, assumes that detector 1 + is the one reduced and uses the default reduction parameters for the + spectrograph (see + :func:`~pypeit.spectrographs.spectrograph.Spectrograph.default_pypeit_par` + for the relevant spectrograph class). + det (int): + Detector index + overwrite (:obj:`bool`, optional): + Overwrite the output file, if it exists? + show (:obj:`bool`, optional): + Show results in ginga + debug (:obj:`bool`, optional): + Show QA for debugging. + + """ + self.spec2d = files + self.numfiles = len(files) + self.opts = opts + self.overwrite = overwrite + + # Check on Spectrograph input + if spectrograph is None: + with fits.open(files[0]) as hdu: + spectrograph = hdu[0].header['PYP_SPEC'] + + if isinstance(spectrograph, str): + self.spec = load_spectrograph(spectrograph) + self.specname = spectrograph + else: + # Assume it's a Spectrograph instance + self.spec = spectrograph + self.specname = spectrograph.name + + # Grab the parset, if not provided + if par is None: + # TODO :: Use config_specific_par instead? 
+ par = self.spec.default_pypeit_par() + self.par = par + # Extract some parsets for simplicity + self.cubepar = self.par['reduce']['cube'] + self.flatpar = self.par['calibrations']['flatfield'] + self.senspar = self.par['sensfunc'] + + # Initialise arrays for storage + self.ifu_ra, self.ifu_dec = np.array([]), np.array([]) # The RA and Dec at the centre of the IFU, as stored in the header + self.all_ra, self.all_dec, self.all_wave = np.array([]), np.array([]), np.array([]) + self.all_sci, self.all_ivar, self.all_idx, self.all_wghts = np.array([]), np.array([]), np.array([]), np.array([]) + self.all_spatpos, self.all_specpos, self.all_spatid = np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int) + self.all_tilts, self.all_slits, self.all_align = [], [], [] + self.all_wcs = [] + self.weights = np.ones(self.numfiles) # Weights to use when combining cubes + + + + + # TODO :: need to sort out what to do with these - make them self. as well? + assert False + dspat = None if self.cubepar['spatial_delta'] is None else self.cubepar['spatial_delta'] / 3600.0 # binning size on the sky (/3600 to convert to degrees) + dwv = self.cubepar['wave_delta'] # binning size in wavelength direction (in Angstroms) + flat_splines = dict() # A dictionary containing the splines of the flatfield + + + + + + # Extract some commonly used variables + self.method = self.cubepar['method'].lower() + self.combine = self.cubepar['combine'] + self.align = self.cubepar['align'] + # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. 
+        if self.numfiles == 1 and self.cubepar["reference_image"] is None:
+            if self.align:
+                msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image")
+                msgs.info("Setting 'align' to False")
+            self.align = False
+        if self.opts['ra_offset'] is not None:
+            if not self.align:
+                msgs.warn("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.")
+                msgs.info("Setting 'align' to True")
+            self.align = True
+        # TODO :: The default behaviour (combine=False, align=False) produces a datacube that uses the instrument WCS
+        #  It should be possible (and perhaps desirable) to do a spatial alignment (i.e. align=True), apply this to the
+        #  RA,Dec values of each pixel, and then use the instrument WCS to save the output (or, just adjust the crval).
+        #  At the moment, if the user wishes to spatially align the frames, a different WCS is generated.
+        # Check if fast-histogram exists
+        if histogramdd is None:
+            msgs.warn("Generating a datacube is faster if you install fast-histogram:"+msgs.newline()+
+                      "https://pypi.org/project/fast-histogram/")
+            if self.method != 'ngp':
+                msgs.warn("Forcing NGP algorithm, because fast-histogram is not installed")
+                self.method = 'ngp'
+
+        # Determine what method is requested
+        self.spec_subpixel, self.spat_subpixel = 1, 1
+        if self.method == "subpixel":
+            msgs.info("Adopting the subpixel algorithm to generate the datacube.")
+            self.spec_subpixel, self.spat_subpixel = self.cubepar['spec_subpixel'], self.cubepar['spat_subpixel']
+        elif self.method == "ngp":
+            msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.")
+        else:
+            msgs.error(f"The following datacube method is not allowed: {self.method}")
+
+        # Get the detector number and string representation
+        det = 1 if self.par['rdx']['detnum'] is None else self.par['rdx']['detnum']
+        self.detname = self.spec.get_det_name(det)
+
+        # Check if the output file exists
+        self.check_outputs()
+
+        # Check the reference cube and image exist, if
requested + self.fluxcal = False + self.blaze_wave, self.blaze_spec = None, None + self.blaze_spline, self.flux_spline = None, None + if self.cubepar['standard_cube'] is not None: + self.make_sensfunc() + + # If a reference image has been set, check that it exists + if self.cubepar['reference_image'] is not None: + if not os.path.exists(self.cubepar['reference_image']): + msgs.error("Reference image does not exist:" + msgs.newline() + self.cubepar['reference_image']) + + def check_outputs(self): + """ + TODO :: docstring + """ + if self.combine: + outfile = datacube.get_output_filename("", self.cubepar['output_filename'], self.combine) + out_whitelight = datacube.get_output_whitelight_filename(outfile) + if os.path.exists(outfile) and not self.overwrite: + msgs.error("Output filename already exists:"+msgs.newline()+outfile) + if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: + msgs.error("Output filename already exists:"+msgs.newline()+out_whitelight) + else: + # Finally, if there's just one file, check if the output filename is given + if self.numfiles == 1 and self.cubepar['output_filename'] != "": + outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) + out_whitelight = datacube.get_output_whitelight_filename(outfile) + if os.path.exists(outfile) and not self.overwrite: + msgs.error("Output filename already exists:" + msgs.newline() + outfile) + if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: + msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) + else: + for ff in range(self.numfiles): + outfile = datacube.get_output_filename(self.spec2d[ff], self.cubepar['output_filename'], self.combine, ff+1) + out_whitelight = datacube.get_output_whitelight_filename(outfile) + if os.path.exists(outfile) and not self.overwrite: + msgs.error("Output filename already exists:" + msgs.newline() + outfile) + if 
os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: + msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) + + def make_sensfunc(self): + """ + TODO :: docstring + """ + self.fluxcal = True + ss_file = self.cubepar['standard_cube'] + if not os.path.exists(ss_file): + msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file) + msgs.info(f"Loading standard star cube: {ss_file:s}") + # Load the standard star cube and retrieve its RA + DEC + stdcube = fits.open(ss_file) + star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2'] + + # Extract a spectrum of the standard star + wave, Nlam_star, Nlam_ivar_star, gpm_star = datacube.extract_standard_spec(stdcube) + + # Extract the information about the blaze + if self.cubepar['grating_corr']: + blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data + blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, + kind='linear', bounds_error=False, fill_value="extrapolate") + # The first standard star cube is used as the reference blaze spline + if self.blaze_spline is None: + self.blaze_wave, self.blaze_spec = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data + self.blaze_spline = interp1d(self.blaze_wave, self.blaze_spec, + kind='linear', bounds_error=False, fill_value="extrapolate") + # Perform a grating correction + grat_corr = datacube.correct_grating_shift(wave.value, blaze_wave_curr, blaze_spline_curr, self.blaze_wave, + self.blaze_spline) + # Apply the grating correction to the standard star spectrum + Nlam_star /= grat_corr + Nlam_ivar_star *= grat_corr ** 2 + + # Read in some information above the standard star + std_dict = flux_calib.get_standard_spectrum(star_type=self.senspar['star_type'], + star_mag=self.senspar['star_mag'], + ra=star_ra, dec=star_dec) + # Calculate the sensitivity curve + # TODO :: This needs to be addressed... 
unify flux calibration into the main PypeIt routines. + msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon") + zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \ + flux_calib.fit_zeropoint(wave.value, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, + mask_hydrogen_lines=self.senspar['mask_hydrogen_lines'], + mask_helium_lines=self.senspar['mask_helium_lines'], + hydrogen_mask_wid=self.senspar['hydrogen_mask_wid'], + nresln=self.senspar['UVIS']['nresln'], + resolution=self.senspar['UVIS']['resolution'], + trans_thresh=self.senspar['UVIS']['trans_thresh'], + polyorder=self.senspar['polyorder'], + polycorrect=self.senspar['UVIS']['polycorrect'], + polyfunc=self.senspar['UVIS']['polyfunc']) + wgd = np.where(zeropoint_fit_gpm) + sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) + self.flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") + + # Load the default scaleimg frame for the scale correction + self.scalecorr_default = "none" + self.relScaleImgDef = np.array([1]) + self.set_default_scalecorr() + + # Load the default sky frame to be used for sky subtraction + self.skysub_default = "image" + self.skyImgDef, self.skySclDef = None, None # This is the default behaviour (i.e. 
to use the "image" for the sky subtraction) + self.set_default_skysub() + + def set_default_scalecorr(self): + """ + TODO :: docstring + """ + if self.cubepar['scale_corr'] is not None: + if self.cubepar['scale_corr'] == "image": + msgs.info("The default relative spectral illumination correction will use the science image") + self.scalecorr_default = "image" + else: + msgs.info("Loading default scale image for relative spectral illumination correction:" + + msgs.newline() + self.cubepar['scale_corr']) + try: + spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['scale_corr'], self.detname) + self.relScaleImgDef = spec2DObj.scaleimg + self.scalecorr_default = self.cubepar['scale_corr'] + except: + msgs.warn("Could not load scaleimg from spec2d file:" + msgs.newline() + + self.cubepar['scale_corr'] + msgs.newline() + + "scale correction will not be performed unless you have specified the correct" + msgs.newline() + + "scale_corr file in the spec2d block") + self.cubepar['scale_corr'] = None + self.scalecorr_default = "none" + + def get_current_scalecorr(self, spec2DObj, opts_scalecorr=None): + """ + TODO :: docstring + """ + this_scalecorr = self.scalecorr_default + relScaleImg = self.relScaleImgDef.copy() + if opts_scalecorr is not None: + if opts_scalecorr.lower() == 'default': + if self.scalecorr_default == "image": + relScaleImg = spec2DObj.scaleimg + this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling + else: + this_scalecorr = self.scalecorr_default # Use the default value for the scale correction + elif opts_scalecorr.lower() == 'image': + relScaleImg = spec2DObj.scaleimg + this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling + elif opts_scalecorr.lower() == 'none': + relScaleImg = np.array([1]) + this_scalecorr = "none" # Don't do relative spectral illumination scaling + else: + # Load a user specified frame for sky subtraction + msgs.info("Loading the following frame 
for the relative spectral illumination correction:" + + msgs.newline() + opts_scalecorr) + try: + spec2DObj_scl = spec2dobj.Spec2DObj.from_file(opts_scalecorr, self.detname) + except: + msgs.error( + "Could not load skysub image from spec2d file:" + msgs.newline() + opts_scalecorr) + relScaleImg = spec2DObj_scl.scaleimg + this_scalecorr = opts_scalecorr + if this_scalecorr == "none": + msgs.info("Relative spectral illumination correction will not be performed.") + else: + msgs.info("Using the following frame for the relative spectral illumination correction:" + + msgs.newline() + this_scalecorr) + # Return the scaling correction for this frame + return this_scalecorr, relScaleImg + + def set_default_skysub(self): + """ + TODO :: Add docstring + """ + if self.cubepar['skysub_frame'] in [None, 'none', '', 'None']: + self.skysub_default = "none" + self.skyImgDef = np.array([0.0]) # Do not perform sky subtraction + self.skySclDef = np.array([0.0]) # Do not perform sky subtraction + elif self.cubepar['skysub_frame'].lower() == "image": + msgs.info("The sky model in the spec2d science frames will be used for sky subtraction" + msgs.newline() + + "(unless specific skysub frames have been specified)") + self.skysub_default = "image" + else: + msgs.info("Loading default image for sky subtraction:" + + msgs.newline() + self.cubepar['skysub_frame']) + try: + spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['skysub_frame'], self.detname) + skysub_exptime = fits.open(self.cubepar['skysub_frame'])[0].header['EXPTIME'] + self.skysub_default = self.cubepar['skysub_frame'] + self.skyImgDef = spec2DObj.sciimg / skysub_exptime # Sky counts/second + # self.skyImgDef = spec2DObj.skymodel/skysub_exptime # Sky counts/second + self.skySclDef = spec2DObj.scaleimg + except: + msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame']) + + def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): + """ + TODO :: docstring + """ 
+        this_skysub = self.skysub_default
+        if self.skysub_default == "image":
+            skyImg = spec2DObj.skymodel
+            skyScl = spec2DObj.scaleimg
+        else:
+            skyImg = self.skyImgDef.copy() * exptime
+            skyScl = self.skySclDef.copy()
+        # See if there's any changes from the default behaviour
+        if opts_skysub is not None:
+            if opts_skysub.lower() == 'default':
+                if self.skysub_default == "image":
+                    skyImg = spec2DObj.skymodel
+                    skyScl = spec2DObj.scaleimg
+                    this_skysub = "image"  # Use the current spec2d for sky subtraction
+                else:
+                    skyImg = self.skyImgDef.copy() * exptime
+                    skyScl = self.skySclDef.copy()
+                    this_skysub = self.skysub_default  # Use the global value for sky subtraction
+            elif opts_skysub.lower() == 'image':
+                skyImg = spec2DObj.skymodel
+                skyScl = spec2DObj.scaleimg
+                this_skysub = "image"  # Use the current spec2d for sky subtraction
+            elif opts_skysub.lower() == 'none':
+                skyImg = np.array([0.0])
+                skyScl = np.array([1.0])
+                this_skysub = "none"  # Don't do sky subtraction
+            else:
+                # Load a user specified frame for sky subtraction
+                msgs.info("Loading skysub frame:" + msgs.newline() + opts_skysub)
+                try:
+                    spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts_skysub, self.detname)
+                    skysub_exptime = fits.open(opts_skysub)[0].header['EXPTIME']
+                except:
+                    msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts_skysub)
+                skyImg = spec2DObj_sky.sciimg * exptime / skysub_exptime  # Sky counts
+                skyScl = spec2DObj_sky.scaleimg
+                this_skysub = opts_skysub  # User specified spec2d for sky subtraction
+        if this_skysub == "none":
+            msgs.info("Sky subtraction will not be performed.")
+        else:
+            msgs.info("Using the following frame for sky subtraction:" + msgs.newline() + this_skysub)
+        # Return the skysub params for this frame
+        return this_skysub, skyImg, skyScl
+
+    def compute_DAR(self, hdr0, raimg, decimg, waveimg, onslit_gpm, wave_ref=None):
+        """
+        TODO :: docstring
+        """
+        if wave_ref is None:
+            wave_ref = 0.5 * (np.min(waveimg[onslit_gpm]) +
np.max(waveimg[onslit_gpm])) + # Get DAR parameters + raval = self.spec.get_meta_value([hdr0], 'ra') + decval = self.spec.get_meta_value([hdr0], 'dec') + obstime = self.spec.get_meta_value([hdr0], 'obstime') + pressure = self.spec.get_meta_value([hdr0], 'pressure') + temperature = self.spec.get_meta_value([hdr0], 'temperature') + rel_humidity = self.spec.get_meta_value([hdr0], 'humidity') + coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) + location = self.spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) + if pressure == 0.0: + msgs.warn("Pressure is set to zero - DAR correction will not be performed") + else: + msgs.info("DAR correction parameters:" + msgs.newline() + + " Pressure = {0:f} bar".format(pressure) + msgs.newline() + + " Temperature = {0:f} deg C".format(temperature) + msgs.newline() + + " Humidity = {0:f}".format(rel_humidity)) + ra_corr, dec_corr = datacube.correct_dar(waveimg[onslit_gpm], coord, obstime, location, + pressure * units.bar, temperature * units.deg_C, rel_humidity, + wave_ref=wave_ref) + raimg[onslit_gpm] += ra_corr * np.cos(np.mean(decimg[onslit_gpm]) * np.pi / 180.0) + decimg[onslit_gpm] += dec_corr + + def coadd(self): + """ + TODO :: Docstring + """ + msgs.bug("This routine should be overridden by child classes.") + msgs.error("Cannot proceed without coding the coadd routine.") + return + + +class SlicerIFUCoAdd3D(CoAdd3D): + """ + Child of CoAdd3D for SlicerIFU data reduction. For documentation, see CoAdd3d parent class above. 
+
+    """
+    def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False,
+                 show=False, debug=False):
+        super().__init__(spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite,
+                         show=show, debug=debug)
+
+    def get_alignments(self, spec2DObj, slits, frame_wcs, spat_flexure=None):
+        """
+        TODO :: docstring
+        """
+        # Loading the alignments frame for these data
+        alignments = None
+        if self.cubepar['astrometric']:
+            key = alignframe.Alignments.calib_type.upper()
+            if key in spec2DObj.calibs:
+                alignfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key])
+                if os.path.exists(alignfile) and self.cubepar['astrometric']:
+                    msgs.info("Loading alignments")
+                    alignments = alignframe.Alignments.from_file(alignfile)
+            else:
+                msgs.warn(f'Processed alignment frame not recorded or not found!')
+                msgs.info("Using slit edges for astrometric transform")
+        else:
+            msgs.info("Using slit edges for astrometric transform")
+        # If nothing better was provided, use the slit edges
+        if alignments is None:
+            left, right, _ = slits.select_edges(initial=True, flexure=spat_flexure)
+            locations = [0.0, 1.0]
+            traces = np.append(left[:, None, :], right[:, None, :], axis=1)
+        else:
+            locations = self.par['calibrations']['alignment']['locations']
+            traces = alignments.traces
+        # Generate an RA/DEC image
+        msgs.info("Generating RA/DEC image")
+        alignSplines = alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts)
+        # Return the alignment splines
+        return alignSplines
+
+    def load(self):
+        """
+        TODO :: docstring
+        """
+        # Initialise variables
+        wave_ref = None
+        mnmx_wv = None  # Will be used to store the minimum and maximum wavelengths of every slit and frame.
+ # Load all spec2d files and prepare the data for making a datacube + for ff, fil in enumerate(self.spec2d): + # Load it up + msgs.info("Loading PypeIt spec2d frame:" + msgs.newline() + fil) + spec2DObj = spec2dobj.Spec2DObj.from_file(fil, self.detname) + detector = spec2DObj.detector + spat_flexure = None # spec2DObj.sci_spat_flexure + + # Load the header + hdr0 = spec2DObj.head0 + self.ifu_ra = np.append(self.ifu_ra, self.spec.compound_meta([hdr0], 'ra')) + self.ifu_dec = np.append(self.ifu_dec, self.spec.compound_meta([hdr0], 'dec')) + + # Get the exposure time + # TODO :: Surely this should be retrieved from metadata... + exptime = hdr0['EXPTIME'] + + # Setup for PypeIt imports + msgs.reset(verbosity=2) + + # TODO :: Consider loading all calibrations into a single variable within the main CoAdd3D parent class. + + # Initialise the slit edges + msgs.info("Constructing slit image") + slits = spec2DObj.slits + slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spat_flexure) + slits_left, slits_right, _ = slits.select_edges(initial=True, flexure=spat_flexure) + + # The order of operations below proceeds as follows: + # (1) Get science image + # (2) Subtract sky (note, if a joint fit has been performed, the relative scale correction is applied in the reduction!) 
+ # (3) Apply relative scale correction to both science and ivar + + # Set the default behaviour if a global skysub frame has been specified + this_skysub, skyImg, skyScl = self.get_current_skysub(spec2DObj, exptime, + opts_skysub=self.opts['skysub_frame'][ff]) + + # Load the relative scale image, if something other than the default has been provided + this_scalecorr, relScaleImg = self.get_current_scalecorr(spec2DObj, + opts_scalecorr=self.opts['scale_corr'][ff]) + + # Prepare the relative scaling factors + relSclSky = skyScl / spec2DObj.scaleimg # This factor ensures the sky has the same relative scaling as the science frame + relScale = spec2DObj.scaleimg / relScaleImg # This factor is applied to the sky subtracted science frame + + # Extract the relevant information from the spec2d file + sciImg = (spec2DObj.sciimg - skyImg * relSclSky) * relScale # Subtract sky and apply relative illumination + ivar = spec2DObj.ivarraw / relScale ** 2 + waveimg = spec2DObj.waveimg + bpmmask = spec2DObj.bpmmask + + # TODO :: Really need to write some detailed information in the docs about all of the various corrections that can optionally be applied + + # TODO :: Include a flexure correction from the sky frame? Note, you cannot use the waveimg from a sky frame, + # since the heliocentric correction may have been applied to the sky frame. Need to recalculate waveimg using + # the slitshifts from a skyimage, and then apply the vel_corr from the science image. 
+ + wnonzero = (waveimg != 0.0) + if not np.any(wnonzero): + msgs.error("The wavelength image contains only zeros - You need to check the data reduction.") + wave0 = waveimg[wnonzero].min() + # Calculate the delta wave in every pixel on the slit + waveimp = np.roll(waveimg, 1, axis=0) + waveimn = np.roll(waveimg, -1, axis=0) + dwaveimg = np.zeros_like(waveimg) + # All good pixels + wnz = np.where((waveimg != 0) & (waveimp != 0)) + dwaveimg[wnz] = np.abs(waveimg[wnz] - waveimp[wnz]) + # All bad pixels + wnz = np.where((waveimg != 0) & (waveimp == 0)) + dwaveimg[wnz] = np.abs(waveimg[wnz] - waveimn[wnz]) + # All endpoint pixels + dwaveimg[0, :] = np.abs(waveimg[0, :] - waveimn[0, :]) + dwaveimg[-1, :] = np.abs(waveimg[-1, :] - waveimp[-1, :]) + dwv = np.median(dwaveimg[dwaveimg != 0.0]) if self.cubepar['wave_delta'] is None else self.cubepar['wave_delta'] + + msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv)) + + # Obtain the minimum and maximum wavelength of all slits + if mnmx_wv is None: + mnmx_wv = np.zeros((len(self.spec2d), slits.nslits, 2)) + for slit_idx, slit_spat in enumerate(slits.spat_id): + onslit_init = (slitid_img_init == slit_spat) + mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init]) + mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init]) + + # Remove edges of the spectrum where the sky model is bad + sky_is_good = datacube.make_good_skymask(slitid_img_init, spec2DObj.tilts) + + # Construct a good pixel mask + # TODO: This should use the mask function to figure out which elements are masked. 
+ onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good + + # Grab the WCS of this frame + frame_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv) + self.all_wcs.append(copy.deepcopy(frame_wcs)) + + # Find the largest spatial scale of all images being combined + # TODO :: probably need to put this in the DetectorContainer + pxscl = detector.platescale * parse.parse_binning(detector.binning)[ + 1] / 3600.0 # This should be degrees/pixel + slscl = self.spec.get_meta_value([spec2DObj.head0], 'slitwid') + if dspat is None: + dspat = max(pxscl, slscl) + if pxscl > dspat: + msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( + 3600.0 * dspat, 3600.0 * pxscl)) + if slscl > dspat: + msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( + 3600.0 * dspat, 3600.0 * slscl)) + + # Generate the alignment splines, and then + # retrieve images of the RA and Dec of every pixel, + # and the number of spatial pixels in each slit + alignSplines = self.get_alignments(spec2DObj, slits, frame_wcs, spat_flexure=spat_flexure) + raimg, decimg, minmax = slits.get_radec_image(frame_wcs, alignSplines, spec2DObj.tilts, + initial=True, flexure=spat_flexure) + + # Perform the DAR correction + self.compute_DAR(spec2DObj.head0, raimg, decimg, waveimg, onslit_gpm, wave_ref=wave_ref) + + # Get copies of arrays to be saved + wave_ext = waveimg[onslit_gpm].copy() + flux_ext = sciImg[onslit_gpm].copy() + ivar_ext = ivar[onslit_gpm].copy() + dwav_ext = dwaveimg[onslit_gpm].copy() + + # Correct for sensitivity as a function of grating angle + # (this assumes the spectrum of the flatfield lamp has the same shape for all setups) + key = flatfield.FlatImages.calib_type.upper() + if key not in spec2DObj.calibs: + msgs.error('Processed flat calibration file not recorded by spec2d file!') + flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) + 
if cubepar['grating_corr'] and flatfile not in flat_splines.keys(): + msgs.info("Calculating relative sensitivity for grating correction") + # Check if the Flat file exists + if not os.path.exists(flatfile): + msgs.error("Grating correction requested, but the following file does not exist:" + + msgs.newline() + flatfile) + # Load the Flat file + flatimages = flatfield.FlatImages.from_file(flatfile) + total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, + spat_flexure=spat_flexure) * \ + flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, + spat_flexure=spat_flexure) + flatframe = flatimages.pixelflat_raw / total_illum + if flatimages.pixelflat_spec_illum is None: + # Calculate the relative scale + scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits, + slit_illum_ref_idx=flatpar['slit_illum_ref_idx'], + model=None, + skymask=None, trim=flatpar['slit_trim'], + flexure=spat_flexure, + smooth_npix=flatpar['slit_illum_smooth_npix']) + else: + msgs.info("Using relative spectral illumination from FlatImages") + scale_model = flatimages.pixelflat_spec_illum + # Apply the relative scale and generate a 1D "spectrum" + onslit = waveimg != 0 + wavebins = np.linspace(np.min(waveimg[onslit]), np.max(waveimg[onslit]), slits.nspec) + hist, edge = np.histogram(waveimg[onslit], bins=wavebins, + weights=flatframe[onslit] / scale_model[onslit]) + cntr, edge = np.histogram(waveimg[onslit], bins=wavebins) + cntr = cntr.astype(float) + norm = (cntr != 0) / (cntr + (cntr == 0)) + spec_spl = hist * norm + wave_spl = 0.5 * (wavebins[1:] + wavebins[:-1]) + flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', + bounds_error=False, fill_value="extrapolate") + flat_splines[flatfile + "_wave"] = wave_spl.copy() + # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous + # exposure in this for loop) + if blaze_spline is None: + blaze_wave, 
blaze_spec = wave_spl, spec_spl + blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', + bounds_error=False, fill_value="extrapolate") + + # Perform extinction correction + msgs.info("Applying extinction correction") + longitude = self.spec.telescope['longitude'] + latitude = self.spec.telescope['latitude'] + airmass = spec2DObj.head0[self.spec.meta['airmass']['card']] + extinct = flux_calib.load_extinction_data(longitude, latitude, self.senspar['UVIS']['extinct_file']) + # extinction_correction requires the wavelength is sorted + wvsrt = np.argsort(wave_ext) + ext_corr = flux_calib.extinction_correction(wave_ext[wvsrt] * units.AA, airmass, extinct) + # Grating correction + grat_corr = 1.0 + if self.cubepar['grating_corr']: + grat_corr = correct_grating_shift(wave_ext[wvsrt], flat_splines[flatfile + "_wave"], + flat_splines[flatfile], + blaze_wave, blaze_spline) + # Sensitivity function + sens_func = 1.0 + if self.fluxcal: + msgs.info("Calculating the sensitivity function") + sens_func = flux_spline(wave_ext[wvsrt]) + # Convert the flux_sav to counts/s, correct for the relative sensitivity of different setups + ext_corr *= sens_func / (exptime * grat_corr) + # Correct for extinction + flux_sav = flux_ext[wvsrt] * ext_corr + ivar_sav = ivar_ext[wvsrt] / ext_corr ** 2 + + # Convert units to Counts/s/Ang/arcsec2 + # Slicer sampling * spatial pixel sampling + sl_deg = np.sqrt(frame_wcs.wcs.cd[0, 0] ** 2 + frame_wcs.wcs.cd[1, 0] ** 2) + px_deg = np.sqrt(frame_wcs.wcs.cd[1, 1] ** 2 + frame_wcs.wcs.cd[0, 1] ** 2) + scl_units = dwav_ext[wvsrt] * (3600.0 * sl_deg) * (3600.0 * px_deg) + flux_sav /= scl_units + ivar_sav *= scl_units ** 2 + + # sort back to the original ordering + resrt = np.argsort(wvsrt) + numpix = raimg[onslit_gpm].size + + # Calculate the weights relative to the zeroth cube + weights[ff] = 1.0 # exptime #np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2 + + # Get the slit image and then unset pixels in the slit image that are bad + this_specpos, 
this_spatpos = np.where(onslit_gpm) + this_spatid = slitid_img_init[onslit_gpm] + + # If individual frames are to be output without aligning them, + # there's no need to store information, just make the cubes now + if not self.combine and not self.align: + # Get the output filename + if self.numfiles == 1 and self.cubepar['output_filename'] != "": + outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) + else: + outfile = datacube.get_output_filename(fil, self.cubepar['output_filename'], self.combine, ff + 1) + # Get the coordinate bounds + slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) + numwav = int((np.max(waveimg) - wave0) / dwv) + bins = self.spec.get_datacube_bins(slitlength, minmax, numwav) + # Generate the output WCS for the datacube + crval_wv = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else 1.0E10 * frame_wcs.wcs.crval[2] + cd_wv = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else 1.0E10 * frame_wcs.wcs.cd[2, 2] + output_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv) + # Set the wavelength range of the white light image. 
+ wl_wvrng = None + if self.cubepar['save_whitelight']: + wl_wvrng = datacube.get_whitelight_range(np.max(mnmx_wv[ff, :, 0]), + np.min(mnmx_wv[ff, :, 1]), + self.cubepar['whitelight_range']) + # Make the datacube + if self.method in ['subpixel', 'ngp']: + # Generate the datacube + generate_cube_subpixel(outfile, output_wcs, raimg[onslit_gpm], decimg[onslit_gpm], wave_ext, + flux_sav[resrt], ivar_sav[resrt], np.ones(numpix), + this_spatpos, this_specpos, this_spatid, + spec2DObj.tilts, slits, alignSplines, bins, + all_idx=None, overwrite=self.overwrite, blaze_wave=blaze_wave, + blaze_spec=blaze_spec, + fluxcal=self.fluxcal, specname=self.specname, whitelight_range=wl_wvrng, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + continue + + # Store the information if we are combining multiple frames + self.all_ra = np.append(self.all_ra, raimg[onslit_gpm].copy()) + self.all_dec = np.append(self.all_dec, decimg[onslit_gpm].copy()) + self.all_wave = np.append(self.all_wave, wave_ext.copy()) + self.all_sci = np.append(self.all_sci, flux_sav[resrt].copy()) + self.all_ivar = np.append(self.all_ivar, ivar_sav[resrt].copy()) + self.all_idx = np.append(self.all_idx, ff * np.ones(numpix)) + self.all_wghts = np.append(self.all_wghts, weights[ff] * np.ones(numpix) / weights[0]) + self.all_spatpos = np.append(self.all_spatpos, this_spatpos) + self.all_specpos = np.append(self.all_specpos, this_specpos) + self.all_spatid = np.append(self.all_spatid, this_spatid) + self.all_tilts.append(spec2DObj.tilts) + self.all_slits.append(slits) + self.all_align.append(alignSplines) + + def run_align(self): + """ + TODO :: Add docstring + """ + # Grab cos(dec) for convenience + cosdec = np.cos(np.mean(self.all_dec) * np.pi / 180.0) + + # Register spatial offsets between all frames + if self.opts['ra_offset'] is not None: + # First, translate all coordinates to the coordinates of the first frame + # Note :: Don't need cosdec here, this just overrides the IFU coordinate centre of each 
frame + ref_shift_ra = self.ifu_ra[0] - self.ifu_ra + ref_shift_dec = self.ifu_dec[0] - self.ifu_dec + for ff in range(self.numfiles): + # Apply the shift + self.all_ra[self.all_idx == ff] += ref_shift_ra[ff] + self.opts['ra_offset'][ff]/3600.0 + self.all_dec[self.all_idx == ff] += ref_shift_dec[ff] + self.opts['dec_offset'][ff]/3600.0 + msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, opts['ra_offset'][ff], opts['dec_offset'][ff])) + else: + # Find the wavelength range where all frames overlap + min_wl, max_wl = datacube.get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength + np.min(mnmx_wv[:, :, 1]), # The min red wavelength + self.cubepar['whitelight_range']) # The user-specified values (if any) + # Get the good whitelight pixels + ww, wavediff = get_whitelight_pixels(self.all_wave, min_wl, max_wl) + # Iterate over white light image generation and spatial shifting + numiter = 2 + for dd in range(numiter): + msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") + # Setup the WCS to use for all white light images + ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied + image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra[ww], all_dec[ww], all_wave[ww], + dspat, wavediff, collapse=True) + if voxedge[2].size != 2: + msgs.error("Spectral range for WCS is incorrect for white light image") + + wl_imgs = generate_image_subpixel(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], + all_sci[ww], all_ivar[ww], all_wghts[ww], + all_spatpos[ww], all_specpos[ww], all_spatid[ww], + all_tilts, all_slits, all_align, voxedge, all_idx=all_idx[ww], + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + if reference_image is None: + # ref_idx will be the index of the cube with the highest S/N + ref_idx = np.argmax(weights) + reference_image = wl_imgs[:, :, ref_idx].copy() + msgs.info("Calculating spatial translation of each 
cube relative to cube #{0:d})".format(ref_idx+1)) + else: + msgs.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'") + + # Calculate the image offsets relative to the reference image + for ff in range(self.numfiles): + # Calculate the shift + ra_shift, dec_shift = calculate_image_phase(reference_image.copy(), wl_imgs[:, :, ff], maskval=0.0) + # Convert pixel shift to degrees shift + ra_shift *= dspat/cosdec + dec_shift *= dspat + msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff+1, ra_shift*3600.0, dec_shift*3600.0)) + # Apply the shift + all_ra[all_idx == ff] += ra_shift + all_dec[all_idx == ff] += dec_shift + + def compute_weights(self): + # Calculate the relative spectral weights of all pixels + if self.numfiles == 1: + # No need to calculate weights if there's just one frame + self.all_wghts = np.ones_like(self.all_sci) + else: + # Find the wavelength range where all frames overlap + min_wl, max_wl = datacube.get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength + np.min(mnmx_wv[:, :, 1]), # The min red wavelength + self.cubepar['whitelight_range']) # The user-specified values (if any) + # Get the good white light pixels + ww, wavediff = datacube.get_whitelight_pixels(all_wave, min_wl, max_wl) + # Get a suitable WCS + image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, wavediff, + collapse=True) + # Generate the white light image (note: hard-coding subpixel=1 in both directions, and combining into a single image) + wl_full = generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, + all_sci, all_ivar, all_wghts, + all_spatpos, all_specpos, all_spatid, + all_tilts, all_slits, all_align, voxedge, all_idx=all_idx, + spec_subpixel=1, spat_subpixel=1, combine=True) + # Compute the weights + all_wghts = datacube.compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, wl_full[:, :, 0], + dspat, 
dwv, relative_weights=self.cubepar['relative_weights']) + + def coadd(self): + """ + TODO :: Add docstring + """ + # First loop through all of the frames, load the data, and save datacubes if no combining is required + self.load() + + # No need to continue if we are not combining nor aligning frames + if not self.combine and not self.align: + return + + # Align the frames + if self.align: + self.run_align() + + # Compute the relative weights on the spectra + self.compute_weights() + + # Generate the WCS, and the voxel edges + cube_wcs, vox_edges, _ = datacube.create_wcs(self.cubepar, self.all_ra, self.all_dec, self.all_wave, dspat, dwv) + + sensfunc = None + if self.flux_spline is not None: + # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) + numwav = vox_edges[2].size - 1 + senswave = cube_wcs.spectral.wcs_pix2world(np.arange(numwav), 0)[0] * 1.0E10 + sensfunc = self.flux_spline(senswave) + + # Generate a datacube + outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) + if self.method in ['subpixel', 'ngp']: + # Generate the datacube + wl_wvrng = None + if self.cubepar['save_whitelight']: + wl_wvrng = datacube.get_whitelight_range(np.max(mnmx_wv[:, :, 0]), + np.min(mnmx_wv[:, :, 1]), + self.cubepar['whitelight_range']) + if self.combine: + generate_cube_subpixel(outfile, cube_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, + np.ones(all_wghts.size), # all_wghts, + all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, vox_edges, + all_idx=all_idx, overwrite=overwrite, blaze_wave=blaze_wave, + blaze_spec=blaze_spec, + fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, whitelight_range=wl_wvrng, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + else: + for ff in range(self.numfiles): + outfile = datacube.get_output_filename("", self.cubepar['output_filename'], False, ff) + ww = np.where(self.all_idx == ff) + generate_cube_subpixel(outfile, 
cube_wcs, all_ra[ww], all_dec[ww], all_wave[ww], all_sci[ww], + all_ivar[ww], np.ones(all_wghts[ww].size), + all_spatpos[ww], all_specpos[ww], all_spatid[ww], all_tilts[ff], + all_slits[ff], all_align[ff], vox_edges, + all_idx=all_idx[ww], overwrite=overwrite, blaze_wave=blaze_wave, + blaze_spec=blaze_spec, + fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, + whitelight_range=wl_wvrng, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) From b1b78088a80869bbf5c8c8d326e269da4b8d4cbf Mon Sep 17 00:00:00 2001 From: rcooke Date: Sat, 9 Sep 2023 20:14:28 +0100 Subject: [PATCH 05/81] refactor step 2 --- pypeit/coadd3d.py | 148 ++++++++++++++++++++++++---------------------- 1 file changed, 78 insertions(+), 70 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 68b7398fd9..664828ba37 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -295,7 +295,6 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=Fa assert False dspat = None if self.cubepar['spatial_delta'] is None else self.cubepar['spatial_delta'] / 3600.0 # binning size on the sky (/3600 to convert to degrees) dwv = self.cubepar['wave_delta'] # binning size in wavelength direction (in Angstroms) - flat_splines = dict() # A dictionary containing the splines of the flatfield @@ -633,6 +632,7 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwr show=False, debug=False): super().__init__(spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, show=show, debug=debug) + self.flat_splines = dict() # A dictionary containing the splines of the flatfield def get_alignments(self, spec2DObj, slits, frame_wcs, spat_flexure=None): """ @@ -666,6 +666,51 @@ def get_alignments(self, spec2DObj, slits, frame_wcs, spat_flexure=None): # Return the alignment splines return alignSplines + def get_grating_shift(self, flatfile, waveimg, slits, spat_flexure=None): + if flatfile not in self.flat_splines.keys(): + 
msgs.info("Calculating relative sensitivity for grating correction") + # Check if the Flat file exists + if not os.path.exists(flatfile): + msgs.error("Grating correction requested, but the following file does not exist:" + + msgs.newline() + flatfile) + # Load the Flat file + flatimages = flatfield.FlatImages.from_file(flatfile) + total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, + spat_flexure=spat_flexure) * \ + flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, + spat_flexure=spat_flexure) + flatframe = flatimages.pixelflat_raw / total_illum + if flatimages.pixelflat_spec_illum is None: + # Calculate the relative scale + scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits, + slit_illum_ref_idx=self.flatpar['slit_illum_ref_idx'], + model=None, + skymask=None, trim=self.flatpar['slit_trim'], + flexure=spat_flexure, + smooth_npix=self.flatpar['slit_illum_smooth_npix']) + else: + msgs.info("Using relative spectral illumination from FlatImages") + scale_model = flatimages.pixelflat_spec_illum + # Apply the relative scale and generate a 1D "spectrum" + onslit = waveimg != 0 + wavebins = np.linspace(np.min(waveimg[onslit]), np.max(waveimg[onslit]), slits.nspec) + hist, edge = np.histogram(waveimg[onslit], bins=wavebins, + weights=flatframe[onslit] / scale_model[onslit]) + cntr, edge = np.histogram(waveimg[onslit], bins=wavebins) + cntr = cntr.astype(float) + norm = (cntr != 0) / (cntr + (cntr == 0)) + spec_spl = hist * norm + wave_spl = 0.5 * (wavebins[1:] + wavebins[:-1]) + self.flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', + bounds_error=False, fill_value="extrapolate") + self.flat_splines[flatfile + "_wave"] = wave_spl.copy() + # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous + # exposure in this for loop) + if self.blaze_spline is None: + self.blaze_wave, self.blaze_spec = wave_spl, spec_spl + 
self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', + bounds_error=False, fill_value="extrapolate") + def load(self): """ TODO :: docstring @@ -800,55 +845,12 @@ def load(self): ivar_ext = ivar[onslit_gpm].copy() dwav_ext = dwaveimg[onslit_gpm].copy() - # Correct for sensitivity as a function of grating angle - # (this assumes the spectrum of the flatfield lamp has the same shape for all setups) - key = flatfield.FlatImages.calib_type.upper() - if key not in spec2DObj.calibs: - msgs.error('Processed flat calibration file not recorded by spec2d file!') - flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) - if cubepar['grating_corr'] and flatfile not in flat_splines.keys(): - msgs.info("Calculating relative sensitivity for grating correction") - # Check if the Flat file exists - if not os.path.exists(flatfile): - msgs.error("Grating correction requested, but the following file does not exist:" + - msgs.newline() + flatfile) - # Load the Flat file - flatimages = flatfield.FlatImages.from_file(flatfile) - total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, - spat_flexure=spat_flexure) * \ - flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, - spat_flexure=spat_flexure) - flatframe = flatimages.pixelflat_raw / total_illum - if flatimages.pixelflat_spec_illum is None: - # Calculate the relative scale - scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits, - slit_illum_ref_idx=flatpar['slit_illum_ref_idx'], - model=None, - skymask=None, trim=flatpar['slit_trim'], - flexure=spat_flexure, - smooth_npix=flatpar['slit_illum_smooth_npix']) - else: - msgs.info("Using relative spectral illumination from FlatImages") - scale_model = flatimages.pixelflat_spec_illum - # Apply the relative scale and generate a 1D "spectrum" - onslit = waveimg != 0 - wavebins = np.linspace(np.min(waveimg[onslit]), np.max(waveimg[onslit]), slits.nspec) - hist, edge = 
np.histogram(waveimg[onslit], bins=wavebins, - weights=flatframe[onslit] / scale_model[onslit]) - cntr, edge = np.histogram(waveimg[onslit], bins=wavebins) - cntr = cntr.astype(float) - norm = (cntr != 0) / (cntr + (cntr == 0)) - spec_spl = hist * norm - wave_spl = 0.5 * (wavebins[1:] + wavebins[:-1]) - flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', - bounds_error=False, fill_value="extrapolate") - flat_splines[flatfile + "_wave"] = wave_spl.copy() - # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous - # exposure in this for loop) - if blaze_spline is None: - blaze_wave, blaze_spec = wave_spl, spec_spl - blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', - bounds_error=False, fill_value="extrapolate") + # From here on out, work in sorted wavelengths + wvsrt = np.argsort(wave_ext) + wave_sort = wave_ext[wvsrt] + dwav_sort = dwav_ext[wvsrt] + # Here's an array to get back to the original ordering + resrt = np.argsort(wvsrt) # Perform extinction correction msgs.info("Applying extinction correction") @@ -857,39 +859,44 @@ def load(self): airmass = spec2DObj.head0[self.spec.meta['airmass']['card']] extinct = flux_calib.load_extinction_data(longitude, latitude, self.senspar['UVIS']['extinct_file']) # extinction_correction requires the wavelength is sorted - wvsrt = np.argsort(wave_ext) - ext_corr = flux_calib.extinction_correction(wave_ext[wvsrt] * units.AA, airmass, extinct) - # Grating correction - grat_corr = 1.0 + extcorr_sort = flux_calib.extinction_correction(wave_sort * units.AA, airmass, extinct) + + # Correct for sensitivity as a function of grating angle + # (this assumes the spectrum of the flatfield lamp has the same shape for all setups) + gratcorr_sort = 1.0 if self.cubepar['grating_corr']: - grat_corr = correct_grating_shift(wave_ext[wvsrt], flat_splines[flatfile + "_wave"], - flat_splines[flatfile], - blaze_wave, blaze_spline) + # Load the flatfield file + key = 
flatfield.FlatImages.calib_type.upper() + if key not in spec2DObj.calibs: + msgs.error('Processed flat calibration file not recorded by spec2d file!') + flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) + # Setup the grating correction + self.get_grating_shift(flatfile, waveimg, slits, spat_flexure=spat_flexure) + # Calculate the grating correction + gratcorr_sort = datacube.correct_grating_shift(wave_sort, self.flat_splines[flatfile + "_wave"], + self.flat_splines[flatfile], + self.blaze_wave, self.blaze_spline) # Sensitivity function - sens_func = 1.0 + sensfunc_sort = 1.0 if self.fluxcal: msgs.info("Calculating the sensitivity function") - sens_func = flux_spline(wave_ext[wvsrt]) + sensfunc_sort = self.flux_spline(wave_sort) # Convert the flux_sav to counts/s, correct for the relative sensitivity of different setups - ext_corr *= sens_func / (exptime * grat_corr) + extcorr_sort *= sensfunc_sort / (exptime * gratcorr_sort) # Correct for extinction - flux_sav = flux_ext[wvsrt] * ext_corr - ivar_sav = ivar_ext[wvsrt] / ext_corr ** 2 + flux_sort = flux_ext[wvsrt] * extcorr_sort + ivar_sort = ivar_ext[wvsrt] / extcorr_sort ** 2 # Convert units to Counts/s/Ang/arcsec2 # Slicer sampling * spatial pixel sampling sl_deg = np.sqrt(frame_wcs.wcs.cd[0, 0] ** 2 + frame_wcs.wcs.cd[1, 0] ** 2) px_deg = np.sqrt(frame_wcs.wcs.cd[1, 1] ** 2 + frame_wcs.wcs.cd[0, 1] ** 2) - scl_units = dwav_ext[wvsrt] * (3600.0 * sl_deg) * (3600.0 * px_deg) - flux_sav /= scl_units - ivar_sav *= scl_units ** 2 - - # sort back to the original ordering - resrt = np.argsort(wvsrt) - numpix = raimg[onslit_gpm].size + scl_units = dwav_sort * (3600.0 * sl_deg) * (3600.0 * px_deg) + flux_sort /= scl_units + ivar_sort *= scl_units ** 2 # Calculate the weights relative to the zeroth cube - weights[ff] = 1.0 # exptime #np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2 + self.weights[ff] = 1.0 # exptime #np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2 # Get the slit image and 
then unset pixels in the slit image that are bad this_specpos, this_spatpos = np.where(onslit_gpm) @@ -897,6 +904,7 @@ def load(self): # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now + numpix = raimg[onslit_gpm].size if not self.combine and not self.align: # Get the output filename if self.numfiles == 1 and self.cubepar['output_filename'] != "": From 39a1e8d4353bffd9e25fac242873a41ed1fa7b39 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 10 Sep 2023 10:34:12 +0100 Subject: [PATCH 06/81] deprecate unused --- pypeit/deprecated/datacube.py | 155 ++++++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) diff --git a/pypeit/deprecated/datacube.py b/pypeit/deprecated/datacube.py index 56b3c1cc27..abeb9a1e4c 100644 --- a/pypeit/deprecated/datacube.py +++ b/pypeit/deprecated/datacube.py @@ -288,3 +288,158 @@ def generate_cube_ngp(outfile, hdr, all_sci, all_ivar, all_wghts, vox_coord, bin final_cube = DataCube(datacube.T, np.sqrt(var_cube.T), bpmcube.T, specname, blaze_wave, blaze_spec, sensfunc=sensfunc, fluxed=fluxcal) final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) + + +def gaussian2D_cube(tup, intflux, xo, yo, dxdz, dydz, sigma_x, sigma_y, theta, offset): + """ + Fit a 2D Gaussian function to a datacube. This function assumes that each + wavelength slice of the datacube is well-fit by a 2D Gaussian. The centre of + the Gaussian is allowed to vary linearly as a function of wavelength. + + .. note:: + + The integrated flux does not vary with wavelength. 
+ + Args: + tup (:obj:`tuple`): + A three element tuple containing the x, y, and z locations of each + pixel in the cube + intflux (float): + The Integrated flux of the Gaussian + xo (float): + The centre of the Gaussian along the x-coordinate when z=0 + yo (float): + The centre of the Gaussian along the y-coordinate when z=0 + dxdz (float): + The change of xo with increasing z + dydz (float): + The change of yo with increasing z + sigma_x (float): + The standard deviation in the x-direction + sigma_y (float): + The standard deviation in the y-direction + theta (float): + The orientation angle of the 2D Gaussian + offset (float): + Constant offset + + Returns: + `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y, z) + """ + # Extract the (x, y, z) coordinates of each pixel from the tuple + (x, y, z) = tup + # Calculate the centre of the Gaussian for each z coordinate + xo = float(xo) + z*dxdz + yo = float(yo) + z*dydz + # Account for a rotated 2D Gaussian + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + # Normalise so that the integrated flux is a parameter, instead of the amplitude + norm = 1/(2*np.pi*np.sqrt(a*c-b*b)) + gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2))) + return gtwod.ravel() + + +def make_whitelight_frompixels(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat, + all_ivar=None, whitelightWCS=None, numra=None, numdec=None, trim=1): + """ + Generate a whitelight image using the individual pixels of every input frame + + Args: + all_ra (`numpy.ndarray`_): + 1D flattened array containing the RA values of each pixel from all + spec2d files + all_dec (`numpy.ndarray`_): + 1D flattened array containing the DEC values of each pixel from all + spec2d files + all_wave (`numpy.ndarray`_): + 1D flattened array 
containing the wavelength values of each pixel + from all spec2d files + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights attributed to each pixel + from all spec2d files + all_idx (`numpy.ndarray`_): + 1D flattened array containing an integer identifier indicating which + spec2d file each pixel originates from. For example, a 0 would + indicate that a pixel originates from the first spec2d frame listed + in the input file. a 1 would indicate that this pixel originates + from the second spec2d file, and so forth. + dspat (float): + The size of each spaxel on the sky (in degrees) + all_ivar (`numpy.ndarray`_, optional): + 1D flattened array containing of the inverse variance of each pixel + from all spec2d files. If provided, inverse variance images will be + calculated and returned for each white light image. + whitelightWCS (`astropy.wcs.WCS`_, optional): + The WCS of a reference white light image. If supplied, you must also + supply numra and numdec. + numra (int, optional): + Number of RA spaxels in the reference white light image + numdec (int, optional): + Number of DEC spaxels in the reference white light image + trim (int, optional): + Number of pixels to grow around a masked region + + Returns: + tuple: two 3D arrays will be returned, each of shape [N, M, numfiles], + where N and M are the spatial dimensions of the combined white light + images. The first array is a white light image, and the second array is + the corresponding inverse variance image. If all_ivar is None, this will + be an empty array. 
+ """ + # Determine number of files + numfiles = np.unique(all_idx).size + + if whitelightWCS is None: + # Generate a 2D WCS to register all frames + coord_min = [np.min(all_ra), np.min(all_dec), np.min(all_wave)] + coord_dlt = [dspat, dspat, np.max(all_wave) - np.min(all_wave)] + whitelightWCS = generate_WCS(coord_min, coord_dlt) + + # Generate coordinates + cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) + numra = 1+int((np.max(all_ra) - np.min(all_ra)) * cosdec / dspat) + numdec = 1+int((np.max(all_dec) - np.min(all_dec)) / dspat) + else: + # If a WCS is supplied, the numra and numdec must be specified + if (numra is None) or (numdec is None): + msgs.error("A WCS has been supplied to make_whitelight." + msgs.newline() + + "numra and numdec must also be specified") + xbins = np.arange(1 + numra) - 1 + ybins = np.arange(1 + numdec) - 1 + spec_bins = np.arange(2) - 1 + bins = (xbins, ybins, spec_bins) + + whitelight_Imgs = np.zeros((numra, numdec, numfiles)) + whitelight_ivar = np.zeros((numra, numdec, numfiles)) + for ff in range(numfiles): + msgs.info("Generating white light image of frame {0:d}/{1:d}".format(ff + 1, numfiles)) + ww = (all_idx == ff) + # Make the cube + pix_coord = whitelightWCS.wcs_world2pix(np.vstack((all_ra[ww], all_dec[ww], all_wave[ww] * 1.0E-10)).T, 0) + wlcube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_sci[ww] * all_wghts[ww]) + norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts[ww]) + nrmCube = (norm > 0) / (norm + (norm == 0)) + whtlght = (wlcube * nrmCube)[:, :, 0] + # Create a mask of good pixels (trim the edges) + gpm = grow_mask(whtlght == 0, trim) == 0 # A good pixel = 1 + whtlght *= gpm + # Set the masked regions to the minimum value + minval = np.min(whtlght[gpm == 1]) + whtlght[gpm == 0] = minval + # Store the white light image + whitelight_Imgs[:, :, ff] = whtlght.copy() + # Now operate on the inverse variance image + if all_ivar is not None: + ivar_img, _ = np.histogramdd(pix_coord, bins=bins, 
weights=all_ivar[ww]) + ivar_img = ivar_img[:, :, 0] + ivar_img *= gpm + minval = np.min(ivar_img[gpm == 1]) + ivar_img[gpm == 0] = minval + whitelight_ivar[:, :, ff] = ivar_img.copy() + return whitelight_Imgs, whitelight_ivar, whitelightWCS + From 8aae909976b08eaeb588d5daa3826a0d8c61e025 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 10 Sep 2023 10:34:20 +0100 Subject: [PATCH 07/81] restructure --- pypeit/core/datacube.py | 1137 ++++++++------------------------------- 1 file changed, 214 insertions(+), 923 deletions(-) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 2e0d6b1c11..0c299827ac 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -5,8 +5,6 @@ """ import os -import copy -import inspect from astropy import wcs, units from astropy.coordinates import AltAz, SkyCoord @@ -16,21 +14,97 @@ import numpy as np from pypeit import msgs -from pypeit import alignframe, datamodel, flatfield, io, specobj, spec2dobj, utils -from pypeit.core.flexure import calculate_image_phase -from pypeit.core import coadd, extract, findobj_skymask, flux_calib, parse, skysub -from pypeit.core.procimg import grow_mask -from pypeit.spectrographs.util import load_spectrograph - -# Use a fast histogram for speed! -try: - from fast_histogram import histogramdd -except ImportError: - histogramdd = None +from pypeit import utils +from pypeit.core import coadd from IPython import embed +def gaussian2D(tup, intflux, xo, yo, sigma_x, sigma_y, theta, offset): + """ + Fit a 2D Gaussian function to an image. 
+ + Args: + tup (:obj:`tuple`): + A two element tuple containing the x and y coordinates of each pixel + in the image + intflux (float): + The Integrated flux of the 2D Gaussian + xo (float): + The centre of the Gaussian along the x-coordinate when z=0 + yo (float): + The centre of the Gaussian along the y-coordinate when z=0 + sigma_x (float): + The standard deviation in the x-direction + sigma_y (float): + The standard deviation in the y-direction + theta (float): + The orientation angle of the 2D Gaussian + offset (float): + Constant offset + + Returns: + `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y) + """ + # Extract the (x, y, z) coordinates of each pixel from the tuple + (x, y) = tup + # Ensure these are floating point + xo = float(xo) + yo = float(yo) + # Account for a rotated 2D Gaussian + a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) + b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) + c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) + # Normalise so that the integrated flux is a parameter, instead of the amplitude + norm = 1/(2*np.pi*np.sqrt(a*c-b*b)) + gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2))) + return gtwod.ravel() + + +def fitGaussian2D(image, norm=False): + """ + Fit a 2D Gaussian to an input image. It is recommended that the input image + is scaled to a maximum value that is ~1, so that all fit parameters are of + the same order of magnitude. Set norm=True if you do not care about the + amplitude or integrated flux. Otherwise, make sure you scale the image by + a known value prior to passing it into this function. + + Parameters + ---------- + image : `numpy.ndarray`_ + A 2D input image + norm : bool, optional + If True, the input image will be normalised to the maximum value + of the input image. 
+ + Returns + ------- + popt : `numpy.ndarray`_ + The optimum parameters of the Gaussian in the following order: Integrated + flux, x center, y center, sigma_x, sigma_y, theta, offset. See + :func:`~pypeit.core.datacube.gaussian2D` for a more detailed description + of the model. + pcov : `numpy.ndarray`_ + Corresponding covariance matrix + """ + # Normalise if requested + wlscl = np.max(image) if norm else 1 + # Setup the coordinates + x = np.linspace(0, image.shape[0] - 1, image.shape[0]) + y = np.linspace(0, image.shape[1] - 1, image.shape[1]) + xx, yy = np.meshgrid(x, y, indexing='ij') + # Setup the fitting params + idx_max = [image.shape[0]/2, image.shape[1]/2] # Just use the centre of the image as the best guess + #idx_max = np.unravel_index(np.argmax(image), image.shape) + initial_guess = (1, idx_max[0], idx_max[1], 2, 2, 0, 0) + bounds = ([0, 0, 0, 0.5, 0.5, -np.pi, -np.inf], + [np.inf, image.shape[0], image.shape[1], image.shape[0], image.shape[1], np.pi, np.inf]) + # Perform the fit + popt, pcov = opt.curve_fit(gaussian2D, (xx, yy), image.ravel() / wlscl, bounds=bounds, p0=initial_guess) + # Return the fitting results + return popt, pcov + + def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pressure, temperature, rel_humidity): """ @@ -183,142 +257,6 @@ def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, ord return grat_corr -def gaussian2D_cube(tup, intflux, xo, yo, dxdz, dydz, sigma_x, sigma_y, theta, offset): - """ - Fit a 2D Gaussian function to a datacube. This function assumes that each - wavelength slice of the datacube is well-fit by a 2D Gaussian. The centre of - the Gaussian is allowed to vary linearly as a function of wavelength. - - .. note:: - - The integrated flux does not vary with wavelength. 
- - Args: - tup (:obj:`tuple`): - A three element tuple containing the x, y, and z locations of each - pixel in the cube - intflux (float): - The Integrated flux of the Gaussian - xo (float): - The centre of the Gaussian along the x-coordinate when z=0 - yo (float): - The centre of the Gaussian along the y-coordinate when z=0 - dxdz (float): - The change of xo with increasing z - dydz (float): - The change of yo with increasing z - sigma_x (float): - The standard deviation in the x-direction - sigma_y (float): - The standard deviation in the y-direction - theta (float): - The orientation angle of the 2D Gaussian - offset (float): - Constant offset - - Returns: - `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y, z) - """ - # Extract the (x, y, z) coordinates of each pixel from the tuple - (x, y, z) = tup - # Calculate the centre of the Gaussian for each z coordinate - xo = float(xo) + z*dxdz - yo = float(yo) + z*dydz - # Account for a rotated 2D Gaussian - a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) - b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) - c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) - # Normalise so that the integrated flux is a parameter, instead of the amplitude - norm = 1/(2*np.pi*np.sqrt(a*c-b*b)) - gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2))) - return gtwod.ravel() - - -def gaussian2D(tup, intflux, xo, yo, sigma_x, sigma_y, theta, offset): - """ - Fit a 2D Gaussian function to an image. 
- - Args: - tup (:obj:`tuple`): - A two element tuple containing the x and y coordinates of each pixel - in the image - intflux (float): - The Integrated flux of the 2D Gaussian - xo (float): - The centre of the Gaussian along the x-coordinate when z=0 - yo (float): - The centre of the Gaussian along the y-coordinate when z=0 - sigma_x (float): - The standard deviation in the x-direction - sigma_y (float): - The standard deviation in the y-direction - theta (float): - The orientation angle of the 2D Gaussian - offset (float): - Constant offset - - Returns: - `numpy.ndarray`_: The 2D Gaussian evaluated at the coordinate (x, y) - """ - # Extract the (x, y, z) coordinates of each pixel from the tuple - (x, y) = tup - # Ensure these are floating point - xo = float(xo) - yo = float(yo) - # Account for a rotated 2D Gaussian - a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) - b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) - c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) - # Normalise so that the integrated flux is a parameter, instead of the amplitude - norm = 1/(2*np.pi*np.sqrt(a*c-b*b)) - gtwod = offset + norm*intflux*np.exp(-(a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2))) - return gtwod.ravel() - - -def fitGaussian2D(image, norm=False): - """ - Fit a 2D Gaussian to an input image. It is recommended that the input image - is scaled to a maximum value that is ~1, so that all fit parameters are of - the same order of magnitude. Set norm=True if you do not care about the - amplitude or integrated flux. Otherwise, make sure you scale the image by - a known value prior to passing it into this function. - - Parameters - ---------- - image : `numpy.ndarray`_ - A 2D input image - norm : bool, optional - If True, the input image will be normalised to the maximum value - of the input image. 
- - Returns - ------- - popt : `numpy.ndarray`_ - The optimum parameters of the Gaussian in the following order: Integrated - flux, x center, y center, sigma_x, sigma_y, theta, offset. See - :func:`~pypeit.core.datacube.gaussian2D` for a more detailed description - of the model. - pcov : `numpy.ndarray`_ - Corresponding covariance matrix - """ - # Normalise if requested - wlscl = np.max(image) if norm else 1 - # Setup the coordinates - x = np.linspace(0, image.shape[0] - 1, image.shape[0]) - y = np.linspace(0, image.shape[1] - 1, image.shape[1]) - xx, yy = np.meshgrid(x, y, indexing='ij') - # Setup the fitting params - idx_max = [image.shape[0]/2, image.shape[1]/2] # Just use the centre of the image as the best guess - #idx_max = np.unravel_index(np.argmax(image), image.shape) - initial_guess = (1, idx_max[0], idx_max[1], 2, 2, 0, 0) - bounds = ([0, 0, 0, 0.5, 0.5, -np.pi, -np.inf], - [np.inf, image.shape[0], image.shape[1], image.shape[0], image.shape[1], np.pi, np.inf]) - # Perform the fit - popt, pcov = opt.curve_fit(gaussian2D, (xx, yy), image.ravel() / wlscl, bounds=bounds, p0=initial_guess) - # Return the fitting results - return popt, pcov - - def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): """ Extract a spectrum of a standard star from a datacube @@ -391,7 +329,7 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): flxcube -= skyspec.reshape((1, 1, numwave)) # Subtract the residual sky from the whitelight image - sky_val = np.sum(wl_img[:,:,np.newaxis] * smask) / np.sum(smask) + sky_val = np.sum(wl_img[:, :, np.newaxis] * smask) / np.sum(smask) wl_img -= sky_val if method == 'boxcar': @@ -411,84 +349,84 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): box_gpm = flxscl > 1/3 # Good pixels are those where at least one-third of the standard star flux is measured # Setup the return values ret_flux, ret_var, ret_gpm = box_flux, box_var, box_gpm - elif method == 'gauss2d': - msgs.error("Use method=boxcar... 
this method has not been thoroughly tested") - # Generate a mask - fitmask = np.logical_not(bpmcube) * mask - # Setup the coordinates - x = np.linspace(0, flxcube.shape[0] - 1, flxcube.shape[0]) - y = np.linspace(0, flxcube.shape[1] - 1, flxcube.shape[1]) - z = np.linspace(0, flxcube.shape[2] - 1, flxcube.shape[2]) - xx, yy, zz = np.meshgrid(x, y, z, indexing='ij') - # Normalise the flux in each wavelength channel - scispec = (flxcube * fitmask).sum(0).sum(0).reshape((1, 1, flxcube.shape[2])) - cntspec = fitmask.sum(0).sum(0).reshape((1, 1, flxcube.shape[2])) - # These operations are all inverted, because we need to divide flxcube by scispec - cntspec *= utils.inverse(scispec) - cubefit = flxcube * cntspec - cubesigfit = np.sqrt(varcube) * cntspec - # Setup the fit params - ww = np.where(fitmask) - initial_guess = (1, idx_max[0], idx_max[1], 0.0, 0.0, 2, 2, 0, 0) - bounds = ([-np.inf, 0, 0, -np.inf, -np.inf, 0.5, 0.5, -np.pi, -np.inf], - [np.inf,wl_img.shape[0],wl_img.shape[1],np.inf, np.inf, wl_img.shape[0],wl_img.shape[0],np.pi,np.inf]) - msgs.info("Fitting a 2D Gaussian to the datacube") - popt, pcov = opt.curve_fit(gaussian2D_cube, (xx[ww], yy[ww], zz[ww]), cubefit[ww], - sigma=cubesigfit[ww], bounds=bounds, p0=initial_guess) - # Subtract off the best-fitting continuum - popt[-1] = 0 - # Generate the best-fitting model to be used as an optimal profile - model = gaussian2D_cube((xx, yy, zz), *popt).reshape(flxcube.shape) - numim = flxcube.shape[0]*flxcube.shape[1] - - # Optimally extract - msgs.info("Optimally extracting...") - sciimg = (flxcube*mask).reshape((numim, numwave)).T - ivar = utils.inverse((varcube*mask**2).reshape((numim, numwave)).T) - optmask = fitmask.reshape((numim, numwave)).T - waveimg = np.ones((numwave, numim)) # Just a dummy array - not needed - skyimg = np.zeros((numwave, numim)) # Just a dummy array - not needed - thismask = np.ones((numwave, numim)) # Just a dummy array - not needed - oprof = model.reshape((numim, numwave)).T - sobj = 
specobj.SpecObj('SlicerIFU', 'DET01', SLITID=0) - extract.extract_optimal(sciimg, ivar, optmask, waveimg, skyimg, thismask, oprof, sobj) - opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG**2, sobj.OPT_MASK - # Setup the return values - ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm - elif method == 'optimal': - msgs.error("Use method=boxcar... this method has not been thoroughly tested") - # First do a boxcar along one dimension - msgs.info("Collapsing datacube to a 2D image") - omask = mask+smask - idx_sum = 0 - cntmask = np.logical_not(bpmcube) * omask - scimask = flxcube * cntmask - varmask = varcube * cntmask**2 - cnt_spec = cntmask.sum(idx_sum) * utils.inverse(omask.sum(idx_sum)) - nrmcnt = utils.inverse(cnt_spec) - box_sciimg = scimask.sum(idx_sum) * nrmcnt - box_scivar = varmask.sum(idx_sum) * nrmcnt**2 - box_sciivar = utils.inverse(box_scivar) - # Transpose for optimal - box_sciimg = box_sciimg.T - box_sciivar = box_sciivar.T - - # Prepare for optimal - msgs.info("Starting optimal extraction") - thismask = np.ones(box_sciimg.shape, dtype=bool) - nspec, nspat = thismask.shape[0], thismask.shape[1] - slit_left = np.zeros(nspec) - slit_right = np.ones(nspec)*(nspat-1) - tilts = np.outer(np.linspace(0.0,1.0,nspec), np.ones(nspat)) - waveimg = np.outer(wave.value, np.ones(nspat)) - global_sky = np.zeros_like(box_sciimg) - # Find objects and then extract - sobj = findobj_skymask.objs_in_slit(box_sciimg, thismask, slit_left, slit_right) - skysub.local_skysub_extract(box_sciimg, box_sciivar, tilts, waveimg, global_sky, thismask, slit_left, - slit_right, sobj, model_noise=False) - opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS[0,:], sobj.OPT_COUNTS_SIG[0,:]**2, sobj.OPT_MASK[0,:] - # Setup the return values - ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm + # elif method == 'gauss2d': + # msgs.error("Use method=boxcar... 
this method has not been thoroughly tested") + # # Generate a mask + # fitmask = np.logical_not(bpmcube) * mask + # # Setup the coordinates + # x = np.linspace(0, flxcube.shape[0] - 1, flxcube.shape[0]) + # y = np.linspace(0, flxcube.shape[1] - 1, flxcube.shape[1]) + # z = np.linspace(0, flxcube.shape[2] - 1, flxcube.shape[2]) + # xx, yy, zz = np.meshgrid(x, y, z, indexing='ij') + # # Normalise the flux in each wavelength channel + # scispec = (flxcube * fitmask).sum(0).sum(0).reshape((1, 1, flxcube.shape[2])) + # cntspec = fitmask.sum(0).sum(0).reshape((1, 1, flxcube.shape[2])) + # # These operations are all inverted, because we need to divide flxcube by scispec + # cntspec *= utils.inverse(scispec) + # cubefit = flxcube * cntspec + # cubesigfit = np.sqrt(varcube) * cntspec + # # Setup the fit params + # ww = np.where(fitmask) + # initial_guess = (1, idx_max[0], idx_max[1], 0.0, 0.0, 2, 2, 0, 0) + # bounds = ([-np.inf, 0, 0, -np.inf, -np.inf, 0.5, 0.5, -np.pi, -np.inf], + # [np.inf,wl_img.shape[0],wl_img.shape[1],np.inf, np.inf, wl_img.shape[0],wl_img.shape[0],np.pi,np.inf]) + # msgs.info("Fitting a 2D Gaussian to the datacube") + # popt, pcov = opt.curve_fit(gaussian2D_cube, (xx[ww], yy[ww], zz[ww]), cubefit[ww], + # sigma=cubesigfit[ww], bounds=bounds, p0=initial_guess) + # # Subtract off the best-fitting continuum + # popt[-1] = 0 + # # Generate the best-fitting model to be used as an optimal profile + # model = gaussian2D_cube((xx, yy, zz), *popt).reshape(flxcube.shape) + # numim = flxcube.shape[0]*flxcube.shape[1] + # + # # Optimally extract + # msgs.info("Optimally extracting...") + # sciimg = (flxcube*mask).reshape((numim, numwave)).T + # ivar = utils.inverse((varcube*mask**2).reshape((numim, numwave)).T) + # optmask = fitmask.reshape((numim, numwave)).T + # waveimg = np.ones((numwave, numim)) # Just a dummy array - not needed + # skyimg = np.zeros((numwave, numim)) # Just a dummy array - not needed + # thismask = np.ones((numwave, numim)) # Just a dummy 
array - not needed + # oprof = model.reshape((numim, numwave)).T + # sobj = specobj.SpecObj('SlicerIFU', 'DET01', SLITID=0) + # extract.extract_optimal(sciimg, ivar, optmask, waveimg, skyimg, thismask, oprof, sobj) + # opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG**2, sobj.OPT_MASK + # # Setup the return values + # ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm + # elif method == 'optimal': + # msgs.error("Use method=boxcar... this method has not been thoroughly tested") + # # First do a boxcar along one dimension + # msgs.info("Collapsing datacube to a 2D image") + # omask = mask+smask + # idx_sum = 0 + # cntmask = np.logical_not(bpmcube) * omask + # scimask = flxcube * cntmask + # varmask = varcube * cntmask**2 + # cnt_spec = cntmask.sum(idx_sum) * utils.inverse(omask.sum(idx_sum)) + # nrmcnt = utils.inverse(cnt_spec) + # box_sciimg = scimask.sum(idx_sum) * nrmcnt + # box_scivar = varmask.sum(idx_sum) * nrmcnt**2 + # box_sciivar = utils.inverse(box_scivar) + # # Transpose for optimal + # box_sciimg = box_sciimg.T + # box_sciivar = box_sciivar.T + # + # # Prepare for optimal + # msgs.info("Starting optimal extraction") + # thismask = np.ones(box_sciimg.shape, dtype=bool) + # nspec, nspat = thismask.shape[0], thismask.shape[1] + # slit_left = np.zeros(nspec) + # slit_right = np.ones(nspec)*(nspat-1) + # tilts = np.outer(np.linspace(0.0,1.0,nspec), np.ones(nspat)) + # waveimg = np.outer(wave.value, np.ones(nspat)) + # global_sky = np.zeros_like(box_sciimg) + # # Find objects and then extract + # sobj = findobj_skymask.objs_in_slit(box_sciimg, thismask, slit_left, slit_right) + # skysub.local_skysub_extract(box_sciimg, box_sciivar, tilts, waveimg, global_sky, thismask, slit_left, + # slit_right, sobj, model_noise=False) + # opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS[0,:], sobj.OPT_COUNTS_SIG[0,:]**2, sobj.OPT_MASK[0,:] + # # Setup the return values + # ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm else: msgs.error("Unknown 
extraction method: ", method) @@ -535,6 +473,54 @@ def make_good_skymask(slitimg, tilts): return gpm +def get_output_filename(fil, par_outfile, combine, idx=1): + """ + Get the output filename of a datacube, given the input + + Args: + fil (str): + The spec2d filename. + par_outfile (str): + The user-specified output filename (see cubepar['output_filename']) + combine (bool): + Should the input frames be combined into a single datacube? + idx (int, optional): + Index of filename to be saved. Required if combine=False. + + Returns: + str: The output filename to use. + """ + if combine: + if par_outfile == "": + par_outfile = "datacube.fits" + # Check the output files don't exist + outfile = par_outfile if ".fits" in par_outfile else par_outfile + ".fits" + else: + if par_outfile == "": + outfile = fil.replace("spec2d_", "spec3d_") + else: + # Use the output filename as a prefix + outfile = os.path.splitext(par_outfile)[0] + "_{0:03d}.fits".format(idx) + # Return the outfile + return outfile + + +def get_output_whitelight_filename(outfile): + """ + Given the output filename of a datacube, create an appropriate whitelight + fits file name + + Args: + outfile (str): + The output filename used for the datacube. + + Returns: + str: The output filename to use for the whitelight image. 
+ """ + out_wl_filename = os.path.splitext(outfile)[0] + "_whitelight.fits" + return out_wl_filename + + def get_whitelight_pixels(all_wave, min_wl, max_wl): """ Determine which pixels are included within the specified wavelength range @@ -669,208 +655,6 @@ def load_imageWCS(filename, ext=0): return image, imgwcs -def make_whitelight_frompixels(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat, - all_ivar=None, whitelightWCS=None, numra=None, numdec=None, trim=1): - """ - Generate a whitelight image using the individual pixels of every input frame - - Args: - all_ra (`numpy.ndarray`_): - 1D flattened array containing the RA values of each pixel from all - spec2d files - all_dec (`numpy.ndarray`_): - 1D flattened array containing the DEC values of each pixel from all - spec2d files - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength values of each pixel - from all spec2d files - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights attributed to each pixel - from all spec2d files - all_idx (`numpy.ndarray`_): - 1D flattened array containing an integer identifier indicating which - spec2d file each pixel originates from. For example, a 0 would - indicate that a pixel originates from the first spec2d frame listed - in the input file. a 1 would indicate that this pixel originates - from the second spec2d file, and so forth. - dspat (float): - The size of each spaxel on the sky (in degrees) - all_ivar (`numpy.ndarray`_, optional): - 1D flattened array containing of the inverse variance of each pixel - from all spec2d files. If provided, inverse variance images will be - calculated and returned for each white light image. - whitelightWCS (`astropy.wcs.WCS`_, optional): - The WCS of a reference white light image. If supplied, you must also - supply numra and numdec. 
- numra (int, optional): - Number of RA spaxels in the reference white light image - numdec (int, optional): - Number of DEC spaxels in the reference white light image - trim (int, optional): - Number of pixels to grow around a masked region - - Returns: - tuple: two 3D arrays will be returned, each of shape [N, M, numfiles], - where N and M are the spatial dimensions of the combined white light - images. The first array is a white light image, and the second array is - the corresponding inverse variance image. If all_ivar is None, this will - be an empty array. - """ - # Determine number of files - numfiles = np.unique(all_idx).size - - if whitelightWCS is None: - # Generate a 2D WCS to register all frames - coord_min = [np.min(all_ra), np.min(all_dec), np.min(all_wave)] - coord_dlt = [dspat, dspat, np.max(all_wave) - np.min(all_wave)] - whitelightWCS = generate_WCS(coord_min, coord_dlt) - - # Generate coordinates - cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) - numra = 1+int((np.max(all_ra) - np.min(all_ra)) * cosdec / dspat) - numdec = 1+int((np.max(all_dec) - np.min(all_dec)) / dspat) - else: - # If a WCS is supplied, the numra and numdec must be specified - if (numra is None) or (numdec is None): - msgs.error("A WCS has been supplied to make_whitelight." 
+ msgs.newline() + - "numra and numdec must also be specified") - xbins = np.arange(1 + numra) - 1 - ybins = np.arange(1 + numdec) - 1 - spec_bins = np.arange(2) - 1 - bins = (xbins, ybins, spec_bins) - - whitelight_Imgs = np.zeros((numra, numdec, numfiles)) - whitelight_ivar = np.zeros((numra, numdec, numfiles)) - for ff in range(numfiles): - msgs.info("Generating white light image of frame {0:d}/{1:d}".format(ff + 1, numfiles)) - ww = (all_idx == ff) - # Make the cube - pix_coord = whitelightWCS.wcs_world2pix(np.vstack((all_ra[ww], all_dec[ww], all_wave[ww] * 1.0E-10)).T, 0) - wlcube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_sci[ww] * all_wghts[ww]) - norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts[ww]) - nrmCube = (norm > 0) / (norm + (norm == 0)) - whtlght = (wlcube * nrmCube)[:, :, 0] - # Create a mask of good pixels (trim the edges) - gpm = grow_mask(whtlght == 0, trim) == 0 # A good pixel = 1 - whtlght *= gpm - # Set the masked regions to the minimum value - minval = np.min(whtlght[gpm == 1]) - whtlght[gpm == 0] = minval - # Store the white light image - whitelight_Imgs[:, :, ff] = whtlght.copy() - # Now operate on the inverse variance image - if all_ivar is not None: - ivar_img, _ = np.histogramdd(pix_coord, bins=bins, weights=all_ivar[ww]) - ivar_img = ivar_img[:, :, 0] - ivar_img *= gpm - minval = np.min(ivar_img[gpm == 1]) - ivar_img[gpm == 0] = minval - whitelight_ivar[:, :, ff] = ivar_img.copy() - return whitelight_Imgs, whitelight_ivar, whitelightWCS - - -def create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equinox=2000.0, - specname="PYP_SPEC"): - """ - Create a WCS and the expected edges of the voxels, based on user-specified - parameters or the extremities of the data. 
- - Parameters - ---------- - cubepar : :class:`~pypeit.par.pypeitpar.CubePar` - An instance of the CubePar parameter set, contained parameters of the - datacube reduction - all_ra : `numpy.ndarray`_ - 1D flattened array containing the RA values of each pixel from all - spec2d files - all_dec : `numpy.ndarray`_ - 1D flattened array containing the DEC values of each pixel from all - spec2d files - all_wave : `numpy.ndarray`_ - 1D flattened array containing the wavelength values of each pixel from - all spec2d files - dspat : float - Spatial size of each square voxel (in arcsec). The default is to use the - values in cubepar. - dwv : float - Linear wavelength step of each voxel (in Angstroms) - collapse : bool, optional - If True, the spectral dimension will be collapsed to a single channel - (primarily for white light images) - equinox : float, optional - Equinox of the WCS - specname : str, optional - Name of the spectrograph - - Returns - ------- - cubewcs : `astropy.wcs.WCS`_ - astropy WCS to be used for the combined cube - voxedges : tuple - A three element tuple containing the bin edges in the x, y (spatial) and - z (wavelength) dimensions - reference_image : `numpy.ndarray`_ - The reference image to be used for the cross-correlation. Can be None. 
- """ - # Grab cos(dec) for convenience - cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) - - # Setup the cube ranges - reference_image = None # The default behaviour is that the reference image is not used - ra_min = cubepar['ra_min'] if cubepar['ra_min'] is not None else np.min(all_ra) - ra_max = cubepar['ra_max'] if cubepar['ra_max'] is not None else np.max(all_ra) - dec_min = cubepar['dec_min'] if cubepar['dec_min'] is not None else np.min(all_dec) - dec_max = cubepar['dec_max'] if cubepar['dec_max'] is not None else np.max(all_dec) - wav_min = cubepar['wave_min'] if cubepar['wave_min'] is not None else np.min(all_wave) - wav_max = cubepar['wave_max'] if cubepar['wave_max'] is not None else np.max(all_wave) - dwave = cubepar['wave_delta'] if cubepar['wave_delta'] is not None else dwv - - # Number of voxels in each dimension - numra = int((ra_max-ra_min) * cosdec / dspat) - numdec = int((dec_max-dec_min)/dspat) - numwav = int(np.round((wav_max-wav_min)/dwave)) - - # If a white light WCS is being generated, make sure there's only 1 wavelength bin - if collapse: - wav_min = np.min(all_wave) - wav_max = np.max(all_wave) - dwave = wav_max - wav_min - numwav = 1 - - # Generate a master WCS to register all frames - coord_min = [ra_min, dec_min, wav_min] - coord_dlt = [dspat, dspat, dwave] - - # If a reference image is being used and a white light image is requested (collapse=True) update the celestial parts - if cubepar["reference_image"] is not None: - # Load the requested reference image - reference_image, imgwcs = load_imageWCS(cubepar["reference_image"]) - # Update the celestial WCS - coord_min[:2] = imgwcs.wcs.crval - coord_dlt[:2] = imgwcs.wcs.cdelt - numra, numdec = reference_image.shape - - cubewcs = generate_WCS(coord_min, coord_dlt, equinox=equinox, name=specname) - msgs.info(msgs.newline() + "-" * 40 + - msgs.newline() + "Parameters of the WCS:" + - msgs.newline() + "RA min = {0:f}".format(coord_min[0]) + - msgs.newline() + "DEC min = 
{0:f}".format(coord_min[1]) + - msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) + - msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0*dspat) + - msgs.newline() + "Wavelength step = {0:f} A".format(dwave) + - msgs.newline() + "-" * 40) - - # Generate the output binning - xbins = np.arange(1+numra)-0.5 - ybins = np.arange(1+numdec)-0.5 - spec_bins = np.arange(1+numwav)-0.5 - voxedges = (xbins, ybins, spec_bins) - return cubewcs, voxedges, reference_image - - def generate_WCS(crval, cdelt, equinox=2000.0, name="PYP_SPEC"): """ Generate a WCS that will cover all input spec2D files @@ -1008,496 +792,3 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white bounds_error=False, fill_value="extrapolate")(all_wave[ww]) msgs.info("Optimal weighting complete") return all_wghts - - -def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, bins, all_idx=None, - spec_subpixel=10, spat_subpixel=10, combine=False): - """ - Generate a white light image from the input pixels - - Args: - image_wcs (`astropy.wcs.WCS`_): - World coordinate system to use for the white light images. 
- all_ra (`numpy.ndarray`_): - 1D flattened array containing the right ascension of each pixel - (units = degrees) - all_dec (`numpy.ndarray`_): - 1D flattened array containing the declination of each pixel (units = - degrees) - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = - Angstroms) - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_ivar (`numpy.ndarray`_): - 1D flattened array containing the inverse variance of each pixel - from all spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights of each pixel to be used - in the combination - all_spatpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spatial direction - all_specpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spectral direction - all_spatid (`numpy.ndarray`_): - 1D flattened array containing the spatid of each pixel - tilts (`numpy.ndarray`_, list): - 2D wavelength tilts frame, or a list of tilt frames (see all_idx) - slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): - Information stored about the slits, or a list of SlitTraceSet (see - all_idx) - astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): - A Class containing the transformation between detector pixel - coordinates and WCS pixel coordinates, or a list of Alignment - Splines (see all_idx) - bins (tuple): - A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial - and z wavelength coordinates - all_idx (`numpy.ndarray`_, optional): - If tilts, slits, and astrom_trans are lists, this should contain a - 1D flattened array, of the same length as all_sci, containing the - index the tilts, slits, and astrom_trans lists that corresponds to - each pixel. Note that, in this case all of these lists need to be - the same length. 
- spec_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spectral direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spectral - direction. - spat_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spatial direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spatial - direction. - combine (:obj:`bool`, optional): - If True, all of the input frames will be combined into a single - output. Otherwise, individual images will be generated. - - Returns: - `numpy.ndarray`_: The white light images for all frames - """ - # Perform some checks on the input -- note, more complete checks are performed in subpixellate() - _all_idx = np.zeros(all_sci.size) if all_idx is None else all_idx - if combine: - numfr = 1 - else: - numfr = np.unique(_all_idx).size - if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr: - msgs.error("The following arguments must be the same length as the expected number of frames to be combined:" - + msgs.newline() + "tilts, slits, astrom_trans") - # Prepare the array of white light images to be stored - numra = bins[0].size-1 - numdec = bins[1].size-1 - all_wl_imgs = np.zeros((numra, numdec, numfr)) - - # Loop through all frames and generate white light images - for fr in range(numfr): - msgs.info(f"Creating image {fr+1}/{numfr}") - if combine: - # Subpixellate - img, _, _ = subpixellate(image_wcs, all_ra, all_dec, all_wave, - all_sci, all_ivar, all_wghts, all_spatpos, - all_specpos, all_spatid, tilts, slits, astrom_trans, bins, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, all_idx=_all_idx) - else: - ww = np.where(_all_idx == fr) - # 
Subpixellate - img, _, _ = subpixellate(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], - all_sci[ww], all_ivar[ww], all_wghts[ww], all_spatpos[ww], - all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], bins, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) - all_wl_imgs[:, :, fr] = img[:, :, 0] - # Return the constructed white light images - return all_wl_imgs - - -def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, - all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None, - blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None, - specname="PYP_SPEC", debug=False): - r""" - Save a datacube using the subpixel algorithm. Refer to the subpixellate() - docstring for further details about this algorithm - - Args: - outfile (str): - Filename to be used to save the datacube - output_wcs (`astropy.wcs.WCS`_): - Output world coordinate system. 
- all_ra (`numpy.ndarray`_): - 1D flattened array containing the right ascension of each pixel - (units = degrees) - all_dec (`numpy.ndarray`_): - 1D flattened array containing the declination of each pixel (units = - degrees) - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = - Angstroms) - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_ivar (`numpy.ndarray`_): - 1D flattened array containing the inverse variance of each pixel - from all spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights of each pixel to be used - in the combination - all_spatpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spatial direction - all_specpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spectral direction - all_spatid (`numpy.ndarray`_): - 1D flattened array containing the spatid of each pixel - tilts (`numpy.ndarray`_, list): - 2D wavelength tilts frame, or a list of tilt frames (see all_idx) - slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): - Information stored about the slits, or a list of SlitTraceSet (see - all_idx) - astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): - A Class containing the transformation between detector pixel - coordinates and WCS pixel coordinates, or a list of Alignment - Splines (see all_idx) - bins (tuple): - A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial - and z wavelength coordinates - all_idx (`numpy.ndarray`_, optional): - If tilts, slits, and astrom_trans are lists, this should contain a - 1D flattened array, of the same length as all_sci, containing the - index the tilts, slits, and astrom_trans lists that corresponds to - each pixel. Note that, in this case all of these lists need to be - the same length. 
- spec_subpixel (int, optional): - What is the subpixellation factor in the spectral direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spectral - direction. - spat_subpixel (int, optional): - What is the subpixellation factor in the spatial direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spatial - direction. - overwrite (bool, optional): - If True, the output cube will be overwritten. - blaze_wave (`numpy.ndarray`_, optional): - Wavelength array of the spectral blaze function - blaze_spec (`numpy.ndarray`_, optional): - Spectral blaze function - fluxcal (bool, optional): - Are the data flux calibrated? If True, the units are: :math:`{\rm - erg/s/cm}^2{\rm /Angstrom/arcsec}^2` multiplied by the - PYPEIT_FLUX_SCALE. Otherwise, the units are: :math:`{\rm - counts/s/Angstrom/arcsec}^2`. - sensfunc (`numpy.ndarray`_, None, optional): - Sensitivity function that has been applied to the datacube - whitelight_range (None, list, optional): - A two element list that specifies the minimum and maximum - wavelengths (in Angstroms) to use when constructing the white light - image (format is: [min_wave, max_wave]). If None, the cube will be - collapsed over the full wavelength range. If a list is provided an - either element of the list is None, then the minimum/maximum - wavelength range of that element will be set by the minimum/maximum - wavelength of all_wave. - specname (str, optional): - Name of the spectrograph - debug (bool, optional): - If True, a residuals cube will be output. If the datacube generation - is correct, the distribution of pixels in the residual cube with no - flux should have mean=0 and std=1. 
- """ - # Prepare the header, and add the unit of flux to the header - hdr = output_wcs.to_header() - if fluxcal: - hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2") - else: - hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") - - # Subpixellate - subpix = subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, bins, all_idx=all_idx, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug) - # Extract the variables that we need - if debug: - datacube, varcube, bpmcube, residcube = subpix - # Save a residuals cube - outfile_resid = outfile.replace(".fits", "_resid.fits") - msgs.info("Saving residuals datacube as: {0:s}".format(outfile_resid)) - hdu = fits.PrimaryHDU(residcube.T, header=hdr) - hdu.writeto(outfile_resid, overwrite=overwrite) - else: - datacube, varcube, bpmcube = subpix - - # Check if the user requested a white light image - if whitelight_range is not None: - # Grab the WCS of the white light image - whitelight_wcs = output_wcs.celestial - # Determine the wavelength range of the whitelight image - if whitelight_range[0] is None: - whitelight_range[0] = np.min(all_wave) - if whitelight_range[1] is None: - whitelight_range[1] = np.max(all_wave) - msgs.info("White light image covers the wavelength range {0:.2f} A - {1:.2f} A".format( - whitelight_range[0], whitelight_range[1])) - # Get the output filename for the white light image - out_whitelight = get_output_whitelight_filename(outfile) - nspec = datacube.shape[2] - # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) - wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] - whitelight_img = make_whitelight_fromcube(datacube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) - msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) 
- img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) - img_hdu.writeto(out_whitelight, overwrite=overwrite) - - # Write out the datacube - msgs.info("Saving datacube as: {0:s}".format(outfile)) - final_cube = DataCube(datacube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, - sensfunc=sensfunc, fluxed=fluxcal) - final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) - - -def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, bins, all_idx=None, - spec_subpixel=10, spat_subpixel=10, debug=False): - r""" - Subpixellate the input data into a datacube. This algorithm splits each - detector pixel into multiple subpixels, and then assigns each subpixel to a - voxel. For example, if ``spec_subpixel = spat_subpixel = 10``, then each - detector pixel is divided into :math:`10^2=100` subpixels. Alternatively, - when spec_subpixel = spat_subpixel = 1, this corresponds to the nearest grid - point (NGP) algorithm. - - Important Note: If spec_subpixel > 1 or spat_subpixel > 1, the errors will - be correlated, and the covariance is not being tracked, so the errors will - not be (quite) right. There is a tradeoff one has to make between sampling - and better looking cubes, versus no sampling and better behaved errors. - - Args: - output_wcs (`astropy.wcs.WCS`_): - Output world coordinate system. 
- all_ra (`numpy.ndarray`_): - 1D flattened array containing the right ascension of each pixel - (units = degrees) - all_dec (`numpy.ndarray`_): - 1D flattened array containing the declination of each pixel (units = - degrees) - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = - Angstroms) - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_ivar (`numpy.ndarray`_): - 1D flattened array containing the inverse variance of each pixel - from all spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights of each pixel to be used - in the combination - all_spatpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spatial direction - all_specpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spectral direction - all_spatid (`numpy.ndarray`_): - 1D flattened array containing the spatid of each pixel - tilts (`numpy.ndarray`_, list): - 2D wavelength tilts frame, or a list of tilt frames (see all_idx) - slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): - Information stored about the slits, or a list of SlitTraceSet (see - all_idx) - astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): - A Class containing the transformation between detector pixel - coordinates and WCS pixel coordinates, or a list of Alignment - Splines (see all_idx) - bins (tuple): - A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial - and z wavelength coordinates - all_idx (`numpy.ndarray`_, optional): - If tilts, slits, and astrom_trans are lists, this should contain a - 1D flattened array, of the same length as all_sci, containing the - index the tilts, slits, and astrom_trans lists that corresponds to - each pixel. Note that, in this case all of these lists need to be - the same length. 
- spec_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spectral direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spectral - direction. - spat_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spatial direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spatial - direction. - debug (bool): - If True, a residuals cube will be output. If the datacube generation - is correct, the distribution of pixels in the residual cube with no - flux should have mean=0 and std=1. - - Returns: - :obj:`tuple`: Three or four `numpy.ndarray`_ objects containing (1) the - datacube generated from the subpixellated inputs, (2) the corresponding - variance cube, (3) the corresponding bad pixel mask cube, and (4) the - residual cube. The latter is only returned if debug is True. - """ - # Check for combinations of lists or not - if type(tilts) is list and type(slits) is list and type(astrom_trans) is list: - # Several frames are being combined. 
Check the lists have the same length - numframes = len(tilts) - if len(slits) != numframes or len(astrom_trans) != numframes: - msgs.error("The following lists must have the same length:" + msgs.newline() + - "tilts, slits, astrom_trans") - # Check all_idx has been set - if all_idx is None: - if numframes != 1: - msgs.error("Missing required argument for combining frames: all_idx") - else: - all_idx = np.zeros(all_sci.size) - else: - tmp = np.unique(all_idx).size - if tmp != numframes: - msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.") - # Store in the following variables - _tilts, _slits, _astrom_trans = tilts, slits, astrom_trans - elif type(tilts) is not list and type(slits) is not list and \ - type(astrom_trans) is not list: - # Just a single frame - store as lists for this code - _tilts, _slits, _astrom_trans = [tilts], [slits], [astrom_trans], - all_idx = np.zeros(all_sci.size) - numframes = 1 - else: - msgs.error("The following input arguments should all be of type 'list', or all not be type 'list':" + - msgs.newline() + "tilts, slits, astrom_trans") - # Prepare the output arrays - outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1) - binrng = [[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]] - datacube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape) - if debug: - residcube = np.zeros(outshape) - # Divide each pixel into subpixels - spec_offs = np.arange(0.5/spec_subpixel, 1, 1/spec_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. - spat_offs = np.arange(0.5/spat_subpixel, 1, 1/spat_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. 
- spat_x, spec_y = np.meshgrid(spat_offs, spec_offs) - num_subpixels = spec_subpixel * spat_subpixel - area = 1 / num_subpixels - all_wght_subpix = all_wghts * area - all_var = utils.inverse(all_ivar) - # Loop through all exposures - for fr in range(numframes): - # Extract tilts and slits for convenience - this_tilts = _tilts[fr] - this_slits = _slits[fr] - # Loop through all slits - for sl, spatid in enumerate(this_slits.spat_id): - if numframes == 1: - msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits}") - else: - msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits} of frame {fr+1}/{numframes}") - this_sl = np.where((all_spatid == spatid) & (all_idx == fr)) - wpix = (all_specpos[this_sl], all_spatpos[this_sl]) - # Generate a spline between spectral pixel position and wavelength - yspl = this_tilts[wpix]*(this_slits.nspec - 1) - tiltpos = np.add.outer(yspl, spec_y).flatten() - wspl = all_wave[this_sl] - asrt = np.argsort(yspl) - wave_spl = interp1d(yspl[asrt], wspl[asrt], kind='linear', bounds_error=False, fill_value='extrapolate') - # Calculate spatial and spectral positions of the subpixels - spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() - spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() - # Transform this to spatial location - spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy) - spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl]) - ra_coeff = np.polyfit(spatpos, all_ra[this_sl], 1) - dec_coeff = np.polyfit(spatpos, all_dec[this_sl], 1) - this_ra = np.polyval(ra_coeff, spatpos_subpix)#ra_spl(spatpos_subpix) - this_dec = np.polyval(dec_coeff, spatpos_subpix)#dec_spl(spatpos_subpix) - # ssrt = np.argsort(spatpos) - # ra_spl = interp1d(spatpos[ssrt], all_ra[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') - # dec_spl = interp1d(spatpos[ssrt], all_dec[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') - # this_ra = 
ra_spl(spatpos_subpix) - # this_dec = dec_spl(spatpos_subpix) - this_wave = wave_spl(tiltpos) - # Convert world coordinates to voxel coordinates, then histogram - vox_coord = output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0) - if histogramdd is not None: - # use the "fast histogram" algorithm, that assumes regular bin spacing - datacube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels)) - varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels)) - normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels)) - if debug: - residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels)) - else: - datacube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))[0] - varcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))[0] - normcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))[0] - if debug: - residcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels))[0] - # Normalise the datacube and variance cube - nc_inverse = utils.inverse(normcube) - datacube *= nc_inverse - varcube *= nc_inverse**2 - bpmcube = (normcube == 0).astype(np.uint8) - if debug: - residcube *= nc_inverse - return datacube, varcube, bpmcube, residcube - return datacube, varcube, bpmcube - - -def get_output_filename(fil, par_outfile, combine, idx=1): - """ - Get the output filename of a datacube, given the input - - Args: - fil (str): - The spec2d filename. 
- par_outfile (str): - The user-specified output filename (see cubepar['output_filename']) - combine (bool): - Should the input frames be combined into a single datacube? - idx (int, optional): - Index of filename to be saved. Required if combine=False. - - Returns: - str: The output filename to use. - """ - if combine: - if par_outfile == "": - par_outfile = "datacube.fits" - # Check the output files don't exist - outfile = par_outfile if ".fits" in par_outfile else par_outfile + ".fits" - else: - if par_outfile == "": - outfile = fil.replace("spec2d_", "spec3d_") - else: - # Use the output filename as a prefix - outfile = os.path.splitext(par_outfile)[0] + "_{0:03d}.fits".format(idx) - # Return the outfile - return outfile - - -def get_output_whitelight_filename(outfile): - """ - Given the output filename of a datacube, create an appropriate whitelight - fits file name - - Args: - outfile (str): - The output filename used for the datacube. - - Returns: - str: The output filename to use for the whitelight image. 
- """ - out_wl_filename = os.path.splitext(outfile)[0] + "_whitelight.fits" - return out_wl_filename - From 97f695accfed984496ee3e0f0d4c871491190883 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 10 Sep 2023 11:01:38 +0100 Subject: [PATCH 08/81] restructured --- pypeit/coadd3d.py | 765 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 661 insertions(+), 104 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 664828ba37..a686b1e6f4 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -9,17 +9,15 @@ import inspect from astropy import wcs, units -from astropy.coordinates import AltAz, SkyCoord +from astropy.coordinates import SkyCoord from astropy.io import fits -import scipy.optimize as opt from scipy.interpolate import interp1d import numpy as np from pypeit import msgs -from pypeit import alignframe, datamodel, flatfield, io, specobj, spec2dobj, utils +from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils from pypeit.core.flexure import calculate_image_phase -from pypeit.core import coadd, datacube, extract, findobj_skymask, flux_calib, parse, skysub -from pypeit.core.procimg import grow_mask +from pypeit.core import datacube, flux_calib, parse from pypeit.spectrographs.util import load_spectrograph # Use a fast histogram for speed! @@ -288,17 +286,8 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=Fa self.all_wcs = [] self.weights = np.ones(self.numfiles) # Weights to use when combining cubes - - - - # TODO :: need to sort out what to do with these - make them self. as well? 
- assert False - dspat = None if self.cubepar['spatial_delta'] is None else self.cubepar['spatial_delta'] / 3600.0 # binning size on the sky (/3600 to convert to degrees) - dwv = self.cubepar['wave_delta'] # binning size in wavelength direction (in Angstroms) - - - - + self._dspat = None if self.cubepar['spatial_delta'] is None else self.cubepar['spatial_delta'] / 3600.0 # binning size on the sky (/3600 to convert to degrees) + self._dwv = self.cubepar['wave_delta'] # linear binning size in wavelength direction (in Angstroms) # Extract some commonly used variables self.method = self.cubepar['method'].lower() @@ -385,6 +374,101 @@ def check_outputs(self): if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) + def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equinox=2000.0, + specname="PYP_SPEC"): + """ + Create a WCS and the expected edges of the voxels, based on user-specified + parameters or the extremities of the data. + + Parameters + ---------- + all_ra : `numpy.ndarray`_ + 1D flattened array containing the RA values of each pixel from all + spec2d files + all_dec : `numpy.ndarray`_ + 1D flattened array containing the DEC values of each pixel from all + spec2d files + all_wave : `numpy.ndarray`_ + 1D flattened array containing the wavelength values of each pixel from + all spec2d files + dspat : float + Spatial size of each square voxel (in arcsec). The default is to use the + values in cubepar. 
+ dwv : float + Linear wavelength step of each voxel (in Angstroms) + collapse : bool, optional + If True, the spectral dimension will be collapsed to a single channel + (primarily for white light images) + equinox : float, optional + Equinox of the WCS + specname : str, optional + Name of the spectrograph + + Returns + ------- + cubewcs : `astropy.wcs.WCS`_ + astropy WCS to be used for the combined cube + voxedges : tuple + A three element tuple containing the bin edges in the x, y (spatial) and + z (wavelength) dimensions + reference_image : `numpy.ndarray`_ + The reference image to be used for the cross-correlation. Can be None. + """ + # Grab cos(dec) for convenience + cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) + + # Setup the cube ranges + reference_image = None # The default behaviour is that the reference image is not used + ra_min = self.cubepar['ra_min'] if self.cubepar['ra_min'] is not None else np.min(all_ra) + ra_max = self.cubepar['ra_max'] if self.cubepar['ra_max'] is not None else np.max(all_ra) + dec_min = self.cubepar['dec_min'] if self.cubepar['dec_min'] is not None else np.min(all_dec) + dec_max = self.cubepar['dec_max'] if self.cubepar['dec_max'] is not None else np.max(all_dec) + wav_min = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else np.min(all_wave) + wav_max = self.cubepar['wave_max'] if self.cubepar['wave_max'] is not None else np.max(all_wave) + dwave = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else dwv + + # Number of voxels in each dimension + numra = int((ra_max - ra_min) * cosdec / dspat) + numdec = int((dec_max - dec_min) / dspat) + numwav = int(np.round((wav_max - wav_min) / dwave)) + + # If a white light WCS is being generated, make sure there's only 1 wavelength bin + if collapse: + wav_min = np.min(all_wave) + wav_max = np.max(all_wave) + dwave = wav_max - wav_min + numwav = 1 + + # Generate a master WCS to register all frames + coord_min = [ra_min, dec_min, wav_min] + 
coord_dlt = [dspat, dspat, dwave] + + # If a reference image is being used and a white light image is requested (collapse=True) update the celestial parts + if self.cubepar["reference_image"] is not None: + # Load the requested reference image + reference_image, imgwcs = datacube.load_imageWCS(self.cubepar["reference_image"]) + # Update the celestial WCS + coord_min[:2] = imgwcs.wcs.crval + coord_dlt[:2] = imgwcs.wcs.cdelt + numra, numdec = reference_image.shape + + cubewcs = datacube.generate_WCS(coord_min, coord_dlt, equinox=equinox, name=specname) + msgs.info(msgs.newline() + "-" * 40 + + msgs.newline() + "Parameters of the WCS:" + + msgs.newline() + "RA min = {0:f}".format(coord_min[0]) + + msgs.newline() + "DEC min = {0:f}".format(coord_min[1]) + + msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) + + msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0 * dspat) + + msgs.newline() + "Wavelength step = {0:f} A".format(dwave) + + msgs.newline() + "-" * 40) + + # Generate the output binning + xbins = np.arange(1 + numra) - 0.5 + ybins = np.arange(1 + numdec) - 0.5 + spec_bins = np.arange(1 + numwav) - 0.5 + voxedges = (xbins, ybins, spec_bins) + return cubewcs, voxedges, reference_image + def make_sensfunc(self): """ TODO :: docstring @@ -584,12 +668,12 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): # Return the skysub params for this frame return this_skysub, skyImg, skyScl - def compute_DAR(self, hdr0, raimg, decimg, waveimg, onslit_gpm, wave_ref=None): + def compute_DAR(self, hdr0, waves, cosdec, wave_ref=None): """ TODO :: docstring """ if wave_ref is None: - wave_ref = 0.5 * (np.min(waveimg[onslit_gpm]) + np.max(waveimg[onslit_gpm])) + wave_ref = 0.5 * (np.min(waves) + np.max(waves)) # Get DAR parameters raval = self.spec.get_meta_value([hdr0], 'ra') decval = self.spec.get_meta_value([hdr0], 'dec') @@ -599,6 +683,8 @@ def compute_DAR(self, hdr0, raimg, decimg, waveimg, onslit_gpm, wave_ref=None): 
rel_humidity = self.spec.get_meta_value([hdr0], 'humidity') coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) location = self.spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) + # Set a default value + ra_corr, dec_corr = 0.0, 0.0 if pressure == 0.0: msgs.warn("Pressure is set to zero - DAR correction will not be performed") else: @@ -606,11 +692,10 @@ def compute_DAR(self, hdr0, raimg, decimg, waveimg, onslit_gpm, wave_ref=None): " Pressure = {0:f} bar".format(pressure) + msgs.newline() + " Temperature = {0:f} deg C".format(temperature) + msgs.newline() + " Humidity = {0:f}".format(rel_humidity)) - ra_corr, dec_corr = datacube.correct_dar(waveimg[onslit_gpm], coord, obstime, location, + ra_corr, dec_corr = datacube.correct_dar(waves, coord, obstime, location, pressure * units.bar, temperature * units.deg_C, rel_humidity, wave_ref=wave_ref) - raimg[onslit_gpm] += ra_corr * np.cos(np.mean(decimg[onslit_gpm]) * np.pi / 180.0) - decimg[onslit_gpm] += dec_corr + return ra_corr*cosdec, dec_corr def coadd(self): """ @@ -633,8 +718,10 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwr super().__init__(spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, show=show, debug=debug) self.flat_splines = dict() # A dictionary containing the splines of the flatfield + self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. 
+ self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale - def get_alignments(self, spec2DObj, slits, frame_wcs, spat_flexure=None): + def get_alignments(self, spec2DObj, slits, spat_flexure=None): """ TODO :: docstring """ @@ -711,13 +798,27 @@ def get_grating_shift(self, flatfile, waveimg, slits, spat_flexure=None): self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', bounds_error=False, fill_value="extrapolate") + def set_spatial_scale(self): + """ + TODO :: docstring + """ + # Make sure all frames being combined have consistent scales + if not np.all(self._spatscale[:,0] == self._spatscale[0,0]): + msgs.warn("The pixel scales of all input frames are not the same!") + msgs.info("Pixel scales of all input frames:" + msgs.newline() + str(self._spatscale[:,0])) + if not np.all(self._spatscale[:,1] == self._spatscale[0,1]): + msgs.warn("The slicer scales of all input frames are not the same!") + msgs.info("Slicer scales of all input frames:" + msgs.newline() + str(self._spatscale[:,1])) + # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale + if self._dspat is None: + self._dspat = np.max(self._spatscale) + def load(self): """ TODO :: docstring """ # Initialise variables wave_ref = None - mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame.
# Load all spec2d files and prepare the data for making a datacube for ff, fil in enumerate(self.spec2d): # Load it up @@ -797,12 +898,12 @@ def load(self): msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv)) # Obtain the minimum and maximum wavelength of all slits - if mnmx_wv is None: - mnmx_wv = np.zeros((len(self.spec2d), slits.nslits, 2)) + if self.mnmx_wv is None: + self.mnmx_wv = np.zeros((len(self.spec2d), slits.nslits, 2)) for slit_idx, slit_spat in enumerate(slits.spat_id): onslit_init = (slitid_img_init == slit_spat) - mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init]) - mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init]) + self.mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init]) + self.mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init]) # Remove edges of the spectrum where the sky model is bad sky_is_good = datacube.make_good_skymask(slitid_img_init, spec2DObj.tilts) @@ -817,41 +918,49 @@ def load(self): # Find the largest spatial scale of all images being combined # TODO :: probably need to put this in the DetectorContainer - pxscl = detector.platescale * parse.parse_binning(detector.binning)[ - 1] / 3600.0 # This should be degrees/pixel + pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This should be degrees/pixel slscl = self.spec.get_meta_value([spec2DObj.head0], 'slitwid') - if dspat is None: - dspat = max(pxscl, slscl) - if pxscl > dspat: - msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( - 3600.0 * dspat, 3600.0 * pxscl)) - if slscl > dspat: - msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( - 3600.0 * dspat, 3600.0 * slscl)) + self._spatscale[ff, 0] = pxscl + self._spatscale[ff, 1] = slscl + # If the spatial scale has been set by the user, check that it doesn't exceed the pixel or slicer scales + if self._dspat is not None: + if 
pxscl > self._dspat: + msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( + 3600.0 * self._dspat, 3600.0 * pxscl)) + if slscl > self._dspat: + msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( + 3600.0 * self._dspat, 3600.0 * slscl)) # Generate the alignment splines, and then # retrieve images of the RA and Dec of every pixel, # and the number of spatial pixels in each slit - alignSplines = self.get_alignments(spec2DObj, slits, frame_wcs, spat_flexure=spat_flexure) + alignSplines = self.get_alignments(spec2DObj, slits, spat_flexure=spat_flexure) raimg, decimg, minmax = slits.get_radec_image(frame_wcs, alignSplines, spec2DObj.tilts, initial=True, flexure=spat_flexure) - # Perform the DAR correction - self.compute_DAR(spec2DObj.head0, raimg, decimg, waveimg, onslit_gpm, wave_ref=wave_ref) - # Get copies of arrays to be saved - wave_ext = waveimg[onslit_gpm].copy() - flux_ext = sciImg[onslit_gpm].copy() - ivar_ext = ivar[onslit_gpm].copy() - dwav_ext = dwaveimg[onslit_gpm].copy() + ra_ext = raimg[onslit_gpm] + dec_ext = decimg[onslit_gpm] + wave_ext = waveimg[onslit_gpm] + flux_ext = sciImg[onslit_gpm] + ivar_ext = ivar[onslit_gpm] + dwav_ext = dwaveimg[onslit_gpm] # From here on out, work in sorted wavelengths wvsrt = np.argsort(wave_ext) wave_sort = wave_ext[wvsrt] dwav_sort = dwav_ext[wvsrt] + ra_sort = ra_ext[wvsrt] + dec_sort = dec_ext[wvsrt] # Here's an array to get back to the original ordering resrt = np.argsort(wvsrt) + # Perform the DAR correction + cosdec = np.cos(np.mean(dec_sort) * np.pi / 180.0) + ra_corr, dec_corr = self.compute_DAR(spec2DObj.head0, wave_sort, cosdec, wave_ref=wave_ref) + ra_sort += ra_corr + dec_sort += dec_corr + # Perform extinction correction msgs.info("Applying extinction correction") longitude = self.spec.telescope['longitude'] @@ -904,7 +1013,7 @@ def load(self): # If individual frames are to be output without aligning them, 
# there's no need to store information, just make the cubes now - numpix = raimg[onslit_gpm].size + numpix = ra_sort.size if not self.combine and not self.align: # Get the output filename if self.numfiles == 1 and self.cubepar['output_filename'] != "": @@ -922,30 +1031,30 @@ def load(self): # Set the wavelength range of the white light image. wl_wvrng = None if self.cubepar['save_whitelight']: - wl_wvrng = datacube.get_whitelight_range(np.max(mnmx_wv[ff, :, 0]), - np.min(mnmx_wv[ff, :, 1]), + wl_wvrng = datacube.get_whitelight_range(np.max(self.mnmx_wv[ff, :, 0]), + np.min(self.mnmx_wv[ff, :, 1]), self.cubepar['whitelight_range']) # Make the datacube if self.method in ['subpixel', 'ngp']: # Generate the datacube - generate_cube_subpixel(outfile, output_wcs, raimg[onslit_gpm], decimg[onslit_gpm], wave_ext, - flux_sav[resrt], ivar_sav[resrt], np.ones(numpix), + generate_cube_subpixel(outfile, output_wcs, ra_sort[resrt], dec_sort[resrt], wave_sort[resrt], + flux_sort[resrt], ivar_sort[resrt], np.ones(numpix), this_spatpos, this_specpos, this_spatid, spec2DObj.tilts, slits, alignSplines, bins, - all_idx=None, overwrite=self.overwrite, blaze_wave=blaze_wave, - blaze_spec=blaze_spec, + all_idx=None, overwrite=self.overwrite, + blaze_wave=self.blaze_wave, blaze_spec=self.blaze_spec, fluxcal=self.fluxcal, specname=self.specname, whitelight_range=wl_wvrng, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) continue # Store the information if we are combining multiple frames - self.all_ra = np.append(self.all_ra, raimg[onslit_gpm].copy()) - self.all_dec = np.append(self.all_dec, decimg[onslit_gpm].copy()) - self.all_wave = np.append(self.all_wave, wave_ext.copy()) - self.all_sci = np.append(self.all_sci, flux_sav[resrt].copy()) - self.all_ivar = np.append(self.all_ivar, ivar_sav[resrt].copy()) + self.all_ra = np.append(self.all_ra, ra_sort[resrt]) + self.all_dec = np.append(self.all_dec, 
dec_sort[resrt]) + self.all_wave = np.append(self.all_wave, wave_sort[resrt]) + self.all_sci = np.append(self.all_sci, flux_sort[resrt]) + self.all_ivar = np.append(self.all_ivar, ivar_sort[resrt].copy()) self.all_idx = np.append(self.all_idx, ff * np.ones(numpix)) - self.all_wghts = np.append(self.all_wghts, weights[ff] * np.ones(numpix) / weights[0]) + self.all_wghts = np.append(self.all_wghts, self.weights[ff] * np.ones(numpix) / self.weights[0]) self.all_spatpos = np.append(self.all_spatpos, this_spatpos) self.all_specpos = np.append(self.all_specpos, this_specpos) self.all_spatid = np.append(self.all_spatid, this_spatid) @@ -970,33 +1079,34 @@ def run_align(self): # Apply the shift self.all_ra[self.all_idx == ff] += ref_shift_ra[ff] + self.opts['ra_offset'][ff]/3600.0 self.all_dec[self.all_idx == ff] += ref_shift_dec[ff] + self.opts['dec_offset'][ff]/3600.0 - msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, opts['ra_offset'][ff], opts['dec_offset'][ff])) + msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, self.opts['ra_offset'][ff], self.opts['dec_offset'][ff])) else: # Find the wavelength range where all frames overlap - min_wl, max_wl = datacube.get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength - np.min(mnmx_wv[:, :, 1]), # The min red wavelength + min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength + np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength self.cubepar['whitelight_range']) # The user-specified values (if any) # Get the good whitelight pixels - ww, wavediff = get_whitelight_pixels(self.all_wave, min_wl, max_wl) + ww, wavediff = datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl) # Iterate over white light image generation and spatial shifting numiter = 2 for dd in range(numiter): msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") # 
Setup the WCS to use for all white light images ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied - image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra[ww], all_dec[ww], all_wave[ww], - dspat, wavediff, collapse=True) + image_wcs, voxedge, reference_image = self.create_wcs(self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], + self._dspat, wavediff, collapse=True) if voxedge[2].size != 2: msgs.error("Spectral range for WCS is incorrect for white light image") - wl_imgs = generate_image_subpixel(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], - all_sci[ww], all_ivar[ww], all_wghts[ww], - all_spatpos[ww], all_specpos[ww], all_spatid[ww], - all_tilts, all_slits, all_align, voxedge, all_idx=all_idx[ww], - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + wl_imgs = generate_image_subpixel(image_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], + self.all_sci[ww], self.all_ivar[ww], self.all_wghts[ww], + self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], + self.all_tilts, self.all_slits, self.all_align, voxedge, + all_idx=self.all_idx[ww], + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) if reference_image is None: # ref_idx will be the index of the cube with the highest S/N - ref_idx = np.argmax(weights) + ref_idx = np.argmax(self.weights) reference_image = wl_imgs[:, :, ref_idx].copy() msgs.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1)) else: @@ -1007,12 +1117,12 @@ def run_align(self): # Calculate the shift ra_shift, dec_shift = calculate_image_phase(reference_image.copy(), wl_imgs[:, :, ff], maskval=0.0) # Convert pixel shift to degrees shift - ra_shift *= dspat/cosdec - dec_shift *= dspat + ra_shift *= self._dspat/cosdec + dec_shift *= self._dspat msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff+1, ra_shift*3600.0, dec_shift*3600.0)) # Apply the shift 
- all_ra[all_idx == ff] += ra_shift - all_dec[all_idx == ff] += dec_shift + self.all_ra[self.all_idx == ff] += ra_shift + self.all_dec[self.all_idx == ff] += dec_shift def compute_weights(self): # Calculate the relative spectral weights of all pixels @@ -1021,23 +1131,23 @@ def compute_weights(self): self.all_wghts = np.ones_like(self.all_sci) else: # Find the wavelength range where all frames overlap - min_wl, max_wl = datacube.get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength - np.min(mnmx_wv[:, :, 1]), # The min red wavelength + min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength + np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength self.cubepar['whitelight_range']) # The user-specified values (if any) # Get the good white light pixels - ww, wavediff = datacube.get_whitelight_pixels(all_wave, min_wl, max_wl) + ww, wavediff = datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl) # Get a suitable WCS - image_wcs, voxedge, reference_image = create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, wavediff, - collapse=True) + image_wcs, voxedge, reference_image = self.create_wcs(self.all_ra, self.all_dec, self.all_wave, + self._dspat, wavediff, collapse=True) # Generate the white light image (note: hard-coding subpixel=1 in both directions, and combining into a single image) - wl_full = generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, - all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, - all_tilts, all_slits, all_align, voxedge, all_idx=all_idx, + wl_full = generate_image_subpixel(image_wcs, self.all_ra, self.all_dec, self.all_wave, + self.all_sci, self.all_ivar, self.all_wghts, + self.all_spatpos, self.all_specpos, self.all_spatid, + self.all_tilts, self.all_slits, self.all_align, voxedge, all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) # Compute the weights - all_wghts = datacube.compute_weights(all_ra, all_dec, all_wave, 
all_sci, all_ivar, all_idx, wl_full[:, :, 0], - dspat, dwv, relative_weights=self.cubepar['relative_weights']) + self.all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, wl_full[:, :, 0], + self._dspat, self._dwv, relative_weights=self.cubepar['relative_weights']) def coadd(self): """ @@ -1050,6 +1160,9 @@ def coadd(self): if not self.combine and not self.align: return + # Set the spatial scale of the output datacube + self.set_spatial_scale() + # Align the frames if self.align: self.run_align() @@ -1058,7 +1171,7 @@ def coadd(self): self.compute_weights() # Generate the WCS, and the voxel edges - cube_wcs, vox_edges, _ = datacube.create_wcs(self.cubepar, self.all_ra, self.all_dec, self.all_wave, dspat, dwv) + cube_wcs, vox_edges, _ = self.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, self._dwv) sensfunc = None if self.flux_spline is not None: @@ -1073,27 +1186,471 @@ def coadd(self): # Generate the datacube wl_wvrng = None if self.cubepar['save_whitelight']: - wl_wvrng = datacube.get_whitelight_range(np.max(mnmx_wv[:, :, 0]), - np.min(mnmx_wv[:, :, 1]), + wl_wvrng = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), + np.min(self.mnmx_wv[:, :, 1]), self.cubepar['whitelight_range']) if self.combine: - generate_cube_subpixel(outfile, cube_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, - np.ones(all_wghts.size), # all_wghts, - all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, vox_edges, - all_idx=all_idx, overwrite=overwrite, blaze_wave=blaze_wave, - blaze_spec=blaze_spec, - fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, whitelight_range=wl_wvrng, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, + np.ones(self.all_wghts.size), # all_wghts, + self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, 
self.all_slits, self.all_align, vox_edges, + all_idx=self.all_idx, overwrite=self.overwrite, blaze_wave=self.blaze_wave, + blaze_spec=self.blaze_spec, + fluxcal=self.fluxcal, sensfunc=sensfunc, specname=self.specname, whitelight_range=wl_wvrng, + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) else: for ff in range(self.numfiles): outfile = datacube.get_output_filename("", self.cubepar['output_filename'], False, ff) ww = np.where(self.all_idx == ff) - generate_cube_subpixel(outfile, cube_wcs, all_ra[ww], all_dec[ww], all_wave[ww], all_sci[ww], - all_ivar[ww], np.ones(all_wghts[ww].size), - all_spatpos[ww], all_specpos[ww], all_spatid[ww], all_tilts[ff], - all_slits[ff], all_align[ff], vox_edges, - all_idx=all_idx[ww], overwrite=overwrite, blaze_wave=blaze_wave, - blaze_spec=blaze_spec, - fluxcal=fluxcal, sensfunc=sensfunc, specname=specname, + generate_cube_subpixel(outfile, cube_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], self.all_sci[ww], + self.all_ivar[ww], np.ones(self.all_wghts[ww].size), + self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], self.all_tilts[ff], + self.all_slits[ff], self.all_align[ff], vox_edges, + all_idx=self.all_idx[ww], overwrite=self.overwrite, blaze_wave=self.blaze_wave, + blaze_spec=self.blaze_spec, + fluxcal=self.fluxcal, sensfunc=sensfunc, specname=self.specname, whitelight_range=wl_wvrng, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + + +def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, + all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + all_idx=None, spec_subpixel=10, spat_subpixel=10, combine=False): + """ + Generate a white light image from the input pixels + + Args: + image_wcs (`astropy.wcs.WCS`_): + World coordinate system to use for the white light images. 
+ all_ra (`numpy.ndarray`_): + 1D flattened array containing the right ascension of each pixel + (units = degrees) + all_dec (`numpy.ndarray`_): + 1D flattened array containing the declination of each pixel (units = + degrees) + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = + Angstroms) + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + bins (tuple): + A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial + and z wavelength coordinates + all_idx (`numpy.ndarray`_, optional): + If tilts, slits, and astrom_trans are lists, this should contain a + 1D flattened array, of the same length as all_sci, containing the + index the tilts, slits, and astrom_trans lists that corresponds to + each pixel. Note that, in this case all of these lists need to be + the same length. 
+ spec_subpixel (:obj:`int`, optional): + What is the subpixellation factor in the spectral direction. Higher + values give more reliable results, but note that the time required + goes as (``spec_subpixel * spat_subpixel``). The default value is 5, + which divides each detector pixel into 5 subpixels in the spectral + direction. + spat_subpixel (:obj:`int`, optional): + What is the subpixellation factor in the spatial direction. Higher + values give more reliable results, but note that the time required + goes as (``spec_subpixel * spat_subpixel``). The default value is 5, + which divides each detector pixel into 5 subpixels in the spatial + direction. + combine (:obj:`bool`, optional): + If True, all of the input frames will be combined into a single + output. Otherwise, individual images will be generated. + + Returns: + `numpy.ndarray`_: The white light images for all frames + """ + # Perform some checks on the input -- note, more complete checks are performed in subpixellate() + _all_idx = np.zeros(all_sci.size) if all_idx is None else all_idx + if combine: + numfr = 1 + else: + numfr = np.unique(_all_idx).size + if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr: + msgs.error("The following arguments must be the same length as the expected number of frames to be combined:" + + msgs.newline() + "tilts, slits, astrom_trans") + # Prepare the array of white light images to be stored + numra = bins[0].size-1 + numdec = bins[1].size-1 + all_wl_imgs = np.zeros((numra, numdec, numfr)) + + # Loop through all frames and generate white light images + for fr in range(numfr): + msgs.info(f"Creating image {fr+1}/{numfr}") + if combine: + # Subpixellate + img, _, _ = subpixellate(image_wcs, all_ra, all_dec, all_wave, + all_sci, all_ivar, all_wghts, all_spatpos, + all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, all_idx=_all_idx) + else: + ww = np.where(_all_idx == fr) + # 
Subpixellate + img, _, _ = subpixellate(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], + all_sci[ww], all_ivar[ww], all_wghts[ww], all_spatpos[ww], + all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], bins, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + all_wl_imgs[:, :, fr] = img[:, :, 0] + # Return the constructed white light images + return all_wl_imgs + + +def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, + all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None, + blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None, + specname="PYP_SPEC", debug=False): + r""" + Save a datacube using the subpixel algorithm. Refer to the subpixellate() + docstring for further details about this algorithm + + Args: + outfile (str): + Filename to be used to save the datacube + output_wcs (`astropy.wcs.WCS`_): + Output world coordinate system. 
+ all_ra (`numpy.ndarray`_): + 1D flattened array containing the right ascension of each pixel + (units = degrees) + all_dec (`numpy.ndarray`_): + 1D flattened array containing the declination of each pixel (units = + degrees) + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = + Angstroms) + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + bins (tuple): + A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial + and z wavelength coordinates + all_idx (`numpy.ndarray`_, optional): + If tilts, slits, and astrom_trans are lists, this should contain a + 1D flattened array, of the same length as all_sci, containing the + index the tilts, slits, and astrom_trans lists that corresponds to + each pixel. Note that, in this case all of these lists need to be + the same length. 
+        spec_subpixel (int, optional):
+            What is the subpixellation factor in the spectral direction. Higher
+            values give more reliable results, but note that the time required
+            goes as (``spec_subpixel * spat_subpixel``). The default value is 10,
+            which divides each detector pixel into 10 subpixels in the spectral
+            direction.
+        spat_subpixel (int, optional):
+            What is the subpixellation factor in the spatial direction. Higher
+            values give more reliable results, but note that the time required
+            goes as (``spec_subpixel * spat_subpixel``). The default value is 10,
+            which divides each detector pixel into 10 subpixels in the spatial
+            direction.
+        overwrite (bool, optional):
+            If True, the output cube will be overwritten.
+        blaze_wave (`numpy.ndarray`_, optional):
+            Wavelength array of the spectral blaze function
+        blaze_spec (`numpy.ndarray`_, optional):
+            Spectral blaze function
+        fluxcal (bool, optional):
+            Are the data flux calibrated? If True, the units are: :math:`{\rm
+            erg/s/cm}^2{\rm /Angstrom/arcsec}^2` multiplied by the
+            PYPEIT_FLUX_SCALE. Otherwise, the units are: :math:`{\rm
+            counts/s/Angstrom/arcsec}^2`.
+        sensfunc (`numpy.ndarray`_, None, optional):
+            Sensitivity function that has been applied to the datacube
+        whitelight_range (None, list, optional):
+            A two element list that specifies the minimum and maximum
+            wavelengths (in Angstroms) to use when constructing the white light
+            image (format is: [min_wave, max_wave]). If None, the cube will be
+            collapsed over the full wavelength range. If a list is provided and
+            either element of the list is None, then the minimum/maximum
+            wavelength range of that element will be set by the minimum/maximum
+            wavelength of all_wave.
+        specname (str, optional):
+            Name of the spectrograph
+        debug (bool, optional):
+            If True, a residuals cube will be output. If the datacube generation
+            is correct, the distribution of pixels in the residual cube with no
+            flux should have mean=0 and std=1. 
+ """ + # Prepare the header, and add the unit of flux to the header + hdr = output_wcs.to_header() + if fluxcal: + hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2") + else: + hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") + + # Subpixellate + subpix = subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, + all_spatid, tilts, slits, astrom_trans, bins, all_idx=all_idx, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug) + # Extract the variables that we need + if debug: + datacube, varcube, bpmcube, residcube = subpix + # Save a residuals cube + outfile_resid = outfile.replace(".fits", "_resid.fits") + msgs.info("Saving residuals datacube as: {0:s}".format(outfile_resid)) + hdu = fits.PrimaryHDU(residcube.T, header=hdr) + hdu.writeto(outfile_resid, overwrite=overwrite) + else: + datacube, varcube, bpmcube = subpix + + # Check if the user requested a white light image + if whitelight_range is not None: + # Grab the WCS of the white light image + whitelight_wcs = output_wcs.celestial + # Determine the wavelength range of the whitelight image + if whitelight_range[0] is None: + whitelight_range[0] = np.min(all_wave) + if whitelight_range[1] is None: + whitelight_range[1] = np.max(all_wave) + msgs.info("White light image covers the wavelength range {0:.2f} A - {1:.2f} A".format( + whitelight_range[0], whitelight_range[1])) + # Get the output filename for the white light image + out_whitelight = datacube.get_output_whitelight_filename(outfile) + nspec = datacube.shape[2] + # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) + wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] + whitelight_img = datacube.make_whitelight_fromcube(datacube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) + msgs.info("Saving white light image as: 
{0:s}".format(out_whitelight)) + img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) + img_hdu.writeto(out_whitelight, overwrite=overwrite) + + # Write out the datacube + msgs.info("Saving datacube as: {0:s}".format(outfile)) + final_cube = DataCube(datacube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, + sensfunc=sensfunc, fluxed=fluxcal) + final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) + + +def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, + all_spatid, tilts, slits, astrom_trans, bins, all_idx=None, + spec_subpixel=10, spat_subpixel=10, debug=False): + r""" + Subpixellate the input data into a datacube. This algorithm splits each + detector pixel into multiple subpixels, and then assigns each subpixel to a + voxel. For example, if ``spec_subpixel = spat_subpixel = 10``, then each + detector pixel is divided into :math:`10^2=100` subpixels. Alternatively, + when spec_subpixel = spat_subpixel = 1, this corresponds to the nearest grid + point (NGP) algorithm. + + Important Note: If spec_subpixel > 1 or spat_subpixel > 1, the errors will + be correlated, and the covariance is not being tracked, so the errors will + not be (quite) right. There is a tradeoff one has to make between sampling + and better looking cubes, versus no sampling and better behaved errors. + + Args: + output_wcs (`astropy.wcs.WCS`_): + Output world coordinate system. 
+ all_ra (`numpy.ndarray`_): + 1D flattened array containing the right ascension of each pixel + (units = degrees) + all_dec (`numpy.ndarray`_): + 1D flattened array containing the declination of each pixel (units = + degrees) + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = + Angstroms) + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + bins (tuple): + A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial + and z wavelength coordinates + all_idx (`numpy.ndarray`_, optional): + If tilts, slits, and astrom_trans are lists, this should contain a + 1D flattened array, of the same length as all_sci, containing the + index the tilts, slits, and astrom_trans lists that corresponds to + each pixel. Note that, in this case all of these lists need to be + the same length. 
+        spec_subpixel (:obj:`int`, optional):
+            What is the subpixellation factor in the spectral direction. Higher
+            values give more reliable results, but note that the time required
+            goes as (``spec_subpixel * spat_subpixel``). The default value is 10,
+            which divides each detector pixel into 10 subpixels in the spectral
+            direction.
+        spat_subpixel (:obj:`int`, optional):
+            What is the subpixellation factor in the spatial direction. Higher
+            values give more reliable results, but note that the time required
+            goes as (``spec_subpixel * spat_subpixel``). The default value is 10,
+            which divides each detector pixel into 10 subpixels in the spatial
+            direction.
+        debug (bool):
+            If True, a residuals cube will be output. If the datacube generation
+            is correct, the distribution of pixels in the residual cube with no
+            flux should have mean=0 and std=1.
+
+    Returns:
+        :obj:`tuple`: Three or four `numpy.ndarray`_ objects containing (1) the
+        datacube generated from the subpixellated inputs, (2) the corresponding
+        variance cube, (3) the corresponding bad pixel mask cube, and (4) the
+        residual cube. The latter is only returned if debug is True.
+    """
+    # Check for combinations of lists or not
+    if type(tilts) is list and type(slits) is list and type(astrom_trans) is list:
+        # Several frames are being combined. 
Check the lists have the same length + numframes = len(tilts) + if len(slits) != numframes or len(astrom_trans) != numframes: + msgs.error("The following lists must have the same length:" + msgs.newline() + + "tilts, slits, astrom_trans") + # Check all_idx has been set + if all_idx is None: + if numframes != 1: + msgs.error("Missing required argument for combining frames: all_idx") + else: + all_idx = np.zeros(all_sci.size) + else: + tmp = np.unique(all_idx).size + if tmp != numframes: + msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.") + # Store in the following variables + _tilts, _slits, _astrom_trans = tilts, slits, astrom_trans + elif type(tilts) is not list and type(slits) is not list and \ + type(astrom_trans) is not list: + # Just a single frame - store as lists for this code + _tilts, _slits, _astrom_trans = [tilts], [slits], [astrom_trans], + all_idx = np.zeros(all_sci.size) + numframes = 1 + else: + msgs.error("The following input arguments should all be of type 'list', or all not be type 'list':" + + msgs.newline() + "tilts, slits, astrom_trans") + # Prepare the output arrays + outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1) + binrng = [[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]] + datacube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape) + if debug: + residcube = np.zeros(outshape) + # Divide each pixel into subpixels + spec_offs = np.arange(0.5/spec_subpixel, 1, 1/spec_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. + spat_offs = np.arange(0.5/spat_subpixel, 1, 1/spat_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. 
+ spat_x, spec_y = np.meshgrid(spat_offs, spec_offs) + num_subpixels = spec_subpixel * spat_subpixel + area = 1 / num_subpixels + all_wght_subpix = all_wghts * area + all_var = utils.inverse(all_ivar) + # Loop through all exposures + for fr in range(numframes): + # Extract tilts and slits for convenience + this_tilts = _tilts[fr] + this_slits = _slits[fr] + # Loop through all slits + for sl, spatid in enumerate(this_slits.spat_id): + if numframes == 1: + msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits}") + else: + msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits} of frame {fr+1}/{numframes}") + this_sl = np.where((all_spatid == spatid) & (all_idx == fr)) + wpix = (all_specpos[this_sl], all_spatpos[this_sl]) + # Generate a spline between spectral pixel position and wavelength + yspl = this_tilts[wpix]*(this_slits.nspec - 1) + tiltpos = np.add.outer(yspl, spec_y).flatten() + wspl = all_wave[this_sl] + asrt = np.argsort(yspl) + wave_spl = interp1d(yspl[asrt], wspl[asrt], kind='linear', bounds_error=False, fill_value='extrapolate') + # Calculate spatial and spectral positions of the subpixels + spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() + spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() + # Transform this to spatial location + spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy) + spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl]) + ra_coeff = np.polyfit(spatpos, all_ra[this_sl], 1) + dec_coeff = np.polyfit(spatpos, all_dec[this_sl], 1) + this_ra = np.polyval(ra_coeff, spatpos_subpix)#ra_spl(spatpos_subpix) + this_dec = np.polyval(dec_coeff, spatpos_subpix)#dec_spl(spatpos_subpix) + # ssrt = np.argsort(spatpos) + # ra_spl = interp1d(spatpos[ssrt], all_ra[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') + # dec_spl = interp1d(spatpos[ssrt], all_dec[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') + # this_ra = 
ra_spl(spatpos_subpix) + # this_dec = dec_spl(spatpos_subpix) + this_wave = wave_spl(tiltpos) + # Convert world coordinates to voxel coordinates, then histogram + vox_coord = output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0) + if histogramdd is not None: + # use the "fast histogram" algorithm, that assumes regular bin spacing + datacube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels)) + varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels)) + normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels)) + if debug: + residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels)) + else: + datacube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))[0] + varcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))[0] + normcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))[0] + if debug: + residcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels))[0] + # Normalise the datacube and variance cube + nc_inverse = utils.inverse(normcube) + datacube *= nc_inverse + varcube *= nc_inverse**2 + bpmcube = (normcube == 0).astype(np.uint8) + if debug: + residcube *= nc_inverse + return datacube, varcube, bpmcube, residcube + return datacube, varcube, bpmcube From ed9169bafc4b4826166f00746ccc38f45a2f17f3 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 10 Sep 2023 12:30:27 +0100 Subject: [PATCH 09/81] update docstrings --- pypeit/coadd3d.py | 56 
++++++++++++++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 13 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index a686b1e6f4..77651bbfee 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -670,7 +670,21 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): def compute_DAR(self, hdr0, waves, cosdec, wave_ref=None): """ - TODO :: docstring + Compute the differential atmospheric refraction correction for a given frame. + + Args: + hdr0 (`astropy.io.fits.Header`_): + Header of the spec2d file. This input should be retrieved from spec2DObj.head0 + waves (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = Angstroms) + cosdec (:obj:`float`): + Cosine of the target declination. + wave_ref (:obj:`float`, optional): + Reference wavelength (The DAR correction will be performed relative to this wavelength) + + Returns: + `numpy.ndarray`_: 1D differential RA for each wavelength of the input waves array + `numpy.ndarray`_: 1D differential Dec for each wavelength of the input waves array """ if wave_ref is None: wave_ref = 0.5 * (np.min(waves) + np.max(waves)) @@ -697,6 +711,27 @@ def compute_DAR(self, hdr0, waves, cosdec, wave_ref=None): wave_ref=wave_ref) return ra_corr*cosdec, dec_corr + def align_user_offsets(self): + """ + Align the RA and DEC of all input frames, and then + manually shift the cubes based on user-provided offsets. + The offsets should be specified in arcseconds, and the + ra_offset should include the cos(dec) factor. 
+ """ + # First, translate all coordinates to the coordinates of the first frame + # Note: You do not need cos(dec) here, this just overrides the IFU coordinate centre of each frame + # The cos(dec) factor should be input by the user, and should be included in the self.opts['ra_offset'] + ref_shift_ra = self.ifu_ra[0] - self.ifu_ra + ref_shift_dec = self.ifu_dec[0] - self.ifu_dec + for ff in range(self.numfiles): + # Apply the shift + self.all_ra[self.all_idx == ff] += ref_shift_ra[ff] + self.opts['ra_offset'][ff] / 3600.0 + self.all_dec[self.all_idx == ff] += ref_shift_dec[ff] + self.opts['dec_offset'][ff] / 3600.0 + msgs.info("Spatial shift of cube #{0:d}:" + msgs.newline() + + "RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, + self.opts['ra_offset'][ff], + self.opts['dec_offset'][ff])) + def coadd(self): """ TODO :: Docstring @@ -800,9 +835,10 @@ def get_grating_shift(self, flatfile, waveimg, slits, spat_flexure=None): def set_spatial_scale(self): """ - TODO :: docstring + This function checks if the spatial scales of all frames are consistent. + If the user has not specified the spatial scale, it will be set here. 
""" - # Make sure all frames being combined have consistent scales + # Make sure all frames have consistent scales if not np.all(self._spatscale[:,0] != self._spatscale[0,0]): msgs.warn("The pixel scales of all input frames are not the same!") msgs.info("Pixel scales of all input frames:" + msgs.newline() + self._spatscale[:,0]) @@ -812,6 +848,7 @@ def set_spatial_scale(self): # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale if self._dspat is None: self._dspat = np.max(self._spatscale) + msgs.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * self._dspat)) def load(self): """ @@ -1064,22 +1101,14 @@ def load(self): def run_align(self): """ - TODO :: Add docstring + This routine aligns multiple cubes by using manual input offsets or by cross-correlating white light images. """ # Grab cos(dec) for convenience cosdec = np.cos(np.mean(self.all_dec) * np.pi / 180.0) # Register spatial offsets between all frames if self.opts['ra_offset'] is not None: - # First, translate all coordinates to the coordinates of the first frame - # Note :: Don't need cosdec here, this just overrides the IFU coordinate centre of each frame - ref_shift_ra = self.ifu_ra[0] - self.ifu_ra - ref_shift_dec = self.ifu_dec[0] - self.ifu_dec - for ff in range(self.numfiles): - # Apply the shift - self.all_ra[self.all_idx == ff] += ref_shift_ra[ff] + self.opts['ra_offset'][ff]/3600.0 - self.all_dec[self.all_idx == ff] += ref_shift_dec[ff] + self.opts['dec_offset'][ff]/3600.0 - msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, self.opts['ra_offset'][ff], self.opts['dec_offset'][ff])) + self.align_user_offsets() else: # Find the wavelength range where all frames overlap min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength @@ -1160,6 +1189,7 @@ def coadd(self): if not self.combine and not self.align: return + # If the 
user is aligning or combining, the spatial scale of the output cubes needs to be consistent. # Set the spatial scale of the output datacube self.set_spatial_scale() From 7b17e412a8f00d6951ca5f05975f496f712b234a Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 10 Sep 2023 16:38:52 +0100 Subject: [PATCH 10/81] more docstrings --- pypeit/coadd3d.py | 82 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 71 insertions(+), 11 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 77651bbfee..9dc65038d9 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -182,10 +182,16 @@ def from_file(cls, ifile): @property def ivar(self): + """ + Utility function to compute the inverse variance cube + """ return utils.inverse(self.sig**2) @property def wcs(self): + """ + Utility function to provide the world coordinate system of the datacube + """ return wcs.WCS(self.head0) @@ -221,7 +227,7 @@ def get_instance(cls, spec2dfiles, opts, spectrograph=None, par=None, det=1, ove spec2dfiles, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, show=show, debug=debug) - def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=False, + def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite=False, show=False, debug=False): """ @@ -239,7 +245,7 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=Fa spectrograph (see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.default_pypeit_par` for the relevant spectrograph class). - det (int): + det (:obj:`int`_, optional): Detector index overwrite (:obj:`bool`, optional): Overwrite the output file, if it exists? 
@@ -327,7 +333,8 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=Fa msgs.error(f"The following datacube method is not allowed: {self.method}") # Get the detector number and string representation - det = 1 if self.par['rdx']['detnum'] is None else self.par['rdx']['detnum'] + if det is None: + det = 1 if self.par['rdx']['detnum'] is None else self.par['rdx']['detnum'] self.detname = self.spec.get_det_name(det) # Check if the output file exists @@ -347,7 +354,9 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=1, overwrite=Fa def check_outputs(self): """ - TODO :: docstring + Check if any of the intended output files already exist. This check should be done near the + beginning of the coaddition, to avoid any computation that won't be saved in the event that + files won't be overwritten. """ if self.combine: outfile = datacube.get_output_filename("", self.cubepar['output_filename'], self.combine) @@ -471,7 +480,7 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equi def make_sensfunc(self): """ - TODO :: docstring + Generate the sensitivity function to be used for the flux calibration. """ self.fluxcal = True ss_file = self.cubepar['standard_cube'] @@ -536,7 +545,7 @@ def make_sensfunc(self): def set_default_scalecorr(self): """ - TODO :: docstring + Set the default mode to use for relative spectral scale correction. """ if self.cubepar['scale_corr'] is not None: if self.cubepar['scale_corr'] == "image": @@ -559,7 +568,22 @@ def set_default_scalecorr(self): def get_current_scalecorr(self, spec2DObj, opts_scalecorr=None): """ - TODO :: docstring + Determine the scale correction that should be used to correct + for the relative spectral scaling of the science frame + + Args: + spec2DObj (:class:`~pypeit.spec2dobj.Spec2DObj`_): + 2D PypeIt spectra object. + opts_scalecorr (:obj:`str`, optional): + A string that describes what mode should be used for the sky subtraction. 
The + allowed values are: + default - Use the default value, as defined in self.set_default_scalecorr() + image - Use the relative scale that was derived from the science frame + none - Do not perform relative scale correction + + Returns: + :obj:`str`_: A string that describes the scale correction mode to be used (see opts_scalecorr description) + `numpy.ndarray`_: 2D image (same shape as science frame) containing the relative spectral scaling to apply to the science frame """ this_scalecorr = self.scalecorr_default relScaleImg = self.relScaleImgDef.copy() @@ -597,7 +621,7 @@ def get_current_scalecorr(self, spec2DObj, opts_scalecorr=None): def set_default_skysub(self): """ - TODO :: Add docstring + Set the default mode to use for sky subtraction. """ if self.cubepar['skysub_frame'] in [None, 'none', '', 'None']: self.skysub_default = "none" @@ -622,7 +646,24 @@ def set_default_skysub(self): def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): """ - TODO :: docstring + Determine the sky frame that should be used to subtract from the science frame + + Args: + spec2DObj (:class:`~pypeit.spec2dobj.Spec2DObj`_): + 2D PypeIt spectra object. + exptime (:obj:`float`_): + The exposure time of the science frame (in seconds) + opts_skysub (:obj:`str`, optional): + A string that describes what mode should be used for the sky subtraction. 
The + allowed values are: + default - Use the default value, as defined in self.set_default_skysub() + image - Use the sky model derived from the science frame + none - Do not perform sky subtraction + + Returns: + :obj:`str`_: A string that describes the sky subtration mode to be used (see opts_skysub description) + `numpy.ndarray`_: 2D image (same shape as science frame) containing the sky frame to be subtracted from the science frame + `numpy.ndarray`_: 2D image (same shape as science frame) containing the relative spectral scaling that has been applied to the returned sky frame """ this_skysub = self.skysub_default if self.skysub_default == "image": @@ -734,7 +775,8 @@ def align_user_offsets(self): def coadd(self): """ - TODO :: Docstring + Main entry routine to set the order of operations to coadd the data. For specific + details of this procedure, see the child routines. """ msgs.bug("This routine should be overridden by child classes.") msgs.error("Cannot proceed without coding the coadd routine.") @@ -758,7 +800,22 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwr def get_alignments(self, spec2DObj, slits, spat_flexure=None): """ - TODO :: docstring + Generate and return the spline interpolation fitting functions to be used for + the alignment frames, as part of the astrometric correction. + + Parameters + ---------- + spec2DObj : :class:`~pypeit.spec2dobj.Spec2DObj`_): + 2D PypeIt spectra object. 
slits : :class:`~pypeit.slittrace.SlitTraceSet`
slits : :class:`~pypeit.slittrace.SlitTraceSet`
+ (2) A mask of good pixels is identified + (3) A common spaxel scale is determined, and the astrometric correction is derived + (4) An RA and Dec image is created for each pixel. + (5) Based on atmospheric conditions, a differential atmospheric refraction correction is applied. + (6) Extinction correction + (7) Flux calibration (optional - this calibration is only applied if a standard star cube is supplied) + + TODO :: NOT FINISHED THIS DOCSTRING YET! """ # First loop through all of the frames, load the data, and save datacubes if no combining is required self.load() From 2871ebf4083ae108db86f59f2e124adc2977c832 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 10 Sep 2023 22:34:43 +0100 Subject: [PATCH 12/81] more docstrings --- pypeit/coadd3d.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 26128457bf..b1c010e067 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1258,7 +1258,8 @@ def compute_weights(self): def coadd(self): """ - This is the main routine called to convert PypeIt spec2d files into PypeIt DataCube objects. + This is the main routine called to convert PypeIt spec2d files into PypeIt DataCube objects. It is specific + to the SlicerIFU data. The simplest option is when combine=False and align=False. In this case, each individual spec2d file is converted into a spec3d file (i.e. a PypeIt DataCube object). 
These fits files can be loaded/viewed From dcbd36299da79946c3d78dfadf0a8e45e5ca3fda Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 11 Sep 2023 07:57:47 +0100 Subject: [PATCH 13/81] final docstrings --- pypeit/coadd3d.py | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index b1c010e067..636e3572ba 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1261,22 +1261,32 @@ def coadd(self): This is the main routine called to convert PypeIt spec2d files into PypeIt DataCube objects. It is specific to the SlicerIFU data. - The simplest option is when combine=False and align=False. In this case, each individual spec2d file - is converted into a spec3d file (i.e. a PypeIt DataCube object). These fits files can be loaded/viewed - in other software to display or combine multiple datacubes into a single datacube. However, note that - different software packages use different algorithms that may not conserve flux, or may produce covariance - between adjacent voxels. First the data are loaded and several corrections are made. These include: - - (1) A sky frame or model is subtracted from the science data, and the relative spectral illumination - of different slices is corrected. - (2) A mask of good pixels is identified - (3) A common spaxel scale is determined, and the astrometric correction is derived - (4) An RA and Dec image is created for each pixel. - (5) Based on atmospheric conditions, a differential atmospheric refraction correction is applied. - (6) Extinction correction - (7) Flux calibration (optional - this calibration is only applied if a standard star cube is supplied) - - TODO :: NOT FINISHED THIS DOCSTRING YET! + First the data are loaded and several corrections are made. These include: + + * A sky frame or model is subtracted from the science data, and the relative spectral illumination + of different slices is corrected. 
+ * A mask of good pixels is identified + * A common spaxel scale is determined, and the astrometric correction is derived + * An RA and Dec image is created for each pixel. + * Based on atmospheric conditions, a differential atmospheric refraction correction is applied. + * Extinction correction + * Flux calibration (optional - this calibration is only applied if a standard star cube is supplied) + + If the input frames will not be combined (combine=False) if they won't be aligned (align=False), then + each individual spec2d file is converted into a spec3d file (i.e. a PypeIt DataCube object). These fits + files can be loaded/viewed in other software packages to display or combine multiple datacubes into a + single datacube. However, note that different software packages use combination algorithms that may not + conserve flux, or may produce covariance between adjacent voxels. + + If the user wishes to either spatially align multiple exposures (align=True) or combine multiple + exposures (combine=True), then the next set of operations include: + + * Generate white light images of each individual cube (according to a user-specified wavelength range) + * Align multiple frames if align=True (either manually by user input, or automatically by cross-correlation) + * Create the output WCS, and apply the flux calibration to the data + * Generate individual datacubes (combine=False) or one master datacube containing all exposures (combine=True). + Note, there are several algorithms used to combine multiple frames. Refer to the subpixellate() routine for + more details about the combination options. 
""" # First loop through all of the frames, load the data, and save datacubes if no combining is required self.load() From 78b323c3883c5edbc5d03d111fec9121110cd2fa Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 11 Sep 2023 08:02:35 +0100 Subject: [PATCH 14/81] final docstrings --- pypeit/coadd3d.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 636e3572ba..e6792520fa 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -201,9 +201,7 @@ class CoAdd3D: DataCube (spec3d) files. This routine is only used for IFU data reduction. - Algorithm steps are as follows: - - TODO :: Fill this in. - + Algorithm steps are detailed in the coadd routine. """ # Superclass factory method generates the subclass instance @classmethod @@ -945,7 +943,7 @@ def load(self): self.ifu_dec = np.append(self.ifu_dec, self.spec.compound_meta([hdr0], 'dec')) # Get the exposure time - # TODO :: Surely this should be retrieved from metadata... + # TODO :: Surely this should be retrieved from metadata... although it's coming from spec2d file? exptime = hdr0['EXPTIME'] # Setup for PypeIt imports From fe055ee3edb089aa737bfe6203e16f37de426190 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 11 Sep 2023 08:03:31 +0100 Subject: [PATCH 15/81] refactor --- CHANGES.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.rst b/CHANGES.rst index 17cc07f3a9..eada706656 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -41,6 +41,7 @@ - HIRES wavelength solution improvements galor - Added `redo_slits` option - Refactored ``load_line_lists()`` yet again! 
+- Refactored ``coadd3d()`` 1.13.0 (2 June 2023) From 4e7db8fe508c11a5175a8ee2b3bfb41dbdbdff18 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 11 Sep 2023 12:17:45 +0100 Subject: [PATCH 16/81] fix args --- pypeit/coadd3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index e6792520fa..f6658ea1cc 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -222,7 +222,7 @@ def get_instance(cls, spec2dfiles, opts, spectrograph=None, par=None, det=1, ove return next(c for c in cls.__subclasses__() if c.__name__ == (spectrograph.pypeline + 'CoAdd3D'))( - spec2dfiles, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, + spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, show=show, debug=debug) def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite=False, From e6b652c3bf8d3c030ee5f93c4919ba06960a3fd5 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 11 Sep 2023 13:29:29 +0100 Subject: [PATCH 17/81] log update --- pypeit/coadd3d.py | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index f6658ea1cc..f0e25e5d03 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -350,6 +350,17 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite if not os.path.exists(self.cubepar['reference_image']): msgs.error("Reference image does not exist:" + msgs.newline() + self.cubepar['reference_image']) + # Load the default scaleimg frame for the scale correction + self.scalecorr_default = "none" + self.relScaleImgDef = np.array([1]) + self.set_default_scalecorr() + + # Load the default sky frame to be used for sky subtraction + self.skysub_default = "image" + self.skyImgDef, self.skySclDef = None, None # This is the default behaviour (i.e. 
to use the "image" for the sky subtraction) + self.set_default_skysub() + + def check_outputs(self): """ Check if any of the intended output files already exist. This check should be done near the @@ -531,16 +542,6 @@ def make_sensfunc(self): sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) self.flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") - # Load the default scaleimg frame for the scale correction - self.scalecorr_default = "none" - self.relScaleImgDef = np.array([1]) - self.set_default_scalecorr() - - # Load the default sky frame to be used for sky subtraction - self.skysub_default = "image" - self.skyImgDef, self.skySclDef = None, None # This is the default behaviour (i.e. to use the "image" for the sky subtraction) - self.set_default_skysub() - def set_default_scalecorr(self): """ Set the default mode to use for relative spectral scale correction. @@ -909,13 +910,18 @@ def set_spatial_scale(self): This function checks if the spatial scales of all frames are consistent. If the user has not specified the spatial scale, it will be set here. 
""" - # Make sure all frames have consistent scales - if not np.all(self._spatscale[:,0] != self._spatscale[0,0]): + # Make sure all frames have consistent pixel scales + ratio = (self._spatscale[:, 0] - self._spatscale[0, 0]) / self._spatscale[0, 0] + if np.any(np.abs(ratio) > 1E-4): msgs.warn("The pixel scales of all input frames are not the same!") - msgs.info("Pixel scales of all input frames:" + msgs.newline() + self._spatscale[:,0]) - if not np.all(self._spatscale[:,1] != self._spatscale[0,1]): + spatstr = ", ".join(["{0:.6f}".format(ss) for ss in self._spatscale[:,0]*3600.0]) + msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr) + # Make sure all frames have consistent slicer scales + ratio = (self._spatscale[:, 1] - self._spatscale[0, 1]) / self._spatscale[0, 1] + if np.any(np.abs(ratio) > 1E-4): msgs.warn("The slicer scales of all input frames are not the same!") - msgs.info("Slicer scales of all input frames:" + msgs.newline() + self._spatscale[:,1]) + spatstr = ", ".join(["{0:.6f}".format(ss) for ss in self._spatscale[:,1]*3600.0]) + msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr) # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale if self._dspat is None: self._dspat = np.max(self._spatscale) From 2b045bc2a8d907efc242f9e45d6c82b7f66c36b3 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 11 Sep 2023 13:55:05 +0100 Subject: [PATCH 18/81] renamed datacube --- pypeit/coadd3d.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index f0e25e5d03..372d090134 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1570,14 +1570,14 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug) # Extract the variables that we need if debug: - datacube, varcube, bpmcube, residcube = subpix 
+ flxcube, varcube, bpmcube, residcube = subpix # Save a residuals cube outfile_resid = outfile.replace(".fits", "_resid.fits") msgs.info("Saving residuals datacube as: {0:s}".format(outfile_resid)) hdu = fits.PrimaryHDU(residcube.T, header=hdr) hdu.writeto(outfile_resid, overwrite=overwrite) else: - datacube, varcube, bpmcube = subpix + flxcube, varcube, bpmcube = subpix # Check if the user requested a white light image if whitelight_range is not None: @@ -1592,17 +1592,17 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s whitelight_range[0], whitelight_range[1])) # Get the output filename for the white light image out_whitelight = datacube.get_output_whitelight_filename(outfile) - nspec = datacube.shape[2] + nspec = flxcube.shape[2] # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] - whitelight_img = datacube.make_whitelight_fromcube(datacube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) + whitelight_img = datacube.make_whitelight_fromcube(flxcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) img_hdu.writeto(out_whitelight, overwrite=overwrite) # Write out the datacube msgs.info("Saving datacube as: {0:s}".format(outfile)) - final_cube = DataCube(datacube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, + final_cube = DataCube(flxcube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, sensfunc=sensfunc, fluxed=fluxcal) final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) @@ -1724,7 +1724,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w # Prepare the output arrays outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1) binrng = 
[[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]] - datacube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape) + flxcube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape) if debug: residcube = np.zeros(outshape) # Divide each pixel into subpixels @@ -1774,23 +1774,23 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w vox_coord = output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0) if histogramdd is not None: # use the "fast histogram" algorithm, that assumes regular bin spacing - datacube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels)) + flxcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels)) varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels)) normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels)) if debug: residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels)) else: - datacube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))[0] + flxcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))[0] varcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))[0] normcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))[0] if debug: residcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * 
np.sqrt(all_ivar[this_sl]), num_subpixels))[0] # Normalise the datacube and variance cube nc_inverse = utils.inverse(normcube) - datacube *= nc_inverse + flxcube *= nc_inverse varcube *= nc_inverse**2 bpmcube = (normcube == 0).astype(np.uint8) if debug: residcube *= nc_inverse - return datacube, varcube, bpmcube, residcube - return datacube, varcube, bpmcube + return flxcube, varcube, bpmcube, residcube + return flxcube, varcube, bpmcube From 5f657bbe84bbd8442b193bebfcb7e795a85949fd Mon Sep 17 00:00:00 2001 From: rcooke Date: Tue, 12 Sep 2023 09:51:45 +0100 Subject: [PATCH 19/81] fix tests --- pypeit/scripts/coadd_datacube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 18a07ceeb9..5ae3a74974 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -52,7 +52,7 @@ def main(args): msgs.info("Restricting to detector={}".format(args.det)) parset['rdx']['detnum'] = int(args.det) - # Instantiate Coadd2d + # Instantiate CoAdd3d coadd = CoAdd3D.get_instance(coadd3dfile.filenames, coadd3dfile.options, spectrograph=spectrograph, par=parset, det=args.det, overwrite=args.overwrite) From cd63beb841fe0d3a138b48cf88213dddf807a88e Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 10:45:33 +0100 Subject: [PATCH 20/81] voxel sampling --- pypeit/coadd3d.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 372d090134..c6ee144a6d 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -796,6 +796,7 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwr self.flat_splines = dict() # A dictionary containing the splines of the flatfield self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. 
self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale + self._specscale = np.zeros(self.numfiles) def get_alignments(self, spec2DObj, slits, spat_flexure=None): """ @@ -905,27 +906,38 @@ def get_grating_shift(self, flatfile, waveimg, slits, spat_flexure=None): self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', bounds_error=False, fill_value="extrapolate") - def set_spatial_scale(self): + def set_voxel_sampling(self): """ - This function checks if the spatial scales of all frames are consistent. - If the user has not specified the spatial scale, it will be set here. + This function checks if the spatial and spectral scales of all frames are consistent. + If the user has not specified either the spatial or spectral scales, they will be set here. """ # Make sure all frames have consistent pixel scales ratio = (self._spatscale[:, 0] - self._spatscale[0, 0]) / self._spatscale[0, 0] if np.any(np.abs(ratio) > 1E-4): msgs.warn("The pixel scales of all input frames are not the same!") spatstr = ", ".join(["{0:.6f}".format(ss) for ss in self._spatscale[:,0]*3600.0]) - msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr) + msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") # Make sure all frames have consistent slicer scales ratio = (self._spatscale[:, 1] - self._spatscale[0, 1]) / self._spatscale[0, 1] if np.any(np.abs(ratio) > 1E-4): msgs.warn("The slicer scales of all input frames are not the same!") spatstr = ", ".join(["{0:.6f}".format(ss) for ss in self._spatscale[:,1]*3600.0]) - msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr) + msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") + # Make sure all frames have consistent wavelength sampling + ratio = (self._specscale - self._specscale[0]) / self._specscale[0] + if np.any(np.abs(ratio) > 1E-2): + msgs.warn("The wavelength samplings of the input frames 
are not the same!") + specstr = ", ".join(["{0:.6f}".format(ss) for ss in self._specscale]) + msgs.info("Wavelength samplings of all input frames:" + msgs.newline() + specstr) + # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale if self._dspat is None: self._dspat = np.max(self._spatscale) msgs.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * self._dspat)) + # If the user has not specified the spectral sampling, then set it now to the largest value + if self._dwv is None: + self._dwv = np.max(self._specscale) + msgs.info("Adopting a wavelength sampling of {0:f} Angstrom".format(self._dwv)) def load(self): """ @@ -1038,6 +1050,8 @@ def load(self): slscl = self.spec.get_meta_value([spec2DObj.head0], 'slitwid') self._spatscale[ff, 0] = pxscl self._spatscale[ff, 1] = slscl + self._specscale[ff] = dwv + # If the spatial scale has been set by the user, check that it doesn't exceed the pixel or slicer scales if self._dspat is not None: if pxscl > self._dspat: @@ -1301,7 +1315,7 @@ def coadd(self): # If the user is aligning or combining, the spatial scale of the output cubes needs to be consistent. # Set the spatial scale of the output datacube - self.set_spatial_scale() + self.set_voxel_sampling() # Align the frames if self.align: From 60884e9cc44740de6f59876a1423d524e70d44d6 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 14:39:09 +0100 Subject: [PATCH 21/81] update comment --- pypeit/coadd3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index c6ee144a6d..3ee138139f 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1314,7 +1314,7 @@ def coadd(self): return # If the user is aligning or combining, the spatial scale of the output cubes needs to be consistent. 
- # Set the spatial scale of the output datacube + # Set the spatial and spectral scales of the output datacube self.set_voxel_sampling() # Align the frames From a7d4e3a0d5e748b38bdc4f0ad79037cd14ddd784 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 16:20:08 +0100 Subject: [PATCH 22/81] make a start --- pypeit/coadd3d.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 3ee138139f..1b963f1ee7 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1141,6 +1141,10 @@ def load(self): this_specpos, this_spatpos = np.where(onslit_gpm) this_spatid = slitid_img_init[onslit_gpm] + # Astrometric alignment to HST frames + # TODO :: RJC requests this remains here... it is only used by RJC + ra_sort, dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort) + # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now numpix = ra_sort.size @@ -1808,3 +1812,31 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w residcube *= nc_inverse return flxcube, varcube, bpmcube, residcube return flxcube, varcube, bpmcube + + +def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort): + """ + This is currently only used by RJC. This function adds corrections to the RA and Dec pixels + to align the daatcubes to an HST image. + + Process: + * Send away pixel RA, Dec, wave, flux, error. 
+ * ------ + * Compute emission line map + - Need to generate full cube around H I gamma + - Fit to continuum and subtract it off + - Sum all flux above continuum + - Estimate error + * MPFIT HST emission line map to + * ------ + * Return updated pixel RA, Dec + """ + ############ + ## STEP 1 ## - Create a datacube around Hgamma + ############ + # Only use a small wavelength range + wv_mask = (wave_sort>) & (wave_sort<) + # Create a WCS for this subcube + image_wcs, voxedge, reference_image = datacube.create_wcs(ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], + dspat, wavediff, collapse=True) + # Compute an emission line map that is as consistent as possible to an archival HST image From d8f0f5594e464d266c4f1c8da0ea8fc584b9e80b Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 16:37:21 +0100 Subject: [PATCH 23/81] wrapper for create_wcs --- pypeit/coadd3d.py | 60 ++++------------------ pypeit/core/datacube.py | 110 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 49 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 3ee138139f..9e276a31d8 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -392,11 +392,12 @@ def check_outputs(self): if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) - def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equinox=2000.0, + def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwave, collapse=False, equinox=2000.0, specname="PYP_SPEC"): """ Create a WCS and the expected edges of the voxels, based on user-specified - parameters or the extremities of the data. + parameters or the extremities of the data. This is a convenience function + that calls the core function in `pypeit.core.datacube`_. 
Parameters ---------- @@ -412,7 +413,7 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equi dspat : float Spatial size of each square voxel (in arcsec). The default is to use the values in cubepar. - dwv : float + dwave : float Linear wavelength step of each voxel (in Angstroms) collapse : bool, optional If True, the spectral dimension will be collapsed to a single channel @@ -432,9 +433,6 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equi reference_image : `numpy.ndarray`_ The reference image to be used for the cross-correlation. Can be None. """ - # Grab cos(dec) for convenience - cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) - # Setup the cube ranges reference_image = None # The default behaviour is that the reference image is not used ra_min = self.cubepar['ra_min'] if self.cubepar['ra_min'] is not None else np.min(all_ra) @@ -443,49 +441,13 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, equi dec_max = self.cubepar['dec_max'] if self.cubepar['dec_max'] is not None else np.max(all_dec) wav_min = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else np.min(all_wave) wav_max = self.cubepar['wave_max'] if self.cubepar['wave_max'] is not None else np.max(all_wave) - dwave = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else dwv - - # Number of voxels in each dimension - numra = int((ra_max - ra_min) * cosdec / dspat) - numdec = int((dec_max - dec_min) / dspat) - numwav = int(np.round((wav_max - wav_min) / dwave)) - - # If a white light WCS is being generated, make sure there's only 1 wavelength bin - if collapse: - wav_min = np.min(all_wave) - wav_max = np.max(all_wave) - dwave = wav_max - wav_min - numwav = 1 - - # Generate a master WCS to register all frames - coord_min = [ra_min, dec_min, wav_min] - coord_dlt = [dspat, dspat, dwave] - - # If a reference image is being used and a white light image is requested (collapse=True) 
update the celestial parts - if self.cubepar["reference_image"] is not None: - # Load the requested reference image - reference_image, imgwcs = datacube.load_imageWCS(self.cubepar["reference_image"]) - # Update the celestial WCS - coord_min[:2] = imgwcs.wcs.crval - coord_dlt[:2] = imgwcs.wcs.cdelt - numra, numdec = reference_image.shape - - cubewcs = datacube.generate_WCS(coord_min, coord_dlt, equinox=equinox, name=specname) - msgs.info(msgs.newline() + "-" * 40 + - msgs.newline() + "Parameters of the WCS:" + - msgs.newline() + "RA min = {0:f}".format(coord_min[0]) + - msgs.newline() + "DEC min = {0:f}".format(coord_min[1]) + - msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) + - msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0 * dspat) + - msgs.newline() + "Wavelength step = {0:f} A".format(dwave) + - msgs.newline() + "-" * 40) - - # Generate the output binning - xbins = np.arange(1 + numra) - 0.5 - ybins = np.arange(1 + numdec) - 0.5 - spec_bins = np.arange(1 + numwav) - 0.5 - voxedges = (xbins, ybins, spec_bins) - return cubewcs, voxedges, reference_image + if self.cubepar['wave_delta'] is not None: + dwave = self.cubepar['wave_delta'] + + return datacube.create_wcs(all_ra, all_dec, all_wave, dspat, dwave, ra_min=ra_min, ra_max=ra_max, + dec_min=dec_min, dec_max=dec_max, wave_min=wav_min, wave_max=wav_max, + reference=self.cubepar['reference_image'], collapse=collapse, equinox=equinox, + specname=specname) def make_sensfunc(self): """ diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 0c299827ac..0fc77c318e 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -655,6 +655,116 @@ def load_imageWCS(filename, ext=0): return image, imgwcs +def create_wcs(all_ra, all_dec, all_wave, dspat, dwave, + ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None, + reference=None, collapse=False, equinox=2000.0, specname="PYP_SPEC"): + """ + Create a WCS and the expected edges of 
the voxels, based on user-specified + parameters or the extremities of the data. + + Parameters + ---------- + all_ra : `numpy.ndarray`_ + 1D flattened array containing the RA values of each pixel from all + spec2d files + all_dec : `numpy.ndarray`_ + 1D flattened array containing the DEC values of each pixel from all + spec2d files + all_wave : `numpy.ndarray`_ + 1D flattened array containing the wavelength values of each pixel from + all spec2d files + dspat : float + Spatial size of each square voxel (in arcsec). The default is to use the + values in cubepar. + dwave : float + Linear wavelength step of each voxel (in Angstroms) + ra_min : float, optional + Minimum RA of the WCS (degrees) + ra_max : float, optional + Maximum RA of the WCS (degrees) + dec_min : float, optional + Minimum Dec of the WCS (degrees) + dec_max : float, optional + Maximum Dec of the WCS (degrees) + wave_min : float, optional + Minimum wavelength of the WCS (degrees) + wave_max : float, optional + Maximum wavelength of the WCS (degrees) + reference : str, optional + Filename of a fits file that contains a WCS in the Primary HDU. + collapse : bool, optional + If True, the spectral dimension will be collapsed to a single channel + (primarily for white light images) + equinox : float, optional + Equinox of the WCS + specname : str, optional + Name of the spectrograph + + Returns + ------- + cubewcs : `astropy.wcs.WCS`_ + astropy WCS to be used for the combined cube + voxedges : tuple + A three element tuple containing the bin edges in the x, y (spatial) and + z (wavelength) dimensions + reference_image : `numpy.ndarray`_ + The reference image to be used for the cross-correlation. Can be None. 
+ """ + # Grab cos(dec) for convenience + cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0) + + # Setup the cube ranges + _ra_min = ra_min if ra_min is not None else np.min(all_ra) + _ra_max = ra_max if ra_max is not None else np.max(all_ra) + _dec_min = dec_min if dec_min is not None else np.min(all_dec) + _dec_max = dec_max if dec_max is not None else np.max(all_dec) + _wav_min = wave_min if wave_min is not None else np.min(all_wave) + _wav_max = wave_max if wave_max is not None else np.max(all_wave) + # dwave = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else dwv + + # Number of voxels in each dimension + numra = int((_ra_max - _ra_min) * cosdec / dspat) + numdec = int((_dec_max - _dec_min) / dspat) + numwav = int(np.round((_wav_max - _wav_min) / dwave)) + + # If a white light WCS is being generated, make sure there's only 1 wavelength bin + if collapse: + _wav_min = np.min(all_wave) + _wav_max = np.max(all_wave) + dwave = _wav_max - _wav_min + numwav = 1 + + # Generate a master WCS to register all frames + coord_min = [_ra_min, _dec_min, _wav_min] + coord_dlt = [dspat, dspat, dwave] + + # If a reference image is being used and a white light image is requested (collapse=True) update the celestial parts + if reference is not None: + # Load the requested reference image + reference_image, imgwcs = load_imageWCS(reference) + # Update the celestial WCS + coord_min[:2] = imgwcs.wcs.crval + coord_dlt[:2] = imgwcs.wcs.cdelt + numra, numdec = reference_image.shape + + cubewcs = generate_WCS(coord_min, coord_dlt, equinox=equinox, name=specname) + msgs.info(msgs.newline() + "-" * 40 + + msgs.newline() + "Parameters of the WCS:" + + msgs.newline() + "RA min = {0:f}".format(coord_min[0]) + + msgs.newline() + "DEC min = {0:f}".format(coord_min[1]) + + msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(_wav_min, _wav_max) + + msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0 * dspat) + + msgs.newline() + "Wavelength step = {0:f} 
A".format(dwave) + + msgs.newline() + "-" * 40) + + # Generate the output binning + xbins = np.arange(1 + numra) - 0.5 + ybins = np.arange(1 + numdec) - 0.5 + spec_bins = np.arange(1 + numwav) - 0.5 + voxedges = (xbins, ybins, spec_bins) + return cubewcs, voxedges, reference_image + + def generate_WCS(crval, cdelt, equinox=2000.0, name="PYP_SPEC"): """ Generate a WCS that will cover all input spec2D files From c033320fb9f16a420427d141ebd9ef6234b6cd79 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 19:56:29 +0100 Subject: [PATCH 24/81] moving forward --- pypeit/coadd3d.py | 33 ++++++++++++++++++++++++++++----- pypeit/core/datacube.py | 1 + 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 4baf0f89a0..6f5d8541f9 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1105,7 +1105,10 @@ def load(self): # Astrometric alignment to HST frames # TODO :: RJC requests this remains here... it is only used by RJC - ra_sort, dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort) + ra_sort, dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, np.max(self._spatscale[ff,:]), self._specscale[ff], + np.ones(ra_sort.size), this_specpos[wvsrt], this_specpos[wvsrt], + this_spatid[wvsrt], spec2DObj.tilts, slits, alignSplines, + ) # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now @@ -1776,7 +1779,10 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w return flxcube, varcube, bpmcube -def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort): +def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, dspat, dwave, + wghts, spatpos, specpos, + all_spatid, tilts, slits, astrom_trans, + spat_subpixel=10, spec_subpixel=10): """ This is currently only used by RJC. 
This function adds corrections to the RA and Dec pixels to align the daatcubes to an HST image. @@ -1796,9 +1802,26 @@ def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort): ############ ## STEP 1 ## - Create a datacube around Hgamma ############ + embed() # Only use a small wavelength range - wv_mask = (wave_sort>) & (wave_sort<) + wv_mask = (wave_sort > 4345.0) & (wave_sort < 4359.0) # Create a WCS for this subcube - image_wcs, voxedge, reference_image = datacube.create_wcs(ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], - dspat, wavediff, collapse=True) + subcube_wcs, voxedge, reference_image = datacube.create_wcs(ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], + dspat, dwave, collapse=True) + # Create the subcube + flxcube, varcube, bpmcube = subpixellate(subcube_wcs[wv_mask], ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], flux_sort[wv_mask], ivar_sort[wv_mask], + wghts[wv_mask], spatpos[wv_mask], specpos[wv_mask], + all_spatid, tilts, slits, astrom_trans, + voxedge, all_idx=None, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=False) + + + # generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, + # all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + # all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None, + # blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None, + # specname="PYP_SPEC", debug=False) + # Compute an emission line map that is as consistent as possible to an archival HST image + + return ra_corr, dec_corr \ No newline at end of file diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 0fc77c318e..61a8deec7b 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -739,6 +739,7 @@ def create_wcs(all_ra, all_dec, all_wave, dspat, dwave, coord_dlt = [dspat, dspat, dwave] # If a reference image is being used and a white light image is requested 
(collapse=True) update the celestial parts + reference_image = None if reference is not None: # Load the requested reference image reference_image, imgwcs = load_imageWCS(reference) From f7fa87d1d3d1459ae3e505a6fdb49b5e8c6d4c76 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 19:58:57 +0100 Subject: [PATCH 25/81] fixes --- pypeit/coadd3d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 6f5d8541f9..41d95b90d8 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1809,9 +1809,9 @@ def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, dspat, dwa subcube_wcs, voxedge, reference_image = datacube.create_wcs(ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], dspat, dwave, collapse=True) # Create the subcube - flxcube, varcube, bpmcube = subpixellate(subcube_wcs[wv_mask], ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], flux_sort[wv_mask], ivar_sort[wv_mask], + flxcube, varcube, bpmcube = subpixellate(subcube_wcs, ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], flux_sort[wv_mask], ivar_sort[wv_mask], wghts[wv_mask], spatpos[wv_mask], specpos[wv_mask], - all_spatid, tilts, slits, astrom_trans, + all_spatid[wv_mask], tilts, slits, astrom_trans, voxedge, all_idx=None, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=False) From 8a1bd3912132df4f356688b06cf05ecce5351e1f Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 13 Sep 2023 19:59:31 +0100 Subject: [PATCH 26/81] fix refimg --- pypeit/core/datacube.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 0fc77c318e..61a8deec7b 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -739,6 +739,7 @@ def create_wcs(all_ra, all_dec, all_wave, dspat, dwave, coord_dlt = [dspat, dspat, dwave] # If a reference image is being used and a white light image is requested (collapse=True) update the celestial parts + reference_image = 
None if reference is not None: # Load the requested reference image reference_image, imgwcs = load_imageWCS(reference) From 138bdd4f6d8c13a4281d969dbd160fba53e84474 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 20 Sep 2023 18:59:10 +0100 Subject: [PATCH 27/81] DAR fixed --- doc/coadd3d.rst | 2 +- pypeit/coadd3d.py | 332 ++++++++++++++++++++++++++++------------- pypeit/core/flat.py | 8 +- pypeit/find_objects.py | 5 + 4 files changed, 241 insertions(+), 106 deletions(-) diff --git a/doc/coadd3d.rst b/doc/coadd3d.rst index d0b2454297..5d43e692e8 100644 --- a/doc/coadd3d.rst +++ b/doc/coadd3d.rst @@ -330,7 +330,7 @@ plot a wavelength slice of the cube: from matplotlib import pyplot as plt from astropy.visualization import ZScaleInterval, ImageNormalize - from pypeit.core.datacube import DataCube + from pypeit.coadd3d import DataCube filename = "datacube.fits" cube = DataCube.from_file(filename) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 41d95b90d8..e112e3b5da 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -13,6 +13,7 @@ from astropy.io import fits from scipy.interpolate import interp1d import numpy as np +import ref_index # TODO :: Could just copy this code into the DAR class? from pypeit import msgs from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils @@ -195,6 +196,103 @@ def wcs(self): return wcs.WCS(self.head0) +class DARcorrection: + """ + This class holds all of the functions needed to quickly compute the differential atmospheric refraction correction. + """ + def __init__(self, hdr0, cosdec, spectrograph=None, wave_ref=4500.0): + """ + Args: + hdr0 (`astropy.io.fits.Header`_): + Header of the spec2d file. This input should be retrieved from spec2DObj.head0 + spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional): + The name or instance of the spectrograph used to obtain the data. + If None, this is pulled from the file header. 
+ cosdec (:obj:`float`): + Cosine of the target declination. + wave_ref (:obj:`float`, optional): + Reference wavelength (The DAR correction will be performed relative to this wavelength) + """ + msgs.info("Preparing the parameters for the DAR correction") + # Check on Spectrograph input + if spectrograph is None: + spectrograph = hdr0['PYP_SPEC'] + + if isinstance(spectrograph, str): + self.spec = load_spectrograph(spectrograph) + self.specname = spectrograph + else: + # Assume it's a Spectrograph instance + self.spec = spectrograph + self.specname = spectrograph.name + + # Get DAR parameters + self.airmass = self.spec.get_meta_value([hdr0], 'airmass') # unitless + self.parangle = self.spec.get_meta_value([hdr0], 'parangle') + self.pressure = self.spec.get_meta_value([hdr0], 'pressure') # units are pascals + self.temperature = self.spec.get_meta_value([hdr0], 'temperature') # units are degrees C + self.humidity = self.spec.get_meta_value([hdr0], 'humidity') # Expressed as a percentage (not a fraction!) 
+ self.co2 = 400.0 # units are mu-mole/mole + self.wave_ref = wave_ref # This should be in Angstroms + self.cosdec = cosdec + + # Print out the DAR parameters + msgs.info("DAR correction parameters:" + msgs.newline() + + " Airmass = {0:.2f}".format(self.airmass) + msgs.newline() + + " Pressure = {0:.2f} Pa".format(self.pressure) + msgs.newline() + + " Humidity = {0:.2f} %".format(self.humidity) + msgs.newline() + + " Temperature = {0:.2f} deg C".format(self.temperature) + msgs.newline() + + " Reference wavelength = {0:.2f}".format(self.wave_ref)) + + def calculate_dispersion(self, waves): + """ Calculate the total atmospheric dispersion relative to the reference wavelength + + Parameters + ---------- + waves : `np.ndarray`_ + 1D array of wavelengths (units must be Angstroms) + + Returns + ------- + full_dispersion : :obj:`float` + The atmospheric dispersion (in degrees) for each wavelength input + """ + + # Calculate + z = np.arccos(1.0/self.airmass) + + n0 = ref_index.ciddor(wave=self.wave_ref/10.0, t=self.temperature, p=self.pressure, rh=self.humidity, co2=self.co2) + n1 = ref_index.ciddor(wave=waves/10.0, t=self.temperature, p=self.pressure, rh=self.humidity, co2=self.co2) + + return (180.0/np.pi) * (n0 - n1) * np.tan(z) # This is in degrees + + def correction(self, waves): + """ + Main routine that computes the DAR correction for both right ascension and declination. + + Parameters + ---------- + waves : `np.ndarray`_ + 1D array of wavelengths (units must be Angstroms) + + Returns + ------- + ra_corr : `np.ndarray`_ + The RA component of the atmospheric dispersion correction (in degrees) for each wavelength input. + dec_corr : `np.ndarray`_ + The Dec component of the atmospheric dispersion correction (in degrees) for each wavelength input. 
+ """ + # Determine the correction angle + corr_ang = self.parangle - np.pi/2 + # Calculate the full amount of refraction + dar_full = self.calculate_dispersion(waves) + # Calculate the correction in dec and RA for each detector pixel + # These numbers should be ADDED to the original RA and Dec values + ra_corr = (dar_full/self.cosdec)*np.cos(corr_ang) + dec_corr = -dar_full*np.sin(corr_ang) + return ra_corr, dec_corr + + class CoAdd3D: """ Main routine to convert processed PypeIt spec2d frames into @@ -287,7 +385,7 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite self.all_sci, self.all_ivar, self.all_idx, self.all_wghts = np.array([]), np.array([]), np.array([]), np.array([]) self.all_spatpos, self.all_specpos, self.all_spatid = np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int) self.all_tilts, self.all_slits, self.all_align = [], [], [] - self.all_wcs = [] + self.all_wcs, self.all_dar = [], [] self.weights = np.ones(self.numfiles) # Weights to use when combining cubes self._dspat = None if self.cubepar['spatial_delta'] is None else self.cubepar['spatial_delta'] / 3600.0 # binning size on the sky (/3600 to convert to degrees) @@ -299,7 +397,7 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite self.align = self.cubepar['align'] # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. 
if self.numfiles == 1 and self.cubepar["reference_image"] is None: - if not self.align: + if self.align: msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image") msgs.info("Setting 'align' to False") self.align = False @@ -670,48 +768,48 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): # Return the skysub params for this frame return this_skysub, skyImg, skyScl - def compute_DAR(self, hdr0, waves, cosdec, wave_ref=None): - """ - Compute the differential atmospheric refraction correction for a given frame. - - Args: - hdr0 (`astropy.io.fits.Header`_): - Header of the spec2d file. This input should be retrieved from spec2DObj.head0 - waves (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = Angstroms) - cosdec (:obj:`float`): - Cosine of the target declination. - wave_ref (:obj:`float`, optional): - Reference wavelength (The DAR correction will be performed relative to this wavelength) - - Returns: - `numpy.ndarray`_: 1D differential RA for each wavelength of the input waves array - `numpy.ndarray`_: 1D differential Dec for each wavelength of the input waves array - """ - if wave_ref is None: - wave_ref = 0.5 * (np.min(waves) + np.max(waves)) - # Get DAR parameters - raval = self.spec.get_meta_value([hdr0], 'ra') - decval = self.spec.get_meta_value([hdr0], 'dec') - obstime = self.spec.get_meta_value([hdr0], 'obstime') - pressure = self.spec.get_meta_value([hdr0], 'pressure') - temperature = self.spec.get_meta_value([hdr0], 'temperature') - rel_humidity = self.spec.get_meta_value([hdr0], 'humidity') - coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) - location = self.spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) - # Set a default value - ra_corr, dec_corr = 0.0, 0.0 - if pressure == 0.0: - msgs.warn("Pressure is set to zero - DAR correction will not be performed") - else: - msgs.info("DAR 
correction parameters:" + msgs.newline() + - " Pressure = {0:f} bar".format(pressure) + msgs.newline() + - " Temperature = {0:f} deg C".format(temperature) + msgs.newline() + - " Humidity = {0:f}".format(rel_humidity)) - ra_corr, dec_corr = datacube.correct_dar(waves, coord, obstime, location, - pressure * units.bar, temperature * units.deg_C, rel_humidity, - wave_ref=wave_ref) - return ra_corr*cosdec, dec_corr + # def compute_DAR(self, hdr0, waves, cosdec, wave_ref=None): + # """ + # Compute the differential atmospheric refraction correction for a given frame. + # + # Args: + # hdr0 (`astropy.io.fits.Header`_): + # Header of the spec2d file. This input should be retrieved from spec2DObj.head0 + # waves (`numpy.ndarray`_): + # 1D flattened array containing the wavelength of each pixel (units = Angstroms) + # cosdec (:obj:`float`): + # Cosine of the target declination. + # wave_ref (:obj:`float`, optional): + # Reference wavelength (The DAR correction will be performed relative to this wavelength) + # + # Returns: + # `numpy.ndarray`_: 1D differential RA for each wavelength of the input waves array + # `numpy.ndarray`_: 1D differential Dec for each wavelength of the input waves array + # """ + # if wave_ref is None: + # wave_ref = 0.5 * (np.min(waves) + np.max(waves)) + # # Get DAR parameters + # raval = self.spec.get_meta_value([hdr0], 'ra') + # decval = self.spec.get_meta_value([hdr0], 'dec') + # obstime = self.spec.get_meta_value([hdr0], 'obstime') + # pressure = self.spec.get_meta_value([hdr0], 'pressure') + # temperature = self.spec.get_meta_value([hdr0], 'temperature') + # rel_humidity = self.spec.get_meta_value([hdr0], 'humidity') + # coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) + # location = self.spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) + # # Set a default value + # ra_corr, dec_corr = 0.0, 0.0 + # if pressure == 0.0: + # msgs.warn("Pressure is set to zero - DAR correction 
will not be performed") + # else: + # msgs.info("DAR correction parameters:" + msgs.newline() + + # " Pressure = {0:f} bar".format(pressure) + msgs.newline() + + # " Temperature = {0:f} deg C".format(temperature) + msgs.newline() + + # " Humidity = {0:f}".format(rel_humidity)) + # ra_corr, dec_corr = datacube.correct_dar(waves, coord, obstime, location, + # pressure * units.bar, temperature * units.deg_C, rel_humidity, + # wave_ref=wave_ref) + # return ra_corr, dec_corr def align_user_offsets(self): """ @@ -767,16 +865,16 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): Parameters ---------- - spec2DObj : :class:`~pypeit.spec2dobj.Spec2DObj`_): + spec2DObj : :class:`~pypeit.spec2dobj.Spec2DObj`_: 2D PypeIt spectra object. - slits : :class:`pypeit.slittrace.SlitTraceSet`_): + slits : :class:`pypeit.slittrace.SlitTraceSet`_: Class containing information about the slits spat_flexure: :obj:`float`, optional: Spatial flexure in pixels Returns ------- - alignSplines : :class:`~pypeit.alignframe.AlignmentSplines`_) + alignSplines : :class:`~pypeit.alignframe.AlignmentSplines`_ Alignment splines used for the astrometric correction """ # Loading the alignments frame for these data @@ -955,8 +1053,10 @@ def load(self): relScale = spec2DObj.scaleimg / relScaleImg # This factor is applied to the sky subtracted science frame # Extract the relevant information from the spec2d file - sciImg = (spec2DObj.sciimg - skyImg * relSclSky) * relScale # Subtract sky and apply relative illumination - ivar = spec2DObj.ivarraw / relScale ** 2 + # sciImg = (spec2DObj.sciimg - skyImg * relSclSky) * relScale # Subtract sky and apply relative illumination + # ivar = spec2DObj.ivarraw / relScale ** 2 + sciImg = spec2DObj.sciimg + ivar = spec2DObj.ivarraw waveimg = spec2DObj.waveimg bpmmask = spec2DObj.bpmmask @@ -1047,11 +1147,9 @@ def load(self): # Here's an array to get back to the original ordering resrt = np.argsort(wvsrt) - # Perform the DAR correction + # Compute the DAR 
correction cosdec = np.cos(np.mean(dec_sort) * np.pi / 180.0) - ra_corr, dec_corr = self.compute_DAR(spec2DObj.head0, wave_sort, cosdec, wave_ref=wave_ref) - ra_sort += ra_corr - dec_sort += dec_corr + darcorr = DARcorrection(spec2DObj.head0, cosdec, spectrograph=self.spec) # Perform extinction correction msgs.info("Applying extinction correction") @@ -1103,12 +1201,19 @@ def load(self): this_specpos, this_spatpos = np.where(onslit_gpm) this_spatid = slitid_img_init[onslit_gpm] + ################################## # Astrometric alignment to HST frames # TODO :: RJC requests this remains here... it is only used by RJC - ra_sort, dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, np.max(self._spatscale[ff,:]), self._specscale[ff], - np.ones(ra_sort.size), this_specpos[wvsrt], this_specpos[wvsrt], - this_spatid[wvsrt], spec2DObj.tilts, slits, alignSplines, - ) + # _ra_sort, _dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, np.max(self._spatscale[ff,:]), self._specscale[ff], + # np.ones(ra_sort.size), this_spatpos[wvsrt], this_specpos[wvsrt], + # this_spatid[wvsrt], spec2DObj.tilts, slits, alignSplines) + # ra_del = np.median(_ra_sort-ra_sort) + # dec_del = np.median(_dec_sort-dec_sort) + # ra_sort = _ra_sort + # dec_sort = _dec_sort + # spec2DObj.head0['RA'] += ra_del + # spec2DObj.head0['DEC'] += dec_del + ################################## # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now @@ -1139,7 +1244,7 @@ def load(self): generate_cube_subpixel(outfile, output_wcs, ra_sort[resrt], dec_sort[resrt], wave_sort[resrt], flux_sort[resrt], ivar_sort[resrt], np.ones(numpix), this_spatpos, this_specpos, this_spatid, - spec2DObj.tilts, slits, alignSplines, bins, + spec2DObj.tilts, slits, alignSplines, darcorr, bins, all_idx=None, overwrite=self.overwrite, blaze_wave=self.blaze_wave, blaze_spec=self.blaze_spec, fluxcal=self.fluxcal, 
specname=self.specname, whitelight_range=wl_wvrng, @@ -1160,6 +1265,7 @@ def load(self): self.all_tilts.append(spec2DObj.tilts) self.all_slits.append(slits) self.all_align.append(alignSplines) + self.all_dar.append(darcorr) def run_align(self): """ @@ -1237,8 +1343,8 @@ def compute_weights(self): wl_full = generate_image_subpixel(image_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_wghts, self.all_spatpos, self.all_specpos, self.all_spatid, - self.all_tilts, self.all_slits, self.all_align, voxedge, all_idx=self.all_idx, - spec_subpixel=1, spat_subpixel=1, combine=True) + self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, + all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) # Compute the weights self.all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, wl_full[:, :, 0], self._dspat, self._dwv, relative_weights=self.cubepar['relative_weights']) @@ -1287,7 +1393,7 @@ def coadd(self): self.set_voxel_sampling() # Align the frames - if self.align: + if self.align and False: self.run_align() # Compute the relative weights on the spectra @@ -1313,9 +1419,11 @@ def coadd(self): np.min(self.mnmx_wv[:, :, 1]), self.cubepar['whitelight_range']) if self.combine: + # TODO :: remove the following line... 
it's just temporary + outfile = datacube.get_output_filename(self.spec2d[0], "", False, 1) generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, np.ones(self.all_wghts.size), # all_wghts, - self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, self.all_align, vox_edges, + self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, self.all_align, self.all_dar, vox_edges, all_idx=self.all_idx, overwrite=self.overwrite, blaze_wave=self.blaze_wave, blaze_spec=self.blaze_spec, fluxcal=self.fluxcal, sensfunc=sensfunc, specname=self.specname, whitelight_range=wl_wvrng, @@ -1327,7 +1435,7 @@ def coadd(self): generate_cube_subpixel(outfile, cube_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], self.all_sci[ww], self.all_ivar[ww], np.ones(self.all_wghts[ww].size), self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], self.all_tilts[ff], - self.all_slits[ff], self.all_align[ff], vox_edges, + self.all_slits[ff], self.all_align[ff], self.all_dar[ff], vox_edges, all_idx=self.all_idx[ww], overwrite=self.overwrite, blaze_wave=self.blaze_wave, blaze_spec=self.blaze_spec, fluxcal=self.fluxcal, sensfunc=sensfunc, specname=self.specname, @@ -1336,7 +1444,7 @@ def coadd(self): def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None, spec_subpixel=10, spat_subpixel=10, combine=False): """ Generate a white light image from the input pixels @@ -1379,6 +1487,9 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i A Class containing the transformation between detector pixel coordinates and WCS pixel coordinates, or a list of Alignment Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A 
Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as astrom_trans. bins (tuple): A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial and z wavelength coordinates @@ -1413,9 +1524,9 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i numfr = 1 else: numfr = np.unique(_all_idx).size - if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr: + if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr or len(all_dar) != numfr: msgs.error("The following arguments must be the same length as the expected number of frames to be combined:" - + msgs.newline() + "tilts, slits, astrom_trans") + + msgs.newline() + "tilts, slits, astrom_trans, all_dar") # Prepare the array of white light images to be stored numra = bins[0].size-1 numdec = bins[1].size-1 @@ -1428,26 +1539,26 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i # Subpixellate img, _, _ = subpixellate(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, - all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, all_idx=_all_idx) else: ww = np.where(_all_idx == fr) # Subpixellate img, _, _ = subpixellate(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], all_sci[ww], all_ivar[ww], all_wghts[ww], all_spatpos[ww], - all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], bins, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], + all_dar[fr], bins, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) all_wl_imgs[:, :, fr] = img[:, :, 0] # Return the constructed white light images return all_wl_imgs def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, 
all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, + all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None, blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None, specname="PYP_SPEC", debug=False): - r""" + """ Save a datacube using the subpixel algorithm. Refer to the subpixellate() docstring for further details about this algorithm @@ -1491,6 +1602,9 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s A Class containing the transformation between detector pixel coordinates and WCS pixel coordinates, or a list of Alignment Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as astrom_trans. bins (tuple): A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial and z wavelength coordinates @@ -1549,7 +1663,7 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s # Subpixellate subpix = subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, bins, all_idx=all_idx, + all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=all_idx, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug) # Extract the variables that we need if debug: @@ -1591,7 +1705,7 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, bins, all_idx=None, + all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None, spec_subpixel=10, spat_subpixel=10, debug=False): r""" Subpixellate the input data 
into a datacube. This algorithm splits each @@ -1644,6 +1758,9 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w A Class containing the transformation between detector pixel coordinates and WCS pixel coordinates, or a list of Alignment Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as astrom_trans. bins (tuple): A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial and z wavelength coordinates @@ -1677,12 +1794,12 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w residual cube. The latter is only returned if debug is True. """ # Check for combinations of lists or not - if type(tilts) is list and type(slits) is list and type(astrom_trans) is list: + if type(tilts) is list and type(slits) is list and type(astrom_trans) is list and type(all_dar) is list: # Several frames are being combined. 
Check the lists have the same length numframes = len(tilts) - if len(slits) != numframes or len(astrom_trans) != numframes: + if len(slits) != numframes or len(astrom_trans) != numframes or len(all_dar) != numframes: msgs.error("The following lists must have the same length:" + msgs.newline() + - "tilts, slits, astrom_trans") + "tilts, slits, astrom_trans, all_dar") # Check all_idx has been set if all_idx is None: if numframes != 1: @@ -1694,16 +1811,16 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w if tmp != numframes: msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.") # Store in the following variables - _tilts, _slits, _astrom_trans = tilts, slits, astrom_trans + _tilts, _slits, _astrom_trans, _all_dar = tilts, slits, astrom_trans, all_dar elif type(tilts) is not list and type(slits) is not list and \ - type(astrom_trans) is not list: + type(astrom_trans) is not list and type(all_dar) is not list: # Just a single frame - store as lists for this code - _tilts, _slits, _astrom_trans = [tilts], [slits], [astrom_trans], + _tilts, _slits, _astrom_trans, _all_dar = [tilts], [slits], [astrom_trans], [all_dar] all_idx = np.zeros(all_sci.size) numframes = 1 else: msgs.error("The following input arguments should all be of type 'list', or all not be type 'list':" + - msgs.newline() + "tilts, slits, astrom_trans") + msgs.newline() + "tilts, slits, astrom_trans, all_dar") # Prepare the output arrays outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1) binrng = [[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]] @@ -1737,22 +1854,31 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w wspl = all_wave[this_sl] asrt = np.argsort(yspl) wave_spl = interp1d(yspl[asrt], wspl[asrt], kind='linear', bounds_error=False, fill_value='extrapolate') + # Calculate the wavelength at each subpixel + this_wave = wave_spl(tiltpos) + # Calculate the DAR 
correction at each sub pixel + ra_corr, dec_corr = _all_dar[fr].correction(this_wave) # This routine needs the wavelengths to be expressed in Angstroms # Calculate spatial and spectral positions of the subpixels spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() # Transform this to spatial location spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy) spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl]) - ra_coeff = np.polyfit(spatpos, all_ra[this_sl], 1) - dec_coeff = np.polyfit(spatpos, all_dec[this_sl], 1) - this_ra = np.polyval(ra_coeff, spatpos_subpix)#ra_spl(spatpos_subpix) - this_dec = np.polyval(dec_coeff, spatpos_subpix)#dec_spl(spatpos_subpix) - # ssrt = np.argsort(spatpos) - # ra_spl = interp1d(spatpos[ssrt], all_ra[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') - # dec_spl = interp1d(spatpos[ssrt], all_dec[this_sl][ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') - # this_ra = ra_spl(spatpos_subpix) - # this_dec = dec_spl(spatpos_subpix) - this_wave = wave_spl(tiltpos) + # OLD (WRONG) ROUTINE + # ra_coeff = np.polyfit(spatpos, all_ra[this_sl], 1) + # dec_coeff = np.polyfit(spatpos, all_dec[this_sl], 1) + # this_ra = np.polyval(ra_coeff, spatpos_subpix)#ra_spl(spatpos_subpix) + # this_dec = np.polyval(dec_coeff, spatpos_subpix)#dec_spl(spatpos_subpix) + ssrt = np.argsort(spatpos) + tmp_ra = all_ra[this_sl] + tmp_dec = all_dec[this_sl] + ra_spl = interp1d(spatpos[ssrt], tmp_ra[ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') + dec_spl = interp1d(spatpos[ssrt], tmp_dec[ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') + this_ra = ra_spl(spatpos_subpix) + this_dec = dec_spl(spatpos_subpix) + # Now apply the DAR correction + this_ra += ra_corr + this_dec += dec_corr # Convert world coordinates to voxel coordinates, then histogram vox_coord = 
output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0) if histogramdd is not None: @@ -1799,29 +1925,33 @@ def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, dspat, dwa * ------ * Return updated pixel RA, Dec """ + from pypeit import astrometry ############ ## STEP 1 ## - Create a datacube around Hgamma ############ - embed() # Only use a small wavelength range - wv_mask = (wave_sort > 4345.0) & (wave_sort < 4359.0) + wv_mask = (wave_sort > 4346.0) & (wave_sort < 4358.0) # Create a WCS for this subcube subcube_wcs, voxedge, reference_image = datacube.create_wcs(ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], - dspat, dwave, collapse=True) + dspat, dwave) # Create the subcube flxcube, varcube, bpmcube = subpixellate(subcube_wcs, ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], flux_sort[wv_mask], ivar_sort[wv_mask], wghts[wv_mask], spatpos[wv_mask], specpos[wv_mask], all_spatid[wv_mask], tilts, slits, astrom_trans, voxedge, all_idx=None, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=False) + if False: + hdu = fits.PrimaryHDU(flxcube) + hdu.writeto("tstHg.fits", overwrite=True) - - # generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, - # all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, bins, - # all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None, - # blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None, - # specname="PYP_SPEC", debug=False) - + ############ + ## STEP 2 ## - Create an emission line map of Hgamma + ############ # Compute an emission line map that is as consistent as possible to an archival HST image - + HgMap, HgMapErr = astrometry.fit_cube(flxcube.T, varcube.T, subcube_wcs) + ############ + ## STEP 3 ## - Map the emission line map to an HST image, and vice-versa + ############ + ra_corr, dec_corr = astrometry.map_image(HgMap, HgMapErr, subcube_wcs, ra_sort, 
dec_sort) + # embed() return ra_corr, dec_corr \ No newline at end of file diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index cb3ba6b1a2..3d8b640fc5 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -297,7 +297,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model, waveimg : `numpy.ndarray`_ Wavelength image slitmask : `numpy.ndarray`_ - A 2D int mask, the same shape as rawimg, indicating which pixels are on a slit. A zero value + A 2D int mask, the same shape as rawimg, indicating which pixels are on a slit. A -1 value indicates not on a slit, while any pixels on a slit should have the value of the slit spatial ID number. slitmask_trim : @@ -321,11 +321,11 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model, """ msgs.info(f"Performing relative spectral sensitivity correction (reference slit = {slit_illum_ref_idx})") # Generate the mask - _thismask = thismask if (thismask is not None) else slitmask + _thismask = thismask if (thismask is not None) else (slitmask > 0) gpm = gpmask if (gpmask is not None) else np.ones_like(rawimg, dtype=bool) # Extract the list of spatial IDs from the slitmask slitmask_spatid = np.unique(slitmask) - slitmask_spatid = np.sort(slitmask_spatid[slitmask_spatid != 0]) + slitmask_spatid = np.sort(slitmask_spatid[slitmask_spatid > 0]) # Initialise the scale image that will be returned scaleImg = np.ones_like(rawimg) # Divide the slit into several bins and calculate the median of each bin @@ -348,7 +348,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model, coeff = np.polyfit(wavcen[wgd], scale_bin[wgd], w=1/scale_err[wgd], deg=2) scaleImg[this_slit] *= np.polyval(coeff, waveimg[this_slit]) if sl == slit_illum_ref_idx: - scaleImg[_thismask] /= np.polyval(coeff, waveimg[_thismask]) + scaleImg[_thismask] *= utils.inverse(np.polyval(coeff, waveimg[_thismask])) minv, maxv = np.min(scaleImg[_thismask]), np.max(scaleImg[_thismask]) 
msgs.info("Minimum/Maximum scales = {0:.5f}, {1:.5f}".format(minv, maxv)) return scaleImg diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index 3d5d82be94..958bd340e5 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -1244,6 +1244,11 @@ def global_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), correction using the sky spectrum, if requested. See Reduce.global_skysub() for parameter definitions. """ + # Skip the sky subtraction, if requested by the user + if self.par['reduce']['findobj']['skip_skysub']: + msgs.info("Skipping global sky sub as per user request") + return np.zeros_like(self.sciImg.image) + # Generate a global sky sub for all slits separately global_sky_sep = super().global_skysub(skymask=skymask, update_crmask=update_crmask, trim_edg=trim_edg, show_fit=show_fit, show=show, From 5a45cc2c854524ad9084a35479cab892bf02bd20 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 20 Sep 2023 18:59:35 +0100 Subject: [PATCH 28/81] update spectrographs --- pypeit/core/meta.py | 5 +++-- pypeit/spectrographs/gemini_gnirs.py | 19 ++++++++++++++++--- pypeit/spectrographs/gtc_osiris.py | 17 +++++++++++++---- pypeit/spectrographs/keck_kcwi.py | 18 +++++++++++++----- 4 files changed, 45 insertions(+), 14 deletions(-) diff --git a/pypeit/core/meta.py b/pypeit/core/meta.py index a4eacf5f9b..0d521e9ff9 100644 --- a/pypeit/core/meta.py +++ b/pypeit/core/meta.py @@ -151,14 +151,15 @@ def define_additional_meta(nlamps=20): 'filter1': dict(dtype=str, comment='First filter in optical path'), 'frameno': dict(dtype=str, comment='Frame number provided by instrument software'), 'hatch': dict(dtype=str, comment='Position of instrument hatch'), - 'humidity': dict(dtype=float, comment='Relative humidity (0 to 1) at observation time'), + 'humidity': dict(dtype=float, comment='Humidity at observation time (as a percentage, not a fraction)'), 'idname': dict(dtype=str, comment='Instrument supplied frametype (e.g. 
bias)'), 'instrument': dict(dtype=str, comment='Header supplied instrument name'), 'mode': dict(dtype=str, comment='Observing mode'), 'object': dict(dtype=str, comment='Alternative object name (cf. target)'), 'obstime': dict(dtype=str, comment='Observation time'), 'oscansec': dict(dtype=str, comment='Overscan section (windowing)'), - 'pressure': dict(dtype=float, comment='Pressure (units.bar) at observation time'), + 'parangle': dict(dtype=float, comment='Parallactic angle (units.radian)'), + 'pressure': dict(dtype=float, comment='Pressure (units.pascal) at observation time'), 'seq_expno': dict(dtype=int, comment='Number of exposure in observing sequence'), 'slitwid': dict(dtype=float, comment='Slit width, sometimes distinct from decker'), 'slitlength': dict(dtype=float, comment='Slit length, used only for long slits'), diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index 54f07c3f16..58c6cd1f33 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -135,21 +135,33 @@ def compound_meta(self, headarr, meta_key): return 0.0 elif meta_key == 'pressure': try: - return headarr[0]['PRESSURE'] * 0.001 # Must be in astropy.units.bar + return headarr[0]['PRESSUR2'] # Must be in astropy.units.pascal except KeyError: msgs.warn("Pressure is not in header") - return 0.0 + msgs.info("The default pressure will be assumed: 61.1 kPa") + return 61.1E3 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C except KeyError: msgs.warn("Temperature is not in header") - return 0.0 + msgs.info("The default temperature will be assumed: 1.5 deg C") + return 1.5 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'humidity': try: + # Humidity expressed as a percentage, not a fraction return headarr[0]['HUMIDITY'] except KeyError: msgs.warn("Humidity is not in header") + msgs.info("The default relative humidity will be assumed: 20 %") + return 20.0 # van Kooten & 
Izett, arXiv:2208.11794 + elif meta_key == 'parangle': + try: + # Humidity expressed as a percentage, not a fraction + return headarr[0]['PARANGLE'] # Must be expressed in radians + except KeyError: + msgs.warn("Parallactic angle is not in header!") + msgs.info("The default parallactic angle will be assumed: 0 degrees") return 0.0 else: msgs.error("Not ready for this compound meta") @@ -585,6 +597,7 @@ def init_meta(self): self.meta['pressure'] = dict(card=None, compound=True, required=False) self.meta['temperature'] = dict(card=None, compound=True, required=False) self.meta['humidity'] = dict(card=None, compound=True, required=False) + self.meta['parangle'] = dict(card=None, compound=True, required=False) @classmethod def default_pypeit_par(cls): diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 3c6ca8dedf..03d91f3e18 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -173,22 +173,30 @@ def compound_meta(self, headarr, meta_key): return binning elif meta_key == 'pressure': try: - return headarr[0]['PRESSURE'] * 0.001 # Must be in astropy.units.bar + return headarr[0]['PRESSURE'] # Must be in astropy.units.pascal except KeyError: msgs.warn("Pressure is not in header") - return 0.0 + msgs.info("The default pressure will be assumed: 61.1 kPa") + return 61.1E3 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C except KeyError: msgs.warn("Temperature is not in header") - return 0.0 + msgs.info("The default temperature will be assumed: 1.5 deg C") + return 1.5 elif meta_key == 'humidity': try: return headarr[0]['HUMIDITY'] except KeyError: msgs.warn("Humidity is not in header") - return 0.0 + msgs.info("The default relative humidity will be assumed: 20 %") + return 20.0 + elif meta_key == 'parangle': + try: + return headarr[0]['PARANG'] # Must be expressed in radians + except KeyError: + msgs.error("Parallactic angle is not in header") elif 
meta_key == 'obstime': return Time(headarr[0]['DATE-END']) elif meta_key == 'gain': @@ -439,6 +447,7 @@ def init_meta(self): self.meta['pressure'] = dict(card=None, compound=True, required=False) self.meta['temperature'] = dict(card=None, compound=True, required=False) self.meta['humidity'] = dict(card=None, compound=True, required=False) + self.meta['parangle'] = dict(card=None, compound=True, required=False) @classmethod def default_pypeit_par(cls): diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 36fdfaee3f..dae8c2686b 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -86,6 +86,7 @@ def init_meta(self): self.meta['pressure'] = dict(card=None, compound=True, required=False) self.meta['temperature'] = dict(card=None, compound=True, required=False) self.meta['humidity'] = dict(card=None, compound=True, required=False) + self.meta['parangle'] = dict(card=None, compound=True, required=False) self.meta['instrument'] = dict(ext=0, card='INSTRUME') # Lamps @@ -211,11 +212,11 @@ def compound_meta(self, headarr, meta_key): return headarr[0][hdrstr] elif meta_key == 'pressure': try: - return headarr[0]['WXPRESS'] * 0.001 # Must be in astropy.units.bar + return headarr[0]['WXPRESS'] * 100.0 # Must be in astropy.units.pascal except KeyError: msgs.warn("Pressure is not in header") - msgs.info("The default pressure will be assumed: 0.611 bar") - return 0.611 + msgs.info("The default pressure will be assumed: 61.1 kPa") + return 61.1E3 elif meta_key == 'temperature': try: return headarr[0]['WXOUTTMP'] # Must be in astropy.units.deg_C @@ -225,11 +226,18 @@ def compound_meta(self, headarr, meta_key): return 1.5 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'humidity': try: - return headarr[0]['WXOUTHUM'] / 100.0 + # Humidity expressed as a percentage, not a fraction + return headarr[0]['WXOUTHUM'] except KeyError: msgs.warn("Humidity is not in header") msgs.info("The default relative 
humidity will be assumed: 20 %") - return 0.2 # van Kooten & Izett, arXiv:2208.11794 + return 20.0 # van Kooten & Izett, arXiv:2208.11794 + elif meta_key == 'parangle': + try: + # Parallactic angle expressed in radians + return headarr[0]['PARANG'] * np.pi / 180 + except KeyError: + msgs.error("Parallactic angle is not in header") elif meta_key == 'obstime': return Time(headarr[0]['DATE-END']) else: From 9cb6daa217b1da1735b5fc592932d5a7c5359636 Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 21 Sep 2023 08:16:53 +0100 Subject: [PATCH 29/81] updated docs --- doc/coadd3d.rst | 4 ++-- pypeit/coadd3d.py | 28 +++++++++++++--------------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/doc/coadd3d.rst b/doc/coadd3d.rst index 5d43e692e8..03640ca5e5 100644 --- a/doc/coadd3d.rst +++ b/doc/coadd3d.rst @@ -155,13 +155,13 @@ Sky Subtraction The default behaviour of PypeIt is to subtract the model sky that is derived from the science frame during the reduction. If you would like -to turn off sky subtraction, set the following keyword argument: +to turn off sky subtraction, set the following keyword argument (all lowercase): .. 
code-block:: ini [reduce] [[cube]] - skysub_frame = None + skysub_frame = none If you would like to use a dedicated sky frame for sky subtraction that is separate from the science frame, then you need to provide diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index e112e3b5da..1eaf3eeff3 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1053,10 +1053,8 @@ def load(self): relScale = spec2DObj.scaleimg / relScaleImg # This factor is applied to the sky subtracted science frame # Extract the relevant information from the spec2d file - # sciImg = (spec2DObj.sciimg - skyImg * relSclSky) * relScale # Subtract sky and apply relative illumination - # ivar = spec2DObj.ivarraw / relScale ** 2 - sciImg = spec2DObj.sciimg - ivar = spec2DObj.ivarraw + sciImg = (spec2DObj.sciimg - skyImg * relSclSky) * relScale # Subtract sky and apply relative illumination + ivar = spec2DObj.ivarraw / relScale ** 2 waveimg = spec2DObj.waveimg bpmmask = spec2DObj.bpmmask @@ -1204,15 +1202,15 @@ def load(self): ################################## # Astrometric alignment to HST frames # TODO :: RJC requests this remains here... 
it is only used by RJC - # _ra_sort, _dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, np.max(self._spatscale[ff,:]), self._specscale[ff], - # np.ones(ra_sort.size), this_spatpos[wvsrt], this_specpos[wvsrt], - # this_spatid[wvsrt], spec2DObj.tilts, slits, alignSplines) - # ra_del = np.median(_ra_sort-ra_sort) - # dec_del = np.median(_dec_sort-dec_sort) - # ra_sort = _ra_sort - # dec_sort = _dec_sort - # spec2DObj.head0['RA'] += ra_del - # spec2DObj.head0['DEC'] += dec_del + _ra_sort, _dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, np.max(self._spatscale[ff,:]), self._specscale[ff], + np.ones(ra_sort.size), this_spatpos[wvsrt], this_specpos[wvsrt], + this_spatid[wvsrt], spec2DObj.tilts, slits, alignSplines, darcorr) + ra_del = np.median(_ra_sort-ra_sort) + dec_del = np.median(_dec_sort-dec_sort) + ra_sort = _ra_sort + dec_sort = _dec_sort + spec2DObj.head0['RA'] += ra_del + spec2DObj.head0['DEC'] += dec_del ################################## # If individual frames are to be output without aligning them, @@ -1907,7 +1905,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, dspat, dwave, wghts, spatpos, specpos, - all_spatid, tilts, slits, astrom_trans, + all_spatid, tilts, slits, astrom_trans, all_dar, spat_subpixel=10, spec_subpixel=10): """ This is currently only used by RJC. 
This function adds corrections to the RA and Dec pixels @@ -1937,7 +1935,7 @@ def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, dspat, dwa # Create the subcube flxcube, varcube, bpmcube = subpixellate(subcube_wcs, ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], flux_sort[wv_mask], ivar_sort[wv_mask], wghts[wv_mask], spatpos[wv_mask], specpos[wv_mask], - all_spatid[wv_mask], tilts, slits, astrom_trans, + all_spatid[wv_mask], tilts, slits, astrom_trans, all_dar, voxedge, all_idx=None, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=False) if False: From e83fed735e632b6af451a4db5b3a6e5bb1288609 Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 21 Sep 2023 11:44:02 +0100 Subject: [PATCH 30/81] rm old DAR --- pypeit/coadd3d.py | 48 ++------------------------ pypeit/core/datacube.py | 74 ----------------------------------------- 2 files changed, 2 insertions(+), 120 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 1eaf3eeff3..97dd9c4855 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -9,7 +9,6 @@ import inspect from astropy import wcs, units -from astropy.coordinates import SkyCoord from astropy.io import fits from scipy.interpolate import interp1d import numpy as np @@ -205,11 +204,11 @@ def __init__(self, hdr0, cosdec, spectrograph=None, wave_ref=4500.0): Args: hdr0 (`astropy.io.fits.Header`_): Header of the spec2d file. This input should be retrieved from spec2DObj.head0 + cosdec (:obj:`float`): + Cosine of the target declination. spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional): The name or instance of the spectrograph used to obtain the data. If None, this is pulled from the file header. - cosdec (:obj:`float`): - Cosine of the target declination. 
wave_ref (:obj:`float`, optional): Reference wavelength (The DAR correction will be performed relative to this wavelength) """ @@ -768,49 +767,6 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): # Return the skysub params for this frame return this_skysub, skyImg, skyScl - # def compute_DAR(self, hdr0, waves, cosdec, wave_ref=None): - # """ - # Compute the differential atmospheric refraction correction for a given frame. - # - # Args: - # hdr0 (`astropy.io.fits.Header`_): - # Header of the spec2d file. This input should be retrieved from spec2DObj.head0 - # waves (`numpy.ndarray`_): - # 1D flattened array containing the wavelength of each pixel (units = Angstroms) - # cosdec (:obj:`float`): - # Cosine of the target declination. - # wave_ref (:obj:`float`, optional): - # Reference wavelength (The DAR correction will be performed relative to this wavelength) - # - # Returns: - # `numpy.ndarray`_: 1D differential RA for each wavelength of the input waves array - # `numpy.ndarray`_: 1D differential Dec for each wavelength of the input waves array - # """ - # if wave_ref is None: - # wave_ref = 0.5 * (np.min(waves) + np.max(waves)) - # # Get DAR parameters - # raval = self.spec.get_meta_value([hdr0], 'ra') - # decval = self.spec.get_meta_value([hdr0], 'dec') - # obstime = self.spec.get_meta_value([hdr0], 'obstime') - # pressure = self.spec.get_meta_value([hdr0], 'pressure') - # temperature = self.spec.get_meta_value([hdr0], 'temperature') - # rel_humidity = self.spec.get_meta_value([hdr0], 'humidity') - # coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) - # location = self.spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location) - # # Set a default value - # ra_corr, dec_corr = 0.0, 0.0 - # if pressure == 0.0: - # msgs.warn("Pressure is set to zero - DAR correction will not be performed") - # else: - # msgs.info("DAR correction parameters:" + msgs.newline() + - # " Pressure = {0:f} 
bar".format(pressure) + msgs.newline() + - # " Temperature = {0:f} deg C".format(temperature) + msgs.newline() + - # " Humidity = {0:f}".format(rel_humidity)) - # ra_corr, dec_corr = datacube.correct_dar(waves, coord, obstime, location, - # pressure * units.bar, temperature * units.deg_C, rel_humidity, - # wave_ref=wave_ref) - # return ra_corr, dec_corr - def align_user_offsets(self): """ Align the RA and DEC of all input frames, and then diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 61a8deec7b..c45d1c6105 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -145,80 +145,6 @@ def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pre return np.sum((np.array([coord_altaz.alt.value, coord_altaz.az.value])-datfit)**2) -def correct_dar(wave_arr, coord, obstime, location, pressure, temperature, rel_humidity, - wave_ref=None, numgrid=10): - """ - Apply a differental atmospheric refraction correction to the - input ra/dec. - - This implementation is based on ERFA, which is called through - astropy. - - .. todo:: - There's probably going to be issues when the RA angle is - either side of RA=0. - - Parameters - ---------- - wave_arr : `numpy.ndarray`_ - wavelengths to obtain ra and dec offsets - coord : `astropy.coordinates.SkyCoord`_ - ra, dec positions at the centre of the field - obstime : `astropy.time.Time`_ - time at the midpoint of observation - location : `astropy.coordinates.EarthLocation`_ - observatory location - pressure : :obj:`float` - Outside pressure at `location` - temperature : :obj:`float` - Outside ambient air temperature at `location` - rel_humidity : :obj:`float` - Outside relative humidity at `location`. This should be between 0 to 1. - wave_ref : :obj:`float` - Reference wavelength (The DAR correction will be performed relative to this wavelength) - numgrid : :obj:`int` - Number of grid points to evaluate the DAR correction. 
- - Returns - ------- - ra_diff : `numpy.ndarray`_ - Relative RA shift at each wavelength given by ``wave_arr`` - dec_diff : `numpy.ndarray`_ - Relative DEC shift at each wavelength given by ``wave_arr`` - """ - msgs.info("Performing differential atmospheric refraction correction") - if wave_ref is None: - wave_ref = 0.5*(wave_arr.min() + wave_arr.max()) - - # First create the reference frame and wavelength grid - coord_altaz = coord.transform_to(AltAz(obstime=obstime, location=location)) - wave_grid = np.linspace(wave_arr.min(), wave_arr.max(), numgrid) * units.AA - # Prepare the fit - ra_grid, dec_grid = np.zeros(numgrid), np.zeros(numgrid) - datfit = np.array([coord_altaz.alt.value, coord_altaz.az.value]) - # Loop through all wavelengths - for ww in range(numgrid): - # Fit the differential - args = (coord.ra.value, coord.dec.value, datfit, wave_grid[ww], obstime, location, pressure, temperature, rel_humidity) - #b_popt, b_pcov = opt.curve_fit(dar_fitfunc, tmp, datfit, p0=(0.0, 0.0)) - res_lsq = opt.least_squares(dar_fitfunc, [0.0, 0.0], args=args, xtol=1.0e-10, ftol=None, gtol=None) - if not res_lsq.success: - msgs.warn("DAR correction failed") - # Store the result - ra_grid[ww] = res_lsq.x[0] - dec_grid[ww] = res_lsq.x[1] - - # Generate spline of differentials - spl_ra = interp1d(wave_grid, ra_grid, kind='cubic') - spl_dec = interp1d(wave_grid, dec_grid, kind='cubic') - - # Evaluate the differentials at the input wave_arr - ra_diff = spl_ra(wave_arr) - spl_ra(wave_ref) - dec_diff = spl_dec(wave_arr) - spl_dec(wave_ref) - - return ra_diff, dec_diff - - def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, order=2): """ Using spline representations of the blaze profile, calculate the grating From f1111aff8bf2a7a0831b784f25531c0415dfbfd8 Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 21 Sep 2023 11:45:29 +0100 Subject: [PATCH 31/81] rm HST --- pypeit/coadd3d.py | 66 ----------------------------------------------- 1 file changed, 66 
deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 97dd9c4855..1b6a3c969c 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1155,20 +1155,6 @@ def load(self): this_specpos, this_spatpos = np.where(onslit_gpm) this_spatid = slitid_img_init[onslit_gpm] - ################################## - # Astrometric alignment to HST frames - # TODO :: RJC requests this remains here... it is only used by RJC - _ra_sort, _dec_sort = hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, np.max(self._spatscale[ff,:]), self._specscale[ff], - np.ones(ra_sort.size), this_spatpos[wvsrt], this_specpos[wvsrt], - this_spatid[wvsrt], spec2DObj.tilts, slits, alignSplines, darcorr) - ra_del = np.median(_ra_sort-ra_sort) - dec_del = np.median(_dec_sort-dec_sort) - ra_sort = _ra_sort - dec_sort = _dec_sort - spec2DObj.head0['RA'] += ra_del - spec2DObj.head0['DEC'] += dec_del - ################################## - # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now numpix = ra_sort.size @@ -1857,55 +1843,3 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w residcube *= nc_inverse return flxcube, varcube, bpmcube, residcube return flxcube, varcube, bpmcube - - -def hst_alignment(ra_sort, dec_sort, wave_sort, flux_sort, ivar_sort, dspat, dwave, - wghts, spatpos, specpos, - all_spatid, tilts, slits, astrom_trans, all_dar, - spat_subpixel=10, spec_subpixel=10): - """ - This is currently only used by RJC. This function adds corrections to the RA and Dec pixels - to align the daatcubes to an HST image. - - Process: - * Send away pixel RA, Dec, wave, flux, error. 
- * ------ - * Compute emission line map - - Need to generate full cube around H I gamma - - Fit to continuum and subtract it off - - Sum all flux above continuum - - Estimate error - * MPFIT HST emission line map to - * ------ - * Return updated pixel RA, Dec - """ - from pypeit import astrometry - ############ - ## STEP 1 ## - Create a datacube around Hgamma - ############ - # Only use a small wavelength range - wv_mask = (wave_sort > 4346.0) & (wave_sort < 4358.0) - # Create a WCS for this subcube - subcube_wcs, voxedge, reference_image = datacube.create_wcs(ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], - dspat, dwave) - # Create the subcube - flxcube, varcube, bpmcube = subpixellate(subcube_wcs, ra_sort[wv_mask], dec_sort[wv_mask], wave_sort[wv_mask], flux_sort[wv_mask], ivar_sort[wv_mask], - wghts[wv_mask], spatpos[wv_mask], specpos[wv_mask], - all_spatid[wv_mask], tilts, slits, astrom_trans, all_dar, - voxedge, all_idx=None, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=False) - if False: - hdu = fits.PrimaryHDU(flxcube) - hdu.writeto("tstHg.fits", overwrite=True) - - ############ - ## STEP 2 ## - Create an emission line map of Hgamma - ############ - # Compute an emission line map that is as consistent as possible to an archival HST image - HgMap, HgMapErr = astrometry.fit_cube(flxcube.T, varcube.T, subcube_wcs) - ############ - ## STEP 3 ## - Map the emission line map to an HST image, and vice-versa - ############ - ra_corr, dec_corr = astrometry.map_image(HgMap, HgMapErr, subcube_wcs, ra_sort, dec_sort) - # embed() - return ra_corr, dec_corr \ No newline at end of file From c10d24a813757a8589a69cb7055b35da294f2bc3 Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 21 Sep 2023 12:00:11 +0100 Subject: [PATCH 32/81] warning about parang --- pypeit/spectrographs/gemini_gnirs.py | 1 + pypeit/spectrographs/gtc_osiris.py | 1 + 2 files changed, 2 insertions(+) diff --git a/pypeit/spectrographs/gemini_gnirs.py 
b/pypeit/spectrographs/gemini_gnirs.py index 58c6cd1f33..f7b00b15b0 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -158,6 +158,7 @@ def compound_meta(self, headarr, meta_key): elif meta_key == 'parangle': try: # Humidity expressed as a percentage, not a fraction + msgs.work("Parallactic angle is not available for GNIRS - DAR correction may be incorrect") return headarr[0]['PARANGLE'] # Must be expressed in radians except KeyError: msgs.warn("Parallactic angle is not in header!") diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 03d91f3e18..6b157b221e 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -194,6 +194,7 @@ def compound_meta(self, headarr, meta_key): return 20.0 elif meta_key == 'parangle': try: + msgs.work("Parallactic angle is not available for MAAT - DAR correction may be incorrect") return headarr[0]['PARANG'] # Must be expressed in radians except KeyError: msgs.error("Parallactic angle is not in header") From 128ab96e3470d341f97bd3506716f79f1e6ed1e4 Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 21 Sep 2023 12:00:23 +0100 Subject: [PATCH 33/81] rm warn --- pypeit/coadd3d.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 1b6a3c969c..b966125b27 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1333,7 +1333,7 @@ def coadd(self): self.set_voxel_sampling() # Align the frames - if self.align and False: + if self.align: self.run_align() # Compute the relative weights on the spectra @@ -1359,8 +1359,6 @@ def coadd(self): np.min(self.mnmx_wv[:, :, 1]), self.cubepar['whitelight_range']) if self.combine: - # TODO :: remove the following line... 
it's just temporary - outfile = datacube.get_output_filename(self.spec2d[0], "", False, 1) generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, np.ones(self.all_wghts.size), # all_wghts, self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, self.all_align, self.all_dar, vox_edges, @@ -1804,11 +1802,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w # Transform this to spatial location spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy) spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl]) - # OLD (WRONG) ROUTINE - # ra_coeff = np.polyfit(spatpos, all_ra[this_sl], 1) - # dec_coeff = np.polyfit(spatpos, all_dec[this_sl], 1) - # this_ra = np.polyval(ra_coeff, spatpos_subpix)#ra_spl(spatpos_subpix) - # this_dec = np.polyval(dec_coeff, spatpos_subpix)#dec_spl(spatpos_subpix) + # Interpolate the RA/Dec over the subpixel spatial positions ssrt = np.argsort(spatpos) tmp_ra = all_ra[this_sl] tmp_dec = all_dec[this_sl] From fcb20a5269be8b3cc8235461ee5189fb79835279 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sat, 23 Sep 2023 14:10:06 +0100 Subject: [PATCH 34/81] move skip_skysub --- pypeit/find_objects.py | 5 ----- pypeit/pypeit.py | 3 ++- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index 958bd340e5..3d5d82be94 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -1244,11 +1244,6 @@ def global_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), correction using the sky spectrum, if requested. See Reduce.global_skysub() for parameter definitions. 
""" - # Skip the sky subtraction, if requested by the user - if self.par['reduce']['findobj']['skip_skysub']: - msgs.info("Skipping global sky sub as per user request") - return np.zeros_like(self.sciImg.image) - # Generate a global sky sub for all slits separately global_sky_sep = super().global_skysub(skymask=skymask, update_crmask=update_crmask, trim_edg=trim_edg, show_fit=show_fit, show=show, diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 5676abc0ad..69d9908ab1 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -942,10 +942,11 @@ def extract_one(self, frames, det, sciImg, objFind, initial_sky, sobjs_obj): ## TODO JFH I think all of this about determining the final global sky should be moved out of this method ## and preferably into the FindObjects class. I see why we are doing it like this since for multislit we need # to find all of the objects first using slitmask meta data, but this comes at the expense of a much more complicated - # control sctucture. + # control structure. 
# Update the global sky if 'standard' in self.fitstbl['frametype'][frames[0]] or \ + self.par['reduce']['findobj']['skip_skysub'] or \ self.par['reduce']['findobj']['skip_final_global'] or \ self.par['reduce']['skysub']['user_regions'] is not None: final_global_sky = initial_sky From 8a7b755a4fe84c696bf2a487d66d51fe224e4035 Mon Sep 17 00:00:00 2001 From: joe Date: Sat, 23 Sep 2023 20:31:38 +0200 Subject: [PATCH 35/81] tweaking parameters --- pypeit/spectrographs/keck_kcwi.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index dae8c2686b..af3a86e197 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -279,8 +279,8 @@ def default_pypeit_par(cls): par['reduce']['cube']['combine'] = False # Make separate spec3d files from the input spec2d files # Sky subtraction parameters - par['reduce']['skysub']['no_poly'] = True - par['reduce']['skysub']['bspline_spacing'] = 0.6 + par['reduce']['skysub']['no_poly'] = False # True + par['reduce']['skysub']['bspline_spacing'] = 0.4 par['reduce']['skysub']['joint_fit'] = False # Don't correct flexure by default, but you should use slitcen, From 54d3ea216c928d8a53cb0da57efc9d115acf6cda Mon Sep 17 00:00:00 2001 From: joe Date: Sun, 24 Sep 2023 15:45:07 +0200 Subject: [PATCH 36/81] Changing meta keys and pypeit file format. 
--- pypeit/core/meta.py | 6 +++++- pypeit/spectrographs/keck_kcwi.py | 19 +++++++++++++++---- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/pypeit/core/meta.py b/pypeit/core/meta.py index 0d521e9ff9..3e4ef9c009 100644 --- a/pypeit/core/meta.py +++ b/pypeit/core/meta.py @@ -143,10 +143,14 @@ def define_additional_meta(nlamps=20): 'dichroic': dict(dtype=str, comment='Beam splitter'), 'dispangle': dict(dtype=float, comment='Angle of the disperser', rtol=0.), 'cenwave': dict(dtype=float, comment='Central wavelength of the disperser', rtol=0.), + # TODO what is the difference between dither and dithoff? Also, we should rename these to be + # more clearly the offset along the slit to distinguish from the IFU case of RA_off and DEC_off 'dither': dict(dtype=float, comment='Dither amount in arcsec'), + 'dithoff': dict(dtype=float, comment='Dither offset'), 'dithpat': dict(dtype=str, comment='Dither pattern'), 'dithpos': dict(dtype=str, comment='Dither position'), - 'dithoff': dict(dtype=float, comment='Dither offset'), + 'ra_off': dict(dtype=float, comment='Dither offset in RA'), + 'dec_off': dict(dtype=float, comment='Dither offset in DEC'), 'echangle':dict(dtype=float, comment='Echelle angle'), 'filter1': dict(dtype=str, comment='First filter in optical path'), 'frameno': dict(dtype=str, comment='Frame number provided by instrument software'), diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index af3a86e197..0f6a9d6c2e 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -73,6 +73,9 @@ def init_meta(self): self.meta['mjd'] = dict(ext=0, card='MJD') self.meta['exptime'] = dict(card=None, compound=True) self.meta['airmass'] = dict(ext=0, card='AIRMASS') + self.meta['ra_off'] = dict(ext=0, card='RAOFF') + self.meta['dec_off'] = dict(ext=0, card='DECOFF') + # Extras for config and frametyping self.meta['hatch'] = dict(ext=0, card='HATPOS') @@ -164,7 +167,7 @@ def configuration_keys(self): 
and used to constuct the :class:`~pypeit.metadata.PypeItMetaData` object. """ - return ['dispname', 'decker', 'binning', 'dispangle'] + return ['dispname', 'decker', 'binning', 'cenwave'] def compound_meta(self, headarr, meta_key): """ @@ -279,8 +282,8 @@ def default_pypeit_par(cls): par['reduce']['cube']['combine'] = False # Make separate spec3d files from the input spec2d files # Sky subtraction parameters - par['reduce']['skysub']['no_poly'] = False # True - par['reduce']['skysub']['bspline_spacing'] = 0.4 + par['reduce']['skysub']['no_poly'] = True + par['reduce']['skysub']['bspline_spacing'] = 0.6 par['reduce']['skysub']['joint_fit'] = False # Don't correct flexure by default, but you should use slitcen, @@ -302,7 +305,7 @@ def pypeit_file_keys(self): :class:`~pypeit.metadata.PypeItMetaData` instance to print to the :ref:`pypeit_file`. """ - return super().pypeit_file_keys() + ['idname', 'calpos'] + return super().pypeit_file_keys() + ['ra_off', 'dec_off', 'idname', 'calpos'] def check_frame_type(self, ftype, fitstbl, exprng=None): """ @@ -852,6 +855,8 @@ def init_meta(self): super().init_meta() self.meta['dispname'] = dict(ext=0, card='BGRATNAM') self.meta['dispangle'] = dict(ext=0, card='BGRANGLE', rtol=0.01) + self.meta['cenwave'] = dict(ext=0, card='BCWAVE', rtol=0.01) + def raw_header_cards(self): """ @@ -1172,6 +1177,7 @@ def init_meta(self): super().init_meta() self.meta['dispname'] = dict(ext=0, card='RGRATNAM') self.meta['dispangle'] = dict(ext=0, card='RGRANGLE', rtol=0.01) + self.meta['cenwave'] = dict(ext=0, card='RCWAVE', rtol=0.01) def raw_header_cards(self): """ @@ -1243,6 +1249,11 @@ def default_pypeit_par(cls): par['calibrations']['flatfield']['slit_illum_smooth_npix'] = 5 # Sufficiently small value so less structure in relative weights par['calibrations']['flatfield']['fit_2d_det_response'] = True # Include the 2D detector response in the pixelflat. 
+ # Sky subtraction parameters + par['reduce']['skysub']['no_poly'] = False + par['reduce']['skysub']['bspline_spacing'] = 0.4 + par['reduce']['skysub']['joint_fit'] = False + return par @staticmethod From 1c847406da196f65dbe828736d4f388c24d6b3f0 Mon Sep 17 00:00:00 2001 From: joe Date: Sun, 24 Sep 2023 16:23:21 +0200 Subject: [PATCH 37/81] Changing meta keys and pypeit file format. --- pypeit/core/meta.py | 1 + pypeit/spectrographs/keck_kcwi.py | 41 ++++++++++++++++++++++--------- 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/pypeit/core/meta.py b/pypeit/core/meta.py index 3e4ef9c009..3d8092e853 100644 --- a/pypeit/core/meta.py +++ b/pypeit/core/meta.py @@ -149,6 +149,7 @@ def define_additional_meta(nlamps=20): 'dithoff': dict(dtype=float, comment='Dither offset'), 'dithpat': dict(dtype=str, comment='Dither pattern'), 'dithpos': dict(dtype=str, comment='Dither position'), + 'posang': dict(dtype=float, comment='Position angle of the observation (degrees, positive is East from North)'), 'ra_off': dict(dtype=float, comment='Dither offset in RA'), 'dec_off': dict(dtype=float, comment='Dither offset in DEC'), 'echangle':dict(dtype=float, comment='Echelle angle'), diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 0f6a9d6c2e..ca2ef17e62 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -73,6 +73,7 @@ def init_meta(self): self.meta['mjd'] = dict(ext=0, card='MJD') self.meta['exptime'] = dict(card=None, compound=True) self.meta['airmass'] = dict(ext=0, card='AIRMASS') + self.meta['posang'] = dict(card=None, compound=True) self.meta['ra_off'] = dict(ext=0, card='RAOFF') self.meta['dec_off'] = dict(ext=0, card='DECOFF') @@ -106,6 +107,7 @@ def init_meta(self): self.meta['lampstat{:02d}'.format(len(lamp_names) + 1)] = dict(ext=0, card='FLSPECTR') self.meta['lampshst{:02d}'.format(len(lamp_names) + 1)] = dict(ext=0, card=None, default=1) + def config_specific_par(self, scifile, 
inp_par=None): """ Modify the PypeIt parameters to hard-wired values used for @@ -243,6 +245,21 @@ def compound_meta(self, headarr, meta_key): msgs.error("Parallactic angle is not in header") elif meta_key == 'obstime': return Time(headarr[0]['DATE-END']) + elif meta_key == 'posang': + hdr = headarr[0] + # Get rotator position + if 'ROTPOSN' in hdr: + rpos = hdr['ROTPOSN'] + else: + rpos = 0. + if 'ROTREFAN' in hdr: + rref = hdr['ROTREFAN'] + else: + rref = 0. + # Get the offset and PA + rotoff = 0.0 # IFU-SKYPA offset (degrees) + skypa = rpos + rref # IFU position angle (degrees) + return skypa else: msgs.error("Not ready for this compound meta") @@ -305,7 +322,7 @@ def pypeit_file_keys(self): :class:`~pypeit.metadata.PypeItMetaData` instance to print to the :ref:`pypeit_file`. """ - return super().pypeit_file_keys() + ['ra_off', 'dec_off', 'idname', 'calpos'] + return super().pypeit_file_keys() + ['posang', 'ra_off', 'dec_off', 'idname', 'calpos'] def check_frame_type(self, ftype, fitstbl, exprng=None): """ @@ -580,18 +597,20 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): # Create a coordinate coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) + skypa = self.compound_meta([hdr], 'posang') + # Now in compont_meta # Get rotator position - if 'ROTPOSN' in hdr: - rpos = hdr['ROTPOSN'] - else: - rpos = 0. - if 'ROTREFAN' in hdr: - rref = hdr['ROTREFAN'] - else: - rref = 0. + #if 'ROTPOSN' in hdr: + # rpos = hdr['ROTPOSN'] + #else: + # rpos = 0. + #if 'ROTREFAN' in hdr: + # rref = hdr['ROTREFAN'] + #else: + # rref = 0. 
# Get the offset and PA - rotoff = 0.0 # IFU-SKYPA offset (degrees) - skypa = rpos + rref # IFU position angle (degrees) + #rotoff = 0.0 # IFU-SKYPA offset (degrees) + #skypa = rpos + rref # IFU position angle (degrees) crota = np.radians(-(skypa + rotoff)) # Calculate the fits coordinates From 4f6c5ca955fb6ded18cb0bd7a31b2cb43b1834a2 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 24 Sep 2023 17:04:13 +0100 Subject: [PATCH 38/81] PR comments --- pypeit/coadd3d.py | 40 ++++++++------ pypeit/core/datacube.py | 118 ++++++---------------------------------- pypeit/inputfiles.py | 4 +- pypeit/par/pypeitpar.py | 20 +++++-- 4 files changed, 55 insertions(+), 127 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index b966125b27..fc1819974e 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -38,6 +38,8 @@ class DataCube(datamodel.DataContainer): .. include:: ../include/class_datamodel_datacube.rst Args: + wave (`numpy.ndarray`_): + A 1D numpy array containing the wavelength array for convenience (nwave) flux (`numpy.ndarray`_): The science datacube (nwave, nspaxel_y, nspaxel_x) sig (`numpy.ndarray`_): @@ -67,9 +69,12 @@ class DataCube(datamodel.DataContainer): Build from PYP_SPEC """ - version = '1.1.0' + version = '1.2.0' - datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating, + datamodel = {'wave': dict(otype=np.ndarray, atype=np.floating, + descr='Wavelength of each slice in the spectral direction. 
' + 'The units are Angstroms.'), + 'flux': dict(otype=np.ndarray, atype=np.floating, descr='Flux datacube in units of counts/s/Ang/arcsec^2 or ' '10^-17 erg/s/cm^2/Ang/arcsec^2'), 'sig': dict(otype=np.ndarray, atype=np.floating, @@ -91,7 +96,7 @@ class DataCube(datamodel.DataContainer): 'spect_meta' ] - def __init__(self, flux, sig, bpm, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, + def __init__(self, wave, flux, sig, bpm, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, fluxed=None): args, _, _, values = inspect.getargvalues(inspect.currentframe()) @@ -322,12 +327,12 @@ def get_instance(cls, spec2dfiles, opts, spectrograph=None, par=None, det=1, ove spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, show=show, debug=debug) - def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite=False, + def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, overwrite=False, show=False, debug=False): """ Args: - files (:obj:`list`): + spec2dfiles (:obj:`list`): List of all spec2D files opts (:obj:`dict`): Options associated with each spec2d file @@ -350,14 +355,14 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite Show QA for debugging. """ - self.spec2d = files - self.numfiles = len(files) + self.spec2d = spec2dfiles + self.numfiles = len(spec2dfiles) self.opts = opts self.overwrite = overwrite # Check on Spectrograph input if spectrograph is None: - with fits.open(files[0]) as hdu: + with fits.open(spec2dfiles[0]) as hdu: spectrograph = hdu[0].header['PYP_SPEC'] if isinstance(spectrograph, str): @@ -369,10 +374,8 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite self.specname = spectrograph.name # Grab the parset, if not provided - if par is None: - # TODO :: Use config_specific_par instead? 
- par = self.spec.default_pypeit_par() - self.par = par + self.par = self.spec.default_pypeit_par() if par is None else par + # Extract some parsets for simplicity self.cubepar = self.par['reduce']['cube'] self.flatpar = self.par['calibrations']['flatfield'] @@ -391,7 +394,7 @@ def __init__(self, files, opts, spectrograph=None, par=None, det=None, overwrite self._dwv = self.cubepar['wave_delta'] # linear binning size in wavelength direction (in Angstroms) # Extract some commonly used variables - self.method = self.cubepar['method'].lower() + self.method = self.cubepar['method'] self.combine = self.cubepar['combine'] self.align = self.cubepar['align'] # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. @@ -685,7 +688,7 @@ def set_default_skysub(self): self.skysub_default = "none" self.skyImgDef = np.array([0.0]) # Do not perform sky subtraction self.skySclDef = np.array([0.0]) # Do not perform sky subtraction - elif self.cubepar['skysub_frame'].lower() == "image": + elif self.cubepar['skysub_frame'] == "image": msgs.info("The sky model in the spec2d science frames will be used for sky subtraction" + msgs.newline() + "(unless specific skysub frames have been specified)") self.skysub_default = "image" @@ -1614,6 +1617,10 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s else: flxcube, varcube, bpmcube = subpix + # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) + nspec = flxcube.shape[2] + wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] # The factor 1.0E10 convert to Angstroms + # Check if the user requested a white light image if whitelight_range is not None: # Grab the WCS of the white light image @@ -1627,9 +1634,6 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s whitelight_range[0], whitelight_range[1])) # Get the output filename for the white light image 
out_whitelight = datacube.get_output_whitelight_filename(outfile) - nspec = flxcube.shape[2] - # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) - wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] whitelight_img = datacube.make_whitelight_fromcube(flxcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) @@ -1637,7 +1641,7 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s # Write out the datacube msgs.info("Saving datacube as: {0:s}".format(outfile)) - final_cube = DataCube(flxcube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, + final_cube = DataCube(wave, flxcube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, sensfunc=sensfunc, fluxed=fluxcal) final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index c45d1c6105..01bb64ba3a 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -183,7 +183,7 @@ def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, ord return grat_corr -def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): +def extract_standard_spec(stdcube, subpixel=20): """ Extract a spectrum of a standard star from a datacube @@ -193,9 +193,6 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): An HDU list of fits files subpixel : int Number of pixels to subpixelate spectrum when creating mask - method : str - Method used to extract standard star spectrum. 
Currently, only 'boxcar' - is supported Returns ------- @@ -258,103 +255,22 @@ def extract_standard_spec(stdcube, subpixel=20, method='boxcar'): sky_val = np.sum(wl_img[:, :, np.newaxis] * smask) / np.sum(smask) wl_img -= sky_val - if method == 'boxcar': - msgs.info("Extracting a boxcar spectrum of datacube") - # Construct an image that contains the fraction of flux included in the - # boxcar extraction at each wavelength interval - norm_flux = wl_img[:,:,np.newaxis] * mask - norm_flux /= np.sum(norm_flux) - # Extract boxcar - cntmask = np.logical_not(bpmcube) * mask # Good pixels within the masked region around the standard star - flxscl = (norm_flux * cntmask).sum(0).sum(0) # This accounts for the flux that is missing due to masked pixels - scimask = flxcube * cntmask - varmask = varcube * cntmask**2 - nrmcnt = utils.inverse(flxscl) - box_flux = scimask.sum(0).sum(0) * nrmcnt - box_var = varmask.sum(0).sum(0) * nrmcnt**2 - box_gpm = flxscl > 1/3 # Good pixels are those where at least one-third of the standard star flux is measured - # Setup the return values - ret_flux, ret_var, ret_gpm = box_flux, box_var, box_gpm - # elif method == 'gauss2d': - # msgs.error("Use method=boxcar... 
this method has not been thoroughly tested") - # # Generate a mask - # fitmask = np.logical_not(bpmcube) * mask - # # Setup the coordinates - # x = np.linspace(0, flxcube.shape[0] - 1, flxcube.shape[0]) - # y = np.linspace(0, flxcube.shape[1] - 1, flxcube.shape[1]) - # z = np.linspace(0, flxcube.shape[2] - 1, flxcube.shape[2]) - # xx, yy, zz = np.meshgrid(x, y, z, indexing='ij') - # # Normalise the flux in each wavelength channel - # scispec = (flxcube * fitmask).sum(0).sum(0).reshape((1, 1, flxcube.shape[2])) - # cntspec = fitmask.sum(0).sum(0).reshape((1, 1, flxcube.shape[2])) - # # These operations are all inverted, because we need to divide flxcube by scispec - # cntspec *= utils.inverse(scispec) - # cubefit = flxcube * cntspec - # cubesigfit = np.sqrt(varcube) * cntspec - # # Setup the fit params - # ww = np.where(fitmask) - # initial_guess = (1, idx_max[0], idx_max[1], 0.0, 0.0, 2, 2, 0, 0) - # bounds = ([-np.inf, 0, 0, -np.inf, -np.inf, 0.5, 0.5, -np.pi, -np.inf], - # [np.inf,wl_img.shape[0],wl_img.shape[1],np.inf, np.inf, wl_img.shape[0],wl_img.shape[0],np.pi,np.inf]) - # msgs.info("Fitting a 2D Gaussian to the datacube") - # popt, pcov = opt.curve_fit(gaussian2D_cube, (xx[ww], yy[ww], zz[ww]), cubefit[ww], - # sigma=cubesigfit[ww], bounds=bounds, p0=initial_guess) - # # Subtract off the best-fitting continuum - # popt[-1] = 0 - # # Generate the best-fitting model to be used as an optimal profile - # model = gaussian2D_cube((xx, yy, zz), *popt).reshape(flxcube.shape) - # numim = flxcube.shape[0]*flxcube.shape[1] - # - # # Optimally extract - # msgs.info("Optimally extracting...") - # sciimg = (flxcube*mask).reshape((numim, numwave)).T - # ivar = utils.inverse((varcube*mask**2).reshape((numim, numwave)).T) - # optmask = fitmask.reshape((numim, numwave)).T - # waveimg = np.ones((numwave, numim)) # Just a dummy array - not needed - # skyimg = np.zeros((numwave, numim)) # Just a dummy array - not needed - # thismask = np.ones((numwave, numim)) # Just a dummy 
array - not needed - # oprof = model.reshape((numim, numwave)).T - # sobj = specobj.SpecObj('SlicerIFU', 'DET01', SLITID=0) - # extract.extract_optimal(sciimg, ivar, optmask, waveimg, skyimg, thismask, oprof, sobj) - # opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG**2, sobj.OPT_MASK - # # Setup the return values - # ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm - # elif method == 'optimal': - # msgs.error("Use method=boxcar... this method has not been thoroughly tested") - # # First do a boxcar along one dimension - # msgs.info("Collapsing datacube to a 2D image") - # omask = mask+smask - # idx_sum = 0 - # cntmask = np.logical_not(bpmcube) * omask - # scimask = flxcube * cntmask - # varmask = varcube * cntmask**2 - # cnt_spec = cntmask.sum(idx_sum) * utils.inverse(omask.sum(idx_sum)) - # nrmcnt = utils.inverse(cnt_spec) - # box_sciimg = scimask.sum(idx_sum) * nrmcnt - # box_scivar = varmask.sum(idx_sum) * nrmcnt**2 - # box_sciivar = utils.inverse(box_scivar) - # # Transpose for optimal - # box_sciimg = box_sciimg.T - # box_sciivar = box_sciivar.T - # - # # Prepare for optimal - # msgs.info("Starting optimal extraction") - # thismask = np.ones(box_sciimg.shape, dtype=bool) - # nspec, nspat = thismask.shape[0], thismask.shape[1] - # slit_left = np.zeros(nspec) - # slit_right = np.ones(nspec)*(nspat-1) - # tilts = np.outer(np.linspace(0.0,1.0,nspec), np.ones(nspat)) - # waveimg = np.outer(wave.value, np.ones(nspat)) - # global_sky = np.zeros_like(box_sciimg) - # # Find objects and then extract - # sobj = findobj_skymask.objs_in_slit(box_sciimg, thismask, slit_left, slit_right) - # skysub.local_skysub_extract(box_sciimg, box_sciivar, tilts, waveimg, global_sky, thismask, slit_left, - # slit_right, sobj, model_noise=False) - # opt_flux, opt_var, opt_gpm = sobj.OPT_COUNTS[0,:], sobj.OPT_COUNTS_SIG[0,:]**2, sobj.OPT_MASK[0,:] - # # Setup the return values - # ret_flux, ret_var, ret_gpm = opt_flux, opt_var, opt_gpm - else: - msgs.error("Unknown 
extraction method: ", method) + msgs.info("Extracting a boxcar spectrum of datacube") + # Construct an image that contains the fraction of flux included in the + # boxcar extraction at each wavelength interval + norm_flux = wl_img[:,:,np.newaxis] * mask + norm_flux /= np.sum(norm_flux) + # Extract boxcar + cntmask = np.logical_not(bpmcube) * mask # Good pixels within the masked region around the standard star + flxscl = (norm_flux * cntmask).sum(0).sum(0) # This accounts for the flux that is missing due to masked pixels + scimask = flxcube * cntmask + varmask = varcube * cntmask**2 + nrmcnt = utils.inverse(flxscl) + box_flux = scimask.sum(0).sum(0) * nrmcnt + box_var = varmask.sum(0).sum(0) * nrmcnt**2 + box_gpm = flxscl > 1/3 # Good pixels are those where at least one-third of the standard star flux is measured + # Setup the return values + ret_flux, ret_var, ret_gpm = box_flux, box_var, box_gpm # Convert from counts/s/Ang/arcsec**2 to counts/s/Ang arcsecSQ = 3600.0*3600.0*(stdwcs.wcs.cdelt[0]*stdwcs.wcs.cdelt[1]) diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py index 126ad2f2b2..292ec31fb5 100644 --- a/pypeit/inputfiles.py +++ b/pypeit/inputfiles.py @@ -803,9 +803,9 @@ def options(self): if scale_corr is None: opts['scale_corr'] = [None]*len(self.filenames) elif len(scale_corr) == 1 and len(self.filenames) > 1: - opts['scale_corr'] = scale_corr*len(self.filenames) + opts['scale_corr'] = scale_corr.lower()*len(self.filenames) elif len(scale_corr) != 0: - opts['scale_corr'] = scale_corr + opts['scale_corr'] = scale_corr.lower() # Get the skysub files skysub_frame = self.path_and_files('skysub_frame', skip_blank=False, check_exists=False) diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index 58269d3876..3959c63fb0 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -1455,7 +1455,7 @@ def __init__(self, slit_spec=None, relative_weights=None, align=None, combine=No 'into subpixels, and assigns each subpixel to a voxel of the 
datacube. Flux is conserved, ' \ 'but voxels are correlated, and the error spectrum does not account for covariance between ' \ 'adjacent voxels. See also, spec_subpixel and spat_subpixel. ' \ - '(2) "NGP" (nearest grid point) - this algorithm is effectively a 3D histogram. Flux is ' \ + '(2) "ngp" (nearest grid point) - this algorithm is effectively a 3D histogram. Flux is ' \ 'conserved, voxels are not correlated, however this option suffers the same downsides as ' \ 'any histogram; the choice of bin sizes can change how the datacube appears. This algorithm ' \ 'takes each pixel on the spec2d frame and puts the flux of this pixel into one voxel in the ' \ @@ -1464,9 +1464,6 @@ def __init__(self, slit_spec=None, relative_weights=None, align=None, combine=No 'pixels that contribute to the same voxel are inverse variance weighted (e.g. if two ' \ 'pixels have the same variance, the voxel would be assigned the average flux of the two ' \ 'pixels).' - # '(3) "resample" - this algorithm resamples the spec2d frames into a datacube. ' \ - # 'Flux is conserved, but voxels are correlated, and the error spectrum does not account ' \ - # 'for covariance between neighbouring pixels. ' \ defaults['spec_subpixel'] = 5 dtypes['spec_subpixel'] = int @@ -1584,9 +1581,20 @@ def from_dict(cls, cfg): return cls(**kwargs) def validate(self): - allowed_methods = ["subpixel", "NGP"]#, "resample" + # Check the method options + allowed_methods = ["subpixel", "ngp"] if self.data['method'] not in allowed_methods: - raise ValueError("The 'method' must be one of:\n"+", ".join(allowed_methods)) + # Check if the supplied name exists + if not os.path.exists(self.data['method']): + raise ValueError("The 'method' must be one of:\n"+", ".join(allowed_methods) + + "\nor, the relative path to a spec2d file.") + # Check the skysub options + allowed_skysub_options = ["none", "image", ""] # Note, "None" is treated as None which gets assigned to the default value "image". 
+ if self.data['skysub_frame'] not in allowed_skysub_options: + # Check if the supplied name exists + if not os.path.exists(self.data['method']): + raise ValueError("The 'skysub_frame' must be one of:\n" + ", ".join(allowed_methods) + + "\nor, the relative path to a spec2d file.") if len(self.data['whitelight_range']) != 2: raise ValueError("The 'whitelight_range' must be a two element list of either NoneType or float") From 729403e791fe326806bb2d4f2f7530ff4bbf1bb6 Mon Sep 17 00:00:00 2001 From: joe Date: Sun, 24 Sep 2023 18:13:20 +0200 Subject: [PATCH 39/81] Flipped red side images to try to deal with WCS bug. --- pypeit/spectrographs/keck_kcwi.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index ca2ef17e62..96a75f69c9 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -257,7 +257,6 @@ def compound_meta(self, headarr, meta_key): else: rref = 0. # Get the offset and PA - rotoff = 0.0 # IFU-SKYPA offset (degrees) skypa = rpos + rref # IFU position angle (degrees) return skypa else: @@ -609,8 +608,8 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): #else: # rref = 0. # Get the offset and PA - #rotoff = 0.0 # IFU-SKYPA offset (degrees) #skypa = rpos + rref # IFU position angle (degrees) + rotoff = 0.0 # IFU-SKYPA offset (degrees) crota = np.radians(-(skypa + rotoff)) # Calculate the fits coordinates @@ -1151,7 +1150,7 @@ def get_detector_par(self, det, hdu=None): dataext = 0, specaxis = 0, specflip = specflip, - spatflip = False, + spatflip = True, # TODO There is a flip in the slices relative to KCWI platescale = 0.145728, # arcsec/pixel TODO :: Need to double check this darkcurr = None, # e-/pixel/hour TODO :: Need to check this. 
mincounts = -1e10, From 0dbdb7bce7df3d7e1e51bbb9a1570204feca1967 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 24 Sep 2023 20:09:11 +0100 Subject: [PATCH 40/81] rm changes --- CHANGES.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 18f171c3e1..8c1bf36484 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,8 +1,3 @@ -1.14.0dev ---------- -- Refactored ``coadd3d()`` - - 1.13.1dev --------- From 04838e88e8f7dfd12ce1c413218d4bf27a39d749 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 25 Sep 2023 14:12:22 +0100 Subject: [PATCH 41/81] cp ref_index --- pypeit/coadd3d.py | 3 +- pypeit/core/ref_index.py | 738 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 739 insertions(+), 2 deletions(-) create mode 100644 pypeit/core/ref_index.py diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index fc1819974e..ec1b4ea222 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -12,12 +12,11 @@ from astropy.io import fits from scipy.interpolate import interp1d import numpy as np -import ref_index # TODO :: Could just copy this code into the DAR class? from pypeit import msgs from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils from pypeit.core.flexure import calculate_image_phase -from pypeit.core import datacube, flux_calib, parse +from pypeit.core import datacube, flux_calib, parse, ref_index from pypeit.spectrographs.util import load_spectrograph # Use a fast histogram for speed! diff --git a/pypeit/core/ref_index.py b/pypeit/core/ref_index.py new file mode 100644 index 0000000000..1a21d83a24 --- /dev/null +++ b/pypeit/core/ref_index.py @@ -0,0 +1,738 @@ +""" +Module containing the core methods for calculating the refractive index +of the atmosphere based on current conditions. 
+
+Note that this code is directly copied (on 25 September 2023) from the following
+repository: https://github.com/phn/ref_index
+
+CREDIT :: All equations used in this module come from the documentation for the
+NIST online refractive index calculator, written by Jack A. Stone and Jay H. Zimmerman,
+and is available here:
+https://emtoolbox.nist.gov/Wavelength/Documentation.asp
+
+Credit to the original source
+################
+Refractive index of air.
+
+NIST provides an online calculator for calculating refractive index of
+air, for light of a certain wave length, under varying atmospheric
+conditions. This module implements the equations provided in the
+documentation for the online calculator.
+
+In addition to calculating the refractive index, this module also has
+functions for converting wave length of light in vacuum to that in air,
+and vice-versa.
+
+The documentation for the online calculator is provided at
+http://emtoolbox.nist.gov/Wavelength/Documentation.asp, and includes a
+link to the online calculator.
+
+The following comments are based on the discussions presented in the
+NIST documentation. They are intended as a brief overview. See
+http://emtoolbox.nist.gov/Wavelength/Documentation.asp, for detailed
+discussions.
+
+Refractive index of air can be calculated using two different
+algorithms: one due to Edlén (updated by Birch and Down), and one due
+to Ciddor. The latter has been adopted by the International Association
+of Geodesy (IAG) as the reference equation for calculating refractive
+index of air. Functions for calculating refractive index using either
+of these are defined in this module.
+
+The vacuum to air and air to vacuum wave length conversion functions in
+this module use the Ciddor equation, in the form presented in the NIST
+documentation.
+
+Uncertainties in refractive index, and hence in wave length
+conversions, due to uncertainties in measured values of temperature,
+pressure, and humidity exceed that due to the intrinsic uncertainty
+in the equations used.
+
+An uncertainty of 1e-6 in refractive index can result from a
+combination of:
+
+  + an error of 1°C (1.8 °F) in air temperature
+
+  + an error of 0.4kPa (3mm of Hg) in air pressure
+
+  + an error of 50% in relative humidity at sufficiently high air
+    temperatures (near 35°C)
+
+Valid ranges for input parameters for the refractive index calculations
+are presented below. The online calculator issues a warning if input
+parameters are outside a smaller interval within the maximum
+range. Functions in this module do not raise a warning by default. But
+they accept a keyword ``warn``, which when set to ``True`` will result
+in warnings, when the input parameters are outside the accepted range.
+
+  + Wavelength [300nm - 1700nm]
+
+    Warning is issued if value is outside [350nm - 1600nm].
+
+  + Pressure [10kPa - 140kPa]
+
+    Warning is issued if value is outside [60kPa - 120kPa].
+
+  + Temperature [-40∘C - 100∘C].
+
+    Warning is issued if value is outside [0∘C - 40∘C].
+
+  + Humidity [0 - 100]
+
+    Can be given as relative humidity, dew point, frost point or
+    partial pressure of water vapour. A warning is given if the mole
+    fraction of water vapour exceeds 20% or, equivalently, relative
+    humidity exceeds 85%. A warning is issued if relative humidity is
+    less than 1%.
+
+  + CO2 concentration [0µmole/mole - 2000µmole/mole]
+
+    The common value to use is 450. Outdoor values are rarely below 300
+    and indoor can be as high as 600. A difference of 150 will lead to
+    a difference of only ~ 2e-8 in index of refraction.
+
+    A warning is issued if a value other than 450 is used.
+ + +In astronomy, the convention is to use the refraction correction for +wave length greater than 200nm, eventhough the equations are not +strictly valid at wave lengths shorter than 300nm. For example, the +popular IDLASTRO IDL code vactoair.pro and airtovac.pro will accept any +wave length greater than 2000Å. + +To accomodate this type of usage, instead of limiting the possible +input wave lengths, functions in this module will accept any wave +length value. It is up to the user to decide if a particular wave +length is to be used as an input to the equations. + +Comparison with the IDLASTRO vactoair.pro and airtovac.pro algorithms +show that the equivalent functions in this module, vac2air and air2vac, +give results that agree to within 1e-4nm, over a range of wavelengths +from 200nm to 1700nm. This uncertainty translates to a velocity +difference of 150m/s to 17m/s, over the wave length range 1700nm to +200nm. + +The IDLASTRO code uses a fixed value of temperature and humidity which +is not documented in the code. The above comparison was carried out at +a temperature of 15∘C and a relative humidity of 0. + +The IDL code used for testing was downloaded on 2011/10/07. The +revision history indicates that the IDL code in vactoair.pro and +airtovac.pro were last modified in March 2011. + +The PypeIt developers have made some minor adjustments to the code. + +Original author details: +:author: Prasanth Nair +:contact: prasanthhn@gmail.com +:license: BSD (http://www.opensource.org/licenses/bsd-license.php) +################### + +.. 
include:: ../include/links.rst + +""" + +from __future__ import division +from __future__ import print_function +import numpy as np +from pypeit import msgs + + +def f2k(f): + """Converts Fahrenheit to Kelvin.""" + return (f - 32.0) * (100.0 / 180.0) + 273.15 + + +def k2f(k): + """Converts Kelvin to Fahrenheit.""" + return (k - 273.15) * (180.0 / 100.0) + 32.0 + + +def c2k(c): + """Converts Celsius to Kelvin.""" + return c + 273.15 + + +def k2c(k): + """Converts Kelvin to Celsius.""" + return k - 273.15 + + +def c2f(c): + """Converts Celsius to Fahrenheit.""" + return c * (180.0 / 100.0) - 32.0 + + +def f2c(f): + """Converts Fahrenheit to Celsius.""" + return (f - 32.0) * (100.0 / 180.0) + + +def svp_water(t): + """Saturation vapour pressure over water at given temperature. + + Parameters + ---------- + t : float + Air temperature in degree Celsius. + + Returns + ------- + p_sv : float + Saturation vapour pressure over water, at the given + temperature, in Pascal. + + Notes + ----- + From section A-I of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + K1 = 1.16705214528e+03 + K2 = -7.24213167032e+05 + K3 = -1.70738469401e+01 + K4 = 1.20208247025e+04 + K5 = -3.23255503223e+06 + K6 = 1.49151086135e+01 + K7 = -4.82326573616e+03 + K8 = 4.05113405421e+05 + K9 = -2.38555575678e-01 + K10 = 6.50175348448e+02 + + T = t + 273.15 + omega = T + K9 / (T - K10) + A = omega ** 2 + K1 * omega + K2 + B = K3 * omega ** 2 + K4 * omega + K5 + C = K6 * omega ** 2 + K7 * omega + K8 + X = -B + np.sqrt(B ** 2 - 4 * A * C) + + p_sv = 1.0e6 * ((2.0 * C / X) ** 4) + + return p_sv + + +def svp_ice(t): + """Saturation vapour pressure over ice at given temperature. + + + Parameters + ---------- + t : float + Temperature in degree Celsius. + + Returns + ------- + p_sv : float + Saturation vapour pressure over ice, at the given + temperature, in Pascal. + + Notes + ----- + From section A-I of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. 
+ + """ + A1 = -13.928169 + A2 = 34.7078238 + + t += 273.15 + theta = t / 273.16 + Y = A1 * (1 - theta ** -1.5) + A2 * (1 - theta ** -1.25) + + p_sv = 611.657 * np.exp(Y) + + return p_sv + + +def dew_point_wvpp(td): + """Water vapour saturation pressure, given dew point temperature.""" + return svp_water(td) + + +def frost_point_wvpp(tf): + """Water vapour saturation pressure, given frost point temperature.""" + return svp_ice(tf) + + +def rh2wvpp(rh, t): + """Convert relative humidity to water vapour partial pressure. + + Parameters + ---------- + rh : float + Relative humidity as a number between 0 and 100. + t : float + Temperature in degree Celsius. + + Returns + ------- + p_sv : float + Water vapour partial pressure, in Pascal. + + Notes + ----- + See section A-II of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + # t > 0 according to documentation. + if t >= 0: + p_sv = svp_water(t) + elif t < 0: + p_sv = svp_ice(t) + + return (rh / 100.0) * p_sv + + +def f_factor(p, t): + """Enhancement factor for calculating mole fraction. + + Parameters + ---------- + p : float + Pressure in Pascal. + t : float + Temperature in degree Celsius. + + Returns + ------- + f : float + Enhancement factor needed in calculation of mole fraction. + + Notes + ----- + See section A-II of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + alpha = 1.00062 + beta = 3.14e-8 + gamma = 5.60e-7 + + return alpha + beta * p + gamma * (t ** 2) + + +def dew_point_mole_fraction(p, t): + """Water vapour mole fraction for given dew point temperature. + Parameters + ---------- + p : float + Pressure in Pascal. + t : float + Temperature in degree Celsius. + + Returns + ------- + xv : float + Mole fraction. + + Notes + ----- + See section A-II of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + return f_factor(p, t) * dew_point_wvpp(t) / p + + +def frost_point_mole_fraction(p, t): + """Water vapour mole fraction for given frost point temperature. 
+ Parameters + ---------- + p : float + Pressure in Pascal. + t : float + Temperature in degree Celsius. + + Returns + ------- + xv : float + Mole fraction. + + Notes + ----- + See section A-II of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + return f_factor(p, t) * frost_point_wvpp(t) / p + + +def rh2mole_fraction(rh, p, t): + """Water vapour mole fraction from relative humidity. + + Parameters + ---------- + rh : float + Relative humidity as a number between 0 and 100. + p : float + Pressure in Pascal. + t : float + Temperature in Kelvin. + + Returns + ------- + xv : float + Mole fraction. + + Notes + ----- + See section A-II of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + return f_factor(p, t) * rh2wvpp(rh, t) / p + + +def pp2mole_fraction(pv, p, t): + """Water vapour mole fraction from partial pressure. + + Parameters + ---------- + rh : float + Relative humidity as a number between 0 and 100. + p : float + Pressure in Pascal. + t : float + Temperature in Kelvin. + + Returns + ------- + xv : float + Mole fraction. + + Notes + ----- + See section A-II of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + return f_factor(p, t) * pv / p + + +def _check_range(**kwargs): + """Return True if value is inside accepted range.""" + if not (350 <= kwargs.get('wave', 633) <= 1600): + msgs.warn("Wave length outside [350nm, 1600nm].") + if not (60000 <= kwargs.get('p', 101325) <= 120000): + msgs.warn("Pressure outside [60000Pa - 120000Pa].") + if not (0 <= kwargs.get('t', 20) <= 40): + msgs.warn("Temperature outside [0C - 40C].") + if not (1 < kwargs.get('rh', 50) <= 85): + msgs.warn("Relative humidity outside (1 - 85].") + if not (kwargs.get('xv', 0.4) >= 0.2): + msgs.warn("Mole fraction less than 0.2.") + if kwargs.get('co2', 450) != 450: + msgs.warn("CO2 concentration is not 450.") + + +def ciddor_ri(wave, t, p, xv, co2=450, warn=False): + """Refractive index of air according to the Ciddor equation. 
+ + Parameters + ---------- + wave : float or Numpy array of float + Wavelength in vacuum, in nano-meters. Valid wavelength range is + 300nm - 1700nm. + t : float + Temperature in degree Celsius. Valid temperate range is -40 to + 100 degree Celsius. + p : float + Pressure in Pascal. Valid range is from 10kPa - 140 kPa. + xv : float + Water vapour mole fraction, as a number between 0 and + 1. Default is set to 0. + co2 : float + Carbon dioxide concentration in µmole/mole. The default value + of 450 should be enough for most purposes. Valid range is from + 0 - 2000 µmole/mole. + warn : bool + Warning is issued if parameters fall outside accept + range. Accepted range is smaller than the valid ranges + mentioned above. See module docstring for accepted ranges. + + The default is False and no warnings are issued. + + Notes + ----- + See section A-III of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + See + """ + if warn: + _check_range(wave, t, p, xv) + + w0 = 295.235 + w1 = 2.6422 + w2 = -0.03238 + w3 = 0.004028 + k0 = 238.0185 + k1 = 5792105 + k2 = 57.362 + k3 = 167917 + a0 = 1.58123e-6 + a1 = -2.9331e-8 + a2 = 1.1043e-10 + b0 = 5.707e-6 + b1 = -2.051e-8 + c0 = 1.9898e-4 + c1 = -2.376e-6 + d = 1.83e-11 + e = -0.765e-8 + pr1 = 101325 + tr1 = 288.15 + Za = 0.9995922115 + rhovs = 0.00985938 + R = 8.314472 + Mv = 0.018015 + + wave = wave * 1.0e-3 + S = 1.0 / wave ** 2 + + ras = 1e-8 * ((k1 / (k0 - S)) + (k3 / (k2 - S))) + rvs = 1.022e-8 * (w0 + w1 * S + w2 * S ** 2 + w3 * S ** 3) + + Ma = 0.0289635 + 1.2011e-8 * (co2 - 400.0) + + raxs = ras * (1 + 5.34e-7 * (co2 - 450.0)) + + T = t + 273.15 + + Zm = a0 + a1 * t + a2 * t ** 2 + (b0 + b1 * t) * xv + \ + (c0 + c1 * t) * xv ** 2 + Zm *= -(p / T) + Zm += (p / T ) ** 2 * (d + e * xv ** 2) + Zm += 1 + + rhoaxs = pr1 * Ma / (Za * R * tr1) + + rhov = xv * p * Mv / (Zm * R * T) + + rhoa = (1 - xv) * p * Ma / (Zm * R * T) + + n = 1.0 + (rhoa / rhoaxs) * raxs + (rhov / rhovs) * rvs + + return n + + +def ciddor(wave, t, 
p, rh, co2=450, warn=False): + """Refractive index of air according to the Ciddor equation. + + Accepts relative humidity instead of mole fraction, as done in + ``ciddor_ri()``. + + Parameters + ---------- + wave : float or Numpy array of float + Wavelength in vacuum, in nano-meters. Valid wavelength range is + 300nm - 1700nm. + t : float + Temperature in degree Celsius. Valid temperate range is -40 to + 100 degree Celsius. + p : float + Pressure in Pascal. Valid range is from 10kPa - 140 kPa. + rh : float + Relative humidity [0 - 100]. + co2 : float + Carbon dioxide concentration in µmole/mole. The default value + of 450 should be enough for most purposes. Valid range is from + 0 - 2000 µmole/mole. + warn : bool + Warning is issued if parameters fall outside accept + range. Accepted range is smaller than the valid ranges + mentioned above. See module docstring for accepted ranges. + + The default is False and no warnings are issued. + + Notes + ----- + See section A-III of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + if warn: + _check_range(wave, t, p, rh) + # turn off warning, so that ciddor_ri doesn't issue duplicate + # warning. + warn = False + + xv = rh2mole_fraction(rh=rh, p=p, t=t) + return ciddor_ri(wave=wave, t=t, p=p, xv=xv, co2=co2, warn=warn) + + +def edlen_ri(wave, t, p, pv, warn=False): + """Refractive index of air according to the Edlén equation. + + Parameters + ---------- + wave : float or Numpy array of float + Wavelength in vacuum, in nano-meters. Valid wavelength range is + 300nm - 1700nm. + t : float + Temperature in degree Celsius. Valid temperate range is -40 to + 100 degree Celsius. + p : float + Pressure in Pascal. Valid range is from 10kPa - 140 kPa. + pv : float + Water vapour partial pressure, in Pascal. + warn : bool + Warning is issued if parameters fall outside accept + range. Accepted range is smaller than the valid ranges + mentioned above. See module docstring for accepted ranges. 
+ + The default is False and no warnings are issued. + + Notes + ----- + See section A-IV of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + if warn: + _check_range(wave, t, p) + + A = 8342.54 + B = 2406147 + C = 15998 + D = 96095.43 + E = 0.601 + F = 0.00972 + G = 0.003661 + + wave = wave * 1.0e-3 + S = 1.0 / wave ** 2 + + ns = 1 + 1e-8 * (A + B / (130.0 - S) + C / (38.9 - S)) + + X = (1 + 1e-8 * (E - F * t) * p) / (1 + G * t) + + ntp = 1 + p * (ns - 1) * X / D + + n = ntp - 1e-10 * ((292.75 / (t + 273.15)) * \ + (3.7345 - 0.0401 * S)) * pv + + return n + + +def edlen(wave, t, p, rh, warn=False): + """Refractive index of air according to the Edlén equation. + + Accepts relative humidity instead of water vapour partial pressure, + as in ``edlen_ri()``. + + Parameters + ---------- + wave : float or Numpy array of float + Wavelength in vacuum, in nano-meters. Valid wavelength range is + 300nm - 1700nm. + t : float + Temperature in degree Celsius. Valid temperate range is -40 to + 100 degree Celsius. + p : float + Pressure in Pascal. Valid range is from 10kPa - 140 kPa. + rh : float + Relative humidity in [0 - 100]. + warn : bool + Warning is issued if parameters fall outside accept + range. Accepted range is smaller than the valid ranges + mentioned above. See module docstring for accepted ranges. + + The default is False and no warnings are issued. + + Notes + ----- + See section A-IV of + http://emtoolbox.nist.gov/Wavelength/Documentation.asp. + + """ + if warn: + _check_range(wave, t, p) + # turn off warning so that edlen_ri() doesn't raise duplicate + # warning. + warn = False + + pv = rh2wvpp(rh=rh, t=t) + return edlen_ri(wave=wave, t=t, p=p, pv=pv, warn=warn) + + +def vac2air(wave, t=15.0, p=101325, rh=0.0, co2=450, warn=False): + """Wavelength of light in air, using Ciddor refractive index. + + Parameters + ---------- + wave : float or Numpy array of float + Wavelength in nano-meters. Valid range is 300nm - 1700nm. 
+ t : float + Temperature in degree Celsius. Valid range is -40 - 100 degree + Celsius. Default is 15 degree Celsius (288.15 Kelvin). + p : float + Pressure in Pascal. Valid range is 10kPa - 140kPa. Default is + 101325 Pa (1 atmosphere). + rh : float + Relative humidity as a number between 0 and 100. Default is 0. + co2 : float + Carbon dioxide concentration in µmole/mole. The default value + of 450 is sufficient for most purposes. Valid range is 0 - 2000 + µmole/mole. + warn : bool + Warning is issued if parameters fall outside accept + range. Accepted range is smaller than the valid ranges + mentioned above. See module docstring for accepted ranges. + + The default is False and no warnings are issued. + + Returns + ------- + w : float + Wavelength in air, in nm. + + """ + if warn: + _check_range(wave, t, p, rh, co2) + + n = ciddor(wave, t, p, rh, co2) + return wave / n + + +def air2vac(wave, t=15.0, p=101325, rh=0.0, co2=450, warn=False): + """Wavelength of light in vacuum, using Ciddor refractive index. + + The refractive index calculation needs wavelength in vacuum. In + this function, the wavelength in air is used. The errors are on the + order of 1e-5 nm. + + Parameters + ---------- + wave : float or Numpy array of float + Wavelength in nano-meters. Valid range is 300nm - 1700nm. + t : float + Temperature in degree Celsius. Valid range is -40 - 100 degree + Celsius. Default is 15 degree Celsius (288.15 Kelvin). + p : float + Pressure in Pascal. Valid range is 10kPa - 140kPa. Default is + 101325 Pa (1 atmosphere). + rh : float + Relative humidity as a number between 0 and 100. Default is 0. + co2 : float + Carbon dioxide concentration in µmole/mole. The default value + of 450 is sufficient for most purposes. Valid range is 0 - 2000 + µmole/mole. + warn : bool + Warning is issued if parameters fall outside accept + range. Accepted range is smaller than the valid ranges + mentioned above. See module docstring for accepted ranges. 
+ + The default is False and no warnings are issued. + + Returns + ------- + w : float + Wavelength in vacuum, in nm. + + """ + if warn: + _check_range(wave=wave, t=t, p=p, rh=rh, co2=co2) + + n = ciddor(wave, t, p, rh, co2) + return wave * n From a2c0755a59ffc4564117fb32b10a0f3a71d3d76c Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 25 Sep 2023 14:20:29 +0100 Subject: [PATCH 42/81] add tests for ref_index --- pypeit/tests/test_ref_index.py | 177 +++++++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 pypeit/tests/test_ref_index.py diff --git a/pypeit/tests/test_ref_index.py b/pypeit/tests/test_ref_index.py new file mode 100644 index 0000000000..b46dc01c80 --- /dev/null +++ b/pypeit/tests/test_ref_index.py @@ -0,0 +1,177 @@ +""" +Module to run tests on pyidl functions +""" +import numpy as np + +from pypeit.core import ref_index +import pytest + + +def test_nist_ciddor_1(): + """Compare with NIST output. + + Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. + + fix t at 20, p at 101325, rh at 50 + """ + wave = [321.456, 500, 600.1234, 633.0, 700, 1000.987, 1500.8, 1700.0] + nist_n = [1.000283543, 1.000273781, 1.000271818, 1.000271373, + 1.000270657, 1.000269038, 1.00026819, 1.000268041] + nist_w = [321.364879, 499.863147, 599.96032, 632.828268, + 699.810591, 1000.717769, 1500.397608, 1699.544453] + + xv = ref_index.rh2mole_fraction(50, 101325, 20) + + n = [ref_index.ciddor_ri(i, 20, 101325, xv) for i in wave] + wave_n = [ref_index.vac2air(i, t=20, p=101325, rh=50.0) for i in wave] + + for i, j in zip(n, nist_n): + assert abs(i - j) < 1e-8 + + for i, j in zip(wave_n, nist_w): + assert abs(i - j) < 1e-6 + + n = [ref_index.ciddor(i, 20, 101325, 50.0) for i in wave] + wave_n = [ref_index.vac2air(i, t=20, p=101325, rh=50.0) for i in wave] + + for i, j in zip(n, nist_n): + assert abs(i - j) < 1e-8 + + for i, j in zip(wave_n, nist_w): + assert abs(i - j) < 1e-6 + + +def test_nist_ciddor_2(): + """Compare with NIST output. 
+ + Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. + + fix wave at 633.0 p at 101325 rh at 50 + """ + t = [-20.0, 0.0, 20, 26.7982, 40.123, 60.45] + nist_w = [632.800737, 632.815441, 632.828268, 632.832303, 632.839872, + 632.850953] + nist_n = [1.00031489, 1.000291647, 1.000271373, 1.000264994, 1.000253031, + 1.000235516] + + xv = [ref_index.rh2mole_fraction(50, 101325, i) for i in t] + n = [ref_index.ciddor_ri(633.0, i, 101325, j) for i, j in zip(t, xv)] + + wave_n = [ref_index.vac2air(633.0, i, 101325, 50) for i in t] + + for i, j in zip(n, nist_n): + assert abs(i - j) < 1e-8 + + for i, j in zip(wave_n, nist_w): + assert abs(i - j) < 1e-6 + + +def test_nist_ciddor_3(): + """Compare with NIST output. + + Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. + + fix wave at 633.0, t at 20, rh at 50. vary p + """ + p = [1000 * i for i in [10, 50.123, 100.1234, 140.0]] + + nist_n = [1.000026385, 1.000133999, 1.000268148, 1.000375169] + nist_w = [632.983299, 632.91519, 632.830308, 632.762607] + + xv = [ref_index.rh2mole_fraction(50, i, 20) for i in p] + n = [ref_index.ciddor_ri(633.0, 20, i, j) for i, j in zip(p, xv)] + + wave_n = [ref_index.vac2air(633.0, 20, i, 50) for i in p] + + for i, j in zip(n, nist_n): + assert abs(i - j) < 1e-8 + + for i, j in zip(wave_n, nist_w): + assert abs(i - j) < 1e-6 + + +def test_nist_ciddor_4(): + """Compare with NIST output. + + Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. + + fix wave at 633.0, t at 20, p at 101325, vary rh. 
+ """ + rh = [0.0, 20.123, 40, 50.9876, 70, 90.7432, 100.0] + nist_n = [1.0002718, 1.000271627, 1.000271458, 1.000271364, + 1.000271203, 1.000271027, 1.000270949] + nist_w = [632.827997, 632.828106, 632.828214, 632.828273, + 632.828375, 632.828486, 632.828535] + + xv = [ref_index.rh2mole_fraction(i, 101325, 20) for i in rh] + n = [ref_index.ciddor_ri(633.0, 20, 101325, j) for j in xv] + + wave_n = [ref_index.vac2air(633.0, 20, 101325, i) for i in rh] + + for i, j in zip(n, nist_n): + assert abs(i - j) < 1e-8 + + for i, j in zip(wave_n, nist_w): + assert abs(i - j) < 1e-6 + + +def test_air2vac(): + """Test reversibility with vac2air.""" + wave = np.array([321.456, 500, 600.1234, 633.0, 700, 1000.987, 1500.8, 1700.0]) + wave_o = ref_index.air2vac(ref_index.vac2air(wave)) + assert np.allclose(wave, wave_o) + + +def test_idlastro(): + # Using IDLASTRO downloaded on 2011/10/07. The vac2air.pro uses a + # formulation of the Ciddor equation. Previous versions used a + # different equation. + + # The REVISION HISTORY from the vac2air.pro file is: + # ; REVISION HISTORY + # ; Written W. Landsman November 1991 + # ; Use Ciddor (1996) formula for better accuracy in the infrared + # ; Added optional output vector, W Landsman Mar 2011 + # ; Iterate for better precision W.L./D. Schlegel Mar 2011 + + # The REVISION HISTORY from air2vac.pro file is: + # ; REVISION HISTORY + # ; Written, D. Lindler 1982 + # ; Documentation W. Landsman Feb. 1989 + # ; Use Ciddor (1996) formula for better accuracy in the infrared + # ; Added optional output vector, W Landsman Mar 2011 + + # Velocity errors in m/s for different wave length errors, at + # different wave lengths. 
+ # >>> 1e-5/330.0 * 299792458 + # 9.0846199393939404 + # >>> 1e-5/200.0 * 299792458 + # 14.989622900000001 + # >>> 1e-5/1000.0 * 299792458 + # 2.9979245800000003 + + # nm + wave = np.array([200.0, 300.0, 500.0, 800.0, 1200.0, 1600.0, 1700.0]) + + # angstrom + wave_idl_vactoair = np.array([1999.3526550081103323, 2999.1255923046301177, + 4998.6055889614663101, 7997.8003315140686027, + 11996.7167708424640296, 15995.6298776736693981, + 16995.3579139663052047]) + wave_vac2air = ref_index.vac2air(wave, t=15, rh=0) + + # values in wave_idl_vactoair was fed to airtovac idl procedure. + wave_idl_airtovac = np.array([1999.3526550081103323, + 3000.0000371189012185, + 5000.0000183785432455, + 8000.0000108292333607, + 12000.0000070745754783, + 16000.0000052688483265, + 17000.0000049538284657]) + # Have to convert angstrom to nm. + wave_air2vac = ref_index.air2vac(wave_idl_vactoair / 10.0, t=15, rh=0) + + assert np.allclose(wave_vac2air, wave_idl_vactoair/10.0) + + # IDL code ignores values under 2000 angstrom. + assert np.allclose(wave_air2vac[1:], wave_idl_airtovac[1:]/10.0) From 7a44b26f6f0248c5ef635b3f4b00997456e8f999 Mon Sep 17 00:00:00 2001 From: rcooke Date: Tue, 26 Sep 2023 10:07:17 +0100 Subject: [PATCH 43/81] bugfix dar --- doc/releases/1.14.1dev.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/releases/1.14.1dev.rst b/doc/releases/1.14.1dev.rst index b673493a39..e8b0290e9b 100644 --- a/doc/releases/1.14.1dev.rst +++ b/doc/releases/1.14.1dev.rst @@ -25,5 +25,7 @@ Bug Fixes - Fixed bug associated with finding more than one file with the same name (but presumably different extensions). - +- Fixed differential atmospheric refraction (DAR) correction bug. This bug affected + datacubes combined using CoAdd3D(). Previously, the DAR was being computed, applied, + and then later overwritten. The new routine is faster and more accurate. 
From 49cd4d77f75590639a1a142edb5c519f63e476c5 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 27 Sep 2023 16:51:48 +0100 Subject: [PATCH 44/81] revert --- doc/help/run_pypeit.rst | 2 +- doc/pypeit_par.rst | 436 ++++++++++++++++++++-------------------- 2 files changed, 216 insertions(+), 222 deletions(-) diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index b4c344c6ae..186062ab9c 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -4,7 +4,7 @@ usage: run_pypeit [-h] [-v VERBOSITY] [-r REDUX_PATH] [-m] [-s] [-o] [-c] pypeit_file - ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.13.1.dev308+g83230725a + ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.14.1.dev9+gb73d74939 ## ## Available spectrographs include: ## bok_bc, gemini_flamingos1, gemini_flamingos2, gemini_gmos_north_e2v, diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index ebe195de00..f31f8001dc 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -550,22 +550,22 @@ Collate1DPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.Collate1DPar` -========================= ========== ======= ===================================== ================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -========================= ========== ======= ===================================== 
================================================================================================================================================================================================================================================================================================================================================================================================================== -``chk_version`` bool .. False Whether to check the data model versions of spec1d files and sensfunc files. -``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. -``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. -``exclude_slit_trace_bm`` list, str .. [] A list of slit trace bitmask bits that should be excluded. -``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. -``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. -``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. -``outdir`` str .. ``/Users/rcooke/Software/PypeIt/doc`` The path where all coadded output files and report files will be placed. -``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric -``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. -``tolerance`` str, float .. ``1.0`` The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. 
The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. -``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. -========================= ========== ======= ===================================== ================================================================================================================================================================================================================================================================================================================================================================================================================== +========================= ========== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +========================= ========== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== +``chk_version`` bool .. False Whether to check the data model versions of spec1d files and sensfunc files. +``dry_run`` bool .. 
False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. +``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. +``exclude_slit_trace_bm`` list, str .. [] A list of slit trace bitmask bits that should be excluded. +``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. +``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. +``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. +``outdir`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` The path where all coadded output files and report files will be placed. +``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric +``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. +``tolerance`` str, float .. ``1.0`` The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. +``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. 
+========================= ========== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== ---- @@ -578,13 +578,13 @@ FlexurePar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.FlexurePar` =================== ========== ======================================================== ==================== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description +Key Type Options Default Description =================== ========== ======================================================== ==================== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== ``excessive_shift`` str ``crash``, ``set_to_zero``, 
``continue``, ``use_median`` ``use_median`` Behavior when the measured spectral flexure shift is larger than ``spec_maxshift``. The options are: 'crash' - Raise an error and halt the data reduction; 'set_to_zero' - Set the flexure shift to zero and continue with the reduction; 'continue' - Use the large flexure value whilst issuing a warning; and 'use_median' - Use the median flexure shift among all the objects in the same slit (if more than one object is detected) or among all the other slits; if not available, the flexure correction will not be applied. -``multi_min_SN`` int, float .. 1 Minimum S/N for analyzing sky spectrum for flexure -``spec_maxshift`` int .. 20 Maximum allowed spectral flexure shift in pixels. -``spec_method`` str ``boxcar``, ``slitcen``, ``skip`` ``skip`` Method used to correct for flexure. Use skip for no correction. If slitcen is used, the flexure correction is performed before the extraction of objects (not recommended). Options are: None, boxcar, slitcen, skip -``spectrum`` str .. ``paranal_sky.fits`` Archive sky spectrum to be used for the flexure correction. +``multi_min_SN`` int, float .. 1 Minimum S/N for analyzing sky spectrum for flexure +``spec_maxshift`` int .. 20 Maximum allowed spectral flexure shift in pixels. +``spec_method`` str ``boxcar``, ``slitcen``, ``skip`` ``skip`` Method used to correct for flexure. Use skip for no correction. If slitcen is used, the flexure correction is performed before the extraction of objects (not recommended). Options are: None, boxcar, slitcen, skip +``spectrum`` str .. ``paranal_sky.fits`` Archive sky spectrum to be used for the flexure correction. 
=================== ========== ======================================================== ==================== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== @@ -598,12 +598,12 @@ FluxCalibratePar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.FluxCalibratePar` ===================== ==== ======= =========== ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================ -Key Type Options Default Description +Key Type Options Default Description ===================== ==== ======= =========== 
============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================ ``extinct_correct`` bool .. .. The default behavior for atmospheric extinction corrections is that if UVIS algorithm is used (which does not correct for telluric absorption) than an atmospheric extinction model is used to correct for extinction below 10,000A, whereas if the IR algorithm is used, then no extinction correction is applied since the atmosphere is modeled directly. To follow these defaults based on the algorithm this parameter should be set to ``extinct_correct=None``. If instead this parameter is set, this overide this default behavior. In other words, it will force an extinction correction if ``extinct_correct=True``, and will not perform an extinction correction if ``extinct_correct=False``. -``extinct_file`` str .. ``closest`` If ``extinct_file='closest'`` the code will select the PypeIt-included extinction file for the closest observatory (within 5 deg, geographic coordinates) to the telescope identified in ``std_file`` (see :ref:`extinction_correction` for the list of currently included files). If constructing a sesitivity function for a telescope not within 5 deg of a listed observatory, this parameter may be set to the name of one of the listed extinction files. 
Alternatively, a custom extinction file may be installed in the PypeIt cache using the ``pypeit_install_extinctfile`` script; this parameter may then be set to the name of the custom extinction file. -``extrap_sens`` bool .. False If False (default), the code will crash if one tries to use sensfunc at wavelengths outside its defined domain. By changing the par['sensfunc']['extrap_blu'] and par['sensfunc']['extrap_red'] this domain can be extended. If True the code will blindly extrapolate. -``use_archived_sens`` bool .. False Use an archived sensfunc to flux calibration +``extinct_file`` str .. ``closest`` If ``extinct_file='closest'`` the code will select the PypeIt-included extinction file for the closest observatory (within 5 deg, geographic coordinates) to the telescope identified in ``std_file`` (see :ref:`extinction_correction` for the list of currently included files). If constructing a sesitivity function for a telescope not within 5 deg of a listed observatory, this parameter may be set to the name of one of the listed extinction files. Alternatively, a custom extinction file may be installed in the PypeIt cache using the ``pypeit_install_extinctfile`` script; this parameter may then be set to the name of the custom extinction file. +``extrap_sens`` bool .. False If False (default), the code will crash if one tries to use sensfunc at wavelengths outside its defined domain. By changing the par['sensfunc']['extrap_blu'] and par['sensfunc']['extrap_red'] this domain can be extended. If True the code will blindly extrapolate. +``use_archived_sens`` bool .. 
False Use an archived sensfunc to flux calibration ===================== ==== ======= =========== ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================ @@ -616,21 +616,21 @@ ReduxPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ReduxPar` -====================== ============== ======= ===================================== =============================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -====================== ============== ======= ===================================== =============================================================================================================================================================================================================================================================================================================================================================== -``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame -``detnum`` int, list .. .. Restrict reduction to a list of detector indices. 
In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` -``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). -``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). -``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. -``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. -``redux_path`` str .. ``/Users/rcooke/Software/PypeIt/doc`` Path to folder for performing reductions. Default is the current working directory. -``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. -``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. -``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. -``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. 
-====================== ============== ======= ===================================== =============================================================================================================================================================================================================================================================================================================================================================== +====================== ============== ======= ============================================ =============================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +====================== ============== ======= ============================================ =============================================================================================================================================================================================================================================================================================================================================================== +``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame +``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` +``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). +``maskIDs`` str, int, list .. .. 
Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). +``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. +``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. +``redux_path`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` Path to folder for performing reductions. Default is the current working directory. +``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. +``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. +``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. +``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. 
+====================== ============== ======= ============================================ =============================================================================================================================================================================================================================================================================================================================================================== ---- @@ -643,13 +643,13 @@ ReducePar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ReducePar` ============== ============================================ ======= ========================= ================================================================================= -Key Type Options Default Description +Key Type Options Default Description ============== ============================================ ======= ========================= ================================================================================= -``cube`` :class:`~pypeit.par.pypeitpar.CubePar` .. `CubePar Keywords`_ Parameters for cube generation algorithms -``extraction`` :class:`~pypeit.par.pypeitpar.ExtractionPar` .. `ExtractionPar Keywords`_ Parameters for extraction algorithms -``findobj`` :class:`~pypeit.par.pypeitpar.FindObjPar` .. `FindObjPar Keywords`_ Parameters for the find object and tracing algorithms -``skysub`` :class:`~pypeit.par.pypeitpar.SkySubPar` .. `SkySubPar Keywords`_ Parameters for sky subtraction algorithms -``slitmask`` :class:`~pypeit.par.pypeitpar.SlitMaskPar` .. `SlitMaskPar Keywords`_ Parameters for slitmask +``cube`` :class:`~pypeit.par.pypeitpar.CubePar` .. `CubePar Keywords`_ Parameters for cube generation algorithms +``extraction`` :class:`~pypeit.par.pypeitpar.ExtractionPar` .. `ExtractionPar Keywords`_ Parameters for extraction algorithms +``findobj`` :class:`~pypeit.par.pypeitpar.FindObjPar` .. 
`FindObjPar Keywords`_ Parameters for the find object and tracing algorithms +``skysub`` :class:`~pypeit.par.pypeitpar.SkySubPar` .. `SkySubPar Keywords`_ Parameters for sky subtraction algorithms +``slitmask`` :class:`~pypeit.par.pypeitpar.SlitMaskPar` .. `SlitMaskPar Keywords`_ Parameters for slitmask ``trim_edge`` list .. 3, 3 Trim the slit by this number of pixels left/right when performing sky subtraction ============== ============================================ ======= ========================= ================================================================================= @@ -664,32 +664,32 @@ CubePar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.CubePar` ==================== ===== ======= ============ =========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description +Key Type Options Default Description ==================== ===== ======= ============ 
=========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -``align`` bool .. False If set to True, the input frames will be spatially aligned by cross-correlating the whitelight images with either a reference image (see ``reference_image``) or the whitelight image that is generated using the first spec2d listed in the coadd3d file. Alternatively, the user can specify the offsets (i.e. Delta RA x cos(dec) and Delta Dec, both in arcsec) in the spec2d block of the coadd3d file. See the documentation for examples of this usage. -``astrometric`` bool .. True If true, an astrometric correction will be applied using the alignment frames. -``combine`` bool .. False If set to True, the input frames will be combined. Otherwise, a separate datacube will be generated for each input spec2d file, and will be saved as a spec3d file. -``dec_max`` float .. .. Maximum DEC to use when generating the WCS. 
If None, the default is maximum DEC based on the WCS of all spaxels. Units should be degrees. -``dec_min`` float .. .. Minimum DEC to use when generating the WCS. If None, the default is minimum DEC based on the WCS of all spaxels. Units should be degrees. -``grating_corr`` bool .. True This option performs a small correction for the relative blaze function of all input frames that have (even slightly) different grating angles, or if you are flux calibrating your science data with a standard star that was observed with a slightly different setup. +``align`` bool .. False If set to True, the input frames will be spatially aligned by cross-correlating the whitelight images with either a reference image (see ``reference_image``) or the whitelight image that is generated using the first spec2d listed in the coadd3d file. Alternatively, the user can specify the offsets (i.e. Delta RA x cos(dec) and Delta Dec, both in arcsec) in the spec2d block of the coadd3d file. See the documentation for examples of this usage. +``astrometric`` bool .. True If true, an astrometric correction will be applied using the alignment frames. +``combine`` bool .. False If set to True, the input frames will be combined. Otherwise, a separate datacube will be generated for each input spec2d file, and will be saved as a spec3d file. +``dec_max`` float .. .. Maximum DEC to use when generating the WCS. If None, the default is maximum DEC based on the WCS of all spaxels. Units should be degrees. +``dec_min`` float .. .. Minimum DEC to use when generating the WCS. If None, the default is minimum DEC based on the WCS of all spaxels. Units should be degrees. +``grating_corr`` bool .. True This option performs a small correction for the relative blaze function of all input frames that have (even slightly) different grating angles, or if you are flux calibrating your science data with a standard star that was observed with a slightly different setup. ``method`` str .. 
``subpixel`` What method should be used to generate the datacube. There are currently two options: (1) "subpixel" (default) - this algorithm divides each pixel in the spec2d frames into subpixels, and assigns each subpixel to a voxel of the datacube. Flux is conserved, but voxels are correlated, and the error spectrum does not account for covariance between adjacent voxels. See also, spec_subpixel and spat_subpixel. (2) "NGP" (nearest grid point) - this algorithm is effectively a 3D histogram. Flux is conserved, voxels are not correlated, however this option suffers the same downsides as any histogram; the choice of bin sizes can change how the datacube appears. This algorithm takes each pixel on the spec2d frame and puts the flux of this pixel into one voxel in the datacube. Depending on the binning used, some voxels may be empty (zero flux) while a neighboring voxel might contain the flux from two spec2d pixels. Note that all spec2d pixels that contribute to the same voxel are inverse variance weighted (e.g. if two pixels have the same variance, the voxel would be assigned the average flux of the two pixels). -``output_filename`` str .. .. If combining multiple frames, this string sets the output filename of the combined datacube. If combine=False, the output filenames will be prefixed with ``spec3d_*`` -``ra_max`` float .. .. Maximum RA to use when generating the WCS. If None, the default is maximum RA based on the WCS of all spaxels. Units should be degrees. -``ra_min`` float .. .. Minimum RA to use when generating the WCS. If None, the default is minimum RA based on the WCS of all spaxels. Units should be degrees. -``reference_image`` str .. .. White light image of a previously combined datacube. The white light image will be used as a reference when calculating the offsets of the input spec2d files. Ideally, the reference image should have the same shape as the data to be combined (i.e. set the ra_min, ra_max etc. 
params so they are identical to the reference image). -``relative_weights`` bool .. False If set to True, the combined frames will use a relative weighting scheme. This only works well if there is a common continuum source in the field of view of all input observations, and is generally only required if high relative precision is desired. -``save_whitelight`` bool .. False Save a white light image of the combined datacube. The output filename will be given by the "output_filename" variable with a suffix "_whitelight". Note that the white light image collapses the flux along the wavelength axis, so some spaxels in the 2D white light image may have different wavelength ranges. To set the wavelength range, use the "whitelight_range" parameter. If combine=False, the individual spec3d files will have a suffix "_whitelight". -``scale_corr`` str .. .. This option performs a small correction for the relative spectral illumination scale of different spec2D files. Specify the relative path+file to the spec2D file that you would like to use for the relative scaling. If you want to perform this correction, it is best to use the spec2d file with the highest S/N sky spectrum. You should choose the same frame for both the standards and science frames. -``skysub_frame`` str .. ``image`` Set the sky subtraction to be implemented. The default behaviour is to subtract the sky using the model that is derived from each individual image (i.e. set this parameter to "image"). To turn off sky subtraction completely, set this parameter to "none" (all lowercase). Finally, if you want to use a different frame for the sky subtraction, specify the relative path+file to the spec2D file that you would like to use for the sky subtraction. The model fit to the sky of the specified frame will be used. Note, the sky and science frames do not need to have the same exposure time; the sky model will be scaled to the science frame based on the relative exposure time. -``slit_spec`` bool .. 
True If the data use slits in one spatial direction, set this to True. If the data uses fibres for all spaxels, set this to False. -``spat_subpixel`` int .. 5 When method=subpixel, spat_subpixel sets the subpixellation scale of each detector pixel in the spatial direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spec_subpixel. -``spatial_delta`` float .. .. The spatial size of each spaxel to use when generating the WCS (in arcsec). If None, the default is set by the spectrograph file. -``spec_subpixel`` int .. 5 When method=subpixel, spec_subpixel sets the subpixellation scale of each detector pixel in the spectral direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spat_subpixel. -``standard_cube`` str .. .. Filename of a standard star datacube. This cube will be used to correct the relative scales of the slits, and to flux calibrate the science datacube. -``wave_delta`` float .. .. The wavelength step to use when generating the WCS (in Angstroms). If None, the default is set by the wavelength solution. -``wave_max`` float .. .. Maximum wavelength to use when generating the WCS. If None, the default is maximum wavelength based on the WCS of all spaxels. Units should be Angstroms. -``wave_min`` float .. .. Minimum wavelength to use when generating the WCS. If None, the default is minimum wavelength based on the WCS of all spaxels. Units should be Angstroms. -``whitelight_range`` list .. None, None A two element list specifying the wavelength range over which to generate the white light image. The first (second) element is the minimum (maximum) wavelength to use. 
If either of these elements are None, PypeIt will automatically use a wavelength range that ensures all spaxels have the same wavelength coverage. Note, if you are using a reference_image to align all frames, it is preferable to use the same white light wavelength range for all white light images. For example, you may wish to use an emission line map to register two frames. +``output_filename`` str .. .. If combining multiple frames, this string sets the output filename of the combined datacube. If combine=False, the output filenames will be prefixed with ``spec3d_*`` +``ra_max`` float .. .. Maximum RA to use when generating the WCS. If None, the default is maximum RA based on the WCS of all spaxels. Units should be degrees. +``ra_min`` float .. .. Minimum RA to use when generating the WCS. If None, the default is minimum RA based on the WCS of all spaxels. Units should be degrees. +``reference_image`` str .. .. White light image of a previously combined datacube. The white light image will be used as a reference when calculating the offsets of the input spec2d files. Ideally, the reference image should have the same shape as the data to be combined (i.e. set the ra_min, ra_max etc. params so they are identical to the reference image). +``relative_weights`` bool .. False If set to True, the combined frames will use a relative weighting scheme. This only works well if there is a common continuum source in the field of view of all input observations, and is generally only required if high relative precision is desired. +``save_whitelight`` bool .. False Save a white light image of the combined datacube. The output filename will be given by the "output_filename" variable with a suffix "_whitelight". Note that the white light image collapses the flux along the wavelength axis, so some spaxels in the 2D white light image may have different wavelength ranges. To set the wavelength range, use the "whitelight_range" parameter. 
If combine=False, the individual spec3d files will have a suffix "_whitelight". +``scale_corr`` str .. .. This option performs a small correction for the relative spectral illumination scale of different spec2D files. Specify the relative path+file to the spec2D file that you would like to use for the relative scaling. If you want to perform this correction, it is best to use the spec2d file with the highest S/N sky spectrum. You should choose the same frame for both the standards and science frames. +``skysub_frame`` str .. ``image`` Set the sky subtraction to be implemented. The default behaviour is to subtract the sky using the model that is derived from each individual image (i.e. set this parameter to "image"). To turn off sky subtraction completely, set this parameter to "none" (all lowercase). Finally, if you want to use a different frame for the sky subtraction, specify the relative path+file to the spec2D file that you would like to use for the sky subtraction. The model fit to the sky of the specified frame will be used. Note, the sky and science frames do not need to have the same exposure time; the sky model will be scaled to the science frame based on the relative exposure time. +``slit_spec`` bool .. True If the data use slits in one spatial direction, set this to True. If the data uses fibres for all spaxels, set this to False. +``spat_subpixel`` int .. 5 When method=subpixel, spat_subpixel sets the subpixellation scale of each detector pixel in the spatial direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spec_subpixel. +``spatial_delta`` float .. .. The spatial size of each spaxel to use when generating the WCS (in arcsec). If None, the default is set by the spectrograph file. +``spec_subpixel`` int .. 
5 When method=subpixel, spec_subpixel sets the subpixellation scale of each detector pixel in the spectral direction. The total number of subpixels in each pixel is given by spec_subpixel x spat_subpixel. The default option is to divide each spec2d pixel into 25 subpixels during datacube creation. See also, spat_subpixel. +``standard_cube`` str .. .. Filename of a standard star datacube. This cube will be used to correct the relative scales of the slits, and to flux calibrate the science datacube. +``wave_delta`` float .. .. The wavelength step to use when generating the WCS (in Angstroms). If None, the default is set by the wavelength solution. +``wave_max`` float .. .. Maximum wavelength to use when generating the WCS. If None, the default is maximum wavelength based on the WCS of all spaxels. Units should be Angstroms. +``wave_min`` float .. .. Minimum wavelength to use when generating the WCS. If None, the default is minimum wavelength based on the WCS of all spaxels. Units should be Angstroms. +``whitelight_range`` list .. None, None A two element list specifying the wavelength range over which to generate the white light image. The first (second) element is the minimum (maximum) wavelength to use. If either of these elements are None, PypeIt will automatically use a wavelength range that ensures all spaxels have the same wavelength coverage. Note, if you are using a reference_image to align all frames, it is preferable to use the same white light wavelength range for all white light images. For example, you may wish to use an emission line map to register two frames. 
==================== ===== ======= ============ =========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== @@ -703,17 +703,17 @@ ExtractionPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ExtractionPar` ==================== ========== ======= ======= ============================================================================================================================================================================================================================================================================================= -Key Type Options Default Description +Key Type Options Default Description ==================== ========== ======= ======= 
============================================================================================================================================================================================================================================================================================= -``boxcar_radius`` int, float .. 1.5 Boxcar radius in arcseconds used for boxcar extraction +``boxcar_radius`` int, float .. 1.5 Boxcar radius in arcseconds used for boxcar extraction ``model_full_slit`` bool .. False If True local sky subtraction will be performed on the entire slit. If False, local sky subtraction will be applied to only a restricted region around each object. This should be set to True for either multislit observations using narrow slits or echelle observations with narrow slits -``return_negative`` bool .. False If ``True`` the negative traces will be extracted and saved to disk -``skip_extraction`` bool .. False Do not perform an object extraction -``skip_optimal`` bool .. False Perform boxcar extraction only (i.e. skip Optimal and local skysub) -``sn_gauss`` int, float .. 4.0 S/N threshold for performing the more sophisticated optimal extraction which performs a b-spline fit to the object profile. For S/N < sn_gauss the code will simply optimal extractwith a Gaussian with FWHM determined from the object finding. -``std_prof_nsigma`` float .. 30.0 prof_nsigma parameter for Standard star extraction. Prevents undesired rejection. NOTE: Not consumed by the code at present. -``use_2dmodel_mask`` bool .. True Mask pixels rejected during profile fitting when extracting.Turning this off may help with bright emission lines. -``use_user_fwhm`` bool .. False Boolean indicating if PypeIt should use the FWHM provided by the user (``find_fwhm`` in `FindObjPar`) for the optimal extraction. If this parameter is ``False`` (default), PypeIt estimates the FWHM for each detected object, and uses ``find_fwhm`` as initial guess. +``return_negative`` bool .. 
False If ``True`` the negative traces will be extracted and saved to disk +``skip_extraction`` bool .. False Do not perform an object extraction +``skip_optimal`` bool .. False Perform boxcar extraction only (i.e. skip Optimal and local skysub) +``sn_gauss`` int, float .. 4.0 S/N threshold for performing the more sophisticated optimal extraction which performs a b-spline fit to the object profile. For S/N < sn_gauss the code will simply optimal extract with a Gaussian with FWHM determined from the object finding. +``std_prof_nsigma`` float .. 30.0 prof_nsigma parameter for Standard star extraction. Prevents undesired rejection. NOTE: Not consumed by the code at present. +``use_2dmodel_mask`` bool .. True Mask pixels rejected during profile fitting when extracting. Turning this off may help with bright emission lines. +``use_user_fwhm`` bool .. False Boolean indicating if PypeIt should use the FWHM provided by the user (``find_fwhm`` in `FindObjPar`) for the optimal extraction. If this parameter is ``False`` (default), PypeIt estimates the FWHM for each detected object, and uses ``find_fwhm`` as initial guess. 
==================== ========== ======= ======= ============================================================================================================================================================================================================================================================================================= @@ -727,25 +727,25 @@ FindObjPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.FindObjPar` =========================== ========== ======= ======= ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= -Key Type Options Default Description +Key Type Options Default Description =========================== ========== ======= ======= 
============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= -``ech_find_max_snr`` int, float .. 1.0 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than this value or satisfy the min_snr criteria described by the min_snr parameters. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept. -``ech_find_min_snr`` int, float .. 0.3 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than ech_find_max_snr, value or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept. -``ech_find_nabove_min_snr`` int .. 2 Criteria for keeping echelle objects. 
They must either have a maximum S/N across all the orders greater than ech_find_max_snr, value or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept. -``find_extrap_npoly`` int .. 3 Polynomial order used for trace extrapolation -``find_fwhm`` int, float .. 5.0 Indicates roughly the fwhm of objects in pixels for object finding -``find_maxdev`` int, float .. 2.0 Maximum deviation of pixels from polynomial fit to trace used to reject bad pixels in trace fitting. -``find_min_max`` list .. .. It defines the minimum and maximum of your object in pixels in the spectral direction on the detector. It only used for object finding. This parameter is helpful if your object only has emission lines or at high redshift and the trace only shows in part of the detector. -``find_negative`` bool .. .. Identify negative objects in object finding for spectra that are differenced. This is used to manually override the default behavior in PypeIt for object finding by setting this parameter to something other than None The default behavior is that PypeIt will search for negative object traces if background frames are present in the PypeIt file that are classified as "science" (i.e. via pypeit_setup -b, and setting bkg_id in the PypeIt file). If background frames are present that are classified as "sky", then PypeIt will NOT search for negative object traces. If one wishes to explicitly override this default behavior, set this parameter to True to find negative objects or False to ignore them. -``find_trim_edge`` list .. 5, 5 Trim the slit by this number of pixels left/right before finding objects +``ech_find_max_snr`` int, float .. 1.0 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than this value or satisfy the min_snr criteria described by the min_snr parameters. 
If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept. +``ech_find_min_snr`` int, float .. 0.3 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than the ech_find_max_snr value, or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept. +``ech_find_nabove_min_snr`` int .. 2 Criteria for keeping echelle objects. They must either have a maximum S/N across all the orders greater than the ech_find_max_snr value, or they must have S/N > ech_find_min_snr on >= ech_find_nabove_min_snr orders. If maxnumber is set (see above) then these criteria will be applied but only the maxnumber highest (median) S/N ratio objects will be kept. +``find_extrap_npoly`` int .. 3 Polynomial order used for trace extrapolation +``find_fwhm`` int, float .. 5.0 Indicates roughly the fwhm of objects in pixels for object finding +``find_maxdev`` int, float .. 2.0 Maximum deviation of pixels from polynomial fit to trace used to reject bad pixels in trace fitting. +``find_min_max`` list .. .. It defines the minimum and maximum of your object in pixels in the spectral direction on the detector. It is only used for object finding. This parameter is helpful if your object only has emission lines or at high redshift and the trace only shows in part of the detector. +``find_negative`` bool .. .. Identify negative objects in object finding for spectra that are differenced. This is used to manually override the default behavior in PypeIt for object finding by setting this parameter to something other than None. The default behavior is that PypeIt will search for negative object traces if background frames are present in the PypeIt file that are classified as "science" (i.e. 
via pypeit_setup -b, and setting bkg_id in the PypeIt file). If background frames are present that are classified as "sky", then PypeIt will NOT search for negative object traces. If one wishes to explicitly override this default behavior, set this parameter to True to find negative objects or False to ignore them. +``find_trim_edge`` list .. 5, 5 Trim the slit by this number of pixels left/right before finding objects ``maxnumber_sci`` int .. 10 Maximum number of objects to extract in a science frame. Use None for no limit. This parameter can be useful in situations where systematics lead to spurious extra objects. Setting this parameter means they will be trimmed. For mulitslit maxnumber applies per slit, for echelle observations this applies per order. Note that objects on a slit/order impact the sky-modeling and so maxnumber should never be lower than the true number of detectable objects on your slit. For image differenced observations with positive and negative object traces, maxnumber applies to the number of positive (or negative) traces individually. In other words, if you had two positive objects and one negative object, then you would set maxnumber to be equal to two (not three). Note that if manually extracted apertures are explicitly requested, they do not count against this maxnumber. If more than maxnumber objects are detected, then highest S/N ratio objects will be the ones that are kept. For multislit observations the choice here depends on the slit length. For echelle observations with short slits we set the default to be 1 -``maxnumber_std`` int .. 5 Maximum number of objects to extract in a standard star frame. Same functionality as maxnumber_sci documented above. For multislit observations the default here is 5, for echelle observations the default is 1 -``skip_final_global`` bool .. False If True, do not update initial sky to get global sky using updated noise model. This should be True for quicklook to save time. 
This should also be True for near-IR reductions which perform difference imaging, since there we fit sky-residuals rather than the sky itself, so there is no noise model to update. -``skip_second_find`` bool .. False Only perform one round of object finding (mainly for quick_look) -``skip_skysub`` bool .. False If True, do not sky subtract when performing object finding. This should be set to True for example when running on data that is already sky-subtracted. Note that for near-IR difference imaging one still wants to remove sky-residuals via sky-subtraction, and so this is typically set to False -``snr_thresh`` int, float .. 10.0 S/N threshold for object finding in wavelength direction smashed image. -``std_spec1d`` str .. .. A PypeIt spec1d file of a previously reduced standard star. The trace of the standard star spectrum is used as a crutch for tracing the object spectra, when a direct trace is not possible (i.e., faint sources). If provided, this overrides use of any standards included in your pypeit file; the standard exposures will still be reduced. -``trace_npoly`` int .. 5 Order of legendre polynomial fits to object traces. +``maxnumber_std`` int .. 5 Maximum number of objects to extract in a standard star frame. Same functionality as maxnumber_sci documented above. For multislit observations the default here is 5, for echelle observations the default is 1 +``skip_final_global`` bool .. False If True, do not update initial sky to get global sky using updated noise model. This should be True for quicklook to save time. This should also be True for near-IR reductions which perform difference imaging, since there we fit sky-residuals rather than the sky itself, so there is no noise model to update. +``skip_second_find`` bool .. False Only perform one round of object finding (mainly for quick_look) +``skip_skysub`` bool .. False If True, do not sky subtract when performing object finding. 
This should be set to True for example when running on data that is already sky-subtracted. Note that for near-IR difference imaging one still wants to remove sky-residuals via sky-subtraction, and so this is typically set to False +``snr_thresh`` int, float .. 10.0 S/N threshold for object finding in wavelength direction smashed image. +``std_spec1d`` str .. .. A PypeIt spec1d file of a previously reduced standard star. The trace of the standard star spectrum is used as a crutch for tracing the object spectra, when a direct trace is not possible (i.e., faint sources). If provided, this overrides use of any standards included in your pypeit file; the standard exposures will still be reduced. +``trace_npoly`` int .. 5 Order of legendre polynomial fits to object traces. =========================== ========== ======= ======= ============================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= @@ -759,17 +759,17 @@ SkySubPar Keywords Class 
Instantiation: :class:`~pypeit.par.pypeitpar.SkySubPar` =================== ========== ======= ======= =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description +Key Type Options Default Description =================== ========== ======= ======= =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -``bspline_spacing`` int, float .. 0.6 Break-point spacing for the bspline sky subtraction fits. -``global_sky_std`` bool .. True Global sky subtraction will be performed on standard stars. This should be turned off for example for near-IR reductions with narrow slits, since bright standards can fill the slit causing global sky-subtraction to fail. In these situations we go straight to local sky-subtraction since it is designed to deal with such situations -``joint_fit`` bool .. False Perform a simultaneous joint fit to sky regions using all available slits. Currently, this parameter is only used for IFU data reduction. Note that the current implementation does not account for variations in the instrument FWHM in different slits. This will be addressed by Issue #1660. -``local_maskwidth`` float .. 
4.0 Initial width of the region in units of FWHM that will be used for local sky subtraction -``mask_by_boxcar`` bool .. False In global sky evaluation, mask the sky region around the object by the boxcar radius (set in ExtractionPar). -``max_mask_frac`` float .. 0.8 Maximum fraction of total pixels on a slit that can be masked by the input masks. If more than this threshold is masked the code will return zeros and throw a warning. -``no_local_sky`` bool .. False If True, turn off local sky model evaluation, but do fit object profile and perform optimal extraction -``no_poly`` bool .. False Turn off polynomial basis (Legendre) in global sky subtraction -``sky_sigrej`` float .. 3.0 Rejection parameter for local sky subtraction +``bspline_spacing`` int, float .. 0.6 Break-point spacing for the bspline sky subtraction fits. +``global_sky_std`` bool .. True Global sky subtraction will be performed on standard stars. This should be turned off for example for near-IR reductions with narrow slits, since bright standards can fill the slit causing global sky-subtraction to fail. In these situations we go straight to local sky-subtraction since it is designed to deal with such situations +``joint_fit`` bool .. False Perform a simultaneous joint fit to sky regions using all available slits. Currently, this parameter is only used for IFU data reduction. Note that the current implementation does not account for variations in the instrument FWHM in different slits. This will be addressed by Issue #1660. +``local_maskwidth`` float .. 4.0 Initial width of the region in units of FWHM that will be used for local sky subtraction +``mask_by_boxcar`` bool .. False In global sky evaluation, mask the sky region around the object by the boxcar radius (set in ExtractionPar). +``max_mask_frac`` float .. 0.8 Maximum fraction of total pixels on a slit that can be masked by the input masks. If more than this threshold is masked the code will return zeros and throw a warning. 
+``no_local_sky`` bool .. False If True, turn off local sky model evaluation, but do fit object profile and perform optimal extraction +``no_poly`` bool .. False Turn off polynomial basis (Legendre) in global sky subtraction +``sky_sigrej`` float .. 3.0 Rejection parameter for local sky subtraction ``user_regions`` str, list .. .. Provides a user-defined mask defining sky regions. By default, the sky regions are identified automatically. To specify sky regions for *all* slits, provide a comma separated list of percentages. For example, setting user_regions = :10,35:65,80: selects the first 10%, the inner 30%, and the final 20% of *all* slits as containing sky. Setting user_regions = user will attempt to load any SkyRegions files generated by the user via the pypeit_skysub_regions tool. =================== ========== ======= ======= =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== @@ -784,18 +784,18 @@ SlitMaskPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.SlitMaskPar` =========================== ========== ======= ======= ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options 
Default Description +Key Type Options Default Description =========================== ========== ======= ======= ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -``assign_obj`` bool .. False If SlitMask object was generated, assign RA,DEC,name to detected objects -``bright_maskdef_id`` int .. .. `maskdef_id` (corresponding e.g., to `dSlitId` and `Slit_Number` in the DEIMOS/LRIS and MOSFIRE slitmask design, respectively) of a slit containing a bright object that will be used to compute the slitmask offset. This parameter is optional and is ignored if ``slitmask_offset`` is provided. -``extract_missing_objs`` bool .. False Force extraction of undetected objects at the location expected from the slitmask design. -``missing_objs_boxcar_rad`` int, float .. 1.0 Indicates the boxcar radius in arcsec for the force extraction of undetected objects. +``assign_obj`` bool .. False If SlitMask object was generated, assign RA,DEC,name to detected objects +``bright_maskdef_id`` int .. .. `maskdef_id` (corresponding e.g., to `dSlitId` and `Slit_Number` in the DEIMOS/LRIS and MOSFIRE slitmask design, respectively) of a slit containing a bright object that will be used to compute the slitmask offset. This parameter is optional and is ignored if ``slitmask_offset`` is provided. +``extract_missing_objs`` bool .. False Force extraction of undetected objects at the location expected from the slitmask design. +``missing_objs_boxcar_rad`` int, float .. 1.0 Indicates the boxcar radius in arcsec for the force extraction of undetected objects. 
``missing_objs_fwhm`` int, float .. .. Indicates the FWHM in arcsec for the force extraction of undetected objects. PypeIt will try to determine the FWHM from the flux profile (by using ``missing_objs_fwhm`` as initial guess). If the FWHM cannot be determined, ``missing_objs_fwhm`` will be assumed. If you do not want PypeIt to try to determine the FWHM set the parameter ``use_user_fwhm`` in ``ExtractionPar`` to True. If ``missing_objs_fwhm`` is ``None`` (which is the default) PypeIt will use the median FWHM of all the detected objects. -``obj_toler`` int, float .. 1.0 If slitmask design information is provided, and slit matching is performed (``use_maskdesign = True`` in ``EdgeTracePar``), this parameter provides the desired tolerance (arcsec) to match sources to targeted objects -``slitmask_offset`` int, float .. .. User-provided slitmask offset (pixels) from the position expected by the slitmask design. This is optional, and if set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``. -``snr_thrshd`` int, float .. 50.0 Objects detected above this S/N threshold will be used to compute the slitmask offset. This is the default behaviour for DEIMOS unless ``slitmask_offset``, ``bright_maskdef_id`` or ``use_alignbox`` is set. -``use_alignbox`` bool .. False Use stars in alignment boxes to compute the slitmask offset. If this is set to ``True`` PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id`` -``use_dither_offset`` bool .. False Use the dither offset recorded in the header of science frames as the value of the slitmask offset. This is currently only available for Keck MOSFIRE reduction and it is set as the default for this instrument. If set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``. However, it is ignored if ``slitmask_offset`` is provided. +``obj_toler`` int, float .. 
1.0 If slitmask design information is provided, and slit matching is performed (``use_maskdesign = True`` in ``EdgeTracePar``), this parameter provides the desired tolerance (arcsec) to match sources to targeted objects +``slitmask_offset`` int, float .. .. User-provided slitmask offset (pixels) from the position expected by the slitmask design. This is optional, and if set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``. +``snr_thrshd`` int, float .. 50.0 Objects detected above this S/N threshold will be used to compute the slitmask offset. This is the default behaviour for DEIMOS unless ``slitmask_offset``, ``bright_maskdef_id`` or ``use_alignbox`` is set. +``use_alignbox`` bool .. False Use stars in alignment boxes to compute the slitmask offset. If this is set to ``True`` PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id`` +``use_dither_offset`` bool .. False Use the dither offset recorded in the header of science frames as the value of the slitmask offset. This is currently only available for Keck MOSFIRE reduction and it is set as the default for this instrument. If set PypeIt will NOT compute the offset using ``snr_thrshd`` or ``bright_maskdef_id``. However, it is ignored if ``slitmask_offset`` is provided. 
=========================== ========== ======= ======= ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== @@ -809,12 +809,12 @@ FrameGroupPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.FrameGroupPar` ============= =============================================== ============================================================================================================================================================ ============================ =============================================================================================================================================================================================================================================================== -Key Type Options Default Description +Key Type Options Default Description ============= =============================================== ============================================================================================================================================================ ============================ =============================================================================================================================================================================================================================================================== ``exprng`` list .. None, None Used in identifying frames of this type. This sets the minimum and maximum allowed exposure times. There must be two items in the list. 
Use None to indicate no limit; i.e., to select exposures with any time greater than 30 sec, use exprng = [30, None]. -``frametype`` str ``align``, ``arc``, ``bias``, ``dark``, ``pinhole``, ``pixelflat``, ``illumflat``, ``lampoffflats``, ``science``, ``standard``, ``trace``, ``tilt``, ``sky`` ``science`` Frame type. Options are: align, arc, bias, dark, pinhole, pixelflat, illumflat, lampoffflats, science, standard, trace, tilt, sky -``process`` :class:`~pypeit.par.pypeitpar.ProcessImagesPar` .. `ProcessImagesPar Keywords`_ Low level parameters used for basic image processing -``useframe`` str .. .. A calibrations file to use if it exists. +``frametype`` str ``align``, ``arc``, ``bias``, ``dark``, ``pinhole``, ``pixelflat``, ``illumflat``, ``lampoffflats``, ``science``, ``standard``, ``trace``, ``tilt``, ``sky`` ``science`` Frame type. Options are: align, arc, bias, dark, pinhole, pixelflat, illumflat, lampoffflats, science, standard, trace, tilt, sky +``process`` :class:`~pypeit.par.pypeitpar.ProcessImagesPar` .. `ProcessImagesPar Keywords`_ Low level parameters used for basic image processing +``useframe`` str .. .. A calibrations file to use if it exists. 
============= =============================================== ============================================================================================================================================================ ============================ =============================================================================================================================================================================================================================================================== @@ -828,39 +828,38 @@ ProcessImagesPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ProcessImagesPar` ======================== ========== ====================================== ========== ============================================================================================================================================================================================================================================================================================================================================================ -Key Type Options Default Description +Key Type Options Default Description ======================== ========== ====================================== ========== ============================================================================================================================================================================================================================================================================================================================================================ -``apply_gain`` bool .. True Convert the ADUs to electrons using the detector gain -``clip`` bool .. True Perform sigma clipping when combining. Only used with combine=mean -``comb_sigrej`` float .. .. Sigma-clipping level for when clip=True; Use None for automatic limit (recommended). -``combine`` str ``median``, ``mean`` ``mean`` Method used to combine multiple frames. Options are: median, mean +``apply_gain`` bool .. 
True Convert the ADUs to electrons using the detector gain +``clip`` bool .. True Perform sigma clipping when combining. Only used with combine=mean +``comb_sigrej`` float .. .. Sigma-clipping level for when clip=True; Use None for automatic limit (recommended). +``combine`` str ``median``, ``mean`` ``mean`` Method used to combine multiple frames. Options are: median, mean ``dark_expscale`` bool .. False If designated dark frames are used and have a different exposure time than the science frames, scale the counts by the by the ratio in the exposure times to adjust the dark counts for the difference in exposure time. WARNING: You should always take dark frames that have the same exposure time as your science frames, so use this option with care! -``empirical_rn`` bool .. False If True, use the standard deviation in the overscan region to measure an empirical readnoise to use in the noise model. -``grow`` int, float .. 1.5 Factor by which to expand regions with cosmic rays detected by the LA cosmics routine. -``lamaxiter`` int .. 1 Maximum number of iterations for LA cosmics routine. -``mask_cr`` bool .. False Identify CRs and mask them -``n_lohi`` list .. 0, 0 Number of pixels to reject at the lowest and highest ends of the distribution; i.e., n_lohi = low, high. Use None for no limit. -``noise_floor`` float .. 0.0 Impose a noise floor by adding the provided fraction of the bias- and dark-subtracted electron counts to the error budget. E.g., a value of 0.01 means that the S/N of the counts in the image will never be greater than 100. -``objlim`` int, float .. 3.0 Object detection limit in LA cosmics routine -``orient`` bool .. True Orient the raw image into the PypeIt frame -``overscan_method`` str ``polynomial``, ``savgol``, ``median`` ``savgol`` Method used to fit the overscan. Options are: polynomial, savgol, median -``overscan_par`` int, list .. 5, 65 Parameters for the overscan subtraction. 
For 'polynomial', set overcan_par = order, number of pixels, number of repeats ; for 'savgol', set overscan_par = order, window size ; for 'median', set overscan_par = None or omit the keyword. -``rmcompact`` bool .. True Remove compact detections in LA cosmics routine -``satpix`` str ``reject``, ``force``, ``nothing`` ``reject`` Handling of saturated pixels. Options are: reject, force, nothing -``shot_noise`` bool .. True Use the bias- and dark-subtracted image to calculate and include electron count shot noise in the image processing error budget -``sigclip`` int, float .. 4.5 Sigma level for rejection in LA cosmics routine -``sigfrac`` int, float .. 0.3 Fraction for the lower clipping threshold in LA cosmics routine. -``spat_flexure_correct`` bool .. False Correct slits, illumination flat, etc. for flexure -``subtract_continuum`` bool .. False Subtract off the continuum level from an image. This parameter should only be set to True to combine arcs with multiple different lamps. For all other cases, this parameter should probably be False. -``subtract_scattlight`` bool .. False Subtract off the scattered light from an image. This parameter should only be set to True for spectrographs that have dedicated methods to subtract scattered light. For all other cases, this parameter should be False. -``trim`` bool .. True Trim the image to the detector supplied region -``use_biasimage`` bool .. True Use a bias image. If True, one or more must be supplied in the PypeIt file. -``use_darkimage`` bool .. False Subtract off a dark image. If True, one or more darks must be provided. -``use_illumflat`` bool .. True Use the illumination flat to correct for the illumination profile of each slit. -``use_overscan`` bool .. True Subtract off the overscan. Detector *must* have one or code will crash. -``use_pattern`` bool .. False Subtract off a detector pattern. This pattern is assumed to be sinusoidal along one direction, with a frequency that is constant across the detector. 
-``use_pixelflat`` bool .. True Use the pixel flat to make pixel-level corrections. A pixelflat image must be provied. -``use_specillum`` bool .. False Use the relative spectral illumination profiles to correct the spectral illumination profile of each slit. This is primarily used for IFUs. To use this, you must set ``slit_illum_relative=True`` in the ``flatfield`` parameter set! +``empirical_rn`` bool .. False If True, use the standard deviation in the overscan region to measure an empirical readnoise to use in the noise model. +``grow`` int, float .. 1.5 Factor by which to expand regions with cosmic rays detected by the LA cosmics routine. +``lamaxiter`` int .. 1 Maximum number of iterations for LA cosmics routine. +``mask_cr`` bool .. False Identify CRs and mask them +``n_lohi`` list .. 0, 0 Number of pixels to reject at the lowest and highest ends of the distribution; i.e., n_lohi = low, high. Use None for no limit. +``noise_floor`` float .. 0.0 Impose a noise floor by adding the provided fraction of the bias- and dark-subtracted electron counts to the error budget. E.g., a value of 0.01 means that the S/N of the counts in the image will never be greater than 100. +``objlim`` int, float .. 3.0 Object detection limit in LA cosmics routine +``orient`` bool .. True Orient the raw image into the PypeIt frame +``overscan_method`` str ``polynomial``, ``savgol``, ``median`` ``savgol`` Method used to fit the overscan. Options are: polynomial, savgol, median +``overscan_par`` int, list .. 5, 65 Parameters for the overscan subtraction. For 'polynomial', set overcan_par = order, number of pixels, number of repeats ; for 'savgol', set overscan_par = order, window size ; for 'median', set overscan_par = None or omit the keyword. +``rmcompact`` bool .. True Remove compact detections in LA cosmics routine +``satpix`` str ``reject``, ``force``, ``nothing`` ``reject`` Handling of saturated pixels. Options are: reject, force, nothing +``shot_noise`` bool .. 
True       Use the bias- and dark-subtracted image to calculate and include electron count shot noise in the image processing error budget +``sigclip``              int, float ..                                      4.5        Sigma level for rejection in LA cosmics routine +``sigfrac``              int, float ..                                      0.3        Fraction for the lower clipping threshold in LA cosmics routine. +``spat_flexure_correct`` bool       ..                                      False      Correct slits, illumination flat, etc. for flexure +``subtract_continuum``   bool       ..                                      False      Subtract off the continuum level from an image. This parameter should only be set to True to combine arcs with multiple different lamps. For all other cases, this parameter should probably be False. +``trim``                 bool       ..                                      True       Trim the image to the detector supplied region +``use_biasimage``        bool       ..                                      True       Use a bias image. If True, one or more must be supplied in the PypeIt file. +``use_darkimage``        bool       ..                                      False      Subtract off a dark image. If True, one or more darks must be provided. +``use_illumflat``        bool       ..                                      True       Use the illumination flat to correct for the illumination profile of each slit. +``use_overscan``         bool       ..                                      True       Subtract off the overscan. Detector *must* have one or code will crash. +``use_pattern``          bool       ..                                      False      Subtract off a detector pattern. This pattern is assumed to be sinusoidal along one direction, with a frequency that is constant across the detector. +``use_pixelflat``        bool       ..                                      True       Use the pixel flat to make pixel-level corrections. A pixelflat image must be provided. +``use_specillum``        bool       ..                                      False      Use the relative spectral illumination profiles to correct the spectral illumination profile of each slit. This is primarily used for IFUs. To use this, you must set ``slit_illum_relative=True`` in the ``flatfield`` parameter set!
======================== ========== ====================================== ========== ============================================================================================================================================================================================================================================================================================================================================================ @@ -874,24 +873,24 @@ SensFuncPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.SensFuncPar` ======================= ============================================== ================ =========================== ============================================================================================================================================================================================================================================================================================================================================================================================ -Key Type Options Default Description +Key Type Options Default Description ======================= ============================================== ================ =========================== ============================================================================================================================================================================================================================================================================================================================================================================================ -``IR`` :class:`~pypeit.par.pypeitpar.TelluricPar` .. `TelluricPar Keywords`_ Parameters for the IR sensfunc algorithm -``UVIS`` :class:`~pypeit.par.pypeitpar.SensfuncUVISPar` .. `SensfuncUVISPar Keywords`_ Parameters for the UVIS sensfunc algorithm +``IR`` :class:`~pypeit.par.pypeitpar.TelluricPar` .. 
`TelluricPar Keywords`_ Parameters for the IR sensfunc algorithm +``UVIS`` :class:`~pypeit.par.pypeitpar.SensfuncUVISPar` .. `SensfuncUVISPar Keywords`_ Parameters for the UVIS sensfunc algorithm ``algorithm`` str ``UVIS``, ``IR`` ``UVIS`` Specify the algorithm for computing the sensitivity function. The options are: (1) UVIS = Should be used for data with :math:`\lambda < 7000` A. No detailed model of telluric absorption but corrects for atmospheric extinction. (2) IR = Should be used for data with :math:`\lambda > 7000` A. Peforms joint fit for sensitivity function and telluric absorption using HITRAN models. -``extrap_blu`` float .. 0.1 Fraction of minimum wavelength coverage to grow the wavelength coverage of the sensitivitity function in the blue direction (`i.e.`, if the standard star spectrum cuts off at ``wave_min``) the sensfunc will be extrapolated to cover down to (1.0 - ``extrap_blu``) * ``wave_min`` -``extrap_red`` float .. 0.1 Fraction of maximum wavelength coverage to grow the wavelength coverage of the sensitivitity function in the red direction (`i.e.`, if the standard star spectrumcuts off at ``wave_max``) the sensfunc will be extrapolated to cover up to (1.0 + ``extrap_red``) * ``wave_max`` -``flatfile`` str .. .. Flat field file to be used if the sensitivity function model will utilize the blaze function computed from a flat field file in the Calibrations directory, e.g.Calibrations/Flat_A_0_DET01.fits -``hydrogen_mask_wid`` float .. 10.0 Mask width from line center for hydrogen recombination lines in Angstroms (total mask width is 2x this value). -``mask_helium_lines`` bool .. False Mask certain ``HeII`` recombination lines prominent in O-type stars in the sensitivity function fit A region equal to 0.5 * ``hydrogen_mask_wid`` on either side of the line center is masked. -``mask_hydrogen_lines`` bool .. True Mask hydrogen Balmer, Paschen, Brackett, and Pfund recombination lines in the sensitivity function fit. 
A region equal to ``hydrogen_mask_wid`` on either side of the line center is masked. -``multi_spec_det`` list .. .. List of detectors (identified by their string name, like DET01) to splice together for multi-detector instruments (e.g. DEIMOS). It is assumed that there is *no* overlap in wavelength across detectors (might be ok if there is). If entered as a list of integers, they should be converted to the detector name. **Cannot be used with detector mosaics.** -``polyorder`` int, list .. 5 Polynomial order for sensitivity function fitting -``samp_fact`` float .. 1.5 Sampling factor to make the wavelength grid for sensitivity function finer or coarser. samp_fact > 1.0 oversamples (finer), samp_fact < 1.0 undersamples (coarser). -``star_dec`` float .. .. DEC of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent) -``star_mag`` float .. .. Magnitude of the standard star (for near-IR mainly) -``star_ra`` float .. .. RA of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent) -``star_type`` str .. .. Spectral type of the standard star (for near-IR mainly) +``extrap_blu`` float .. 0.1 Fraction of minimum wavelength coverage to grow the wavelength coverage of the sensitivitity function in the blue direction (`i.e.`, if the standard star spectrum cuts off at ``wave_min``) the sensfunc will be extrapolated to cover down to (1.0 - ``extrap_blu``) * ``wave_min`` +``extrap_red`` float .. 0.1 Fraction of maximum wavelength coverage to grow the wavelength coverage of the sensitivitity function in the red direction (`i.e.`, if the standard star spectrumcuts off at ``wave_max``) the sensfunc will be extrapolated to cover up to (1.0 + ``extrap_red``) * ``wave_max`` +``flatfile`` str .. .. 
Flat field file to be used if the sensitivity function model will utilize the blaze function computed from a flat field file in the Calibrations directory, e.g.Calibrations/Flat_A_0_DET01.fits +``hydrogen_mask_wid`` float .. 10.0 Mask width from line center for hydrogen recombination lines in Angstroms (total mask width is 2x this value). +``mask_helium_lines`` bool .. False Mask certain ``HeII`` recombination lines prominent in O-type stars in the sensitivity function fit A region equal to 0.5 * ``hydrogen_mask_wid`` on either side of the line center is masked. +``mask_hydrogen_lines`` bool .. True Mask hydrogen Balmer, Paschen, Brackett, and Pfund recombination lines in the sensitivity function fit. A region equal to ``hydrogen_mask_wid`` on either side of the line center is masked. +``multi_spec_det`` list .. .. List of detectors (identified by their string name, like DET01) to splice together for multi-detector instruments (e.g. DEIMOS). It is assumed that there is *no* overlap in wavelength across detectors (might be ok if there is). If entered as a list of integers, they should be converted to the detector name. **Cannot be used with detector mosaics.** +``polyorder`` int, list .. 5 Polynomial order for sensitivity function fitting +``samp_fact`` float .. 1.5 Sampling factor to make the wavelength grid for sensitivity function finer or coarser. samp_fact > 1.0 oversamples (finer), samp_fact < 1.0 undersamples (coarser). +``star_dec`` float .. .. DEC of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent) +``star_mag`` float .. .. Magnitude of the standard star (for near-IR mainly) +``star_ra`` float .. .. RA of the standard star. This will override values in the header (`i.e.`, if they are wrong or absent) +``star_type`` str .. .. 
Spectral type of the standard star (for near-IR mainly) ======================= ============================================== ================ =========================== ============================================================================================================================================================================================================================================================================================================================================================================================ @@ -905,20 +904,20 @@ SensfuncUVISPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.SensfuncUVISPar` ==================== ========== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= -Key Type Options Default Description +Key Type Options Default Description ==================== ========== ======= =========== 
========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= -``extinct_correct`` bool .. True If ``extinct_correct=True`` the code will use an atmospheric extinction model to extinction correct the data below 10000A. Note that this correction makes no sense if one is telluric correcting and this shold be set to False +``extinct_correct`` bool .. True If ``extinct_correct=True`` the code will use an atmospheric extinction model to extinction correct the data below 10000A. Note that this correction makes no sense if one is telluric correcting and this shold be set to False ``extinct_file`` str .. ``closest`` If ``extinct_file='closest'`` the code will select the PypeIt-included extinction file for the closest observatory (within 5 deg, geographic coordinates) to the telescope identified in ``std_file`` (see :ref:`extinction_correction` for the list of currently included files). If constructing a sesitivity function for a telescope not within 5 deg of a listed observatory, this parameter may be set to the name of one of the listed extinction files. Alternatively, a custom extinction file may be installed in the PypeIt cache using the ``pypeit_install_extinctfile`` script; this parameter may then be set to the name of the custom extinction file. -``nresln`` int, float .. 20 Parameter governing the spacing of the bspline breakpoints in terms of number of resolution elements. 
-``polycorrect`` bool .. True Whether you want to correct the sensfunc with polynomial in the telluric and recombination line regions -``polyfunc`` bool .. False Whether you want to use the polynomial fit as your final SENSFUNC -``resolution`` int, float .. 3000.0 Expected resolution of the standard star spectrum. This should be measured from the data. -``sensfunc`` str .. .. FITS file that contains or will contain the sensitivity function. -``std_file`` str .. .. Standard star file to generate sensfunc -``std_obj_id`` str, int .. .. Specifies object in spec1d file to use as standard. The brightest object found is used otherwise. -``telluric`` bool .. False If ``telluric=True`` the code creates a synthetic standard star spectrum using the Kurucz models, the sens func is created setting nresln=1.5 it contains the correction for telluric lines. -``telluric_correct`` bool .. False If ``telluric_correct=True`` the code will grab the sens_dict['telluric'] tag from the sensfunc dictionary and apply it to the data. -``trans_thresh`` float .. 0.9 Parameter for selecting telluric regions which are masked. Locations below this transmission value are masked. If you have significant telluric absorption you should be using telluric.sensnfunc_telluric +``nresln`` int, float .. 20 Parameter governing the spacing of the bspline breakpoints in terms of number of resolution elements. +``polycorrect`` bool .. True Whether you want to correct the sensfunc with polynomial in the telluric and recombination line regions +``polyfunc`` bool .. False Whether you want to use the polynomial fit as your final SENSFUNC +``resolution`` int, float .. 3000.0 Expected resolution of the standard star spectrum. This should be measured from the data. +``sensfunc`` str .. .. FITS file that contains or will contain the sensitivity function. +``std_file`` str .. .. Standard star file to generate sensfunc +``std_obj_id`` str, int .. .. Specifies object in spec1d file to use as standard. 
The brightest object found is used otherwise. +``telluric`` bool .. False If ``telluric=True`` the code creates a synthetic standard star spectrum using the Kurucz models, the sens func is created setting nresln=1.5 it contains the correction for telluric lines. +``telluric_correct`` bool .. False If ``telluric_correct=True`` the code will grab the sens_dict['telluric'] tag from the sensfunc dictionary and apply it to the data. +``trans_thresh`` float .. 0.9 Parameter for selecting telluric regions which are masked. Locations below this transmission value are masked. If you have significant telluric absorption you should be using telluric.sensnfunc_telluric ==================== ========== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= @@ -932,45 +931,45 @@ TelluricPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.TelluricPar` ======================= ================== ======= ========================== 
================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= -Key Type Options Default Description +Key Type Options Default Description ======================= ================== ======= ========================== ================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= -``bal_wv_min_max`` list, ndarray .. .. 
Min/max wavelength of broad absorption features. If there are several BAL features, the format for this mask is [wave_min_bal1, wave_max_bal1,wave_min_bal2, wave_max_bal2,...]. These masked pixels will be ignored during the fitting. -``bounds_norm`` tuple .. (0.1, 3.0) Normalization bounds for scaling the initial object model. -``delta_coeff_bounds`` tuple .. (-20.0, 20.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. -``delta_redshift`` float .. 0.1 Range within the redshift can be varied for telluric fitting, i.e. the code performs a bounded optimization within the redshift +- delta_redshift -``disp`` bool .. False Argument for scipy.optimize.differential_evolution which will display status messages to the screen indicating the status of the optimization. See documentation for telluric.Telluric for a description of the output and how to know if things are working well. -``fit_wv_min_max`` list .. .. Pixels within this mask will be used during the fitting. The format is the same with bal_wv_min_max, but this mask is good pixel masks. -``func`` str .. ``legendre`` Polynomial model function -``lower`` int, float .. 3.0 Lower rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. The distribution of input chi (defined by chi = (data - model)/sigma) values is analyzed, and a correction factor to the formal error sigma_corr is returned which is multiplied into the formal errors. In this way, a rejection threshold of i.e. 3-sigma, will always correspond to roughly the same percentile. This renormalization is performed with coadd1d.renormalize_errors function, and guarantees that rejection is not too agressive in cases where the empirical errors determined from the chi-distribution differ significantly from the formal noise which is used to determine chi. -``mask_lyman_a`` bool .. 
True Mask the blueward of Lyman-alpha line during the fitting? -``maxiter`` int .. 2 Maximum number of iterations for the telluric + object model fitting. The code performs multiple iterations rejecting outliers at each step. The fit is then performed anew to the remaining good pixels. For this reason if you run with the disp=True option, you will see that the f(x) loss function gets progressively better during the iterations. -``minmax_coeff_bounds`` tuple .. (-5.0, 5.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. Bounds are currently determined as follows. We compute an initial fit to the sensfunc in the :func:`~pypeit.core.telluric.init_sensfunc_model` function. That deterines a set of coefficients. The bounds are then determined according to: [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], obj_params['minmax_coeff_bounds'][0]), np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1], obj_params['minmax_coeff_bounds'][1]))] -``model`` str .. ``exp`` Types of polynomial model. Options are poly, square, exp corresponding to normal polynomial, squared polynomial, or exponentiated polynomial -``npca`` int .. 8 Number of pca for the objmodel=qso qso PCA fit -``objmodel`` str .. .. The object model to be used for telluric fitting. Currently the options are: qso, star, and poly -``only_orders`` int, list, ndarray .. .. Order number, or list of order numbers if you only want to fit specific orders -``pca_file`` str .. ``qso_pca_1200_3100.fits`` Fits file containing quasar PCA model. Needed for objmodel=qso. NOTE: This parameter no longer includes the full pathname to the Telluric Model file, but is just the filename of the model itself. -``pca_lower`` int, float .. 1220.0 Minimum wavelength for the qso pca model -``pca_upper`` int, float .. 3100.0 Maximum wavelength for the qso pca model -``pix_shift_bounds`` tuple .. (-5.0, 5.0) Bounds for the pixel shift optimization in telluric model fit in units of pixels. 
The atmosphere will be allowed to shift within this range during the fit. -``polish`` bool .. True If True then differential evolution will perform an additional optimizatino at the end to polish the best fit at the end, which can improve the optimization slightly. See scipy.optimize.differential_evolution for details. -``polyorder`` int .. 3 Order of the polynomial model fit -``popsize`` int .. 30 A multiplier for setting the total population size for the differential evolution optimization. See scipy.optimize.differential_evolution for details. -``recombination`` int, float .. 0.7 The recombination constant for the differential evolution optimization. This should be in the range [0, 1]. See scipy.optimize.differential_evolution for details. -``redshift`` int, float .. 0.0 The redshift for the object model. This is currently only used by objmodel=qso -``resln_frac_bounds`` tuple .. (0.5, 1.5) Bounds for the resolution fit optimization which is part of the telluric model. This range is in units of the resln_guess, so the (0.5, 1.5) would bound the spectral resolution fit to be within the range bounds_resln = (0.5*resln_guess, 1.5*resln_guess) -``resln_guess`` int, float .. .. A guess for the resolution of your spectrum expressed as lambda/dlambda. The resolution is fit explicitly as part of the telluric model fitting, but this guess helps determine the bounds for the optimization (see next). If not provided, the wavelength sampling of your spectrum will be used and the resolution calculated using a typical sampling of 3 spectral pixels per resolution element. -``seed`` int .. 777 An initial seed for the differential evolution optimization, which is a random process. The default is a seed = 777 which will be used to generate a unique seed for every order. A specific seed is used because otherwise the random number generator will use the time for the seed, and the results will not be reproducible. -``sn_clip`` int, float .. 
30.0 This adds an error floor to the ivar, preventing too much rejection at high-S/N (`i.e.`, standard stars, bright objects) using the function utils.clip_ivar. A small erorr is added to the input ivar so that the output ivar_out will never give S/N greater than sn_clip. This prevents overly aggressive rejection in high S/N ratio spectra which neverthless differ at a level greater than the formal S/N due to the fact that our telluric models are only good to about 3%. -``star_dec`` float .. .. Object declination in decimal deg -``star_mag`` float, int .. .. AB magnitude in V band -``star_ra`` float .. .. Object right-ascension in decimal deg -``star_type`` str .. .. stellar type +``bal_wv_min_max`` list, ndarray .. .. Min/max wavelength of broad absorption features. If there are several BAL features, the format for this mask is [wave_min_bal1, wave_max_bal1,wave_min_bal2, wave_max_bal2,...]. These masked pixels will be ignored during the fitting. +``bounds_norm`` tuple .. (0.1, 3.0) Normalization bounds for scaling the initial object model. +``delta_coeff_bounds`` tuple .. (-20.0, 20.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. +``delta_redshift`` float .. 0.1 Range within the redshift can be varied for telluric fitting, i.e. the code performs a bounded optimization within the redshift +- delta_redshift +``disp`` bool .. False Argument for scipy.optimize.differential_evolution which will display status messages to the screen indicating the status of the optimization. See documentation for telluric.Telluric for a description of the output and how to know if things are working well. +``fit_wv_min_max`` list .. .. Pixels within this mask will be used during the fitting. The format is the same with bal_wv_min_max, but this mask is good pixel masks. +``func`` str .. ``legendre`` Polynomial model function +``lower`` int, float .. 
3.0 Lower rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. The distribution of input chi (defined by chi = (data - model)/sigma) values is analyzed, and a correction factor to the formal error sigma_corr is returned which is multiplied into the formal errors. In this way, a rejection threshold of i.e. 3-sigma, will always correspond to roughly the same percentile. This renormalization is performed with coadd1d.renormalize_errors function, and guarantees that rejection is not too agressive in cases where the empirical errors determined from the chi-distribution differ significantly from the formal noise which is used to determine chi. +``mask_lyman_a`` bool .. True Mask the blueward of Lyman-alpha line during the fitting? +``maxiter`` int .. 2 Maximum number of iterations for the telluric + object model fitting. The code performs multiple iterations rejecting outliers at each step. The fit is then performed anew to the remaining good pixels. For this reason if you run with the disp=True option, you will see that the f(x) loss function gets progressively better during the iterations. +``minmax_coeff_bounds`` tuple .. (-5.0, 5.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. Bounds are currently determined as follows. We compute an initial fit to the sensfunc in the :func:`~pypeit.core.telluric.init_sensfunc_model` function. That deterines a set of coefficients. The bounds are then determined according to: [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], obj_params['minmax_coeff_bounds'][0]), np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1], obj_params['minmax_coeff_bounds'][1]))] +``model`` str .. ``exp`` Types of polynomial model. Options are poly, square, exp corresponding to normal polynomial, squared polynomial, or exponentiated polynomial +``npca`` int .. 
8 Number of pca for the objmodel=qso qso PCA fit +``objmodel`` str .. .. The object model to be used for telluric fitting. Currently the options are: qso, star, and poly +``only_orders`` int, list, ndarray .. .. Order number, or list of order numbers if you only want to fit specific orders +``pca_file`` str .. ``qso_pca_1200_3100.fits`` Fits file containing quasar PCA model. Needed for objmodel=qso. NOTE: This parameter no longer includes the full pathname to the Telluric Model file, but is just the filename of the model itself. +``pca_lower`` int, float .. 1220.0 Minimum wavelength for the qso pca model +``pca_upper`` int, float .. 3100.0 Maximum wavelength for the qso pca model +``pix_shift_bounds`` tuple .. (-5.0, 5.0) Bounds for the pixel shift optimization in telluric model fit in units of pixels. The atmosphere will be allowed to shift within this range during the fit. +``polish`` bool .. True If True then differential evolution will perform an additional optimizatino at the end to polish the best fit at the end, which can improve the optimization slightly. See scipy.optimize.differential_evolution for details. +``polyorder`` int .. 3 Order of the polynomial model fit +``popsize`` int .. 30 A multiplier for setting the total population size for the differential evolution optimization. See scipy.optimize.differential_evolution for details. +``recombination`` int, float .. 0.7 The recombination constant for the differential evolution optimization. This should be in the range [0, 1]. See scipy.optimize.differential_evolution for details. +``redshift`` int, float .. 0.0 The redshift for the object model. This is currently only used by objmodel=qso +``resln_frac_bounds`` tuple .. (0.5, 1.5) Bounds for the resolution fit optimization which is part of the telluric model. 
This range is in units of the resln_guess, so the (0.5, 1.5) would bound the spectral resolution fit to be within the range bounds_resln = (0.5*resln_guess, 1.5*resln_guess) +``resln_guess`` int, float .. .. A guess for the resolution of your spectrum expressed as lambda/dlambda. The resolution is fit explicitly as part of the telluric model fitting, but this guess helps determine the bounds for the optimization (see next). If not provided, the wavelength sampling of your spectrum will be used and the resolution calculated using a typical sampling of 3 spectral pixels per resolution element. +``seed`` int .. 777 An initial seed for the differential evolution optimization, which is a random process. The default is a seed = 777 which will be used to generate a unique seed for every order. A specific seed is used because otherwise the random number generator will use the time for the seed, and the results will not be reproducible. +``sn_clip`` int, float .. 30.0 This adds an error floor to the ivar, preventing too much rejection at high-S/N (`i.e.`, standard stars, bright objects) using the function utils.clip_ivar. A small erorr is added to the input ivar so that the output ivar_out will never give S/N greater than sn_clip. This prevents overly aggressive rejection in high S/N ratio spectra which neverthless differ at a level greater than the formal S/N due to the fact that our telluric models are only good to about 3%. +``star_dec`` float .. .. Object declination in decimal deg +``star_mag`` float, int .. .. AB magnitude in V band +``star_ra`` float .. .. Object right-ascension in decimal deg +``star_type`` str .. .. stellar type ``sticky`` bool .. True Sticky parameter for the utils.djs_reject algorithm for iterative model fit rejection. If set to True then points rejected from a previous iteration are kept rejected, in other words the bad pixel mask is the OR of all previous iterations and rejected pixels accumulate. 
If set to False, the bad pixel mask is the mask from the previous iteration, and if the model fit changes between iterations, points can alternate from being rejected to not rejected. At present this code only performs optimizations with differential evolution and experience shows that sticky needs to be True in order for these to converge. This is because the outliers can be so large that they dominate the loss function, and one never iteratively converges to a good model fit. In other words, the deformations in the model between iterations with sticky=False are too small to approach a reasonable fit. -``telgridfile`` str .. .. File containing the telluric grid for the observatory in question. These grids are generated from HITRAN models for each observatory using nominal site parameters. They must be downloaded from the GoogleDrive and installed in your PypeIt installation via the pypeit_install_telluric script. NOTE: This parameter no longer includes the full pathname to the Telluric Grid file, but is just the filename of the grid itself. -``tell_norm_thresh`` int, float .. 0.9 Threshold of telluric absorption region -``tol`` float .. 0.001 Relative tolerance for converage of the differential evolution optimization. See scipy.optimize.differential_evolution for details. -``upper`` int, float .. 3.0 Upper rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. See above for description. +``telgridfile`` str .. .. File containing the telluric grid for the observatory in question. These grids are generated from HITRAN models for each observatory using nominal site parameters. They must be downloaded from the GoogleDrive and installed in your PypeIt installation via the pypeit_install_telluric script. NOTE: This parameter no longer includes the full pathname to the Telluric Grid file, but is just the filename of the grid itself. 
+``tell_norm_thresh`` int, float .. 0.9 Threshold of telluric absorption region +``tol`` float .. 0.001 Relative tolerance for converage of the differential evolution optimization. See scipy.optimize.differential_evolution for details. +``upper`` int, float .. 3.0 Upper rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. See above for description. ======================= ================== ======= ========================== ================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================= @@ -2912,7 +2911,6 @@ Alterations to the default parameters are: satpix = nothing use_pixelflat = False use_illumflat = False - subtract_scattlight = True [[alignframe]] [[[process]]] satpix = nothing @@ -2929,7 +2927,6 @@ Alterations to the default parameters are: satpix = nothing use_illumflat = False use_pattern = True - subtract_scattlight = True [[lampoffflatsframe]] [[[process]]] satpix = nothing @@ -2944,7 +2941,6 @@ Alterations to the default parameters are: mask_cr = True noise_floor = 0.01 use_pattern = True - subtract_scattlight = True [[flatfield]] 
spec_samp_coarse = 20.0 tweak_slits_thresh = 0.0 @@ -2968,7 +2964,6 @@ Alterations to the default parameters are: noise_floor = 0.01 use_specillum = True use_pattern = True - subtract_scattlight = True [reduce] [[skysub]] no_poly = True @@ -7478,4 +7473,3 @@ Alterations to the default parameters are: mask_cr = True use_overscan = False noise_floor = 0.01 - From 59203abacfef4b555d25621da49f58ac9c388a33 Mon Sep 17 00:00:00 2001 From: joe Date: Fri, 29 Sep 2023 11:20:03 +0200 Subject: [PATCH 45/81] Flipped red side images to try to deal with WCS bug. --- pypeit/display/display.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/display/display.py b/pypeit/display/display.py index 6d3b574199..dbd0c0cda3 100644 --- a/pypeit/display/display.py +++ b/pypeit/display/display.py @@ -443,7 +443,7 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids= def show_trace(viewer, ch, trace, trc_name=None, maskdef_extr=None, manual_extr=None, clear=False, - rotate=False, pstep=50, yval=None, color='blue'): + rotate=False, pstep=3, yval=None, color='blue'): r""" Args: From d593930e0f2c4c47e89aa1adfb93187bcde10191 Mon Sep 17 00:00:00 2001 From: rcooke Date: Fri, 29 Sep 2023 21:03:15 +0100 Subject: [PATCH 46/81] support RL grating --- doc/include/spectrographs_table.rst | 2 +- .../data/arc_lines/reid_arxiv/keck_kcrm_RL.fits | Bin 0 -> 40320 bytes pypeit/spectrographs/keck_kcwi.py | 4 +++- 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 pypeit/data/arc_lines/reid_arxiv/keck_kcrm_RL.fits diff --git a/doc/include/spectrographs_table.rst b/doc/include/spectrographs_table.rst index a1f4674edc..77970d24d0 100644 --- a/doc/include/spectrographs_table.rst +++ b/doc/include/spectrographs_table.rst @@ -18,7 +18,7 @@ jwst_nirspec :class:`~pypeit.spectrographs.jwst_nirspec.JWSTNIRSpec keck_deimos :class:`~pypeit.spectrographs.keck_deimos.KeckDEIMOSSpectrograph` KECK DEIMOS `Link `__ MultiSlit True True Supported gratings: 
600ZD, 830G, 900ZD, 1200B, 1200G; see :doc:`deimos` keck_esi :class:`~pypeit.spectrographs.keck_esi.KeckESISpectrograph` KECK ESI Echelle True False keck_hires :class:`~pypeit.spectrographs.keck_hires.KECKHIRESSpectrograph` KECK HIRES `Link `__ Echelle False False -keck_kcrm :class:`~pypeit.spectrographs.keck_kcwi.KeckKCRMSpectrograph` KECK KCRM `Link `__ IFU True False Supported setups: RM1, RM2, RH3; see :doc:`keck_kcwi` +keck_kcrm :class:`~pypeit.spectrographs.keck_kcwi.KeckKCRMSpectrograph` KECK KCRM `Link `__ IFU True False Supported setups: RL, RM1, RM2, RH3; see :doc:`keck_kcwi` keck_kcwi :class:`~pypeit.spectrographs.keck_kcwi.KeckKCWISpectrograph` KECK KCWI `Link `__ IFU True False Supported setups: BL, BM, BH2; see :doc:`keck_kcwi` keck_lris_blue :class:`~pypeit.spectrographs.keck_lris.KeckLRISBSpectrograph` KECK LRISb `Link `__ MultiSlit True False Blue camera; Current FITS file format; used from May 2009, see :doc:`lris` keck_lris_blue_orig :class:`~pypeit.spectrographs.keck_lris.KeckLRISBOrigSpectrograph` KECK LRISb `Link `__ MultiSlit True False Blue camera; Original FITS file format; used until April 2009; see :doc:`lris` diff --git a/pypeit/data/arc_lines/reid_arxiv/keck_kcrm_RL.fits b/pypeit/data/arc_lines/reid_arxiv/keck_kcrm_RL.fits new file mode 100644 index 0000000000000000000000000000000000000000..081de6726a58a304683cec9a7f48b6b03ca98aac GIT binary patch literal 40320 zcmeFYc{G)8-#5HT=6RlN^E^u-?XNLpOopN|ClwMZR9mG~ltd|skfcOH_IU0}hN47< zBt*%OA!Huj<9A)_S@(UfbzSee@AbUvUF%uTZ~tNKkI&~j9N%L&kK;Vg?QZYn>S%+< zo8$k*XBS=<9~coH5)pOe7(O}zZ)?BH9e*s^KRn1kD(GM6(Z4M1ce&bo{~Lc>kU#&Z zDF2iAApdB8eDul4;D7DqKmO}%;cf4Z;J?gI5BUp^IT8>Yg^vjNU!D&h2@XGYFe3ce ze>nM1-)y{h**IJO8-Kh0lkmU!^&iguo7%r+e^=n|3jAGxzbo)}1^%wU-xc_~0{`b% z;IHkZySl6f|8bo9&*Seu0{=I^?Xq>*>GV&3 z=#=!2&;Opkzw-M>|Eh)@jyds{!~Z>hf93a2fB&E4hbFDNtBuva>yLj{;D7M<@AKgQ zpjdxP|E|E_75KXXe^=oD+6q8TW9`AtUMgcl={nZgf`^)SIYU8m5!m;UBF|hqBGfb= z4``|CAWXhy!N0qN@CVmE8ME&Umg$m)uO0}0)bO$2agorwLVllr5(hPH8#;HLrUK{9 
z&;nyyJk)&p>b@-^o-mV9U7%ru^c~e7SYNybfy?UdH&t4orYp_i$?uyW@Zi+Az5~MD z2kZ2zmk3KL&2;5Zr0=n^xPE^DB;P2_WsT-RO|LZdgX3*rkG^AZ@DUDb`sOx|xK4uD zJ+r5p7yY2-+lPuidv4$nIe00-q91AoA8D6=-~j0xU+{q@dQkJ@Z1LWNWe`-H_o18t zsQKxu@O_+x!B%!sH`1O6H6xpHY)RH2Wt=2`$|4VH#`wg}#`nTT4UkPXsk_gQDdr8J94CnmN_q&zH^+8amS7#Kt1~1*WiW&qpBfIr&F?Oh42tb`N&O zy#wBPhmRU!c&J&f^nJ{vN~j&LdDW+c^sA}uyUw#>ZO0pKDm5Xze!!_Mr-HCXsy=z( z7yyY0>QskgLHYFSc6077AZ?J`=DJn{LbtT`y6Hv&iDjnRIqoK*==6u>uq`+su{P_- z&D{n59NmjUN6Uf4Ui9D%k05NMR7%OlAfA&bWA43|Fm>mB4eKbv+`BW9qq+(GHbVNW zK13k#8gf0Dn*HF@8R|-nMF?pUae=&H4?Ha-=+t{|&6kr3PY_nXy zkX}tJzG9y{HpAGy*&&MoB=xDu*?1k;xVt{DVH{!2w?WL0T0uhMyYq!QS0HH@boQL@ zB`ozcE{%Rg_PVEbBo@tL;|3+uy895;_oV+W!GNWDbN^3AB9ILA474;x8Qgcad*CVo zNSinw=)IB#0X>1*NB0pn9u!;W;>D&uDimniAp0%U%Qy2P8PeaUSD1N_y=msky!0wI zH!8Z4as}b75fON)D8izTW$s!a!rSb=2^e1nw!I5YeRuId0)?G#IoN@h_r22Tt2-zWfC*jbmg|`Sl15SoU8W4fx zJl+*MzY|2O0v_Gifv{`6onq7|w$2S{kiw^!hFrVI(a{RCU*`H1we{utB^fSG_FCSt5!x~50s30_O zt3&)89!P;Ux1Q=d6V|7-{q{@%kb>n{p8B~EDlXLKr1T?w$imSh%T2(=b}xe-i1eW! 
zzy7+{%3zQCY5Hg!@nMf0)W1g)er6sm-1UnHq{HW{l@HW|Se$eS!P5^&;rrx!Uk?D+ zQ{$_5N&P^I+??uh<2!cGwZKK3f$XFB7cNDO5*AO`Z=PX8_Qys8Pp(KXgjj}u8_eVZ zDdzQimA6?8{zL2?VYPlhI-axT+R3Yg>13ixyS?AH0O`ssiP!2Ws9p$4YrCEWq}2C#mDg(^;o!zS+mr^R zYY#8Xu_g)fwHM`A`m|OhZ!fF+6Pi zm1Xq!Fye3V$T_!}6Z+$~T{8G9->jkYkJbeV&l@5IBG7tDx`U07(z&o!v!QTeDZ+Pe zJD*a$Lzua~#mU)}2&8*a6|+7%z}`>L+cUucQl6ujY=b%B%O%?fEAmL6uN=3+2@I(N z_9xdgBY{+~)_a9og$3(Yb0iTF|DemFIbIr=Qk;HFSoQ`Zjw3K2+; zuCN7PYQaAJY&8q6NBYM>2PpzNgxS+C&CmR`K9rijdntIIuoOL*>3)a-q%tw>YcZIS_`ml@-NM@tB+N5!VDz5pP- z4&OnzsRisb^VaE02*26>NL*SEWPP<n(or~5hn#!TzaAe=G6xc|qB8+q8 zKB9`^fb_PFki1^Mo798qI!0WWtoc%hA_xWD7GDkUx4SPH2*7F1D%kkNtw=Xi-qd9!$Rr`VT z)y>&8I)lO0TA@^z1VHN3c(~vA6=6dBok@Ep5lG)ya6<7b3>lBGRS&TiAPs!=KHP7N zO=VY={V-$z>3el~*m?-oret=#!4n6hp)_`$nDZdIH^SZjK{=3y5AJvGy#oAi%zJzb z(ttE-S#91CkNt}FpE;|9{Qr{Xe!J+2HH$1q6&N7<@jn4egQ|cZ9!a{Pj_fDj*EH)+ z!KUMuTn@QtzWpxdwTOQMI*DrHJC6(lX(lnyE1(h#*{`2;>JbLgpFK6a#5UNf-n8r( zhw!`+cSw{m7#g@roLJ2O(jt%FzSd>J+S`z5-F_5*Y3TVp0SJYUvL(3vQ2Z4Jo36VG zHg$rVfMEcnwcC4GOIist+8^3qRK>>ln4;oP zd&n%xuGRrsAYpsPdhvcFkTZI*YPAIQ==JG%w%g3g!A zC*xmi0kSMxE{Ayw{NivRwaY%gZj%VG{HyyJQDsd9tyd=om1Mu+JE~qWl$a zB&z&44;xgYGi{Ig0a^L*yUMqM*sAuB*3DyxSG5)S?9YsSI*u>NzJLR=y4?N@+h$<) zN*&t7*$-rmh2kG#qoBr5sgo#0_F5l*zl;0?_&>R;x`v3?d2DJs!3L~LyV&C9@Icl( zf7VNj6Pt;%byR%|KsMOhxLog#jRpH!pE!a8ve9M%Bi{^$n5ek5WfjsJ^Lyix^AUvM>Az@MiCCOBO`fE|q1ETsH74AFr($ME&m& zVjCp?ov_X+sdw}h4#rd>gq7X#6P>EapR;(QQmQXuCf4(}-$x>lU8Xs@^_U3L zZ1-leqY&@*&Ozqp7_dDO752(y0NK6p(!{=g!W*e`m-8!dK=wG*=%`cxJS8eOb4>k! 
z?B&I7oWw$4s0I!;8Y6zUo-JCsfGeBiSz&_*vJc1kvgB4m#YsVq{wIj{9e8CPJVaQ& zZQ)mpM|dA)-P83uVQN@nSHvK~ewk*ALl3}PB|f|7(KL_)!cVByazY5F=y8#hI3NdY zf8Lk000AbY#R(VMfgFOL#5cNvd0OoL@*r^_A6QZ!)5HOO^5;-NxEhcTwgqk5KEq(q zX?4#PX8`%olY0_0Nvx&m%8{!=DBh8z&Qq!f38Py?92Gr?K#thYAFB8n^cuY7^XHNM z(Jgk}E*&>Fiu(JQ0I_X5D~LsBv(KGL19J3e&CZ9l457+R)p!O|7leuNWYBGWSA#~HvqOq7!6MmX_{uD|;ZU@kX#w-%1<&s7Gp3evE; z3GMN)XdIA}Qg3QA{$P_a9OVgC2wx0o=v)?Ku$b@P4( zKIa_C@<$|)(@#yr7*-P)=|+ZEhH*g7@RBKt@@6p0nst5Y!2|iGo}-t_S&&mLLDv+d z&*F%?Fdj=-*?EEdWdiAM4-{_-O2CGH#9b_WiEuWhJwWX%u*n5Y2Nx58oRi7S-8qVV zbsil2ZGrgQaGg!OQ^2Iesc97K2ju(PcXuDM1>rhsc9Rtz$OZWGE0--nxXC5TR0M!r zxLB6W0l;>!WA^L5JRldfey&Q&1Kbr2mwi8QKrVi=VaMHCZ02M|((YvjkV}%Ze1GwQ zKsdQ|Y#*|JvTxT2|2SbPFg0n+cpId?Vde4q2dbU7cu6#gG~%h zrjnbF#q)qnat`1TcOZPOJg#5<*LUI^fo!KPT0k7@=IflR}d=mfMdHtwEV(%p{y z(LXmIeLl%xuRm1hHH9#vVoEcYl`v2yEEhbA^siC`eO{ITtDZngbr-_5!A4A^aKdz9 zuk{NJgzL6>$S6)=e`J--J?ar|5Q{0j*NEo7zLVB?Kad-zGArtDqxo#%CS`*I^1H?= zt*xvecW1fh3lZhtoc~GUb`>Z|q{lKbT7di^emd^-PS~txld1OgE|5QZ@H<~j1qHV4 zW@17BYb!Z4g*W28x$S-$sKpye&-|2G-4DPYA==Kx>dCcgzg9IK_ zhgH0@%JhId&XZDRs>R^Qz^{K_>j(10(EUW;iww3p;=|HC$lnxJ#gLU|@MTyW^!lqm zrn4FkNw)(|!b*slANij>+LJh!56rl)T}-k7+p; z8zBRy{eZ&yD}I}k1!F^bYf9J%!t8G@_BzHBz8XqRRWc(z=e-+0uJHqhS^?XSSv*j< zkLS8S7bnc_$+G@zzyJ!bYl)g?HSiTvUfo9P3x!|3GEkfcgd@)Qe<^AKiXbz6L#7ol zMfj7qg(AJsmwJxFXn(L2ZXv&j^dglXqRkdS=H?BZ-Dk^zB9_`|b-x|>S+-Fe9q~Ys z2hicxA6=CitO*H12OkN z?{wM*hkKuZf`9u*jlT`JFU(n%{1soJV9{Z|55zCh_LpZ)0YxccC7q1>9((2Dz?6e2U*2Lds+`qt4^I+d0vNTY(?fDgws{^LSZ{Isj zX8;8ZhvEXG0Dp6~&+us)P%OFoMiz_#zoXKzX>k}RRzJEbSC~M=OxgJ4Bxz2ys! 
zH^|S`RFx!p17-WI2A{XJAb7c^?2ab@#Xb@!%C_3t^D1+9|0hGg+ zGZKHNg1r5K`4_BNKnV{#Eje?A!I#{4Dd%V&P$IV;V6~ng(0;rqun9%}qC{M*&ZiQF z%TmpVCI}y!Fm83T1%=a>qb^=e21-o5OojbCxXpZN-Wz8Dl;e3D26-+*U|Uxy$rg?O zlP5-kOuxWB1=2eO${bg!%6o_~U=|S2AJc8mkc~y!H6_7KlK(EdHgnXFo^|75`DVg7_=H$$>jX zLF7TD+L!kzUh3N{U_A%|(?5P5Zz~7NwSt(?tPo)D7S#@~@B_;A1h<}t3Lv-lfuf0Y z9#C$0XlH+{1VKN^E{^-C-kCb=4_=snWKTN3-|>E+++rPAoO=&O@9NrL+86;PtB;zc zn*l!Edpvoj}Q#`aS<}4jgz+zHF_o1xmqey?tF2Y;JSq6M2K?^MmH> z!7UM>%|@j@s6g#`SQPO(M-7BO<`4GaQ2vk3SdQNK2J#!4c#%IAWUmf_H7IYo_<-Q2TvG4!HOrV z6kdVA=j!77iU_|0qMl&2i$sHBcJ9cY%!&VOBtg*)0nPl()2d(v!)+cjD~US6m1;-3&Cr znK4A4f8wTDqV~U!P|Xni2rN!>Un|}*fYP#kOuMxMSVB1d1m@=fr4|2lV_OIIb#5iA zMGe`vEk;U_{IK5#RZUM75rNX)svn`q2FCneropK0l+Kdh>*YaU_{;X;^~5be`F!qK zGm`)qv{Ebu()xk&#W%8}@(BoJ>Ykc=gZzKpq$?fP349z{HkTz3?&JBD^V$%bJZYY; zC5`;|4?Wtm_Xu#h?|y<=BRqfwyM5LHR@}m?FHHSF`JRP8*P%lA6!)R`4+#g9p~&vZ zSvCeoyjBc((hn%Z_8FP2mq4Yqpj}S}2Y<`526v$T9b4X*>beV(Tl?;C@0140k$t z*w=XjWrIW9=6JaQp?ep&8vvj_29`|93@KZFh5cy&h|gn2ec z79V{?n9#R$@eLsYm5*Pda;FEd*KEfhImiGi|M1J-<)t8`Bk({Q-S46bzC7rsRfV;k z)A~q?K=#6S_}0;N3O}m5-ukT=sG?C-?^oBcu2KESbu@pd;yd=dzu^lk7G^)H%n+7T zTwATn$A+uKYVI#1EVYvFyZJjd+Z}T(pPL9&nRe@uiEX2pWYyaS-y{=jThR4 z-?$ytuA+!H+qZfQ*9y#YPiaHw`bIV1e0{LV6C~eHt35c<4^#rbaspWZgd$qf9-5){ z!ElXv*nZ$%uFribmIqYJmk!FOazW@5;iu{$0IK!vFGd+gAR-;8(sLEl4sp-|GHbvy$rBt`{L<6v}&rsnq9;j|f{X4Gb>s(XfWq-I-2^Z^aqeKt{eGYZE-gui900Sa`N4RNNT3F{#XlP9hAlr_MOtd_5 zI?Dp7to$VvjlDpP;F+_YUB<1>&usC{yFXJr$^_-7%+{!n?Z|21gjh#fGCK+Cx&EE#BHxgZoGyQ;i zksEjMTNPn4(T{8Zh=I* zzbG>=%IE44T663n5ZNB7l7vTmnytncaB+k=n&4xHwDCR zIA_SRAimhjf7hRTASDuFa!E4}s3lS_lcl-{m1n5Ni_GoeE9WNkt%;>!wqQq{ho=aV0r!c+Qz`s|bm>j!<1U2kk)2}St1N5*xv(;zpY zzHRaIfJ(jQY7lM!9)y7%bU$vO(hn3#7z%@7glPN(CJa;tVVPIx5y)TW zeuz^p2I?zu7yf6P3BzY@vuswv0rmA%KE9L>ghlt0PIaOFebX>&UYrbaUnjcj%lv>^ zf8XYS^&e2OfAz@93gy>$GBZc90u*X(bVS?{|IY1u?~pXOeeb>_a}TYz%^HSU+L;jk zHkl}Nd6I92f`i&KK0QGBaL#galAfDKo?_=l-)V>^^-P;d^djEG9XT<+dZokCZ3|n0Jn}>2y{f5>= zU7HVprpamR^L=_i9q#ayy|y2eZ$^B&W3T|!(We=|CmcZDa!C7evo}zGU1)uKr55BD 
zbiX>wp!qVfpRbtXFbFN`eJc5d>OZx4$7S{*;Mn-PQ$7d})M>s8k(wPKb?{cw7A~Q;F%_%E=GoGP$~%D zF8Ev>I*IsYyL)&JO%To7GxEd;`CG-eiCNo%{85vOT*_HMU0Y=1=Y2((Ri6{!4gsLy zKALiJ`hk87Q+1Fh3(%M!9cJkof=%jg=zgAlKx0YF!AZ-4{<@_`d0!9ESa&xq$9w?U zxGmXsmytcY{_@<#X5c<(d+7I1r03*RpPCsVtR7&tiM2*N*MR4!&oXR3u{@e6SqwBD z^4S6ZE^r&Yx*_?hA<+2JOS>KhfXyibFU^Jopb3O^HoZRqDpfuf1yV>aWQC)jSOE#+ z$s?s<$iIk`TB%(JNKd7n3g5^NG|?HCtiD-b;m-|LNWlY5{9O#OA3%7LexfR_1!$55 zcY?;Af+Az228bvh>3F(zp&^K}5XYDm@_;7m{!Q-jTf%fSC7>sn2sFGF^Pj?lAg|ia z7-ZA{O@T$RPEQ0BrMC{Gz0(7lVvkkY)CZ8Oo8>usJrZcjl>r{fIUqjh9AtSE#Zyfo z3f)d7tZ&IY_Hi$Yryg|o?VJ0+YplV#@sJ}2EV48eSPnE}C%bWlEg+SgaY4fy<+DY3@BSHd9&->SY&=Q?n#pQpL%Js@ zitS&ITR{1kwVzW#=SpM64d;6kwgSz(G~;BgHdyenZfdt*K|8gi!k5_=U@^m$H!*h} zXcl{)uQrB&VgCHD?O)J%u-f!Wd#VRCiRn+vi}iqJ!}G!Ic^?=Fo4E$PMe%HZdE%t(9h!j~#6&D)T9rCKOj5Q11eoL*`WYA}3hbGoLygi}H6ovbZ-<26i!PozJDm z0nOQ#$-Kn`lAUU6T@pKi<|@n1m;4-FdABQGtRVo+ZH}kaaS)pGOiV0S9DwHjUN8w$ zg;!Chig=sCUMQblM zQ30Bdj^uNeCqO@wy+^O_1km=fN(%dLg4#OV_^zsIpzZ4w4>H{jmBUJ6^$(|j=2tC} zZ?F?$%d`9r>?H#&;Hp6LSS#2kZ`03}eGjyt5bmEG763<-ukZVD8fYQgSjV+xU=ugf zR_H_Z3>8`LTO0$ugA@G&SJjEa7219%zT&Ok5U^0G)J&@8+hczDIJt zL)kXqU6wvRyMXE&5z{qrQVrxOb5@m2p+Gz8)D$893Y5-DvRkg8@q0{}KBQL&@+rP0 zTyc>=i(W0?9(Vxocl=Bac_aJS_PliWVi1U8B4vKV0qsQTwYowskWTQ;zw`jbk4ri= zIBx**n!`^Vw%P$LVQ<*TWFz4FA1nlH%>vr#O`ctPCkQpCoW2D}BYPr`#Z#Xt!mry# z8K%m}f8r0#!@`5W!SyiR_&f4#Jvxpy4z{m1}XdV>BnuP+SN{=GZ_}866AhPv0iRCczm$l*TZfhCXn143*<|aJQZhv`D!!`zj zJ_)Z$*=az_etu>V0zppYig|S_04?XrZt1imgh`vmhUQcv&~gLyd}jOXn#m65d8Y#kN}9j9Z&e?tp~Ki(Td}-cR*=>@9l&fQd~cN8^TZQ7du`W0Lv7PH+Rn#*j zywj1`7`7GZtB8wvc|=g&Cj9l?w-%ti@P4%{xCd03|EQOYGk`|YyJDc$1BxvrcSCxS zJ%xRz%|&HU>|{-IeTc>fwO`z*#T!(XOi0h+DGXJV22L0p!KDepp^K{0*tNBa~7~7`#RC@FI>_63-i&q<$Xp# ztDh*SJB7{%3C4E!N$4#K`z*f{X zg@N|z6LpY(KeUAP{@C|47HAz$qMj?*LSu^Ohb!-#f!1|ib$3}FD z9X_wY+vHD}q1$Vq^%$nAz2ySij4AW>JCQ)^<=W<&(*dT9df66UO+f1#m~e`_1O~fS z!@3Gle&5JBavLsyUa?+eo_rqA2GbpCdrUwvwYM?ld=1ck9Gu@)%ma#(j~wFVPACj2PIIv(WyUq_}8EyK7=ZQ?0+@ngh=IqazBNg zX9Vdd@~qZOe}jCoWxKjTInbt#kAx~11KT>6?^*>BXwxpK1<~`MQl68D^`iV{RrSg` 
z$e`${eZJxmvj4Nznli=)x+Br4nmx#VzT=pw$1&KvJ$dIIHxzI2so)Dz8mQzcCvm+_ z2HJAcGZykB@F-enIA$||wz}6Dzp4%5p|6B)qw^kZ-Ds3s@h7l$y7II+6M@deef~`= z1>~Z|w(Vqg1v>M0S*KEGP<+QLNqe0KbQVf=f!B6WI$Gb|d%hUxtT&wMqTN6$z>_^I zq95q&VT0Ahl7P<+DEp|T2Xs!$6UT14gVKdG;u_jd(77eKRtCxe$8zndV+xAL^ZVXT zyKs zD#Za^S3-Qn4WNFTB~!@}UZCquW$1h5z{B6$w#)q<1-d~4uckIH_;ZF#W66Bo4Omaf zoU=OW2Xwn{!5-*-fsm|^YuN`J&>d<%XzQW-ZwC9LbQuhwJEm=}EqDd;ex}itwKYI@ zK9DPQ?f^)ov$OB`tG`_d8}*MhgH$w&?#7QTK;J1E#wwu>!WyQRgLWf(_wnZ2V%3C- z?o^{cJ;=Yu8{JW%N^CKAur)OvfbMlSZQ)r6h<8o7-)r#$`tD;>wVAwtYofn-h#`BQ z9WG7*iy%ETGs2-d zFK=FT07-Tc$>GbWePOI3i*NDR^!b<6l`_O1`g&0Gg+F0PPZ7j4a6ms&N&dAI&){`< zb#-W^ALtP&TnU@|K=7wdcdQxWj|T3k6l?FV$COwoJ4S*$fq|J)yR-lr$d zj-I0o5vH?x?-a2ie6C4rLg5XFbv=EPlGg(Cq=IlBQ*GdRzxDa$twf+-j4SOgsssV7 zjqgSX%KwtvuTbt)V2i|MAJxtS`W1Ec0)Z0XJEkv^%!UVgDlYi??f_sCJ6!yUxgY3P zJM#>Z--2-9T0yK6vQK;Z`GR!@2<2`V?wi5^J^g|}iay#;tcT%6*AULwYiBjf2}08A zuI+ovfqv8IRH?oKVJtAubWRHKSzOQRQzIDyWla}?+w*{ad*FL|DLNnSxYpY;M+AB{ zS(MlKA&4CJeYW(tALu#PZB3dbfomnO@q!4l&kc~;%#EK<{Y+v*Po&K!eDPjlcckqSJiU$Fmk=$$RKL8wNQOo=f5dUib z%&@g42tE+&TbM@vYd5nSP;?3Jc_QVmXyJff$1A0Lm>ER2y)4{|;elTNLkn9b18ZZ^ z&xIRE-$*rQ<~xKv?mwJMz$5*;8#~4v4G7cs16q=}i9l}-+a2$v140?7w;vyhpEFcLI%F?<#W^luL{x1XK?9y#y3 zrfYbh51vj>O}(kc1to5&u&wdA*YzHpC6zuXaLsWW!m_ zkaHl|7To+o5C`!%%mZx{&WECxg}s+b4y|9_fU@wsW#Unqygh;?i^`)3JeaRpsk(ifbkVp zGJozu?=4zXFJyMl0Cyr$RM=@ei>J42b!kCg21i)nrBCITj+P8jO22NTWP zY#bH|fQjKmgKnpRzMXCvU7!#!@s6>$;`5;R`q;!l$_cHBfc2&OtyF z4i7<~$|ByYwI47|)2WvuI-qozRpjYJF<{yP)jOsO(EG+O&P@nr0j4t?7slNOVjKNR zIL1*vdYHM?S2Ac@{jvKgkNU$PbIqTN8*6yId(kKWE#pwltv`KH1ZFthqHH`W%=Q(9U# zUPgNJd!furji9x)B&VSQ2N)qn&gN4dY`OJlV2Bs>kHwC53DX^5cI-&Td@8?1Youw#%FZY;f3>3MN<1=!0d|dZ6t^TJv{NV z|HqqvIh=AVlK26SgLl!lx4i|-(S5N!=q?=F#QBRWC>th z?fc+NdjZ-_uD|b8qW<1jePmfo1vFT7E`F9k{o$9&Ww2ocR9_}sW=7X5EFiFGMq?BD z{o=E9mb-Ysf=n&8=@CG2X5{J^7wXRt{%%34J#6az!X;6T#_NHfME6-Ih z>P%`Rx-1SP3#zUm%%|R~0T%t{(BKa?kbKwT{AUZY zkIkMx?cWL(ay5%{e)j-75tVpvuoAXOP$i?$dX2@|bJ_^-f;NkVT{PMcVhM8BBh%~% 
zbDv+Ik#{Bnc4|&co~;2C%;(2Tuc3OKY0CDLSwr`8y5@M>P5_ozAj@~s8@Ra?njOyM z0e0?Wt|P-9_*(Qz9 zgFxlLYlmacv5j^Z0oAAhIIXOIk7#)h9^^XZ^uR?A-?9|>4y_ipMI zV^C&&`h%W=;@>KNo0{ST8raK^qZ_9HyM5857oCf>{VhTBHxaPxy${PcEMZfdvNGcg zil1X7v~~vFe>B$ks6k-|ESK|iQ$Z4J?wJqcQb+Of`iCW?ZNMaEKxx$KJ7D=QTn5v; z!6KQ@u4Rt{V1=pGnnmH@JZ{R_poaLOAj#hww?Y!zYc-_+go{m28@iuF&rv(9=RHuo z68^sF%>(czz$%^T9O}QvKS6u<7N}V=GJJUk_kG`1ATsvoXFVGNtn9{$==gK+ zQS0od9qj?^S(sPt(@=;WDZO;(lLlbV;o10hHJ}PPdvxBu1z43BXWG76X!-i}8~p+U zu3!4I+OY^ zU@Z^M=fs78Qn9t}qf9iOTjOg<>S3VHv#DmOx(l#RJ2%)L(FO7AX?Kg>0$?3#;Pk5& z#7y%K>MFPb*0~m6=VA`>*>8Ca_oIA1f2yi5djcCO8a?}CQ2oA?ObF~&2gzig@4@K) z8um3&Y0$X?gx&VqhW_dYtj}wY>gy(85ewDa<%a|8n|5mNetp8!+%*>*dVc~N+`wRM z&?d~V3KO600KmR~p2~YL3>#&9L=AY6{ZP5wfmIB{jSDSBTX8@}MA>WhBwb}C(j zqKU@usP9RkbK$VzF8hqld*uI@K|usnfbip2^0A!1u5T0UO=hJhfImj%(6|NSCwpgN z*9L(}xj22lC;;}mN+BcCGvkN^Iclz!myar+RK zf$Zlu6JLl5g1qC^MwvYbFY@F`3Re=wWq(~#^(6wfG)NoWFAWltm92L9{eZ2Ix>fJq z1;M1bJ>R7A09(5@`!V_k;3MMBrv|eF182xOQ?CO&>jRXi2|O^EWCbf5N`e2};FIfU zKhI$1ka?Vb92CY<0|g805S~@-T(Bf8eXJkCeFI=@=+if8e$8Myq~3nu6v8ZTOaHU8dT?d?HubI`jd(R#q(bo4FvFa+t2sFusQEx_Q~y#GZw zgRpK|snhTq2Mlfn|LKDjgmsl9PNRDO3?809e~DI*eD>tr(J~Z|XCY|A&SBu(6|Y2| zK|JqZVAn7PB4W?gkD~R5!S_DkTbUvVKC4?&J%TVl$zSH=07&iIW7ny|0ER%}z7%Qn z`*5$@Q%q?5F$AyqXdfyEk=KXkg4CygAr$AuWEwztWm;=AHi`p=aPUqx-x}aL(%zER zgRqFJBcUi6_~)4}b~Yd^YG(6jtQZtJ*@SJfiNFw3*=Do;2aW%W6rNEAFvJCngO{NW=AiaV7vq*%Izg&*c;Na0gk{oaCMXmT__T0;;Q_LjJvAgG+yUaJ z+j*H|QG4V$0Vu~KGw~&FMG#c(AZG`Sy zH7ZH@_W?t>efLylEZR45b%+|E@uR}9eyI}(_s{%@cnq6?gC3;Y1~1 zsNEEs5K{u9s~$>t2b7G?jT))ADC zS~!PwFo2I%209#702=Va>P?#{w{P znf;AxLqL`{<(7&n5#e#G);k%%z4h}HeH{-By)NbK@Y96JRNQshZX7W5U$b5p)HF8)nrpD@&vK!6|U1(hnF$=L%14i38TvLlK5A@xa&= zepcy7ABen(D@hwb{bjs+-~4TV*zh4^+`1F#H(Q&|T@wa5LX06n2H`EbqVkFY*x(B8 z-ChSAFia$-j;kL9CVyo{!R~%wn6fl3#JXZbeN5Yu9Eb={<`;f_2ORel-iPWSY}Or@ zV8p}_<9oKpZ3XGKzHvM4BnZrOoXHuQA27_zRZCX!AXMmYb@GubFt*)UWfph+f7(0q zf2^MW|6lff-}g&o-}e;fsj^m_Jg#AD~Ac9jEe<#C$TWIg@I-qE_1D5P(;abU4M2;#nreT`|Tye;}Nn!`!JmX;!0 
z@QDieR+(St$2&mw$nPga9b#+t+C|L}kerVCDsWy8@zk~K6EVPvNt{oxNB%Zn-Hznh z1BGqO>26jY;I?m+z0p1e7Q?d1KmEwx?#X1-F#5gB*Ct=}HL|zQdQt!A2li`6I$PCU z8sH9BVvUN@u)jtLuMb)R;MZ2OL) zfq5HcCHPTnX8xRYE+)nR53n%h`@41Hh1ojDB!XExJ&e(-Fq z_X1kZCg^&0krL0KsN?yJOMb!}|kg;jk&4*nMzrU?`5^$g6rt+FCVA^}qV{QG9AN2a=Z)pT(pE@Ru62<^OWS`UWvlH|wN{m>! ztAP6&xp@E80AsIly2&AQe|`!qBHL481KVV*`wefv{kbZKD=vevMFO|y8BWBrhx#eO zpnuKZ@#Zs0zyo@Pm$_enslr!}Ggnyw4}42(xHkyKcO)u0Gu{J!xGMBckr&v!)sf|l z`v~}vTp5|Z9B|c5ux4sS<6F?R23%bhZqnk8ok9Dl@!;b}W6{A$#yj(hx<*UDLk`G< zn&9wX;EwGrJ6-}FYXACFVGr8JSaD)~mJ{G%M!|O@w19RZm&R%)0C>29$izu)aAoz8 z38(!4{3utgM~n~HWv6#Lo+w2;dtfBk8MbBiW~hBYF)k9b=g zpm`M1{2T|u1k(XOUbSsc$x~=}(A2T>w;|vsa>o7L+TmlFg?#k`cEC?wOHAY553e-T zEhFRI0Y7zIXYf`J&=~6bT=`IWr@cP9ejEhnYw;p!{9J&au@C+p#|B$e8m?&tpz_Ze zao@;n1D*O)8Aau&yh!=`UTA-bTC6M2M~WKY=eV|P6Ke(Onbe`VpQwG#&3xOVY5+=^ zwKW_6ngM?P+v({3Tu|-W@GIgZYR{;*LXA>2pf|vCt=pgz@aW1?okD4l<+@t%w-ogs zT8>raZeLKkbwt%h2F1T{wQ~=%8BjS{X5Usy0v;0)DjvNNgr_d{=GueY+MHUob5;?sgUAF#+?FX(sq65s`)n#!Y>VaGd< zzq==-0WaK}TUV?Fw`q-$H_s*les7CN$baZ}tk->!uG3k77iq^Dt7QT4$)T=Z6g{u^ zB^E?(8A9p7=rc{5(0EYHe3*0a7x+=G*%q=pARhZ7`nv)ws`w|qsiEjgLUTrrp4Cg=Lcm<^s0Oi3OfDhkUqMfvb1F7nHiuiQT3k|cmw{B zwv;Ev0m{?66%)QS0{$q-UvI1hn@M`~X_}+}{&;s&hx0ZN^(q{%Gt~pU9L!QgLxJP{ z&OlRcq(9Nf^W`@JX+FP!_io96SBP;q2xovKn=DUqUOD2`kgfCiAXWPyqJSz0c;(0^ z*CZR@E6dlb7{mduYBM?gBp$e{qQZ(}Xn;R`b?1q}84yzM4c<5A4S03&(!U|JKKTtL zqk>3(ma^Zg&>ciG3%;q`n*_Y(d|fMcfIc11{%%VviuXKF&Lcq`gud44mJ&#>-5L4k zU_JfcDckGwR!D!b=}XbA&A=%ov#HSn0Dq}!61eynn7(HTU0v)0yiVw*rJpSJlm3au zctJzFG_F#Xhb?u9oa4QW_|;Ea#d|wIw1?{9+>Y*#-khhaY>rcQ(L6q8c%N~wCHh?*e>-5}zRi$MR|;9x zUVEQ-_x{ob)>sfVqO$WT`z|y4{J=kIS!?Il(**80EA)79MqjX7b z<69HJTa2y?p5O$F*Q}DXy^?@`l5YvIqtIvS44%uS;efYtiGPtR1m1ss+imsMc*bMb zf4bN=K~9#gI25m~C#Prq09tS7DywKBZhzB%g847VstxxkS)=>=T%jamS__-8_{~w) zI)Ha%9csE00GoSiTsLlA2Y6>}!Hs{`V3u4mw;bpS_?Pg3gJyT=ooPk;oNQ6NE)ObD zAJZ3k&N+_!0>Hbi_iM&A17B8ItAtJ-;9qs`C~vo=k696I8YgIg_eiyAnEL{M(Ul#3 zYwMA3to#-l$ARs3T+!cKRKyduo>TwP`|YDy+Rh>S-p=T2ezm~dZ@p@((g%1SSxtJR 
zq5k&YtBcJ@?=KtYD6R(fjZL>Zt&u)(d)>Zu$AI!{Pt-*RZ@|A_*i)UO45BZ1$Gds* z0RIsbr!b|8OU2KM+&qN*2Y0_ro$SOG>zONBPve09#3sa!rGvo4x$mKZRKSNcx&rh)10NQ3G;^s3MYpp*Db#YrE9biQEdsM>=I0D029mwve)`=a3eN z13tB3&O22EcG6uUMXQ(qpH`L*i1z}`n-eG4%TReU{FbLG(0SFci-nv#==x?Be3HL9 zfSO$Dm#Wopz-I?8R($IR^^DLwt)Je2&%H0~N%{(kB3}~4-k|Y*zPeuUbuBg*`N1=N zt-t-r`>}oJ5^(U3vX`nN`-SVw$*A7e{o;KYnK2Cb;t7TBmNHoPRr`#5x*6bq4{Xv6 zSr6g|_gUOlqyoNV?-o}hge|g&bG7?X0ADsdI(}sVYk8>Qs9lZtpWJn~3SW>{7eA*G zjpDCx6ujjfzy~0#iSYh4}e;t5e+q-r1t5p!WXUUx3O$CB|^R98%M&Nu-TUA5vZv=n)9tJzGw`okL62ehD-r^#4wgQ}&^og8IL^|Kihf8Me zxS+<4*4_f-&ws9}B~Kpvv0=;EIa8zy_}7*RXM%87gLr7+BoKnm^fXu`3(C6o(6r6tX{_M!Kv);i%CAU~a#laQ+7lgq&>V_=#}X97=h(tvwtF zdG;IA=sS&_ko?A9Pj4cgj9s9x!s)DD8*Q?lNa6_6bq|T3IYfeF2$_2+3$fnzg;Nn7U}(I0`=N>&5c(!x zB2GAiVMwO;zL32@7*L;wSW2P&qET`wrg=aZa;N(iD$#!#9=*_|MngP%a^HDx;7@#U z{m3;G&*~glXmv zK`9^l@|Y^?$r)sCcKPMpi$(0cXxG+{p%frCgr#=0-^6)jH%Be1c>`g-FQ|Hx3O2k_ zV<+n5b^zEcS0AWd~ z<4E2If+EDsK#?=z(Zm*}DO_pG_13s_8W339fm^RLKu|D%OY9Vihjwi3e&mHySRR#C zk9q@Pb$>3J6R4UaY;|2ZW8;g1^2nhNpk8bV^0K?K*rlO&bK2ruO8Z z`G~ONy?XQ07|!+dI@hfqYdr6yP!|Z&5raoO-y?tfKG7o0I?$)ySuD342EyU(m-Vxp zpx9-n>1N&VzWz6>4OC4=}lXNM-($ z2ZUpIU+=Ds^aUv$6Z%G^JGtj=JZwTA>+9egkwWae)hnrI4&=7Ee0mU!*hNR?>y>;E zsC3Da-Q5SoPKi#%O@~1E(!2PG?|DG%VoKTnDG=*+cec%;P!RvxaZ}+NP;56wPOy3d z;o8n#_jeZi^hkEc$-_wB{pwZ!hHhYS7}srT>jPp>@rk91dir3C`f?6hpAl|1wfHs; zf!tNo;Fwc-K@pq=x)$uuC`o$R>N8gcH8kCt(? zo*_KU7Uy3rWB)GfJyp01vFEzdFDh|ZmGbi`%{IjQdEK8BJO<{1wunYn(7@alcG)wCWonopVX)uaP);O(J*OBVEz)T&2m4anZRQsANXUSMYFNxdhB z1L2cZ>r!|STL|x9$-7Mf;$ZBdUnaT0kvx=Qke3I

WUq5?;^Of63W;jgvYzoP)CwneumZ=vTkK)lduJ_0ydN`xA1 zQM^EkZ5r;1Gl^e&9jZw~JTlUIA&dU2p`&BNBgBVWugePN(HAN6b~|nY5J&1Xw{Lj~ z66;^?afuBFBBUa?*&yc5I5$Mdv5BrWsw*EC(Xods!_B_tHnJS&Tw8k^p&xA#B7G?&!;tmQB5j}D8wJaq`ZDpy3Z(%9!tTeUvj0r!GdvIC|o& zT#>o&l9B)E5P`w0agbEn?Eh$44~R2+ayQI#qkO6rB4^T2ytAMkt*4I7XI&3y?ZJVF zRIh!ih+}iiX05#0NIxfH)1AMK{`bg-onFRBKeya6ec2i)SLY?Z>h=L~{-?)cp*qfd zh>3OlWgZYwO+O~vPJ^U%$!?c_=z5}`2X~A;0in-_r@lO+0YSSv^N6t*B7sRh`axy*Z4a6mTR$u2U z=)BJ+`K#+td6x|?4;@too-4odm10r6SXuUcZu>#VGndIxwj78nY?q&ZA%JO9c~ARi zD&pU)oFDr@@Ser@xA`<6u6ACq;oFAYcE&c`z$rjnBmNx?N&^v@43CTJQM|YZr{i!} zZ1H=f;EOK=ug%-vfux_oU_pDI0Wd zj!1NIH#=1ayg7C=JqM6|l6ljdZY^ls?OGb>4g?}ud27z#2b5oF%u_E4={NXljKkBh zzZ_$Axz;qqe>7@}>Og|hq8wMz2gJ?3EH$rwkcstIdY^(g!epg+5w&lo zSYRUGP7oa%qkNi8M!eFbzhDWRAFN#>)+7I{VIw>T!)-Pz5&L+w&A%ZxP#M_OvI+1-b6oaH1LUZwE8hbP(=O5q##62gLo( zyS3j>0C&Zr%sWmLubB9&;)n8MKT9MEp5j21Ja9gH&*cf}<8w~#jw51n(mzaPPRMmU73Sdjgr z4Zoj#NdvA^Te@p^(13WXghlRR1+nYAA5}W_fGFopR8fgXX0Ao)Q(zjZ zw9O!&7l<05J{!{k%_tvy{220ouI_s%>kOD-laMA2l0=5|Nip=utM7%T{ z#6OP`P~JY%*PaT*i$S%9FFartk`rwz_Y#Pg9|Jk+WY2hK1#(D`;@3z31V#*4nGN^A%Zh4{kb1 zNl&WZ0iuQY5!^;XbKS@HIk)zp?~o5T7mO0&>*7)2;4)PpT2r-rA8SJE=t2?~MH!~K_L*q4F$!acIT=JI-XPlI>URT+{zn) z=;~3}uQ&+m>k_&RCiVi+-C$OBU>8W39=1MwwGW7|kDa>}6w&%DkfU&Ieb$p6Fk*HI zsQUR~-?sn|-!7d09Pfb08cM6Pun z5dE8KI>u*UowiOYKGX-q!1{M^Xg}z06Z>BE&=rX9{9RWpN&o(fZ4@8|-?1j&bOJtM+JTfy0L0HqL8ri6kPn~LAL&Bv zJ(MN=t5Y2KBvf^xJdpkHWo5r-OY|T29F$Df=F5>#t(U#&*lc2PS>xLJYShheO2r7| zt1o_5l%N9f3(T1h$%6PMtBh^kNFP(ToD9hZ{wnJEQ9a~8F1-1L96MH|>Ju4LOhLS4 zebBB1=tNBhzFn}wy~yQn=Tt8FgGD}lUP zg1pNn6n`prYkI*H$g3E&U03Y`V*0Ap^{zr}MRERqi5-eRbJW89P6kK|T={;z2=T1D z8EcIe+K)4MfVyG_#M~DBIKCI)aXfGEjxOr2^IGboM~fiB*0|qhwjPK-Vv6fuIK%M} z#@Y7IPZ6(3Y$RpCD`WL_K``on3q!nG`$NIuh+~-07gtjX?aZ zna+A_4@yc$tw!IX`d!NZAuCz{;)85VD}TL#SdMEu5v&Dz;{w@7r`h_a0V&) zuM6^P{gANK%2M=)`%5z61R?I%XS;gMnm{ zI6l+x9>8ox^NWKWkjxY>zf+~)xH0(sadTzFBQ}p$zJf>8lR(k1ARt*jX}21Fg|r=E z#e*&lK(f9N`8cs28sx533>A0-$yTsXz-9s;ZQX7E`!WV3d;C`?=R~w`?ry|2MSdVT 
zPP}TpUj|p7Zz!?pt^tzMD^GVo6dYxHtDc6j0?B1dJJhiaFx;*vx|Iebx88v;uXfP) z+R5H*%nKxsl*LZxO`v_AXWQIWJs^3RrFk!Mq5LDS8BssicyuoN;~Nm;TGeAk^+ocv zHna0EgQ}vTg0{1SsSL&jCT$WYk=z9$*n7G&G;}#H- z^|JkV8I><|N{7o#3WPn~cDLsu7WQV&Z*&0RpaX5$;>cgb?z7;YKx{JY@HV+~0HmmX zv1_d$_A{xBT{!{Si%Fjeeb)nWBfE}YGC}s@EcRaBfgm8~VOV61bn!7srJXy`_}Y-X z5A9DPCE5n-PMf3iJtz;%)=3~G>ngNLNSqRVvddW08%U|5^Wm=&uzeA2zjTuA zwM4*{1q~M7yHJ3XIW2f{GKM~WcPX_Y0S=TRWl3~oAXD1=>ZZf`7X3V>9XvA?r}3q-Hqd|tJ&4@eD`)!I;3?C%MS z?4uPp;<2iSC+x9*;+99G(EB~9*&cXo!y{~3JIAFZ2!PajCBvCw4?@h$AC)#yfz&Q~ zAHMG=wz_$4<`^&1b&}4N`0c=cTe+}~Yg2&KJ*`=pu!G)fe)aK*5yX1lUlL2IK!Wo; zVVr^N_3dNLZ@PeFeskM5Tf_zi`p@L&>GNkV*kl>gfHai;%Bp%A8~@I0(%FRkjaV)# zUVK1*(}gSHC)apPTYv|>|6krBB8t9Gkj8DT@6#>;ckgGh#N&uf>dx$2?gqB{!jZJ6 zh)s*+o{4Wm>&dJa@qIKP%@XnNwdEkZ(>5{t2^GiNq~)-1H5v~rq`v)3N9QYzXM`Fz z(15gLK09u72mARw;XtlG1@SL2a`SZ%5|L(nRYi=oKK9GVNBb>?-S?=Xc<|D;L$m?? ze(N#!UmEhaD(n@%`U}=?H2RpNV+Q2rgcC}r9w?hLFSPf21G(iS*UGCuu)hACQY!i` zLT>fSzOUB+cA3AIqQhPSX>F?~yX*~#*Q6PaTsA=3=n@AOamXu;$^AR_5J+1|mvBlS zWOlXCOX`Y%w4?NGA9)7Hb+z*%Cv*{ygv`fVgQ-Wy#?&Enz4k4CgYhdMTfG1GF>Q3c z4z-sbdqrat{x_1UMUcL&K;V_7E^z5)rBb<<|B4@f6#%_*I;*syXYcLx>OJ8M7wyDEt-Jhz?t6h;Hm zMcklSF&7jx%_R1P<{@6G3J;aRz9&U)I}w8HcMchKzsv^tBa$>@XKx^PHB||r{HZcW zrZr4%Ab-~y17Z4KV9q_u;iQf9-Fc6`DX-8MRQ9;|T?8QaT+@gcbED6FAK)B*i390& zw6Jx8N?#cA46S{Kc(1$M3iDAq`i~Z!+4eLb_ies4%@lyMwKmFTwDbY#uF3nDK}GqL zdYLoM1CSmfF{TQ&xGG)a3&jGB4<0Bz#%~lq?z@gD2hu%%9KOpfg)L#G2dnQ#9NUx{c^)D2F$j;8_djsA;9>{)Clvo2Y-ib+FOK80Fj#d9*n*`El z54zOX_5tY=nl^fA7s%)&dA#z_1M;97^K&@^Z1wf~=v)6$fINhSIE}jFY_bCzKB>`w z^i}=*h_M6`J!gYtBd9?730k(t*kh|7kC+FhAbY=sdyIdlfL-mu(1o@3@$ct3%U27$ zaseOijv)Vlw_$&-qw|3Yw-j8saUcVqv~F@dNS~HlT$TKQ_;7}?ovt|u4a1@14(R@m z#H7iv3t{bU(|EN}ypH{sH;}=*Tt(+AF#DX!AIB+3583!+Bfbjq zt-lyYtjmE6RpwJ0*nur=`^SWq+dzi#27EQ>2T7SF3z;MoKWvuReVq+B@`_uBcF=$f z?~zI+|AFM#qu16^DE`s<@H>?fAi2U@LyyH#UWkVczJ}SrW~!GVo{TsmO?p=L8wlFw zxQK*Pfjk}+YHzDSulU>@E7(T?@`OKrD6W}4FA&Brvi3ZmbQ0Q(HX({FUb=TIybs7z zrrtm4Q8;g&%atJ+9LUoOkDYwbeuTzx^+T)Z`p$4HpF5v|4Jo`0_BlaAJZTp)&I}tz 
z#fay3(0qNiGezmj7qE|>$~%~b=HE#CYqrX902`A3=r3*{&y^^5m% zH-fsGPtu287ANXRqIgkfp5(>np?tm9%dKbK!r8b10DH2kW1dGa8Ur;#Kw6@5JUFPAkfmApPp`13PP<?heK=hCFzMSSsAmhah1J7K-{{1LqmAsAoulGhEPI@|%bee)Q`JOt@AJVKbRw_dFh>cxeJ6^vk#Dv${S_wbE<+ z$8@9y?eBEGU&?Py1@d;Ud+T~GdUJtYvFO@(mj32kWZ?{*aaeTPRtNE&$64P6LxG1J zDU;x%0hw|8B^QX{Jk&Sm+Je1-%#8jr!Hdpc4vp@M{ERp&VE%@qEq&SRiwoNrvd?xF zn*PR19}pS(#N>}S$4pB&HJCm##%AuZ4S>w0Zk=@Cpnuc55}#>70Wy!%^LhsjnDV>| zueEz4o;)@vb{2%ikKHfY+6QEQ=ar?enZSSHXzJz%NWY6`HnYV8j|tb{+%YPU1;vki z0%x$*{O;;tmw`MO(zt@L5vJv%9D zL&s# z5%;lW>prP%yO8~}JM3lLg4pOTlfSQyAg-Y;i|kqf32pxu3p-GMeI7Vzq+tcDd`{G) zX&lH}=kNS}kLhitWSk2%jvR3;Axva#u4rPN8_R&-P{KMg>>eTLPSvV)wM zE@L+b(%Lqx>U_A&S@1^WTXm2FUtS9lr!O1No^wv*11rWPY*R{W$0hWb4Bt zvA^9w%~bm^-`aTCcFRmb*Ab{@1{dV@)qre2&l(KDVE9S?onVPKke_{_f5wTJiYVSCr3`*!x`Kz6ZA z&@T4EUU>G`)=nMquSyR42{`z{$*%a40+8LUC#{+j;Aoqzs1$49L#~XZIPQ^u!;cLd+el+{{eV)HQ#HCW)zTp z?gk@)T96#1rmE=~24uh0gqxKj_{D$PB#?9;$N}}@Q}29W*Rl4vv@Pd>{4V6<(8B{e zU}5}I%{<~o$>yC~!OEM%x68l;$RB-8oGb?DJXohy!+~=^4mMt#YFq>htJ~fRE_y)z zEH_O!#{k33Kkcx170996BVB)L!7y1z3On{1$l>UWE)gD34PJL^alBLNO^_Xj}h z?qI2K7^>e<$Az(^qoALsa+sZIFOa`X?ycS(1i`S0UmWGAe#hip7%xtM@QA}%xh~Xx z<7~@j6}vzvS!e!98*1P2@q1q0)gZL--6NKqJRm39cNpb<1{US|)U$dtAb-D{d0{t> z{n~HxE5;Y`WMTTWu_-pE#kDd50OVBsrY{zgAP~t2d+^O0$mxjQ@J(*Omix!c-meeH z84ubsu7|kb){|3Fnuuq&P#;Dc<7_Uv4R6-Qt2qt&u9F5>qw?lWC2P;iys&5Sn_s{> zIhf+U)<5SL7ar~?0{))Nyr$QP|MaJL_mu(DOa#g;nFr*;TP=qBHf&P!W3R~m; zqs=%)hyB&vsXWBvyL-*qan`Klz4d54$zW<5;P;nD@7MDyom0r3`DO5E(-{!mwl^uG zSPvL11@p(9j^j*QH9rsBrU8TXdQ>SroIWFm4Me}efx#BRmaxzYqVY~Z(wk$-`w2o0xEG9D!q6zFt~(7I%@1eQoLvT z{ujufYavxKyBwQoZoNBw6P3r^C&K1Cg|i%MyCmC?2MnG!$=O3uAn6%ptoNJ>4Bp4Q z{VQlc#_OFK=M|iR!IyTW<&p(9lu-G#V+-=901UzHlLczqfXdz6DIbfjN65(Ea!>-DS2@^FtmzF5VVSPWPJtlKX_xN2nF0(E zmK`Sno%b0!BRF?r67eWq=w<>=aokAd@J81o`bpPK-UkGYQ&0I+BYUyuIW~1Qu>RoQ zZx8A?fgzsH)8`-tT3z)Qc{Zc#k+^zVDQgu>SEKUWHW&dzGVHrx(h_W{d^Y%JwGkLn zZrh8NQ(=pi_SUd$UtmaER{ebG3Qm8mdyk&+1cr=?OjGe2*vquuaoq=Wy|R24d@|2L 
zLPu5N&6H}yvx8Zo%<$x$MQhRUJ7CCtwTnji)1Fq_<DhsH&vmg2j>zM;zf&_EwBG>;sM6;=l3>>p2~ zUm<@j7kXdQTK=pYBQnq#7}^_n`k!+F-;HfedK+lK(4p?RTJj(8p6pVn=LBHra^$v0 zvf(T|VQrOk9Pz|ZBYP12`(+LFD1Qnt^gio&udl%RVs2Zqj3a;jx`^zGn;?Dh6l27+ z4;Ti8&uFo)>2nrKw|dZg&oI3Hm-8Gm_IBlkC-WHcH;UM39L$6BxffqZbo#B*HYpIvfq?y_hef!u!Ky1y{(FLi^y}f%}K!fcW9tH7qR8RM;9KT--qse-a4Fs z7_;y7dS(EeKT3})KF$LM=<|PjpA3TR6{j+S0T@=27EJYYkc}>k7)|s>yn6V}xtrL0 zuEy8DOGw}R^Oo+r@4&Cd*x!DY3XClu=;m>8=vZJawr>N{w^j`e-SVRkg})Zm`b`0b zb(Vlhy)!oHbM^lIyNGQr8mrD~(tk91tNiPt0mJslzS^aBY#x8<`c@MMhMmi~-_-^< z#pz4J6Hfq!y;)BEKNb4)CUoKnjkgR31-eCP8ohgYqnNlR;%)5T_M`{lV#oSRn2gqV zd|5Crk=|}?=#bxwcze5KP}V3msuIp|d?O7QJ6;+F1O(BiLwnA?bq8QL7VKc@Vxs?B zE}OZ4)*lR~I6wH9i3^?T6ro;d28Q#|bMgvW^aa5j0l5cAciDR*^Ij45FX!^3pamQl zJF$DuH&N(+Z$D#JypMR7O3lfaC_i1wWd)r1aq|b!$OD7Ej;@L0F{VO<6 zokf|4S06BTcMpebOvWjaRy4N+r0*f;ihmB#XFUDOwQVWDa4TiH_==f6wd=~yZQeLA z_TJ$6mob3zu}j4-WFq^0r^UqG;!*yh&C6{w$-rs#=v>G{93=9t&C8bUm z5SE<$DBPC^3{Pz}N#{sl3My1!b)^Agzo_N|!FXWG`lNgBCk646cK38LeTDD9ch{Xr z_ZrYCZBE6$4C+7i;YRv_w>lD+O6X4>9Fe``jo7+wgKO+BwQOW}9f3W2vURPgQBKt#ra#JtZ>3_=`sy?5^f#JJd{HSjnE_Bk*h~+%; z_cP>6q~zi}&wQ$IIppsz&0>4l3tOnCT`NfjU<6S9WS<@c_FlP7Dj~=|U}!X%YbP#1 zS3aM34A}>M{3<`&NS}Pp^v*qk0*u2?-#du;(^o9YR2mBqAIW+lWbJ}8Z!M1!Tt*yp zvG}-GIrh7BKIphT@((_e8nLki7Ym@MDW(VlBg7?|ui6h+zU$kzc&!f@p=NM!B!CKtTE1NC`eUDF9v|KZ~@S3e&C6|tAMc!W{> zqpho&I%vPIkdQkTR1S<|wf%us=>HL`vD8~SQ-BeXPhum@*y17MZ`oY{jN@1HE0`?k z%Z}Hu#6}z#C&Hp%9n`@^4_c|7V-5$#$vvLFKfmH)rcP7E!zkV<3)6dtV?adQZ((;+ z9xzTT3q`;72AS#_wvhKI-Wl$ZnU+eBq?a;Ony84U2&SM{*bsO15$# zp#RPKk<$ABfDt1gSv^*PQVw92ghB-eu8N#5v*axrDX; zc8L*aIv|NF2v3F7%pv`9sW$6|N?@n+JN~xn14eA}WFWr-Hs?X1t=iInaphFSA#-zJ zH9c{8?-}HO)$7>Y?Oytl?7mMt{K)^BwSfXt8vUoZOh8~b1sHLfzk6LsTzJ_-WF{5W z?{(oalY_5uIZh3u$rx|Mivh~~tMos;t**8j$UnZ1TCCTBzBlPlP3j=~goaM?ZZ&;! 
zhHWye7{yC`m=uAr(*K;=&=4I1z(`8n`n*qr{#~}8g_DBxm|I3)T#9$7to)<` z<9504Ih`U}E#)@hzr_Z3x-tz8Ata9jAAns&1vdLGkh`9>njj zq5r#)p_L$n^!z(pgKymcF_yRz%Ofb>-RSP#S4m*!&3L`*?n_`4_#Yjt1lTEh`DWiQ zE?^YyV2^S%0~>K3zgtOOz_@34r{o9^%I_%A5WfzsXNsgX{rk5A%h=>_x?3MG?o(b= zC*HzlC;J|#+`thJIizoorca8!e|$I&m0$d!lV`3F=Pv8QQWB7TNtLgVZyqj4&#S$D z73rlJ6UE1caHbE-g^O$T|KP%@=9P3{b!52a)gb$_fYrEfpFxcFi=rQi_@U$F_TS+k zAn#{*d$%_*9vO32@NnWxomAGsJj9P>5*2uDu)kCm?T6oKz$j-Hl>N|#nKD~K2yV|oaliA<2iHNYUUJhE5tfYCQuQN*lICvrjJwy zeU4dczuKl}Lx<7tYyT<5X?#HZqFPn|K`TnDkT^2)nF5TLS+}?gZ{aK(HB^m55!YR0 zI;P`_{@)eOt>TE*udfaV2P(DUT&4QmI>|UN=uYip2fFA#-*<+8Dn{{e6N{BPJ^G(r zo8I6701QI5FjqVmTRB&AQ+Ev6lPp{xY$mbqzCXr7)sa0p5^_y#3LE^~Cv$8kVn$Qb zr`ut;jD5TMkq~76x>~O!WE0N+W2gDLwme|eXC?DpR;PDdTX+zwLjguZ%=k(z7q;>^ b^;T~R<$r(wtAYR3!2fFCe>L#`XAS%x@N?OX literal 0 HcmV?d00001 diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 55de00a350..e0adaa9fb7 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -138,6 +138,8 @@ def config_specific_par(self, scifile, inp_par=None): par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcwi_BM.fits' elif self.get_meta_value(headarr, 'dispname') == 'BL': par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcwi_BL.fits' + elif self.get_meta_value(headarr, 'dispname') == 'RL': + par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcrm_RL.fits' elif self.get_meta_value(headarr, 'dispname') == 'RM1': par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcrm_RM1.fits' elif self.get_meta_value(headarr, 'dispname') == 'RM2': @@ -1097,7 +1099,7 @@ class KeckKCRMSpectrograph(KeckKCWIKCRMSpectrograph): camera = 'KCRM' url = 'https://www2.keck.hawaii.edu/inst/kcwi/' # TODO :: Need to update this website header_name = 'KCRM' - comment = 'Supported setups: RM1, RM2, RH3; see :doc:`keck_kcwi`' + comment = 'Supported setups: RL, RM1, RM2, RH3; see :doc:`keck_kcwi`' def get_detector_par(self, det, hdu=None): """ From 
3c398cdee6d496e5d25e3fdd887434bdfc4af403 Mon Sep 17 00:00:00 2001 From: rcooke Date: Fri, 29 Sep 2023 21:03:53 +0100 Subject: [PATCH 47/81] support RL grating --- doc/releases/1.14.1dev.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/releases/1.14.1dev.rst b/doc/releases/1.14.1dev.rst index bc0b7f6445..3fdf7d6069 100644 --- a/doc/releases/1.14.1dev.rst +++ b/doc/releases/1.14.1dev.rst @@ -8,6 +8,8 @@ Dependency Changes Functionality/Performance Improvements and Additions ---------------------------------------------------- +- Added support for Keck/KCRM RL data reduction + Instrument-specific Updates --------------------------- From 9a1f81bea12a3f966837374e8b9de5bc137cdb6d Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 1 Oct 2023 19:01:07 +0100 Subject: [PATCH 48/81] updated changes --- doc/releases/1.14.1dev.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/releases/1.14.1dev.rst b/doc/releases/1.14.1dev.rst index 758e332261..c32266a170 100644 --- a/doc/releases/1.14.1dev.rst +++ b/doc/releases/1.14.1dev.rst @@ -24,9 +24,13 @@ Script Changes Datamodel Changes ----------------- +- A wavelength array is now stored for DataCube() + Under-the-hood Improvements --------------------------- +- The CoAdd3D code has been refactored into a series of core modules and PypeIt-specific routines. + Bug Fixes --------- From 0978d081ec06eecf664746cee664d99cd206e5ed Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 1 Oct 2023 19:25:50 +0100 Subject: [PATCH 49/81] add poly corr for skysub --- doc/releases/1.14.1dev.rst | 4 +++- pypeit/spectrographs/keck_kcwi.py | 3 +-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/releases/1.14.1dev.rst b/doc/releases/1.14.1dev.rst index c32266a170..8a03f858ec 100644 --- a/doc/releases/1.14.1dev.rst +++ b/doc/releases/1.14.1dev.rst @@ -10,11 +10,13 @@ Functionality/Performance Improvements and Additions - Started the development of instrument-specific scattered light removal. 
In this release, we only model KCWI/KCRM scattered light. -- Added support for Keck/KCRM RL data reduction +- Added support for Keck/KCRM RL data reduction. Instrument-specific Updates --------------------------- +- Keck/KCWI and Keck/KCRM: Turned on polynomial correction for sky subtraction. + Script Changes -------------- diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 472ec333f2..f0274bff76 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -300,7 +300,7 @@ def default_pypeit_par(cls): par['reduce']['cube']['combine'] = False # Make separate spec3d files from the input spec2d files # Sky subtraction parameters - par['reduce']['skysub']['no_poly'] = True + par['reduce']['skysub']['no_poly'] = False par['reduce']['skysub']['bspline_spacing'] = 0.6 par['reduce']['skysub']['joint_fit'] = False @@ -1313,7 +1313,6 @@ def default_pypeit_par(cls): par['calibrations']['flatfield']['fit_2d_det_response'] = True # Include the 2D detector response in the pixelflat. 
# Sky subtraction parameters - par['reduce']['skysub']['no_poly'] = False par['reduce']['skysub']['bspline_spacing'] = 0.4 par['reduce']['skysub']['joint_fit'] = False From 47de2c5e432d41058ac8e781c9e03d433370826c Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 1 Oct 2023 20:10:05 +0100 Subject: [PATCH 50/81] minor comment --- pypeit/spectrographs/keck_kcwi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index f0274bff76..0b4b01beb8 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -1195,7 +1195,7 @@ def get_detector_par(self, det, hdu=None): dataext = 0, specaxis = 0, specflip = specflip, - spatflip = True, # TODO There is a flip in the slices relative to KCWI + spatflip = True, # Due to the extra mirror, the slices are flipped relative to KCWI platescale = 0.145728, # arcsec/pixel TODO :: Need to double check this darkcurr = None, # e-/pixel/hour TODO :: Need to check this. mincounts = -1e10, From 565d8f5b99a2f7cfedd8b6be110e4aa9829ed4a2 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 2 Oct 2023 21:32:12 +0100 Subject: [PATCH 51/81] core make_sensfunc --- pypeit/coadd3d.py | 59 +++++++------------------------------ pypeit/core/datacube.py | 64 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 50 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index ec1b4ea222..390709c890 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -459,7 +459,6 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, ove self.skyImgDef, self.skySclDef = None, None # This is the default behaviour (i.e. to use the "image" for the sky subtraction) self.set_default_skysub() - def check_outputs(self): """ Check if any of the intended output files already exist. 
This check should be done near the @@ -553,55 +552,17 @@ def make_sensfunc(self): Generate the sensitivity function to be used for the flux calibration. """ self.fluxcal = True - ss_file = self.cubepar['standard_cube'] - if not os.path.exists(ss_file): - msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file) - msgs.info(f"Loading standard star cube: {ss_file:s}") - # Load the standard star cube and retrieve its RA + DEC - stdcube = fits.open(ss_file) - star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2'] - - # Extract a spectrum of the standard star - wave, Nlam_star, Nlam_ivar_star, gpm_star = datacube.extract_standard_spec(stdcube) - - # Extract the information about the blaze - if self.cubepar['grating_corr']: - blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data - blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, + # The first standard star cube is used as the reference blaze spline + if self.cubepar['grating_corr'] and self.blaze_spline is None: + # Load the blaze spline + stdcube = fits.open(self.cubepar['standard_cube']) + self.blaze_wave, self.blaze_spec = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data + self.blaze_spline = interp1d(self.blaze_wave, self.blaze_spec, kind='linear', bounds_error=False, fill_value="extrapolate") - # The first standard star cube is used as the reference blaze spline - if self.blaze_spline is None: - self.blaze_wave, self.blaze_spec = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data - self.blaze_spline = interp1d(self.blaze_wave, self.blaze_spec, - kind='linear', bounds_error=False, fill_value="extrapolate") - # Perform a grating correction - grat_corr = datacube.correct_grating_shift(wave.value, blaze_wave_curr, blaze_spline_curr, self.blaze_wave, - self.blaze_spline) - # Apply the grating correction to the standard star spectrum - Nlam_star /= grat_corr - Nlam_ivar_star *= grat_corr ** 2 - - # Read in some information above 
the standard star - std_dict = flux_calib.get_standard_spectrum(star_type=self.senspar['star_type'], - star_mag=self.senspar['star_mag'], - ra=star_ra, dec=star_dec) - # Calculate the sensitivity curve - # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines. - msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon") - zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \ - flux_calib.fit_zeropoint(wave.value, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, - mask_hydrogen_lines=self.senspar['mask_hydrogen_lines'], - mask_helium_lines=self.senspar['mask_helium_lines'], - hydrogen_mask_wid=self.senspar['hydrogen_mask_wid'], - nresln=self.senspar['UVIS']['nresln'], - resolution=self.senspar['UVIS']['resolution'], - trans_thresh=self.senspar['UVIS']['trans_thresh'], - polyorder=self.senspar['polyorder'], - polycorrect=self.senspar['UVIS']['polycorrect'], - polyfunc=self.senspar['UVIS']['polyfunc']) - wgd = np.where(zeropoint_fit_gpm) - sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) - self.flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") + # Generate a spline representation of the sensitivity function + self.flux_spline = datacube.make_sensfunc(self.cubepar['standard_cube'], self.senspar, + blaze_wave=self.blaze_wave, blaze_spline=self.blaze_spline, + grating_corr=self.cubepar['grating_corr']) def set_default_scalecorr(self): """ diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 01bb64ba3a..3a8a6d6f36 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -15,7 +15,7 @@ from pypeit import msgs from pypeit import utils -from pypeit.core import coadd +from pypeit.core import coadd, flux_calib from IPython import embed @@ -280,6 +280,68 @@ def extract_standard_spec(stdcube, subpixel=20): return wave, ret_flux, 
utils.inverse(ret_var), ret_gpm +def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_corr=False): + """ + Generate the sensitivity function from a standard star DataCube. + + Args: + ss_file (:obj:`str`_): + The relative path and filename of the standard star datacube. It should be fits format, and + for full functionality, should ideally of the form `pypeit.coadd3d.DataCube`_ + slitimg (`numpy.ndarray`_): + An image of the slit indicating which slit each pixel belongs to + tilts (`numpy.ndarray`_): + Spectral tilts. + + Returns: + `numpy.ndarray`_: A mask of the good sky pixels (True = good) + """ + # Check if the standard star datacube exists + if not os.path.exists(ss_file): + msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file) + msgs.info(f"Loading standard star cube: {ss_file:s}") + # Load the standard star cube and retrieve its RA + DEC + stdcube = fits.open(ss_file) + star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2'] + + # Extract a spectrum of the standard star + wave, Nlam_star, Nlam_ivar_star, gpm_star = extract_standard_spec(stdcube) + + # Extract the information about the blaze + if grating_corr: + blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data + blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, + kind='linear', bounds_error=False, fill_value="extrapolate") + # Perform a grating correction + grat_corr = correct_grating_shift(wave.value, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline) + # Apply the grating correction to the standard star spectrum + Nlam_star /= grat_corr + Nlam_ivar_star *= grat_corr ** 2 + + # Read in some information above the standard star + std_dict = flux_calib.get_standard_spectrum(star_type=senspar['star_type'], + star_mag=senspar['star_mag'], + ra=star_ra, dec=star_dec) + # Calculate the sensitivity curve + # TODO :: This needs to be addressed... 
unify flux calibration into the main PypeIt routines. + msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon") + zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \ + flux_calib.fit_zeropoint(wave.value, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, + mask_hydrogen_lines=senspar['mask_hydrogen_lines'], + mask_helium_lines=senspar['mask_helium_lines'], + hydrogen_mask_wid=senspar['hydrogen_mask_wid'], + nresln=senspar['UVIS']['nresln'], + resolution=senspar['UVIS']['resolution'], + trans_thresh=senspar['UVIS']['trans_thresh'], + polyorder=senspar['polyorder'], + polycorrect=senspar['UVIS']['polycorrect'], + polyfunc=senspar['UVIS']['polyfunc']) + wgd = np.where(zeropoint_fit_gpm) + sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) + flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") + return flux_spline + + def make_good_skymask(slitimg, tilts): """ Mask the spectral edges of each slit (i.e. the pixels near the ends of the From f57ec7e43cbc05e7e17fc5d7fd68403a9747d400 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 2 Oct 2023 21:35:54 +0100 Subject: [PATCH 52/81] add docs sensfunc --- pypeit/core/datacube.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 3a8a6d6f36..12af218466 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -288,10 +288,14 @@ def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_ ss_file (:obj:`str`_): The relative path and filename of the standard star datacube. It should be fits format, and for full functionality, should ideally of the form `pypeit.coadd3d.DataCube`_ - slitimg (`numpy.ndarray`_): - An image of the slit indicating which slit each pixel belongs to - tilts (`numpy.ndarray`_): - Spectral tilts. 
+ senspar (:class:`~pypeit.par.pypeitpar.SensFuncPar`): + The parameters required for the sensitivity function computation. + blaze_wave (`numpy.ndarray`_, optional): + Wavelength array used to construct blaze_spline + blaze_spline (`scipy.interpolate.interp1d`_, optional): + Spline representation of the reference blaze function (based on the illumflat). + grating_corr (:obj:`bool`_, optional): + If a grating correction should be performed, set this variable to True. Returns: `numpy.ndarray`_: A mask of the good sky pixels (True = good) From cac8037decd7790e3b5ec0287e0b0935403395a4 Mon Sep 17 00:00:00 2001 From: rcooke Date: Tue, 3 Oct 2023 10:58:41 +0100 Subject: [PATCH 53/81] hist spec --- pypeit/coadd3d.py | 149 +++++++++++++++++++++-------------------- pypeit/core/extract.py | 45 +++++++++++++ 2 files changed, 123 insertions(+), 71 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 390709c890..c9e3f5f444 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -16,7 +16,7 @@ from pypeit import msgs from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils from pypeit.core.flexure import calculate_image_phase -from pypeit.core import datacube, flux_calib, parse, ref_index +from pypeit.core import datacube, extract, flux_calib, parse, ref_index from pypeit.spectrographs.util import load_spectrograph # Use a fast histogram for speed! 
@@ -441,6 +441,7 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, ove self.fluxcal = False self.blaze_wave, self.blaze_spec = None, None self.blaze_spline, self.flux_spline = None, None + self.flat_splines = dict() # A dictionary containing the splines of the flatfield if self.cubepar['standard_cube'] is not None: self.make_sensfunc() @@ -547,18 +548,36 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwave, collapse=False, eq reference=self.cubepar['reference_image'], collapse=collapse, equinox=equinox, specname=specname) + def set_blaze_spline(self, wave_spl, spec_spl): + """ + Generate a spline that represents the blaze function. This only needs to be done once, + because it is used as the reference blaze. It is only important if you are combining + frames that require a grating correction (i.e. have slightly different grating angles). + + Args: + wave_spl (`numpy.ndarray`_): + 1D wavelength array where the blaze has been evaluated + spec_spl (`numpy.ndarray`_): + 1D array (same size as wave_spl), that represents the blaze function for each wavelength. + """ + # Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous + # exposure in this for loop) + if self.blaze_spline is None: + self.blaze_wave, self.blaze_spec = wave_spl, spec_spl + self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', + bounds_error=False, fill_value="extrapolate") + def make_sensfunc(self): """ Generate the sensitivity function to be used for the flux calibration. 
""" self.fluxcal = True # The first standard star cube is used as the reference blaze spline - if self.cubepar['grating_corr'] and self.blaze_spline is None: - # Load the blaze spline + if self.cubepar['grating_corr']: + # Load the blaze information stdcube = fits.open(self.cubepar['standard_cube']) - self.blaze_wave, self.blaze_spec = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data - self.blaze_spline = interp1d(self.blaze_wave, self.blaze_spec, - kind='linear', bounds_error=False, fill_value="extrapolate") + # If a reference blaze spline has not been set, do that now. + self.set_blaze_spline(stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data) # Generate a spline representation of the sensitivity function self.flux_spline = datacube.make_sensfunc(self.cubepar['standard_cube'], self.senspar, blaze_wave=self.blaze_wave, blaze_spline=self.blaze_spline, @@ -730,6 +749,53 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): # Return the skysub params for this frame return this_skysub, skyImg, skyScl + def get_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): + """ + Calculate the relative spectral sensitivity correction due to grating shifts with the + input frames. + + Parameters + ---------- + flatfile : :obj:`str` + Unique path of a flatfield frame used to calculate the relative spectral sensitivity + of the corresponding science frame. + waveimg : `numpy.ndarray`_ + 2D image (same shape as the science frame) indicating the wavelength of each detector pixel. 
+ slits : :class:`pypeit.slittrace.SlitTraceSet`_): + Class containing information about the slits + spat_flexure: :obj:`float`, optional: + Spatial flexure in pixels + """ + if flatfile not in self.flat_splines.keys(): + msgs.info("Calculating relative sensitivity for grating correction") + # Check if the Flat file exists + if not os.path.exists(flatfile): + msgs.error("Grating correction requested, but the following file does not exist:" + msgs.newline() + flatfile) + # Load the Flat file + flatimages = flatfield.FlatImages.from_file(flatfile) + total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, spat_flexure=spat_flexure) * \ + flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, spat_flexure=spat_flexure) + flatframe = flatimages.pixelflat_raw / total_illum + if flatimages.pixelflat_spec_illum is None: + # Calculate the relative scale + scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits, + slit_illum_ref_idx=self.flatpar['slit_illum_ref_idx'], + model=None, + skymask=None, trim=self.flatpar['slit_trim'], + flexure=spat_flexure, + smooth_npix=self.flatpar['slit_illum_smooth_npix']) + else: + msgs.info("Using relative spectral illumination from FlatImages") + scale_model = flatimages.pixelflat_spec_illum + # Extract a quick spectrum of the flatfield + wave_spl, spec_spl = extract.extract_hist_spectrum(waveimg, flatframe*utils.inverse(scale_model), + gpm=waveimg != 0, bins=slits.nspec) + # Store the result + self.flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', bounds_error=False, fill_value="extrapolate") + self.flat_splines[flatfile + "_wave"] = wave_spl.copy() + # Finally, if a reference blaze spline has not been set, do that now. 
+ self.set_blaze_spline(wave_spl, spec_spl) + def align_user_offsets(self): """ Align the RA and DEC of all input frames, and then @@ -764,15 +830,17 @@ def coadd(self): class SlicerIFUCoAdd3D(CoAdd3D): """ Child of CoAdd3D for SlicerIFU data reduction. For documentation, see CoAdd3d parent class above. - spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False, - show=False, debug=False + This child class of the IFU datacube creation performs the series of steps that are specific to + slicer-based IFUs, including the following steps: + + * Calculates the astrometric correction that is needed to align spatial positions along the slices + * """ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False, show=False, debug=False): super().__init__(spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, show=show, debug=debug) - self.flat_splines = dict() # A dictionary containing the splines of the flatfield self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale self._specscale = np.zeros(self.numfiles) @@ -824,67 +892,6 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): # Return the alignment splines return alignSplines - def get_grating_shift(self, flatfile, waveimg, slits, spat_flexure=None): - """ - Calculate the relative spectral sensitivity correction due to grating shifts with the - input frames. - - Parameters - ---------- - flatfile : :obj:`str` - Unique path of a flatfield frame used to calculate the relative spectral sensitivity - of the corresponding science frame. - waveimg : `numpy.ndarray`_ - 2D image (same shape as the science frame) indicating the wavelength of each detector pixel. 
- slits : :class:`pypeit.slittrace.SlitTraceSet`_): - Class containing information about the slits - spat_flexure: :obj:`float`, optional: - Spatial flexure in pixels - """ - if flatfile not in self.flat_splines.keys(): - msgs.info("Calculating relative sensitivity for grating correction") - # Check if the Flat file exists - if not os.path.exists(flatfile): - msgs.error("Grating correction requested, but the following file does not exist:" + - msgs.newline() + flatfile) - # Load the Flat file - flatimages = flatfield.FlatImages.from_file(flatfile) - total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, - spat_flexure=spat_flexure) * \ - flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, - spat_flexure=spat_flexure) - flatframe = flatimages.pixelflat_raw / total_illum - if flatimages.pixelflat_spec_illum is None: - # Calculate the relative scale - scale_model = flatfield.illum_profile_spectral(flatframe, waveimg, slits, - slit_illum_ref_idx=self.flatpar['slit_illum_ref_idx'], - model=None, - skymask=None, trim=self.flatpar['slit_trim'], - flexure=spat_flexure, - smooth_npix=self.flatpar['slit_illum_smooth_npix']) - else: - msgs.info("Using relative spectral illumination from FlatImages") - scale_model = flatimages.pixelflat_spec_illum - # Apply the relative scale and generate a 1D "spectrum" - onslit = waveimg != 0 - wavebins = np.linspace(np.min(waveimg[onslit]), np.max(waveimg[onslit]), slits.nspec) - hist, edge = np.histogram(waveimg[onslit], bins=wavebins, - weights=flatframe[onslit] / scale_model[onslit]) - cntr, edge = np.histogram(waveimg[onslit], bins=wavebins) - cntr = cntr.astype(float) - norm = (cntr != 0) / (cntr + (cntr == 0)) - spec_spl = hist * norm - wave_spl = 0.5 * (wavebins[1:] + wavebins[:-1]) - self.flat_splines[flatfile] = interp1d(wave_spl, spec_spl, kind='linear', - bounds_error=False, fill_value="extrapolate") - self.flat_splines[flatfile + "_wave"] = wave_spl.copy() - # 
Check if a reference blaze spline exists (either from a standard star if fluxing or from a previous - # exposure in this for loop) - if self.blaze_spline is None: - self.blaze_wave, self.blaze_spec = wave_spl, spec_spl - self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', - bounds_error=False, fill_value="extrapolate") - def set_voxel_sampling(self): """ This function checks if the spatial and spectral scales of all frames are consistent. @@ -1087,7 +1094,7 @@ def load(self): msgs.error('Processed flat calibration file not recorded by spec2d file!') flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) # Setup the grating correction - self.get_grating_shift(flatfile, waveimg, slits, spat_flexure=spat_flexure) + self.get_grating_corr(flatfile, waveimg, slits, spat_flexure=spat_flexure) # Calculate the grating correction gratcorr_sort = datacube.correct_grating_shift(wave_sort, self.flat_splines[flatfile + "_wave"], self.flat_splines[flatfile], diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 0e26639340..796e76b702 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -451,6 +451,51 @@ def extract_boxcar(sciimg, ivar, mask, waveimg, skyimg, spec, fwhmimg=None, base spec.BOX_NPIX = pixtot-pixmsk +def extract_hist_spectrum(waveimg, frame, gpm=None, bins=1000): + """ + Generate a quick spectrum using the nearest grid point (histogram) algorithm. + + Args: + waveimg (`numpy.ndarray`_): + A 2D image of the wavelength at each pixel. + frame (`numpy.ndarray`_): + The frame to use to extract a spectrum. 
Shape should be the same as waveimg + gpm (`numpy.ndarray`_, optional): + A boolean array indicating the pixels to include in the histogram (True = include) + bins (`numpy.ndarray`_, int, optional): + Either a 1D array indicating the bin edges to be used for the histogram, + or an integer that specifies the number of bin edges to generate + + Returns: + `numpy.ndarray`_: The wavelength at the centre of each histogram bin + `numpy.ndarray`_: The spectrum at each pixel of the returned wavelength array + """ + # Check the inputs + if waveimg.shape != frame.shape: + msgs.error("Wavelength image is not the same shape as the input frame") + # Check the GPM + _gpm = gpm if gpm is not None else waveimg > 0 + if waveimg.shape != _gpm.shape: + msgs.error("Wavelength image is not the same shape as the GPM") + # Set the bins + if type(bins) is int: + _bins = np.linspace(np.min(waveimg[_gpm]), np.max(waveimg[_gpm]), bins) + elif type(bins) is np.ndarray: + _bins = bins + else: + msgs.error("Argument 'bins' should be an integer or a numpy array") + + # Construct a histogram and the normalisation + hist, edge = np.histogram(waveimg[gpm], bins=_bins, weights=frame[gpm]) + cntr, edge = np.histogram(waveimg[gpm], bins=_bins) + # Normalise + cntr = cntr.astype(float) + spec = hist * utils.inverse(cntr) + # Generate the corresponding wavelength array - set it to be the bin centre + wave = 0.5 * (_bins[1:] + _bins[:-1]) + return wave, spec + + def findfwhm(model, sig_x): r""" Calculate the spatial FWHM of an object profile. This is utility routine is used in :func:`~pypeit.core.extract.fit_profile`. 
From ccb7fe518ee2f5f63c3cdaf2edf1a89bbab48d9f Mon Sep 17 00:00:00 2001 From: rcooke Date: Tue, 3 Oct 2023 11:52:42 +0100 Subject: [PATCH 54/81] align code --- pypeit/coadd3d.py | 56 +++++++++++++++++++---------------------- pypeit/core/datacube.py | 46 +++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 30 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index c9e3f5f444..a8c2b4abc6 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -796,27 +796,6 @@ def get_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): # Finally, if a reference blaze spline has not been set, do that now. self.set_blaze_spline(wave_spl, spec_spl) - def align_user_offsets(self): - """ - Align the RA and DEC of all input frames, and then - manually shift the cubes based on user-provided offsets. - The offsets should be specified in arcseconds, and the - ra_offset should include the cos(dec) factor. - """ - # First, translate all coordinates to the coordinates of the first frame - # Note: You do not need cos(dec) here, this just overrides the IFU coordinate centre of each frame - # The cos(dec) factor should be input by the user, and should be included in the self.opts['ra_offset'] - ref_shift_ra = self.ifu_ra[0] - self.ifu_ra - ref_shift_dec = self.ifu_dec[0] - self.ifu_dec - for ff in range(self.numfiles): - # Apply the shift - self.all_ra[self.all_idx == ff] += ref_shift_ra[ff] + self.opts['ra_offset'][ff] / 3600.0 - self.all_dec[self.all_idx == ff] += ref_shift_dec[ff] + self.opts['dec_offset'][ff] / 3600.0 - msgs.info("Spatial shift of cube #{0:d}:" + msgs.newline() + - "RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, - self.opts['ra_offset'][ff], - self.opts['dec_offset'][ff])) - def coadd(self): """ Main entry routine to set the order of operations to coadd the data. 
For specific @@ -1180,14 +1159,26 @@ def load(self): def run_align(self): """ This routine aligns multiple cubes by using manual input offsets or by cross-correlating white light images. + + Returns: + `numpy.ndarray`_: A new set of RA values that have been aligned + `numpy.ndarray`_: A new set of Dec values that has been aligned """ # Grab cos(dec) for convenience cosdec = np.cos(np.mean(self.all_dec) * np.pi / 180.0) - # Register spatial offsets between all frames if self.opts['ra_offset'] is not None: - self.align_user_offsets() + # Fill in some offset arrays + ra_offset, dec_offset = np.zeros(self.numfiles), np.zeros(self.numfiles) + for ff in range(self.numfiles): + ra_offset[ff] = self.opts['ra_offset'][ff] + dec_offset[ff] = self.opts['dec_offset'][ff] + # Calculate the offsets + new_ra, new_dec = datacube.align_user_offsets(self.all_ra, self.all_dec, self.all_idx, + self.ifu_ra, self.ifu_dec, + ra_offset, dec_offset) else: + new_ra, new_dec = self.all_ra.copy(), self.all_dec.copy() # Find the wavelength range where all frames overlap min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength @@ -1200,12 +1191,12 @@ def run_align(self): msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") # Setup the WCS to use for all white light images ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied - image_wcs, voxedge, reference_image = self.create_wcs(self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], + image_wcs, voxedge, reference_image = self.create_wcs(new_ra[ww], new_dec[ww], self.all_wave[ww], self._dspat, wavediff, collapse=True) if voxedge[2].size != 2: msgs.error("Spectral range for WCS is incorrect for white light image") - wl_imgs = generate_image_subpixel(image_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], + wl_imgs = generate_image_subpixel(image_wcs, new_ra[ww], 
new_dec[ww], self.all_wave[ww], self.all_sci[ww], self.all_ivar[ww], self.all_wghts[ww], self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], self.all_tilts, self.all_slits, self.all_align, voxedge, @@ -1228,17 +1219,21 @@ def run_align(self): dec_shift *= self._dspat msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff+1, ra_shift*3600.0, dec_shift*3600.0)) # Apply the shift - self.all_ra[self.all_idx == ff] += ra_shift - self.all_dec[self.all_idx == ff] += dec_shift + new_ra[self.all_idx == ff] += ra_shift + new_dec[self.all_idx == ff] += dec_shift + return new_ra, new_dec def compute_weights(self): """ Compute the relative weights to apply to pixels that are collected into the voxels of the output DataCubes + + Returns: + `numpy.ndarray`_: The individual pixel weights for each detector pixel, and every frame. """ # Calculate the relative spectral weights of all pixels if self.numfiles == 1: # No need to calculate weights if there's just one frame - self.all_wghts = np.ones_like(self.all_sci) + all_wghts = np.ones_like(self.all_sci) else: # Find the wavelength range where all frames overlap min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength @@ -1256,8 +1251,9 @@ def compute_weights(self): self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) # Compute the weights - self.all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, wl_full[:, :, 0], + all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, wl_full[:, :, 0], self._dspat, self._dwv, relative_weights=self.cubepar['relative_weights']) + return all_wghts def coadd(self): """ @@ -1307,7 +1303,7 @@ def coadd(self): self.run_align() # Compute the relative weights on the spectra - 
self.compute_weights() + self.all_wghts = self.compute_weights() # Generate the WCS, and the voxel edges cube_wcs, vox_edges, _ = self.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, self._dwv) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 12af218466..5d126a0f66 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -563,6 +563,52 @@ def load_imageWCS(filename, ext=0): return image, imgwcs +def align_user_offsets(all_ra, all_dec, all_idx, ifu_ra, ifu_dec, ra_offset, dec_offset): + """ + Align the RA and DEC of all input frames, and then + manually shift the cubes based on user-provided offsets. + The offsets should be specified in arcseconds, and the + ra_offset should include the cos(dec) factor. + + Args: + all_ra (`numpy.ndarray`_): + A 1D array containing the RA values of each detector pixel of every frame. + all_dec (`numpy.ndarray`_): + A 1D array containing the Dec values of each detector pixel of every frame. + Same size as all_ra. + all_idx (`numpy.ndarray`_): + A 1D array containing an ID value for each detector frame (0-indexed). + Same size as all_ra. + ifu_ra (`numpy.ndarray`_): + A list of RA values of the IFU (one value per frame) + ifu_dec (`numpy.ndarray`_): + A list of Dec values of the IFU (one value per frame) + ra_offset (`numpy.ndarray`_): + A list of RA offsets to be applied to the input pixel values (one value per frame). + Note, the ra_offset MUST contain the cos(dec) factor. This is the number of arcseconds + on the sky that represents the telescope offset. + dec_offset (`numpy.ndarray`_): + A list of Dec offsets to be applied to the input pixel values (one value per frame). 
+ + Returns: + `numpy.ndarray`_: A new set of RA values that have been aligned + `numpy.ndarray`_: A new set of Dec values that has been aligned + """ + # First, translate all coordinates to the coordinates of the first frame + # Note: You do not need cos(dec) here, this just overrides the IFU coordinate centre of each frame + # The cos(dec) factor should be input by the user, and should be included in the self.opts['ra_offset'] + ref_shift_ra = ifu_ra[0] - ifu_ra + ref_shift_dec = ifu_dec[0] - ifu_dec + numfiles = ra_offset.size + for ff in range(numfiles): + # Apply the shift + all_ra[all_idx == ff] += ref_shift_ra[ff] + ra_offset[ff] / 3600.0 + all_dec[all_idx == ff] += ref_shift_dec[ff] + dec_offset[ff] / 3600.0 + msgs.info("Spatial shift of cube #{0:d}:" + msgs.newline() + + "RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, ra_offset[ff], dec_offset[ff])) + return all_ra, all_dec + + def create_wcs(all_ra, all_dec, all_wave, dspat, dwave, ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None, reference=None, collapse=False, equinox=2000.0, specname="PYP_SPEC"): From 32e0f3385a389a30c279f6eaaee8bd0fe42f4f63 Mon Sep 17 00:00:00 2001 From: rcooke Date: Tue, 3 Oct 2023 11:56:46 +0100 Subject: [PATCH 55/81] load to init --- pypeit/coadd3d.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index a8c2b4abc6..bc1dd6cda2 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -823,6 +823,8 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwr self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. 
self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale self._specscale = np.zeros(self.numfiles) + # Loop through all of the frames, load the data, and save datacubes if no combining is required + self.load() def get_alignments(self, spec2DObj, slits, spat_flexure=None): """ @@ -1287,9 +1289,6 @@ def coadd(self): Note, there are several algorithms used to combine multiple frames. Refer to the subpixellate() routine for more details about the combination options. """ - # First loop through all of the frames, load the data, and save datacubes if no combining is required - self.load() - # No need to continue if we are not combining nor aligning frames if not self.combine and not self.align: return From d6825ef1e3568d131a5545ebe22d60d1842fa455 Mon Sep 17 00:00:00 2001 From: rcooke Date: Tue, 3 Oct 2023 14:37:54 +0100 Subject: [PATCH 56/81] extra docs --- pypeit/coadd3d.py | 59 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index bc1dd6cda2..caa766171a 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -749,7 +749,7 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): # Return the skysub params for this frame return this_skysub, skyImg, skyScl - def get_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): + def add_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): """ Calculate the relative spectral sensitivity correction due to grating shifts with the input frames. @@ -811,10 +811,32 @@ class SlicerIFUCoAdd3D(CoAdd3D): Child of CoAdd3D for SlicerIFU data reduction. For documentation, see CoAdd3d parent class above. 
This child class of the IFU datacube creation performs the series of steps that are specific to - slicer-based IFUs, including the following steps: + slicer-based IFUs, including the following steps + + Data preparation: + + * Loads individual spec2d files + * If requested, subtract the sky (either from a dedicated sky frame, or use the sky model stored in the science spec2d file) + * The sky regions near the spectral edges of the slits are masked + * Apply a relative spectral illumination correction (scalecorr) that registers all input frames to the scale illumination. + * Generate a WCS of each individual frame, and calculate the RA and DEC of each individual detector pixel + * Calculate the astrometric correction that is needed to align spatial positions along the slices + * Compute the differential atmospheric refraction correction + * Apply the extinction correction + * Apply a grating correction (gratcorr) - This corrects for the relative spectral efficiency of combining data taken with multiple different grating angles + * Flux calibrate + + Data cube generation: + + * If frames are not being combined, individual data cubes are generated and saved as a DataCube object. A white light image is also produced, if requested + * If frames are being aligned and/or combined, the following steps are followed: + - The output voxel sampling is computed (this must be consistent for all frames) + - Frames are aligned (either by user-specified offsets, or by a fancy cross-correlation) + - The relative weights to each for each detector pixel is computed + - If frames are not being combined, individual DataCube's will be generated for each frame + - If frames are being combined, a single DataCube will be generated. + - White light images are also produced, if requested. 
- * Calculates the astrometric correction that is needed to align spatial positions along the slices - * """ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False, show=False, debug=False): @@ -965,6 +987,9 @@ def load(self): waveimg = spec2DObj.waveimg bpmmask = spec2DObj.bpmmask + # Mask the edges of the spectrum where the sky model is bad + sky_is_good = datacube.make_good_skymask(slitid_img_init, spec2DObj.tilts) + # TODO :: Really need to write some detailed information in the docs about all of the various corrections that can optionally be applied # TODO :: Include a flexure correction from the sky frame? Note, you cannot use the waveimg from a sky frame, @@ -1000,20 +1025,9 @@ def load(self): self.mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init]) self.mnmx_wv[ff, slit_idx, 1] = np.max(waveimg[onslit_init]) - # Remove edges of the spectrum where the sky model is bad - sky_is_good = datacube.make_good_skymask(slitid_img_init, spec2DObj.tilts) - - # Construct a good pixel mask - # TODO: This should use the mask function to figure out which elements are masked. 
- onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good - - # Grab the WCS of this frame - frame_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv) - self.all_wcs.append(copy.deepcopy(frame_wcs)) - # Find the largest spatial scale of all images being combined # TODO :: probably need to put this in the DetectorContainer - pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This should be degrees/pixel + pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This is degrees/pixel slscl = self.spec.get_meta_value([spec2DObj.head0], 'slitwid') self._spatscale[ff, 0] = pxscl self._spatscale[ff, 1] = slscl @@ -1028,8 +1042,15 @@ def load(self): msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( 3600.0 * self._dspat, 3600.0 * slscl)) - # Generate the alignment splines, and then - # retrieve images of the RA and Dec of every pixel, + # Construct a good pixel mask + # TODO: This should use the mask function to figure out which elements are masked. 
+ onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good + + # Grab the WCS of this frame + frame_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv) + self.all_wcs.append(copy.deepcopy(frame_wcs)) + + # Generate the alignment splines, and then retrieve images of the RA and Dec of every pixel, # and the number of spatial pixels in each slit alignSplines = self.get_alignments(spec2DObj, slits, spat_flexure=spat_flexure) raimg, decimg, minmax = slits.get_radec_image(frame_wcs, alignSplines, spec2DObj.tilts, @@ -1075,7 +1096,7 @@ def load(self): msgs.error('Processed flat calibration file not recorded by spec2d file!') flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) # Setup the grating correction - self.get_grating_corr(flatfile, waveimg, slits, spat_flexure=spat_flexure) + self.add_grating_corr(flatfile, waveimg, slits, spat_flexure=spat_flexure) # Calculate the grating correction gratcorr_sort = datacube.correct_grating_shift(wave_sort, self.flat_splines[flatfile + "_wave"], self.flat_splines[flatfile], From 4d4c37fbed72592155ae8c9fcbc187a6a16f780c Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 4 Oct 2023 07:43:03 +0100 Subject: [PATCH 57/81] fix subpixel --- pypeit/coadd3d.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index caa766171a..e2467f76aa 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -423,7 +423,7 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, ove self.spec_subpixel, self.spat_subpixel = 1, 1 if self.method == "subpixel": msgs.info("Adopting the subpixel algorithm to generate the datacube.") - spec_subpixel, spat_subpixel = self.cubepar['spec_subpixel'], self.cubepar['spat_subpixel'] + self.spec_subpixel, self.spat_subpixel = self.cubepar['spec_subpixel'], self.cubepar['spat_subpixel'] elif self.method == "ngp": msgs.info("Adopting the nearest grid point (NGP) algorithm to generate 
the datacube.") else: @@ -1784,6 +1784,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w # Calculate the DAR correction at each sub pixel ra_corr, dec_corr = _all_dar[fr].correction(this_wave) # This routine needs the wavelengths to be expressed in Angstroms # Calculate spatial and spectral positions of the subpixels + embed() spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() # Transform this to spatial location From 451b67a2ab56883d08ea3a93f8a96994a0a04bb7 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 4 Oct 2023 07:43:34 +0100 Subject: [PATCH 58/81] fix subpixel --- pypeit/coadd3d.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index e2467f76aa..e52d43e4ea 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1784,7 +1784,6 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w # Calculate the DAR correction at each sub pixel ra_corr, dec_corr = _all_dar[fr].correction(this_wave) # This routine needs the wavelengths to be expressed in Angstroms # Calculate spatial and spectral positions of the subpixels - embed() spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() # Transform this to spatial location From f1d3513d5e61c2bb1c4b72d8bc18cdded9bca01c Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 4 Oct 2023 07:51:02 +0100 Subject: [PATCH 59/81] fix time --- pypeit/scripts/coadd_datacube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 5ae3a74974..3eb62feb25 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -53,10 +53,10 @@ def main(args): parset['rdx']['detnum'] = int(args.det) # Instantiate CoAdd3d + tstart = time.time() coadd = CoAdd3D.get_instance(coadd3dfile.filenames, 
coadd3dfile.options, spectrograph=spectrograph, par=parset, det=args.det, overwrite=args.overwrite) # Coadd the files - tstart = time.time() coadd.coadd() msgs.info(utils.get_time_string(time.time()-tstart)) From 3e4307023b16b045bd4f4704bb45221a5cfb0a96 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 4 Oct 2023 08:19:13 +0100 Subject: [PATCH 60/81] fix subpix --- pypeit/coadd3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index e52d43e4ea..b4a7f5887f 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1222,7 +1222,7 @@ def run_align(self): wl_imgs = generate_image_subpixel(image_wcs, new_ra[ww], new_dec[ww], self.all_wave[ww], self.all_sci[ww], self.all_ivar[ww], self.all_wghts[ww], self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], - self.all_tilts, self.all_slits, self.all_align, voxedge, + self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, all_idx=self.all_idx[ww], spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) if reference_image is None: From 8c53a1fcad213e701cae6b59969f42c4b61707cb Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 4 Oct 2023 21:33:48 +0100 Subject: [PATCH 61/81] docstrings and cleanup --- pypeit/coadd3d.py | 229 +++++++++++++++++-------------- pypeit/core/extract.py | 4 +- pypeit/inputfiles.py | 6 +- pypeit/par/pypeitpar.py | 10 +- pypeit/scripts/coadd_datacube.py | 9 +- pypeit/slittrace.py | 1 + 6 files changed, 142 insertions(+), 117 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index b4a7f5887f..56ddca8715 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -37,8 +37,6 @@ class DataCube(datamodel.DataContainer): .. 
include:: ../include/class_datamodel_datacube.rst Args: - wave (`numpy.ndarray`_): - A 1D numpy array containing the wavelength array for convenience (nwave) flux (`numpy.ndarray`_): The science datacube (nwave, nspaxel_y, nspaxel_x) sig (`numpy.ndarray`_): @@ -46,6 +44,8 @@ class DataCube(datamodel.DataContainer): bpm (`numpy.ndarray`_): The bad pixel mask of the datacube (nwave, nspaxel_y, nspaxel_x). True values indicate a bad pixel + wave (`numpy.ndarray`_): + A 1D numpy array containing the wavelength array for convenience (nwave) blaze_wave (`numpy.ndarray`_): Wavelength array of the spectral blaze function blaze_spec (`numpy.ndarray`_): @@ -70,16 +70,16 @@ class DataCube(datamodel.DataContainer): """ version = '1.2.0' - datamodel = {'wave': dict(otype=np.ndarray, atype=np.floating, - descr='Wavelength of each slice in the spectral direction. ' - 'The units are Angstroms.'), - 'flux': dict(otype=np.ndarray, atype=np.floating, + datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating, descr='Flux datacube in units of counts/s/Ang/arcsec^2 or ' '10^-17 erg/s/cm^2/Ang/arcsec^2'), 'sig': dict(otype=np.ndarray, atype=np.floating, descr='Error datacube (matches units of flux)'), 'bpm': dict(otype=np.ndarray, atype=np.uint8, descr='Bad pixel mask of the datacube (0=good, 1=bad)'), + 'wave': dict(otype=np.ndarray, atype=np.floating, + descr='Wavelength of each slice in the spectral direction. 
' + 'The units are Angstroms.'), 'blaze_wave': dict(otype=np.ndarray, atype=np.floating, descr='Wavelength array of the spectral blaze function'), 'blaze_spec': dict(otype=np.ndarray, atype=np.floating, @@ -95,7 +95,7 @@ class DataCube(datamodel.DataContainer): 'spect_meta' ] - def __init__(self, wave, flux, sig, bpm, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, + def __init__(self, flux, sig, bpm, wave, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, fluxed=None): args, _, _, values = inspect.getargvalues(inspect.currentframe()) @@ -203,39 +203,39 @@ class DARcorrection: """ This class holds all of the functions needed to quickly compute the differential atmospheric refraction correction. """ - def __init__(self, hdr0, cosdec, spectrograph=None, wave_ref=4500.0): + def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, co2=400.0, wave_ref=4500.0): """ Args: - hdr0 (`astropy.io.fits.Header`_): - Header of the spec2d file. This input should be retrieved from spec2DObj.head0 + airmass (:obj:`float`): + The airmass of the observations (unitless) + parangle (:obj:`float`): + The parallactic angle of the observations (units=degree, relative to North, towards East is postive) + pressure (:obj:`float`): + The atmospheric pressure during the observations in Pascal. Valid range is from 10kPa - 140 kPa. + temperature (:obj:`float`): + Temperature in degree Celsius. Valid temperate range is -40 to + 100 degree Celsius. + humidity (:obj:`float`): + The humidity during the observations (Expressed as a percentage, not a fraction!). + Valid range is 0 to 100. cosdec (:obj:`float`): Cosine of the target declination. - spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional): - The name or instance of the spectrograph used to obtain the data. - If None, this is pulled from the file header. + co2 (:obj:`float`, optional): + Carbon dioxide concentration in µmole/mole. 
The default value + of 450 should be enough for most purposes. Valid range is from + 0 - 2000 µmole/mole. wave_ref (:obj:`float`, optional): Reference wavelength (The DAR correction will be performed relative to this wavelength) """ msgs.info("Preparing the parameters for the DAR correction") - # Check on Spectrograph input - if spectrograph is None: - spectrograph = hdr0['PYP_SPEC'] - - if isinstance(spectrograph, str): - self.spec = load_spectrograph(spectrograph) - self.specname = spectrograph - else: - # Assume it's a Spectrograph instance - self.spec = spectrograph - self.specname = spectrograph.name # Get DAR parameters - self.airmass = self.spec.get_meta_value([hdr0], 'airmass') # unitless - self.parangle = self.spec.get_meta_value([hdr0], 'parangle') - self.pressure = self.spec.get_meta_value([hdr0], 'pressure') # units are pascals - self.temperature = self.spec.get_meta_value([hdr0], 'temperature') # units are degrees C - self.humidity = self.spec.get_meta_value([hdr0], 'humidity') # Expressed as a percentage (not a fraction!) - self.co2 = 400.0 # units are mu-mole/mole + self.airmass = airmass # unitless + self.parangle = parangle + self.pressure = pressure + self.temperature = temperature + self.humidity = humidity + self.co2 = co2 self.wave_ref = wave_ref # This should be in Angstroms self.cosdec = cosdec @@ -306,8 +306,8 @@ class CoAdd3D: """ # Superclass factory method generates the subclass instance @classmethod - def get_instance(cls, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False, - show=False, debug=False): + def get_instance(cls, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, dec_offsets=None, + spectrograph=None, det=1, overwrite=False, show=False, debug=False): """ Instantiate the subclass appropriate for the provided spectrograph. 
@@ -323,27 +323,38 @@ def get_instance(cls, spec2dfiles, opts, spectrograph=None, par=None, det=1, ove return next(c for c in cls.__subclasses__() if c.__name__ == (spectrograph.pypeline + 'CoAdd3D'))( - spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, + spec2dfiles, par, skysub_frame=skysub_frame, scale_corr=scale_corr, ra_offsets=ra_offsets, + dec_offsets=dec_offsets, spectrograph=spectrograph, det=det, overwrite=overwrite, show=show, debug=debug) - def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, overwrite=False, - show=False, debug=False): + def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, dec_offsets=None, + spectrograph=None, det=None, overwrite=False, show=False, debug=False): """ Args: spec2dfiles (:obj:`list`): List of all spec2D files - opts (:obj:`dict`): - Options associated with each spec2d file - spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional): - The name or instance of the spectrograph used to obtain the data. - If None, this is pulled from the file header. - par (:class:`~pypeit.par.pypeitpar.PypeItPar`, optional): + par (:class:`~pypeit.par.pypeitpar.PypeItPar`): An instance of the parameter set. If None, assumes that detector 1 is the one reduced and uses the default reduction parameters for the spectrograph (see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.default_pypeit_par` for the relevant spectrograph class). + skysub_frame (:obj:`list`, optional): + If not None, this should be a list of frames to use for the sky subtraction of each individual + entry of spec2dfiles. It should be the same length as spec2dfiles. + scale_corr (:obj:`list`, optional): + If not None, this should be a list of relative scale correction options. It should be the + same length as spec2dfiles. + ra_offsets (:obj:`list`, optional): + If not None, this should be a list of relative RA offsets of each frame. 
It should be the + same length as spec2dfiles. + dec_offsets (:obj:`list`, optional): + If not None, this should be a list of relative Dec offsets of each frame. It should be the + same length as spec2dfiles. + spectrograph (:obj:`str`, :class:`~pypeit.spectrographs.spectrograph.Spectrograph`, optional): + The name or instance of the spectrograph used to obtain the data. + If None, this is pulled from the file header. det (:obj:`int`_, optional): Detector index overwrite (:obj:`bool`, optional): @@ -356,24 +367,34 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, ove """ self.spec2d = spec2dfiles self.numfiles = len(spec2dfiles) - self.opts = opts + self.par = par self.overwrite = overwrite + # Do some quick checks on the input options + if skysub_frame is not None: + if len(skysub_frame) != len(spec2dfiles): + msgs.error("The skysub_frame list should be identical length to the spec2dfiles list") + if scale_corr is not None: + if len(scale_corr) != len(spec2dfiles): + msgs.error("The scale_corr list should be identical length to the spec2dfiles list") + if ra_offsets is not None: + if len(ra_offsets) != len(spec2dfiles): + msgs.error("The ra_offsets list should be identical length to the spec2dfiles list") + if dec_offsets is not None: + if len(dec_offsets) != len(spec2dfiles): + msgs.error("The dec_offsets list should be identical length to the spec2dfiles list") + # Set the frame-specific options + self.skysub_frame = skysub_frame + self.scale_corr = scale_corr + self.ra_offsets = np.array(ra_offsets) if isinstance(ra_offsets, list) else ra_offsets + self.dec_offsets = np.array(dec_offsets) if isinstance(dec_offsets, list) else dec_offsets # Check on Spectrograph input if spectrograph is None: with fits.open(spec2dfiles[0]) as hdu: spectrograph = hdu[0].header['PYP_SPEC'] - if isinstance(spectrograph, str): - self.spec = load_spectrograph(spectrograph) - self.specname = spectrograph - else: - # Assume it's a Spectrograph instance - 
self.spec = spectrograph - self.specname = spectrograph.name - - # Grab the parset, if not provided - self.par = self.spec.default_pypeit_par() if par is None else par + self.spec = load_spectrograph(spectrograph) + self.specname = self.spec.name # Extract some parsets for simplicity self.cubepar = self.par['reduce']['cube'] @@ -402,7 +423,7 @@ def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=None, ove msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image") msgs.info("Setting 'align' to False") self.align = False - if self.opts['ra_offset'] is not None: + if self.ra_offsets is not None: if not self.align: msgs.warn("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.") msgs.info("Setting 'align' to True") @@ -596,17 +617,19 @@ def set_default_scalecorr(self): msgs.newline() + self.cubepar['scale_corr']) try: spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['scale_corr'], self.detname) - self.relScaleImgDef = spec2DObj.scaleimg - self.scalecorr_default = self.cubepar['scale_corr'] - except: + except Exception as e: + msgs.warn(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') msgs.warn("Could not load scaleimg from spec2d file:" + msgs.newline() + self.cubepar['scale_corr'] + msgs.newline() + "scale correction will not be performed unless you have specified the correct" + msgs.newline() + "scale_corr file in the spec2d block") self.cubepar['scale_corr'] = None self.scalecorr_default = "none" + else: + self.relScaleImgDef = spec2DObj.scaleimg + self.scalecorr_default = self.cubepar['scale_corr'] - def get_current_scalecorr(self, spec2DObj, opts_scalecorr=None): + def get_current_scalecorr(self, spec2DObj, scalecorr=None): """ Determine the scale correction that should be used to correct for the relative spectral scaling of the science frame @@ -614,43 +637,46 @@ def get_current_scalecorr(self, spec2DObj, opts_scalecorr=None): Args: spec2DObj 
(:class:`~pypeit.spec2dobj.Spec2DObj`_): 2D PypeIt spectra object. - opts_scalecorr (:obj:`str`, optional): + scalecorr (:obj:`str`, optional): A string that describes what mode should be used for the sky subtraction. The allowed values are: - default - Use the default value, as defined in self.set_default_scalecorr() - image - Use the relative scale that was derived from the science frame - none - Do not perform relative scale correction + + * default: Use the default value, as defined in self.set_default_scalecorr() + * image: Use the relative scale that was derived from the science frame + * none: Do not perform relative scale correction Returns: - :obj:`str`_: A string that describes the scale correction mode to be used (see opts_scalecorr description) - `numpy.ndarray`_: 2D image (same shape as science frame) containing the relative spectral scaling to apply to the science frame + A tuple (this_scalecorr, relScaleImg) where this_scalecorr is a :obj:`str`_ that describes the + scale correction mode to be used (see scalecorr description) and relScaleImg is a `numpy.ndarray`_ + (2D, same shape as science frame) containing the relative spectral scaling to apply to the science frame. 
""" this_scalecorr = self.scalecorr_default relScaleImg = self.relScaleImgDef.copy() - if opts_scalecorr is not None: - if opts_scalecorr.lower() == 'default': + if scalecorr is not None: + if scalecorr.lower() == 'default': if self.scalecorr_default == "image": relScaleImg = spec2DObj.scaleimg this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling else: this_scalecorr = self.scalecorr_default # Use the default value for the scale correction - elif opts_scalecorr.lower() == 'image': + elif scalecorr.lower() == 'image': relScaleImg = spec2DObj.scaleimg this_scalecorr = "image" # Use the current spec2d for the relative spectral illumination scaling - elif opts_scalecorr.lower() == 'none': + elif scalecorr.lower() == 'none': relScaleImg = np.array([1]) this_scalecorr = "none" # Don't do relative spectral illumination scaling else: # Load a user specified frame for sky subtraction msgs.info("Loading the following frame for the relative spectral illumination correction:" + - msgs.newline() + opts_scalecorr) + msgs.newline() + scalecorr) try: - spec2DObj_scl = spec2dobj.Spec2DObj.from_file(opts_scalecorr, self.detname) - except: - msgs.error( - "Could not load skysub image from spec2d file:" + msgs.newline() + opts_scalecorr) - relScaleImg = spec2DObj_scl.scaleimg - this_scalecorr = opts_scalecorr + spec2DObj_scl = spec2dobj.Spec2DObj.from_file(scalecorr, self.detname) + except Exception as e: + msgs.warn(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') + msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + scalecorr) + else: + relScaleImg = spec2DObj_scl.scaleimg + this_scalecorr = scalecorr if this_scalecorr == "none": msgs.info("Relative spectral illumination correction will not be performed.") else: @@ -677,12 +703,13 @@ def set_default_skysub(self): try: spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['skysub_frame'], self.detname) skysub_exptime = 
fits.open(self.cubepar['skysub_frame'])[0].header['EXPTIME']
+            except:
+                msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame'])
+            else:
                 self.skysub_default = self.cubepar['skysub_frame']
                 self.skyImgDef = spec2DObj.sciimg / skysub_exptime  # Sky counts/second
                 # self.skyImgDef = spec2DObj.skymodel/skysub_exptime  # Sky counts/second
                 self.skySclDef = spec2DObj.scaleimg
-            except:
-                msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame'])

     def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None):
         """
@@ -701,9 +728,11 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None):
                 none - Do not perform sky subtraction

         Returns:
-            :obj:`str`_: A string that describes the sky subtration mode to be used (see opts_skysub description)
-            `numpy.ndarray`_: 2D image (same shape as science frame) containing the sky frame to be subtracted from the science frame
-            `numpy.ndarray`_: 2D image (same shape as science frame) containing the relative spectral scaling that has been applied to the returned sky frame
+            A tuple (this_skysub, skyImg, skyScl) where this_skysub is a :obj:`str`_ that describes the sky subtraction
+            mode to be used (see opts_skysub description), skyImg is a `numpy.ndarray`_ (2D, same shape as science
+            frame) containing the sky frame to be subtracted from the science frame, and skyScl is a `numpy.ndarray`_
+            (2D, same shape as science frame) containing the relative spectral scaling that has been applied to the
+            returned sky frame.
         """
         this_skysub = self.skysub_default
         if self.skysub_default == "image":
@@ -803,7 +832,6 @@ def coadd(self):
         """
         msgs.bug("This routine should be overridden by child classes.")
         msgs.error("Cannot proceed without coding the coadd routine.")
-        return


 class SlicerIFUCoAdd3D(CoAdd3D):
@@ -838,9 +866,10 @@ class SlicerIFUCoAdd3D(CoAdd3D):
         - White light images are also produced, if requested.
""" - def __init__(self, spec2dfiles, opts, spectrograph=None, par=None, det=1, overwrite=False, - show=False, debug=False): - super().__init__(spec2dfiles, opts, spectrograph=spectrograph, par=par, det=det, overwrite=overwrite, + def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, dec_offsets=None, + spectrograph=None, det=1, overwrite=False, show=False, debug=False): + super().__init__(spec2dfiles, par, skysub_frame=skysub_frame, scale_corr=scale_corr, ra_offsets=ra_offsets, + dec_offsets=dec_offsets, spectrograph=spectrograph, det=det, overwrite=overwrite, show=show, debug=debug) self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale @@ -889,11 +918,8 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): else: locations = self.par['calibrations']['alignment']['locations'] traces = alignments.traces - # Generate an RA/DEC image - msgs.info("Generating RA/DEC image") - alignSplines = alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts) - # Return the alignment splines - return alignSplines + msgs.info("Generating alignment splines") + return alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts) def set_voxel_sampling(self): """ @@ -950,8 +976,7 @@ def load(self): self.ifu_dec = np.append(self.ifu_dec, self.spec.compound_meta([hdr0], 'dec')) # Get the exposure time - # TODO :: Surely this should be retrieved from metadata... although it's coming from spec2d file? 
- exptime = hdr0['EXPTIME'] + exptime = self.spec.compound_meta([hdr0], 'exptime') # Setup for PypeIt imports msgs.reset(verbosity=2) @@ -971,12 +996,11 @@ def load(self): # Set the default behaviour if a global skysub frame has been specified this_skysub, skyImg, skyScl = self.get_current_skysub(spec2DObj, exptime, - opts_skysub=self.opts['skysub_frame'][ff]) + opts_skysub=self.skysub_frame[ff]) # Load the relative scale image, if something other than the default has been provided this_scalecorr, relScaleImg = self.get_current_scalecorr(spec2DObj, - opts_scalecorr=self.opts['scale_corr'][ff]) - + scalecorr=self.scale_corr[ff]) # Prepare the relative scaling factors relSclSky = skyScl / spec2DObj.scaleimg # This factor ensures the sky has the same relative scaling as the science frame relScale = spec2DObj.scaleimg / relScaleImg # This factor is applied to the sky subtracted science frame @@ -1075,13 +1099,17 @@ def load(self): # Compute the DAR correction cosdec = np.cos(np.mean(dec_sort) * np.pi / 180.0) - darcorr = DARcorrection(spec2DObj.head0, cosdec, spectrograph=self.spec) + airmass = self.spec.get_meta_value([spec2DObj.head0], 'airmass') # unitless + parangle = self.spec.get_meta_value([spec2DObj.head0], 'parangle') + pressure = self.spec.get_meta_value([spec2DObj.head0], 'pressure') # units are pascals + temperature = self.spec.get_meta_value([spec2DObj.head0], 'temperature') # units are degrees C + humidity = self.spec.get_meta_value([spec2DObj.head0], 'humidity') # Expressed as a percentage (not a fraction!) 
+ darcorr = DARcorrection(airmass, parangle, pressure, temperature, humidity, cosdec) # Perform extinction correction msgs.info("Applying extinction correction") longitude = self.spec.telescope['longitude'] latitude = self.spec.telescope['latitude'] - airmass = spec2DObj.head0[self.spec.meta['airmass']['card']] extinct = flux_calib.load_extinction_data(longitude, latitude, self.senspar['UVIS']['extinct_file']) # extinction_correction requires the wavelength is sorted extcorr_sort = flux_calib.extinction_correction(wave_sort * units.AA, airmass, extinct) @@ -1190,16 +1218,11 @@ def run_align(self): # Grab cos(dec) for convenience cosdec = np.cos(np.mean(self.all_dec) * np.pi / 180.0) # Register spatial offsets between all frames - if self.opts['ra_offset'] is not None: - # Fill in some offset arrays - ra_offset, dec_offset = np.zeros(self.numfiles), np.zeros(self.numfiles) - for ff in range(self.numfiles): - ra_offset[ff] = self.opts['ra_offset'][ff] - dec_offset[ff] = self.opts['dec_offset'][ff] + if self.ra_offsets is not None: # Calculate the offsets new_ra, new_dec = datacube.align_user_offsets(self.all_ra, self.all_dec, self.all_idx, self.ifu_ra, self.ifu_dec, - ra_offset, dec_offset) + self.ra_offsets, self.dec_offsets) else: new_ra, new_dec = self.all_ra.copy(), self.all_dec.copy() # Find the wavelength range where all frames overlap @@ -1624,7 +1647,7 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s # Write out the datacube msgs.info("Saving datacube as: {0:s}".format(outfile)) - final_cube = DataCube(wave, flxcube.T, np.sqrt(varcube.T), bpmcube.T, specname, blaze_wave, blaze_spec, + final_cube = DataCube(flxcube.T, np.sqrt(varcube.T), bpmcube.T, wave, specname, blaze_wave, blaze_spec, sensfunc=sensfunc, fluxed=fluxcal) final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 796e76b702..f4a1d00ab2 100644 --- a/pypeit/core/extract.py +++ 
b/pypeit/core/extract.py @@ -478,9 +478,9 @@ def extract_hist_spectrum(waveimg, frame, gpm=None, bins=1000): if waveimg.shape != _gpm.shape: msgs.error("Wavelength image is not the same shape as the GPM") # Set the bins - if type(bins) is int: + if isinstance(bins, int): _bins = np.linspace(np.min(waveimg[_gpm]), np.max(waveimg[_gpm]), bins) - elif type(bins) is np.ndarray: + elif isinstance(bins, np.ndarray): _bins = bins else: msgs.error("Argument 'bins' should be an integer or a numpy array") diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py index 292ec31fb5..83619c6d8d 100644 --- a/pypeit/inputfiles.py +++ b/pypeit/inputfiles.py @@ -796,16 +796,16 @@ def options(self): Dictionary containing cube options. """ # Define the list of allowed parameters - opts = dict(scale_corr=None, skysub_frame=None) + opts = dict(scale_corr=None, skysub_frame=None, ra_offset=None, dec_offset=None) # Get the scale correction files - scale_corr = self.path_and_files('scale_corr', skip_blank=True) + scale_corr = self.path_and_files('scale_corr', skip_blank=False, check_exists=False) if scale_corr is None: opts['scale_corr'] = [None]*len(self.filenames) elif len(scale_corr) == 1 and len(self.filenames) > 1: opts['scale_corr'] = scale_corr.lower()*len(self.filenames) elif len(scale_corr) != 0: - opts['scale_corr'] = scale_corr.lower() + opts['scale_corr'] = scale_corr # Get the skysub files skysub_frame = self.path_and_files('skysub_frame', skip_blank=False, check_exists=False) diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index 8a4a5e4e1f..442007ee53 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -1456,6 +1456,7 @@ def __init__(self, slit_spec=None, relative_weights=None, align=None, combine=No 'line map to register two frames.' \ defaults['method'] = "subpixel" + options['method'] = ["subpixel", "ngp"] dtypes['method'] = str descr['method'] = 'What method should be used to generate the datacube. 
There are currently two options: ' \ '(1) "subpixel" (default) - this algorithm divides each pixel in the spec2d frames ' \ @@ -1588,19 +1589,12 @@ def from_dict(cls, cfg): return cls(**kwargs) def validate(self): - # Check the method options - allowed_methods = ["subpixel", "ngp"] - if self.data['method'] not in allowed_methods: - # Check if the supplied name exists - if not os.path.exists(self.data['method']): - raise ValueError("The 'method' must be one of:\n"+", ".join(allowed_methods) + - "\nor, the relative path to a spec2d file.") # Check the skysub options allowed_skysub_options = ["none", "image", ""] # Note, "None" is treated as None which gets assigned to the default value "image". if self.data['skysub_frame'] not in allowed_skysub_options: # Check if the supplied name exists if not os.path.exists(self.data['method']): - raise ValueError("The 'skysub_frame' must be one of:\n" + ", ".join(allowed_methods) + + raise ValueError("The 'skysub_frame' must be one of:\n" + ", ".join(allowed_skysub_options) + "\nor, the relative path to a spec2d file.") if len(self.data['whitelight_range']) != 2: raise ValueError("The 'whitelight_range' must be a two element list of either NoneType or float") diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 3eb62feb25..04eeed13bc 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -52,9 +52,16 @@ def main(args): msgs.info("Restricting to detector={}".format(args.det)) parset['rdx']['detnum'] = int(args.det) + # Extract the options + ra_offsets = coadd3dfile.options['ra_offset'] + dec_offsets = coadd3dfile.options['dec_offset'] + skysub_frame = coadd3dfile.options['skysub_frame'] + scale_corr = coadd3dfile.options['scale_corr'] + # Instantiate CoAdd3d tstart = time.time() - coadd = CoAdd3D.get_instance(coadd3dfile.filenames, coadd3dfile.options, spectrograph=spectrograph, par=parset, + coadd = CoAdd3D.get_instance(coadd3dfile.filenames, parset, 
skysub_frame=skysub_frame, scale_corr=scale_corr, + ra_offsets=ra_offsets, dec_offsets=dec_offsets, spectrograph=spectrograph, det=args.det, overwrite=args.overwrite) # Coadd the files diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py index aaa266cd70..5554ac753b 100644 --- a/pypeit/slittrace.py +++ b/pypeit/slittrace.py @@ -447,6 +447,7 @@ def get_radec_image(self, wcs, alignSplines, tilts, initial=True, flexure=None): reference (usually the centre of the slit) and the edges of the slits. Shape is (nslits, 2). """ + msgs.info("Generating an RA/DEC image") # Initialise the output raimg = np.zeros((self.nspec, self.nspat)) decimg = np.zeros((self.nspec, self.nspat)) From 63d3259ff2e70991e4c3d9be2c95d48006c9b95b Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 5 Oct 2023 13:29:52 +0100 Subject: [PATCH 62/81] docstrings and cleanup --- pypeit/coadd3d.py | 25 +++++++++-------- pypeit/core/datacube.py | 42 +++++++++++++--------------- pypeit/core/extract.py | 4 +-- pypeit/find_objects.py | 5 ++-- pypeit/spectrographs/gemini_gnirs.py | 14 ++++------ pypeit/spectrographs/keck_kcwi.py | 12 -------- 6 files changed, 42 insertions(+), 60 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 56ddca8715..6f153b8a8d 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -978,9 +978,6 @@ def load(self): # Get the exposure time exptime = self.spec.compound_meta([hdr0], 'exptime') - # Setup for PypeIt imports - msgs.reset(verbosity=2) - # TODO :: Consider loading all calibrations into a single variable within the main CoAdd3D parent class. 
# Initialise the slit edges
@@ -1169,8 +1166,10 @@ def load(self):
             numwav = int((np.max(waveimg) - wave0) / dwv)
             bins = self.spec.get_datacube_bins(slitlength, minmax, numwav)
             # Generate the output WCS for the datacube
-            crval_wv = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else 1.0E10 * frame_wcs.wcs.crval[2]
-            cd_wv = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else 1.0E10 * frame_wcs.wcs.cd[2, 2]
+            tmp_crval_wv = (frame_wcs.wcs.crval[2] * frame_wcs.wcs.cunit[2]).to(units.Angstrom).value
+            tmp_cd_wv = (frame_wcs.wcs.cd[2,2] * frame_wcs.wcs.cunit[2]).to(units.Angstrom).value
+            crval_wv = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else tmp_crval_wv
+            cd_wv = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else tmp_cd_wv
             output_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv)
             # Set the wavelength range of the white light image.
wl_wvrng = None @@ -1353,9 +1353,10 @@ def coadd(self): sensfunc = None if self.flux_spline is not None: - # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) + # Get wavelength of each pixel numwav = vox_edges[2].size - 1 - senswave = cube_wcs.spectral.wcs_pix2world(np.arange(numwav), 0)[0] * 1.0E10 + wcs_scale = (1.0 * cube_wcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms + senswave = wcs_scale * cube_wcs.spectral.wcs_pix2world(np.arange(numwav), 0)[0] sensfunc = self.flux_spline(senswave) # Generate a datacube @@ -1623,9 +1624,10 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s else: flxcube, varcube, bpmcube = subpix - # Get wavelength of each pixel, and note that the WCS gives this in m, so convert to Angstroms (x 1E10) + # Get wavelength of each pixel nspec = flxcube.shape[2] - wave = 1.0E10 * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] # The factor 1.0E10 convert to Angstroms + wcs_scale = (1.0*output_wcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms + wave = wcs_scale * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] # Check if the user requested a white light image if whitelight_range is not None: @@ -1742,7 +1744,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w residual cube. The latter is only returned if debug is True. """ # Check for combinations of lists or not - if type(tilts) is list and type(slits) is list and type(astrom_trans) is list and type(all_dar) is list: + if all([isinstance(l, list) for l in [tilts, slits, astrom_trans, all_dar]]): # Several frames are being combined. 
Check the lists have the same length numframes = len(tilts) if len(slits) != numframes or len(astrom_trans) != numframes or len(all_dar) != numframes: @@ -1760,8 +1762,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.") # Store in the following variables _tilts, _slits, _astrom_trans, _all_dar = tilts, slits, astrom_trans, all_dar - elif type(tilts) is not list and type(slits) is not list and \ - type(astrom_trans) is not list and type(all_dar) is not list: + elif all([not isinstance(l, list) for l in [tilts, slits, astrom_trans, all_dar]]): # Just a single frame - store as lists for this code _tilts, _slits, _astrom_trans, _all_dar = [tilts], [slits], [astrom_trans], [all_dar] all_idx = np.zeros(all_sci.size) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 5d126a0f66..2f508077b3 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -213,8 +213,9 @@ def extract_standard_spec(stdcube, subpixel=20): # Setup the WCS stdwcs = wcs.WCS(stdcube['FLUX'].header) - wcs_wav = stdwcs.wcs_pix2world(np.vstack((np.zeros(numwave), np.zeros(numwave), np.arange(numwave))).T, 0) - wave = wcs_wav[:, 2] * 1.0E10 * units.AA + + wcs_scale = (1.0 * stdwcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms + wave = wcs_scale * stdwcs.spectral.wcs_pix2world(np.arange(numwave), 0)[0] # Generate a whitelight image, and fit a 2D Gaussian to estimate centroid and width wl_img = make_whitelight_fromcube(flxcube) @@ -342,8 +343,7 @@ def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_ polyfunc=senspar['UVIS']['polyfunc']) wgd = np.where(zeropoint_fit_gpm) sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) - flux_spline = interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") - return flux_spline + 
return interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate")


 def make_good_skymask(slitimg, tilts):
@@ -399,18 +399,14 @@ def get_output_filename(fil, par_outfile, combine, idx=1):
         str: The output filename to use.
     """
     if combine:
-        if par_outfile == "":
-            par_outfile = "datacube.fits"
-        # Check the output files don't exist
-        outfile = par_outfile if ".fits" in par_outfile else par_outfile + ".fits"
-    else:
-        if par_outfile == "":
-            outfile = fil.replace("spec2d_", "spec3d_")
-        else:
-            # Use the output filename as a prefix
-            outfile = os.path.splitext(par_outfile)[0] + "_{0:03d}.fits".format(idx)
-    # Return the outfile
-    return outfile
+        if par_outfile == '':
+            par_outfile = 'datacube.fits'
+        # Check if we need to append an extension
+        return par_outfile if '.fits' in par_outfile else f'{par_outfile}.fits'
+    if par_outfile == '':
+        return fil.replace('spec2d_', 'spec3d_')
+    # Finally, if nothing else, use the output filename as a prefix, and a numerical suffix
+    return os.path.splitext(par_outfile)[0] + f'_{idx:03}.fits'


 def get_output_whitelight_filename(outfile):
@@ -423,10 +419,9 @@ def get_output_whitelight_filename(outfile):
         The output filename used for the datacube.

     Returns:
-        str: The output filename to use for the whitelight image.
+        A string containing the output filename to use for the whitelight image.
     """
-    out_wl_filename = os.path.splitext(outfile)[0] + "_whitelight.fits"
-    return out_wl_filename
+    return os.path.splitext(outfile)[0] + "_whitelight.fits"


 def get_whitelight_pixels(all_wave, min_wl, max_wl):
@@ -515,7 +510,7 @@ def make_whitelight_fromcube(cube, wave=None, wavemin=None, wavemax=None):
             reduce the wavelength range.

     Returns:
-        `numpy.ndarray`_: Whitelight image of the input cube.
+        A whitelight image of the input cube (of type `numpy.ndarray`_).
""" # Make a wavelength cut, if requested cutcube = cube.copy() @@ -591,8 +586,8 @@ def align_user_offsets(all_ra, all_dec, all_idx, ifu_ra, ifu_dec, ra_offset, dec A list of Dec offsets to be applied to the input pixel values (one value per frame). Returns: - `numpy.ndarray`_: A new set of RA values that have been aligned - `numpy.ndarray`_: A new set of Dec values that has been aligned + A tuple containing a new set of RA and Dec values that have been aligned. Both arrays + are of type `numpy.ndarray`_. """ # First, translate all coordinates to the coordinates of the first frame # Note: You do not need cos(dec) here, this just overrides the IFU coordinate centre of each frame @@ -842,7 +837,8 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white mask_stack = (flux_stack != 0.0) & (ivar_stack != 0.0) # Obtain a wavelength of each pixel wcs_res = whitelightWCS.wcs_pix2world(np.vstack((np.zeros(numwav), np.zeros(numwav), np.arange(numwav))).T, 0) - wave_spec = wcs_res[:, 2] * 1.0E10 + wcs_scale = (1.0 * whitelightWCS.wcs.cunit[2]).to(units.Angstrom).value # Ensures the WCS is in Angstroms + wave_spec = wcs_scale * wcs_res[:, 2] # Compute the smoothing scale to use if sn_smooth_npix is None: sn_smooth_npix = int(np.round(0.1 * wave_spec.size)) diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index f4a1d00ab2..a618028af5 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -467,8 +467,8 @@ def extract_hist_spectrum(waveimg, frame, gpm=None, bins=1000): or an integer that specifies the number of bin edges to generate Returns: - `numpy.ndarray`_: The wavelength at the centre of each histogram bin - `numpy.ndarray`_: The spectrum at each pixel of the returned wavelength array + A tuple containing the wavelength and spectrum at the centre of each histogram bin. Both + arrays returned in the tuple are `numpy.ndarray`_. 
""" # Check the inputs if waveimg.shape != frame.shape: diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index ca7a669dcf..de2b921407 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -42,7 +42,7 @@ class FindObjects: Specifies object being reduced. Should be 'science', 'standard', or 'science_coadd2d'. wv_calib (:class:`~pypeit.wavecalib.WaveCalib`, optional): - This is only used for the SlicerIFU child when a joint sky subtraction + This is only used for the :class:`SlicerIFUFindObjects` child when a joint sky subtraction is requested. waveTilts (:class:`~pypeit.wavetilts.WaveTilts`, optional): Calibration frame with arc/sky line tracing of the wavelength @@ -471,7 +471,8 @@ def get_platescale(self, slitord_id=None): Args: slitord_id (:obj:`int`, optional): - slit spat_id (MultiSlit, SlicerIFU) or ech_order (Echelle) value + slit spat_id (:class:`MultiSlitFindObjects`, :class:`SlicerIFUFindObjects`) + or ech_order (:class:`EchelleFindObjects`) value. Returns: :obj:`float`: plate scale in binned pixels diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index 7a48421418..ff7c1f042b 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -137,32 +137,28 @@ def compound_meta(self, headarr, meta_key): try: return headarr[0]['PRESSUR2'] # Must be in astropy.units.pascal except KeyError: - msgs.warn("Pressure is not in header") - msgs.info("The default pressure will be assumed: 61.1 kPa") + msgs.warn("Pressure is not in header - The default pressure (61.1 kPa) will be assumed") return 61.1E3 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C except KeyError: - msgs.warn("Temperature is not in header") - msgs.info("The default temperature will be assumed: 1.5 deg C") + msgs.warn("Temperature is not in header - The default temperature (1.5 deg C) will be assumed") return 1.5 # van Kooten & Izett, arXiv:2208.11794 elif 
meta_key == 'humidity': try: # Humidity expressed as a percentage, not a fraction return headarr[0]['HUMIDITY'] except KeyError: - msgs.warn("Humidity is not in header") - msgs.info("The default relative humidity will be assumed: 20 %") + msgs.warn("Humidity is not in header - The default relative humidity (20 %) will be assumed") return 20.0 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'parangle': try: # Humidity expressed as a percentage, not a fraction - msgs.work("Parallactic angle is not available for GNIRS - DAR correction may be incorrect") + msgs.warn("Parallactic angle is not available for GNIRS - DAR correction may be incorrect") return headarr[0]['PARANGLE'] # Must be expressed in radians except KeyError: - msgs.warn("Parallactic angle is not in header!") - msgs.info("The default parallactic angle will be assumed: 0 degrees") + msgs.warn("Parallactic angle is not in header - The default parallactic angle (0 degrees) will be assumed") return 0.0 else: msgs.error("Not ready for this compound meta") diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 0b4b01beb8..be81a8f57c 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -599,18 +599,6 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) skypa = self.compound_meta([hdr], 'posang') - # Now in compont_meta - # Get rotator position - #if 'ROTPOSN' in hdr: - # rpos = hdr['ROTPOSN'] - #else: - # rpos = 0. - #if 'ROTREFAN' in hdr: - # rref = hdr['ROTREFAN'] - #else: - # rref = 0. 
- # Get the offset and PA - #skypa = rpos + rref # IFU position angle (degrees) rotoff = 0.0 # IFU-SKYPA offset (degrees) crota = np.radians(-(skypa + rotoff)) From 898e86e21f2209ce9a3bde91bebf797146370d6d Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 5 Oct 2023 21:33:35 +0100 Subject: [PATCH 63/81] fix DAR and dependencies --- pypeit/coadd3d.py | 32 +- pypeit/core/datacube.py | 4 +- pypeit/core/ref_index.py | 738 --------------------------------- pypeit/tests/test_ref_index.py | 177 -------- setup.cfg | 2 + 5 files changed, 21 insertions(+), 932 deletions(-) delete mode 100644 pypeit/core/ref_index.py delete mode 100644 pypeit/tests/test_ref_index.py diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 6f153b8a8d..113aa5c054 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -10,13 +10,14 @@ from astropy import wcs, units from astropy.io import fits +import erfa from scipy.interpolate import interp1d import numpy as np from pypeit import msgs from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils from pypeit.core.flexure import calculate_image_phase -from pypeit.core import datacube, extract, flux_calib, parse, ref_index +from pypeit.core import datacube, extract, flux_calib, parse from pypeit.spectrographs.util import load_spectrograph # Use a fast histogram for speed! @@ -203,7 +204,7 @@ class DARcorrection: """ This class holds all of the functions needed to quickly compute the differential atmospheric refraction correction. """ - def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, co2=400.0, wave_ref=4500.0): + def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, wave_ref=4500.0): """ Args: airmass (:obj:`float`): @@ -220,10 +221,6 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, c Valid range is 0 to 100. cosdec (:obj:`float`): Cosine of the target declination. - co2 (:obj:`float`, optional): - Carbon dioxide concentration in µmole/mole. 
The default value - of 450 should be enough for most purposes. Valid range is from - 0 - 2000 µmole/mole. wave_ref (:obj:`float`, optional): Reference wavelength (The DAR correction will be performed relative to this wavelength) """ @@ -232,13 +229,16 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, c # Get DAR parameters self.airmass = airmass # unitless self.parangle = parangle - self.pressure = pressure - self.temperature = temperature - self.humidity = humidity - self.co2 = co2 + self.pressure = pressure * units.mbar + self.temperature = temperature * units.Celsius + self.humidity = humidity/100.0 self.wave_ref = wave_ref # This should be in Angstroms self.cosdec = cosdec + # Calculate the coefficients of the correction + self.refa, self.refb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), + self.humidity, (self.wave_ref*units.Angstrom).to_value(units.micron)) + # Print out the DAR parameters msgs.info("DAR correction parameters:" + msgs.newline() + " Airmass = {0:.2f}".format(self.airmass) + msgs.newline() + @@ -261,13 +261,13 @@ def calculate_dispersion(self, waves): The atmospheric dispersion (in degrees) for each wavelength input """ - # Calculate + # Calculate the zenith angle z = np.arccos(1.0/self.airmass) - n0 = ref_index.ciddor(wave=self.wave_ref/10.0, t=self.temperature, p=self.pressure, rh=self.humidity, co2=self.co2) - n1 = ref_index.ciddor(wave=waves/10.0, t=self.temperature, p=self.pressure, rh=self.humidity, co2=self.co2) - - return (180.0/np.pi) * (n0 - n1) * np.tan(z) # This is in degrees + # Calculate the coefficients of the correction + cnsa, cnsb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), + self.humidity, (waves*units.Angstrom).to_value(units.micron)) + return (180.0/np.pi) * (self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3 def correction(self, waves): """ @@ -289,6 +289,7 @@ def correction(self, waves): 
corr_ang = self.parangle - np.pi/2 # Calculate the full amount of refraction dar_full = self.calculate_dispersion(waves) + # Calculate the correction in dec and RA for each detector pixel # These numbers should be ADDED to the original RA and Dec values ra_corr = (dar_full/self.cosdec)*np.cos(corr_ang) @@ -1102,6 +1103,7 @@ def load(self): temperature = self.spec.get_meta_value([spec2DObj.head0], 'temperature') # units are degrees C humidity = self.spec.get_meta_value([spec2DObj.head0], 'humidity') # Expressed as a percentage (not a fraction!) darcorr = DARcorrection(airmass, parangle, pressure, temperature, humidity, cosdec) + darcorr.correction(wave_sort) # Perform extinction correction msgs.info("Applying extinction correction") diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 2f508077b3..f58c357bda 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -318,7 +318,7 @@ def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_ blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, kind='linear', bounds_error=False, fill_value="extrapolate") # Perform a grating correction - grat_corr = correct_grating_shift(wave.value, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline) + grat_corr = correct_grating_shift(wave, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline) # Apply the grating correction to the standard star spectrum Nlam_star /= grat_corr Nlam_ivar_star *= grat_corr ** 2 @@ -331,7 +331,7 @@ def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_ # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines. msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... 
this will be deprecated soon") zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \ - flux_calib.fit_zeropoint(wave.value, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, + flux_calib.fit_zeropoint(wave, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, mask_hydrogen_lines=senspar['mask_hydrogen_lines'], mask_helium_lines=senspar['mask_helium_lines'], hydrogen_mask_wid=senspar['hydrogen_mask_wid'], diff --git a/pypeit/core/ref_index.py b/pypeit/core/ref_index.py deleted file mode 100644 index 1a21d83a24..0000000000 --- a/pypeit/core/ref_index.py +++ /dev/null @@ -1,738 +0,0 @@ -""" -Module containing the core methods for calculating the refractive index -of the atmosphere based on current conditions. - -Note that this code is directly copied (on 25 September 2023) from the following -repository: https://github.com/phn/ref_index - -CREDIT :: All equations used in this module come from the documentation for the -NIST online refractive index calculator, written by Jack A. Stone and Jay H. Zimmerman, -and is available here: -https://emtoolbox.nist.gov/Wavelength/Documentation.asp - -Credit to the original source -################ -Refractive index of air. - -NIST provides an online calculator for calculating refractive index of -air, for light of a certain wave length, under varying atmospheric -conditions. This module implements the equations provided in the -documentation for the online calculator. - -In addition to calculating the refractive index, this module also has -functions for converting wave length of light in vacuum to that in air, -and vice-versa. - -The documentation for the online calculator is provided at -http://emtoolbox.nist.gov/Wavelength/Documentation.asp, and includes a -link to the online calculator. - -The following comments are based on the discussions presented in the -NIST documentation. It is intended as a brief overview. See -http://emtoolbox.nist.gov/Wavelength/Documentation.asp, for detailed -discussions. 
- -Refractive index of air can be caclulated using two different -algorithms: one due to Edlén (updated by Birch and Down), and one due -to Ciddor. The latter has been adopted by the International Association -of Geodesy (IAG) as the reference equation for calculating refractive -index of air. Functions for calculating refractive index using either -of these are defined in this module. - -The vacuum to air and air to vacuum wave length conversion functions in -this module use the Ciddor equation, in the form presented in the NIST -documentation. - -Uncertainities in refractive index, and hence in wave length -conversions, due to uncertanities in measured values of temperature, -pressure, and humidity exceeds that due to the intrinsic uncertainity -in the equations used. - -An uncertainty of 1e-6 in refractive index can result from a -combination of: - - + an error of 1°C (1.8 °F) in air temperature - - + an error of 0.4kPa (3mm of Hg) in air pressure - - + an error of 50% in relative humidity at sufficiently high air - temperatures (near 35°C) - -Valid range for input parameters for the refractive index calculations -are presented below. The online calculator issues a warning if input -parameters are outside a smaller interval within the maximum -range. Functions in this module do not raise a warning by default. But -they accept a keyword ``warn``, which when set to ``True`` will result -in warnings, when the input parameters are outside the accepted range. - - + Wavelength [300nm - 1700nm] - - Warning is issued if value is outside [350nm - 1600nm]. - - + Pressure [10kPa - 140kPa] - - Warning is issued if value is outside [60kPa - 120kPa]. - - + Temperature [-40∘C - 100∘C]. - - Warning is issued if value is outside [0∘C - 40∘C]. - - + Humidity [0 - 100] - - Can be given as relative humidity, dew point, frost point or - partial pressure of water vapour. 
A warning is given if the mole - fraction of water vapour exceeds 20% or, equivalently, relative - humidity exceeds 85%. A warning is issued if relative humidity is - less than 1%. - - + CO2 concentration [0µmole/mole - 2000µmole/mole] - - The common value to use is 450. Outdoor values are rarely below 300 - and indoor can be as high as 600. A difference of 150 will lead to - a difference of only ~ 2e-8 in index of refraction. - - A warning is issued if a value other than 450 is used. - - -In astronomy, the convention is to use the refraction correction for -wave length greater than 200nm, eventhough the equations are not -strictly valid at wave lengths shorter than 300nm. For example, the -popular IDLASTRO IDL code vactoair.pro and airtovac.pro will accept any -wave length greater than 2000Å. - -To accomodate this type of usage, instead of limiting the possible -input wave lengths, functions in this module will accept any wave -length value. It is up to the user to decide if a particular wave -length is to be used as an input to the equations. - -Comparison with the IDLASTRO vactoair.pro and airtovac.pro algorithms -show that the equivalent functions in this module, vac2air and air2vac, -give results that agree to within 1e-4nm, over a range of wavelengths -from 200nm to 1700nm. This uncertainty translates to a velocity -difference of 150m/s to 17m/s, over the wave length range 1700nm to -200nm. - -The IDLASTRO code uses a fixed value of temperature and humidity which -is not documented in the code. The above comparison was carried out at -a temperature of 15∘C and a relative humidity of 0. - -The IDL code used for testing was downloaded on 2011/10/07. The -revision history indicates that the IDL code in vactoair.pro and -airtovac.pro were last modified in March 2011. - -The PypeIt developers have made some minor adjustments to the code. 
- -Original author details: -:author: Prasanth Nair -:contact: prasanthhn@gmail.com -:license: BSD (http://www.opensource.org/licenses/bsd-license.php) -################### - -.. include:: ../include/links.rst - -""" - -from __future__ import division -from __future__ import print_function -import numpy as np -from pypeit import msgs - - -def f2k(f): - """Converts Fahrenheit to Kelvin.""" - return (f - 32.0) * (100.0 / 180.0) + 273.15 - - -def k2f(k): - """Converts Kelvin to Fahrenheit.""" - return (k - 273.15) * (180.0 / 100.0) + 32.0 - - -def c2k(c): - """Converts Celsius to Kelvin.""" - return c + 273.15 - - -def k2c(k): - """Converts Kelvin to Celsius.""" - return k - 273.15 - - -def c2f(c): - """Converts Celsius to Fahrenheit.""" - return c * (180.0 / 100.0) - 32.0 - - -def f2c(f): - """Converts Fahrenheit to Celsius.""" - return (f - 32.0) * (100.0 / 180.0) - - -def svp_water(t): - """Saturation vapour pressure over water at given temperature. - - Parameters - ---------- - t : float - Air temperature in degree Celsius. - - Returns - ------- - p_sv : float - Saturation vapour pressure over water, at the given - temperature, in Pascal. - - Notes - ----- - From section A-I of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - K1 = 1.16705214528e+03 - K2 = -7.24213167032e+05 - K3 = -1.70738469401e+01 - K4 = 1.20208247025e+04 - K5 = -3.23255503223e+06 - K6 = 1.49151086135e+01 - K7 = -4.82326573616e+03 - K8 = 4.05113405421e+05 - K9 = -2.38555575678e-01 - K10 = 6.50175348448e+02 - - T = t + 273.15 - omega = T + K9 / (T - K10) - A = omega ** 2 + K1 * omega + K2 - B = K3 * omega ** 2 + K4 * omega + K5 - C = K6 * omega ** 2 + K7 * omega + K8 - X = -B + np.sqrt(B ** 2 - 4 * A * C) - - p_sv = 1.0e6 * ((2.0 * C / X) ** 4) - - return p_sv - - -def svp_ice(t): - """Saturation vapour pressure over ice at given temperature. - - - Parameters - ---------- - t : float - Temperature in degree Celsius. 
- - Returns - ------- - p_sv : float - Saturation vapour pressure over ice, at the given - temperature, in Pascal. - - Notes - ----- - From section A-I of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - A1 = -13.928169 - A2 = 34.7078238 - - t += 273.15 - theta = t / 273.16 - Y = A1 * (1 - theta ** -1.5) + A2 * (1 - theta ** -1.25) - - p_sv = 611.657 * np.exp(Y) - - return p_sv - - -def dew_point_wvpp(td): - """Water vapour saturation pressure, given dew point temperature.""" - return svp_water(td) - - -def frost_point_wvpp(tf): - """Water vapour saturation pressure, given frost point temperature.""" - return svp_ice(tf) - - -def rh2wvpp(rh, t): - """Convert relative humidity to water vapour partial pressure. - - Parameters - ---------- - rh : float - Relative humidity as a number between 0 and 100. - t : float - Temperature in degree Celsius. - - Returns - ------- - p_sv : float - Water vapour partial pressure, in Pascal. - - Notes - ----- - See section A-II of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - # t > 0 according to documentation. - if t >= 0: - p_sv = svp_water(t) - elif t < 0: - p_sv = svp_ice(t) - - return (rh / 100.0) * p_sv - - -def f_factor(p, t): - """Enhancement factor for calculating mole fraction. - - Parameters - ---------- - p : float - Pressure in Pascal. - t : float - Temperature in degree Celsius. - - Returns - ------- - f : float - Enhancement factor needed in calculation of mole fraction. - - Notes - ----- - See section A-II of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - alpha = 1.00062 - beta = 3.14e-8 - gamma = 5.60e-7 - - return alpha + beta * p + gamma * (t ** 2) - - -def dew_point_mole_fraction(p, t): - """Water vapour mole fraction for given dew point temperature. - Parameters - ---------- - p : float - Pressure in Pascal. - t : float - Temperature in degree Celsius. - - Returns - ------- - xv : float - Mole fraction. 
- - Notes - ----- - See section A-II of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - return f_factor(p, t) * dew_point_wvpp(t) / p - - -def frost_point_mole_fraction(p, t): - """Water vapour mole fraction for given frost point temperature. - Parameters - ---------- - p : float - Pressure in Pascal. - t : float - Temperature in degree Celsius. - - Returns - ------- - xv : float - Mole fraction. - - Notes - ----- - See section A-II of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - return f_factor(p, t) * frost_point_wvpp(t) / p - - -def rh2mole_fraction(rh, p, t): - """Water vapour mole fraction from relative humidity. - - Parameters - ---------- - rh : float - Relative humidity as a number between 0 and 100. - p : float - Pressure in Pascal. - t : float - Temperature in Kelvin. - - Returns - ------- - xv : float - Mole fraction. - - Notes - ----- - See section A-II of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - return f_factor(p, t) * rh2wvpp(rh, t) / p - - -def pp2mole_fraction(pv, p, t): - """Water vapour mole fraction from partial pressure. - - Parameters - ---------- - rh : float - Relative humidity as a number between 0 and 100. - p : float - Pressure in Pascal. - t : float - Temperature in Kelvin. - - Returns - ------- - xv : float - Mole fraction. - - Notes - ----- - See section A-II of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. 
- - """ - return f_factor(p, t) * pv / p - - -def _check_range(**kwargs): - """Return True if value is inside accepted range.""" - if not (350 <= kwargs.get('wave', 633) <= 1600): - msgs.warn("Wave length outside [350nm, 1600nm].") - if not (60000 <= kwargs.get('p', 101325) <= 120000): - msgs.warn("Pressure outside [60000Pa - 120000Pa].") - if not (0 <= kwargs.get('t', 20) <= 40): - msgs.warn("Temperature outside [0C - 40C].") - if not (1 < kwargs.get('rh', 50) <= 85): - msgs.warn("Relative humidity outside (1 - 85].") - if not (kwargs.get('xv', 0.4) >= 0.2): - msgs.warn("Mole fraction less than 0.2.") - if kwargs.get('co2', 450) != 450: - msgs.warn("CO2 concentration is not 450.") - - -def ciddor_ri(wave, t, p, xv, co2=450, warn=False): - """Refractive index of air according to the Ciddor equation. - - Parameters - ---------- - wave : float or Numpy array of float - Wavelength in vacuum, in nano-meters. Valid wavelength range is - 300nm - 1700nm. - t : float - Temperature in degree Celsius. Valid temperate range is -40 to - 100 degree Celsius. - p : float - Pressure in Pascal. Valid range is from 10kPa - 140 kPa. - xv : float - Water vapour mole fraction, as a number between 0 and - 1. Default is set to 0. - co2 : float - Carbon dioxide concentration in µmole/mole. The default value - of 450 should be enough for most purposes. Valid range is from - 0 - 2000 µmole/mole. - warn : bool - Warning is issued if parameters fall outside accept - range. Accepted range is smaller than the valid ranges - mentioned above. See module docstring for accepted ranges. - - The default is False and no warnings are issued. - - Notes - ----- - See section A-III of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. 
- - See - """ - if warn: - _check_range(wave, t, p, xv) - - w0 = 295.235 - w1 = 2.6422 - w2 = -0.03238 - w3 = 0.004028 - k0 = 238.0185 - k1 = 5792105 - k2 = 57.362 - k3 = 167917 - a0 = 1.58123e-6 - a1 = -2.9331e-8 - a2 = 1.1043e-10 - b0 = 5.707e-6 - b1 = -2.051e-8 - c0 = 1.9898e-4 - c1 = -2.376e-6 - d = 1.83e-11 - e = -0.765e-8 - pr1 = 101325 - tr1 = 288.15 - Za = 0.9995922115 - rhovs = 0.00985938 - R = 8.314472 - Mv = 0.018015 - - wave = wave * 1.0e-3 - S = 1.0 / wave ** 2 - - ras = 1e-8 * ((k1 / (k0 - S)) + (k3 / (k2 - S))) - rvs = 1.022e-8 * (w0 + w1 * S + w2 * S ** 2 + w3 * S ** 3) - - Ma = 0.0289635 + 1.2011e-8 * (co2 - 400.0) - - raxs = ras * (1 + 5.34e-7 * (co2 - 450.0)) - - T = t + 273.15 - - Zm = a0 + a1 * t + a2 * t ** 2 + (b0 + b1 * t) * xv + \ - (c0 + c1 * t) * xv ** 2 - Zm *= -(p / T) - Zm += (p / T ) ** 2 * (d + e * xv ** 2) - Zm += 1 - - rhoaxs = pr1 * Ma / (Za * R * tr1) - - rhov = xv * p * Mv / (Zm * R * T) - - rhoa = (1 - xv) * p * Ma / (Zm * R * T) - - n = 1.0 + (rhoa / rhoaxs) * raxs + (rhov / rhovs) * rvs - - return n - - -def ciddor(wave, t, p, rh, co2=450, warn=False): - """Refractive index of air according to the Ciddor equation. - - Accepts relative humidity instead of mole fraction, as done in - ``ciddor_ri()``. - - Parameters - ---------- - wave : float or Numpy array of float - Wavelength in vacuum, in nano-meters. Valid wavelength range is - 300nm - 1700nm. - t : float - Temperature in degree Celsius. Valid temperate range is -40 to - 100 degree Celsius. - p : float - Pressure in Pascal. Valid range is from 10kPa - 140 kPa. - rh : float - Relative humidity [0 - 100]. - co2 : float - Carbon dioxide concentration in µmole/mole. The default value - of 450 should be enough for most purposes. Valid range is from - 0 - 2000 µmole/mole. - warn : bool - Warning is issued if parameters fall outside accept - range. Accepted range is smaller than the valid ranges - mentioned above. See module docstring for accepted ranges. 
- - The default is False and no warnings are issued. - - Notes - ----- - See section A-III of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - if warn: - _check_range(wave, t, p, rh) - # turn off warning, so that ciddor_ri doesn't issue duplicate - # warning. - warn = False - - xv = rh2mole_fraction(rh=rh, p=p, t=t) - return ciddor_ri(wave=wave, t=t, p=p, xv=xv, co2=co2, warn=warn) - - -def edlen_ri(wave, t, p, pv, warn=False): - """Refractive index of air according to the Edlén equation. - - Parameters - ---------- - wave : float or Numpy array of float - Wavelength in vacuum, in nano-meters. Valid wavelength range is - 300nm - 1700nm. - t : float - Temperature in degree Celsius. Valid temperate range is -40 to - 100 degree Celsius. - p : float - Pressure in Pascal. Valid range is from 10kPa - 140 kPa. - pv : float - Water vapour partial pressure, in Pascal. - warn : bool - Warning is issued if parameters fall outside accept - range. Accepted range is smaller than the valid ranges - mentioned above. See module docstring for accepted ranges. - - The default is False and no warnings are issued. - - Notes - ----- - See section A-IV of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - if warn: - _check_range(wave, t, p) - - A = 8342.54 - B = 2406147 - C = 15998 - D = 96095.43 - E = 0.601 - F = 0.00972 - G = 0.003661 - - wave = wave * 1.0e-3 - S = 1.0 / wave ** 2 - - ns = 1 + 1e-8 * (A + B / (130.0 - S) + C / (38.9 - S)) - - X = (1 + 1e-8 * (E - F * t) * p) / (1 + G * t) - - ntp = 1 + p * (ns - 1) * X / D - - n = ntp - 1e-10 * ((292.75 / (t + 273.15)) * \ - (3.7345 - 0.0401 * S)) * pv - - return n - - -def edlen(wave, t, p, rh, warn=False): - """Refractive index of air according to the Edlén equation. - - Accepts relative humidity instead of water vapour partial pressure, - as in ``edlen_ri()``. - - Parameters - ---------- - wave : float or Numpy array of float - Wavelength in vacuum, in nano-meters. 
Valid wavelength range is - 300nm - 1700nm. - t : float - Temperature in degree Celsius. Valid temperate range is -40 to - 100 degree Celsius. - p : float - Pressure in Pascal. Valid range is from 10kPa - 140 kPa. - rh : float - Relative humidity in [0 - 100]. - warn : bool - Warning is issued if parameters fall outside accept - range. Accepted range is smaller than the valid ranges - mentioned above. See module docstring for accepted ranges. - - The default is False and no warnings are issued. - - Notes - ----- - See section A-IV of - http://emtoolbox.nist.gov/Wavelength/Documentation.asp. - - """ - if warn: - _check_range(wave, t, p) - # turn off warning so that edlen_ri() doesn't raise duplicate - # warning. - warn = False - - pv = rh2wvpp(rh=rh, t=t) - return edlen_ri(wave=wave, t=t, p=p, pv=pv, warn=warn) - - -def vac2air(wave, t=15.0, p=101325, rh=0.0, co2=450, warn=False): - """Wavelength of light in air, using Ciddor refractive index. - - Parameters - ---------- - wave : float or Numpy array of float - Wavelength in nano-meters. Valid range is 300nm - 1700nm. - t : float - Temperature in degree Celsius. Valid range is -40 - 100 degree - Celsius. Default is 15 degree Celsius (288.15 Kelvin). - p : float - Pressure in Pascal. Valid range is 10kPa - 140kPa. Default is - 101325 Pa (1 atmosphere). - rh : float - Relative humidity as a number between 0 and 100. Default is 0. - co2 : float - Carbon dioxide concentration in µmole/mole. The default value - of 450 is sufficient for most purposes. Valid range is 0 - 2000 - µmole/mole. - warn : bool - Warning is issued if parameters fall outside accept - range. Accepted range is smaller than the valid ranges - mentioned above. See module docstring for accepted ranges. - - The default is False and no warnings are issued. - - Returns - ------- - w : float - Wavelength in air, in nm. 
- - """ - if warn: - _check_range(wave, t, p, rh, co2) - - n = ciddor(wave, t, p, rh, co2) - return wave / n - - -def air2vac(wave, t=15.0, p=101325, rh=0.0, co2=450, warn=False): - """Wavelength of light in vacuum, using Ciddor refractive index. - - The refractive index calculation needs wavelength in vacuum. In - this function, the wavelength in air is used. The errors are on the - order of 1e-5 nm. - - Parameters - ---------- - wave : float or Numpy array of float - Wavelength in nano-meters. Valid range is 300nm - 1700nm. - t : float - Temperature in degree Celsius. Valid range is -40 - 100 degree - Celsius. Default is 15 degree Celsius (288.15 Kelvin). - p : float - Pressure in Pascal. Valid range is 10kPa - 140kPa. Default is - 101325 Pa (1 atmosphere). - rh : float - Relative humidity as a number between 0 and 100. Default is 0. - co2 : float - Carbon dioxide concentration in µmole/mole. The default value - of 450 is sufficient for most purposes. Valid range is 0 - 2000 - µmole/mole. - warn : bool - Warning is issued if parameters fall outside accept - range. Accepted range is smaller than the valid ranges - mentioned above. See module docstring for accepted ranges. - - The default is False and no warnings are issued. - - Returns - ------- - w : float - Wavelength in vacuum, in nm. - - """ - if warn: - _check_range(wave=wave, t=t, p=p, rh=rh, co2=co2) - - n = ciddor(wave, t, p, rh, co2) - return wave * n diff --git a/pypeit/tests/test_ref_index.py b/pypeit/tests/test_ref_index.py deleted file mode 100644 index b46dc01c80..0000000000 --- a/pypeit/tests/test_ref_index.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -Module to run tests on pyidl functions -""" -import numpy as np - -from pypeit.core import ref_index -import pytest - - -def test_nist_ciddor_1(): - """Compare with NIST output. - - Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. 
- - fix t at 20, p at 101325, rh at 50 - """ - wave = [321.456, 500, 600.1234, 633.0, 700, 1000.987, 1500.8, 1700.0] - nist_n = [1.000283543, 1.000273781, 1.000271818, 1.000271373, - 1.000270657, 1.000269038, 1.00026819, 1.000268041] - nist_w = [321.364879, 499.863147, 599.96032, 632.828268, - 699.810591, 1000.717769, 1500.397608, 1699.544453] - - xv = ref_index.rh2mole_fraction(50, 101325, 20) - - n = [ref_index.ciddor_ri(i, 20, 101325, xv) for i in wave] - wave_n = [ref_index.vac2air(i, t=20, p=101325, rh=50.0) for i in wave] - - for i, j in zip(n, nist_n): - assert abs(i - j) < 1e-8 - - for i, j in zip(wave_n, nist_w): - assert abs(i - j) < 1e-6 - - n = [ref_index.ciddor(i, 20, 101325, 50.0) for i in wave] - wave_n = [ref_index.vac2air(i, t=20, p=101325, rh=50.0) for i in wave] - - for i, j in zip(n, nist_n): - assert abs(i - j) < 1e-8 - - for i, j in zip(wave_n, nist_w): - assert abs(i - j) < 1e-6 - - -def test_nist_ciddor_2(): - """Compare with NIST output. - - Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. - - fix wave at 633.0 p at 101325 rh at 50 - """ - t = [-20.0, 0.0, 20, 26.7982, 40.123, 60.45] - nist_w = [632.800737, 632.815441, 632.828268, 632.832303, 632.839872, - 632.850953] - nist_n = [1.00031489, 1.000291647, 1.000271373, 1.000264994, 1.000253031, - 1.000235516] - - xv = [ref_index.rh2mole_fraction(50, 101325, i) for i in t] - n = [ref_index.ciddor_ri(633.0, i, 101325, j) for i, j in zip(t, xv)] - - wave_n = [ref_index.vac2air(633.0, i, 101325, 50) for i in t] - - for i, j in zip(n, nist_n): - assert abs(i - j) < 1e-8 - - for i, j in zip(wave_n, nist_w): - assert abs(i - j) < 1e-6 - - -def test_nist_ciddor_3(): - """Compare with NIST output. - - Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. - - fix wave at 633.0, t at 20, rh at 50. 
vary p - """ - p = [1000 * i for i in [10, 50.123, 100.1234, 140.0]] - - nist_n = [1.000026385, 1.000133999, 1.000268148, 1.000375169] - nist_w = [632.983299, 632.91519, 632.830308, 632.762607] - - xv = [ref_index.rh2mole_fraction(50, i, 20) for i in p] - n = [ref_index.ciddor_ri(633.0, 20, i, j) for i, j in zip(p, xv)] - - wave_n = [ref_index.vac2air(633.0, 20, i, 50) for i in p] - - for i, j in zip(n, nist_n): - assert abs(i - j) < 1e-8 - - for i, j in zip(wave_n, nist_w): - assert abs(i - j) < 1e-6 - - -def test_nist_ciddor_4(): - """Compare with NIST output. - - Values from http://emtoolbox.nist.gov/Wavelength/Ciddor.asp. - - fix wave at 633.0, t at 20, p at 101325, vary rh. - """ - rh = [0.0, 20.123, 40, 50.9876, 70, 90.7432, 100.0] - nist_n = [1.0002718, 1.000271627, 1.000271458, 1.000271364, - 1.000271203, 1.000271027, 1.000270949] - nist_w = [632.827997, 632.828106, 632.828214, 632.828273, - 632.828375, 632.828486, 632.828535] - - xv = [ref_index.rh2mole_fraction(i, 101325, 20) for i in rh] - n = [ref_index.ciddor_ri(633.0, 20, 101325, j) for j in xv] - - wave_n = [ref_index.vac2air(633.0, 20, 101325, i) for i in rh] - - for i, j in zip(n, nist_n): - assert abs(i - j) < 1e-8 - - for i, j in zip(wave_n, nist_w): - assert abs(i - j) < 1e-6 - - -def test_air2vac(): - """Test reversibility with vac2air.""" - wave = np.array([321.456, 500, 600.1234, 633.0, 700, 1000.987, 1500.8, 1700.0]) - wave_o = ref_index.air2vac(ref_index.vac2air(wave)) - assert np.allclose(wave, wave_o) - - -def test_idlastro(): - # Using IDLASTRO downloaded on 2011/10/07. The vac2air.pro uses a - # formulation of the Ciddor equation. Previous versions used a - # different equation. - - # The REVISION HISTORY from the vac2air.pro file is: - # ; REVISION HISTORY - # ; Written W. Landsman November 1991 - # ; Use Ciddor (1996) formula for better accuracy in the infrared - # ; Added optional output vector, W Landsman Mar 2011 - # ; Iterate for better precision W.L./D. 
Schlegel Mar 2011 - - # The REVISION HISTORY from air2vac.pro file is: - # ; REVISION HISTORY - # ; Written, D. Lindler 1982 - # ; Documentation W. Landsman Feb. 1989 - # ; Use Ciddor (1996) formula for better accuracy in the infrared - # ; Added optional output vector, W Landsman Mar 2011 - - # Velocity errors in m/s for different wave length errors, at - # different wave lengths. - # >>> 1e-5/330.0 * 299792458 - # 9.0846199393939404 - # >>> 1e-5/200.0 * 299792458 - # 14.989622900000001 - # >>> 1e-5/1000.0 * 299792458 - # 2.9979245800000003 - - # nm - wave = np.array([200.0, 300.0, 500.0, 800.0, 1200.0, 1600.0, 1700.0]) - - # angstrom - wave_idl_vactoair = np.array([1999.3526550081103323, 2999.1255923046301177, - 4998.6055889614663101, 7997.8003315140686027, - 11996.7167708424640296, 15995.6298776736693981, - 16995.3579139663052047]) - wave_vac2air = ref_index.vac2air(wave, t=15, rh=0) - - # values in wave_idl_vactoair was fed to airtovac idl procedure. - wave_idl_airtovac = np.array([1999.3526550081103323, - 3000.0000371189012185, - 5000.0000183785432455, - 8000.0000108292333607, - 12000.0000070745754783, - 16000.0000052688483265, - 17000.0000049538284657]) - # Have to convert angstrom to nm. - wave_air2vac = ref_index.air2vac(wave_idl_vactoair / 10.0, t=15, rh=0) - - assert np.allclose(wave_vac2air, wave_idl_vactoair/10.0) - - # IDL code ignores values under 2000 angstrom. 
- assert np.allclose(wave_air2vac[1:], wave_idl_airtovac[1:]/10.0) diff --git a/setup.cfg b/setup.cfg index 7b7b5c476c..5f0e25ee7e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -39,6 +39,8 @@ install_requires = scipy>=1.7 matplotlib>=3.7 PyYAML>=5.1 + PyERFA>=2.0.0 + fast-histogram>=0.11 configobj>=5.0.6 scikit-learn>=1.0 IPython>=7.10.0 From 1406bee29c99031f51d012d0871842b4b4a006c3 Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 5 Oct 2023 21:34:54 +0100 Subject: [PATCH 64/81] fix DAR --- pypeit/coadd3d.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 113aa5c054..609469ae77 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1103,7 +1103,6 @@ def load(self): temperature = self.spec.get_meta_value([spec2DObj.head0], 'temperature') # units are degrees C humidity = self.spec.get_meta_value([spec2DObj.head0], 'humidity') # Expressed as a percentage (not a fraction!) darcorr = DARcorrection(airmass, parangle, pressure, temperature, humidity, cosdec) - darcorr.correction(wave_sort) # Perform extinction correction msgs.info("Applying extinction correction") From 9ac6190db9c00d1a9134b8ebf920d0352c795abd Mon Sep 17 00:00:00 2001 From: rcooke Date: Thu, 5 Oct 2023 21:51:18 +0100 Subject: [PATCH 65/81] rm embed --- pypeit/coadd3d.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 609469ae77..abcdbe4035 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1156,7 +1156,6 @@ def load(self): # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now numpix = ra_sort.size - embed() if not self.combine and not self.align: # Get the output filename if self.numfiles == 1 and self.cubepar['output_filename'] != "": From 66d95124090da7d3638bde3566e5ea6d3b8c293b Mon Sep 17 00:00:00 2001 From: rcooke Date: Sat, 7 Oct 2023 08:56:07 +0100 Subject: [PATCH 66/81] fix DAR pressure units consistent --- pypeit/coadd3d.py | 19 
+++++++++++-------- pypeit/spectrographs/gemini_gnirs.py | 4 ++-- pypeit/spectrographs/gtc_osiris.py | 4 ++-- pypeit/spectrographs/keck_kcwi.py | 6 +++--- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index abcdbe4035..e94d34c10a 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -230,22 +230,22 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, w self.airmass = airmass # unitless self.parangle = parangle self.pressure = pressure * units.mbar - self.temperature = temperature * units.Celsius + self.temperature = temperature * units.deg_C self.humidity = humidity/100.0 - self.wave_ref = wave_ref # This should be in Angstroms + self.wave_ref = wave_ref*units.Angstrom self.cosdec = cosdec # Calculate the coefficients of the correction self.refa, self.refb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), - self.humidity, (self.wave_ref*units.Angstrom).to_value(units.micron)) + self.humidity, self.wave_ref.to_value(units.micron)) # Print out the DAR parameters msgs.info("DAR correction parameters:" + msgs.newline() + " Airmass = {0:.2f}".format(self.airmass) + msgs.newline() + - " Pressure = {0:.2f} Pa".format(self.pressure) + msgs.newline() + - " Humidity = {0:.2f} %".format(self.humidity) + msgs.newline() + - " Temperature = {0:.2f} deg C".format(self.temperature) + msgs.newline() + - " Reference wavelength = {0:.2f}".format(self.wave_ref)) + " Pressure = {0:.2f} mbar".format(self.pressure.to_value(units.mbar)) + msgs.newline() + + " Humidity = {0:.2f} %".format(self.humidity*100.0) + msgs.newline() + + " Temperature = {0:.2f} deg C".format(self.temperature.to_value(units.deg_C)) + msgs.newline() + + " Reference wavelength = {0:.2f} Angstroms".format(self.wave_ref.to_value(units.Angstrom))) def calculate_dispersion(self, waves): """ Calculate the total atmospheric dispersion relative to the reference wavelength @@ -265,9 +265,12 @@ def 
calculate_dispersion(self, waves): z = np.arccos(1.0/self.airmass) # Calculate the coefficients of the correction + # self.refa, self.refb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), + # self.humidity, self.wave_ref.to_value(units.micron)) cnsa, cnsb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), self.humidity, (waves*units.Angstrom).to_value(units.micron)) - return (180.0/np.pi) * (self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3 + dar_full = (180.0/np.pi) * ((self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3) + return dar_full def correction(self, waves): """ diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index ff7c1f042b..76d90df451 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -137,8 +137,8 @@ def compound_meta(self, headarr, meta_key): try: return headarr[0]['PRESSUR2'] # Must be in astropy.units.pascal except KeyError: - msgs.warn("Pressure is not in header - The default pressure (61.1 kPa) will be assumed") - return 61.1E3 + msgs.warn("Pressure is not in header - The default pressure (0.611 mbar) will be assumed") + return 0.611 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 6b157b221e..86fe889073 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -176,8 +176,8 @@ def compound_meta(self, headarr, meta_key): return headarr[0]['PRESSURE'] # Must be in astropy.units.pascal except KeyError: msgs.warn("Pressure is not in header") - msgs.info("The default pressure will be assumed: 61.1 kPa") - return 61.1E3 + msgs.info("The default pressure will be assumed: 0.611 mbar") + return 0.611 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C 
diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index be81a8f57c..f5ca76163f 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -219,11 +219,11 @@ def compound_meta(self, headarr, meta_key): return headarr[0][hdrstr] elif meta_key == 'pressure': try: - return headarr[0]['WXPRESS'] * 100.0 # Must be in astropy.units.pascal + return headarr[0]['WXPRESS'] # Must be in astropy.units.mbar except KeyError: msgs.warn("Pressure is not in header") - msgs.info("The default pressure will be assumed: 61.1 kPa") - return 61.1E3 + msgs.info("The default pressure will be assumed: 0.611 mbar") + return 0.611 elif meta_key == 'temperature': try: return headarr[0]['WXOUTTMP'] # Must be in astropy.units.deg_C From a46cfc20b725e4ad17401409795a4dd3b24cf8bd Mon Sep 17 00:00:00 2001 From: rcooke Date: Sat, 7 Oct 2023 11:42:48 +0100 Subject: [PATCH 67/81] fix DAR pressure units consistent --- pypeit/coadd3d.py | 4 ++-- pypeit/spectrographs/gemini_gnirs.py | 6 +++--- pypeit/spectrographs/gtc_osiris.py | 6 +++--- pypeit/spectrographs/keck_kcwi.py | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index e94d34c10a..ee4845c96c 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -210,7 +210,7 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, w airmass (:obj:`float`): The airmass of the observations (unitless) parangle (:obj:`float`): - The parallactic angle of the observations (units=degree, relative to North, towards East is postive) + The parallactic angle of the observations (units=radians, relative to North, towards East is postive) pressure (:obj:`float`): The atmospheric pressure during the observations in Pascal. Valid range is from 10kPa - 140 kPa. 
temperature (:obj:`float`): @@ -269,7 +269,7 @@ def calculate_dispersion(self, waves): # self.humidity, self.wave_ref.to_value(units.micron)) cnsa, cnsb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), self.humidity, (waves*units.Angstrom).to_value(units.micron)) - dar_full = (180.0/np.pi) * ((self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3) + dar_full = (180.0/np.pi) * ((self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3) return dar_full def correction(self, waves): diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index 76d90df451..921ecccaea 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -135,10 +135,10 @@ def compound_meta(self, headarr, meta_key): return 0.0 elif meta_key == 'pressure': try: - return headarr[0]['PRESSUR2'] # Must be in astropy.units.pascal + return headarr[0]['PRESSUR2']/100.0 # Must be in astropy.units.mbar except KeyError: - msgs.warn("Pressure is not in header - The default pressure (0.611 mbar) will be assumed") - return 0.611 + msgs.warn("Pressure is not in header - The default pressure (611 mbar) will be assumed") + return 611.0 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 86fe889073..2c788caab9 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -173,11 +173,11 @@ def compound_meta(self, headarr, meta_key): return binning elif meta_key == 'pressure': try: - return headarr[0]['PRESSURE'] # Must be in astropy.units.pascal + return headarr[0]['PRESSURE'] # Must be in astropy.units.mbar except KeyError: msgs.warn("Pressure is not in header") - msgs.info("The default pressure will be assumed: 0.611 mbar") - return 0.611 + msgs.info("The default pressure will be assumed: 611 mbar") + return 611.0 elif meta_key 
== 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index f5ca76163f..ea9a1249b6 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -222,8 +222,8 @@ def compound_meta(self, headarr, meta_key): return headarr[0]['WXPRESS'] # Must be in astropy.units.mbar except KeyError: msgs.warn("Pressure is not in header") - msgs.info("The default pressure will be assumed: 0.611 mbar") - return 0.611 + msgs.info("The default pressure will be assumed: 611 mbar") + return 611.0 elif meta_key == 'temperature': try: return headarr[0]['WXOUTTMP'] # Must be in astropy.units.deg_C @@ -242,7 +242,7 @@ def compound_meta(self, headarr, meta_key): elif meta_key == 'parangle': try: # Parallactic angle expressed in radians - return headarr[0]['PARANG'] * np.pi / 180 + return headarr[0]['PARANG'] * np.pi / 180.0 except KeyError: msgs.error("Parallactic angle is not in header") elif meta_key == 'obstime': From 07c73547e783543b6b8a18572e8c3d2fc8fc917a Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 8 Oct 2023 07:04:56 +0100 Subject: [PATCH 68/81] cleanup DAR --- pypeit/coadd3d.py | 4 ++-- pypeit/spectrographs/keck_kcwi.py | 15 +++------------ 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index ee4845c96c..d10ea6d5fd 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -245,7 +245,7 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, w " Pressure = {0:.2f} mbar".format(self.pressure.to_value(units.mbar)) + msgs.newline() + " Humidity = {0:.2f} %".format(self.humidity*100.0) + msgs.newline() + " Temperature = {0:.2f} deg C".format(self.temperature.to_value(units.deg_C)) + msgs.newline() + - " Reference wavelength = {0:.2f} Angstroms".format(self.wave_ref.to_value(units.Angstrom))) + " Reference wavelength = {0:.2f} 
Angstrom".format(self.wave_ref.to_value(units.Angstrom))) def calculate_dispersion(self, waves): """ Calculate the total atmospheric dispersion relative to the reference wavelength @@ -269,7 +269,7 @@ def calculate_dispersion(self, waves): # self.humidity, self.wave_ref.to_value(units.micron)) cnsa, cnsb = erfa.refco(self.pressure.to_value(units.hPa), self.temperature.to_value(units.deg_C), self.humidity, (waves*units.Angstrom).to_value(units.micron)) - dar_full = (180.0/np.pi) * ((self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3) + dar_full = np.rad2deg((self.refa-cnsa) * np.tan(z) + (self.refb-cnsb) * np.tan(z)**3) return dar_full def correction(self, waves): diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index ea9a1249b6..b4a2a21156 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -592,11 +592,8 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) # Get RA/DEC - raval = self.compound_meta([hdr], 'ra') - decval = self.compound_meta([hdr], 'dec') - - # Create a coordinate - coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) + ra = self.compound_meta([hdr], 'ra') + dec = self.compound_meta([hdr], 'dec') skypa = self.compound_meta([hdr], 'posang') rotoff = 0.0 # IFU-SKYPA offset (degrees) @@ -605,13 +602,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): # Calculate the fits coordinates cdelt1 = -slscl cdelt2 = pxscl - if coord is None: - ra = 0. - dec = 0. 
- crota = 1 - else: - ra = coord.ra.degree - dec = coord.dec.degree + # Calculate the CD Matrix cd11 = cdelt1 * np.cos(crota) # RA degrees per column cd12 = abs(cdelt2) * np.sign(cdelt1) * np.sin(crota) # RA degrees per row From 5ac775b485dc8dbef88f190bd8a92f7181e74bf2 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 8 Oct 2023 20:52:29 +0100 Subject: [PATCH 69/81] code cleanup --- pypeit/coadd3d.py | 46 ++++---------------------- pypeit/core/datacube.py | 55 +++++++++++++++++++++++++++++++ pypeit/spectrographs/keck_kcwi.py | 2 +- 3 files changed, 63 insertions(+), 40 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index d10ea6d5fd..0b3095b55c 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -888,11 +888,11 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): Parameters ---------- - spec2DObj : :class:`~pypeit.spec2dobj.Spec2DObj`_: + spec2DObj : :class:`~pypeit.spec2dobj.Spec2DObj`_ 2D PypeIt spectra object. - slits : :class:`pypeit.slittrace.SlitTraceSet`_: + slits : :class:`pypeit.slittrace.SlitTraceSet`_ Class containing information about the slits - spat_flexure: :obj:`float`, optional: + spat_flexure: :obj:`float`, optional Spatial flexure in pixels Returns @@ -925,39 +925,6 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): msgs.info("Generating alignment splines") return alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts) - def set_voxel_sampling(self): - """ - This function checks if the spatial and spectral scales of all frames are consistent. - If the user has not specified either the spatial or spectral scales, they will be set here. 
- """ - # Make sure all frames have consistent pixel scales - ratio = (self._spatscale[:, 0] - self._spatscale[0, 0]) / self._spatscale[0, 0] - if np.any(np.abs(ratio) > 1E-4): - msgs.warn("The pixel scales of all input frames are not the same!") - spatstr = ", ".join(["{0:.6f}".format(ss) for ss in self._spatscale[:,0]*3600.0]) - msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") - # Make sure all frames have consistent slicer scales - ratio = (self._spatscale[:, 1] - self._spatscale[0, 1]) / self._spatscale[0, 1] - if np.any(np.abs(ratio) > 1E-4): - msgs.warn("The slicer scales of all input frames are not the same!") - spatstr = ", ".join(["{0:.6f}".format(ss) for ss in self._spatscale[:,1]*3600.0]) - msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") - # Make sure all frames have consistent wavelength sampling - ratio = (self._specscale - self._specscale[0]) / self._specscale[0] - if np.any(np.abs(ratio) > 1E-2): - msgs.warn("The wavelength samplings of the input frames are not the same!") - specstr = ", ".join(["{0:.6f}".format(ss) for ss in self._specscale]) - msgs.info("Wavelength samplings of all input frames:" + msgs.newline() + specstr) - - # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale - if self._dspat is None: - self._dspat = np.max(self._spatscale) - msgs.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * self._dspat)) - # If the user has not specified the spectral sampling, then set it now to the largest value - if self._dwv is None: - self._dwv = np.max(self._specscale) - msgs.info("Adopting a wavelength sampling of {0:f} Angstrom".format(self._dwv)) - def load(self): """ This is the main function that loads in the data, and performs several frame-specific corrections. 
@@ -1342,11 +1309,12 @@ def coadd(self): # If the user is aligning or combining, the spatial scale of the output cubes needs to be consistent. # Set the spatial and spectral scales of the output datacube - self.set_voxel_sampling() + self._dspat, self._dwv = datacube.set_voxel_sampling(self._spatscale, self._specscale, + dspat=self._dspat, dwv=self._dwv) # Align the frames if self.align: - self.run_align() + self.all_ra, self.all_dec = self.run_align() # Compute the relative weights on the spectra self.all_wghts = self.compute_weights() @@ -1363,7 +1331,6 @@ def coadd(self): sensfunc = self.flux_spline(senswave) # Generate a datacube - outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) if self.method in ['subpixel', 'ngp']: # Generate the datacube wl_wvrng = None @@ -1372,6 +1339,7 @@ def coadd(self): np.min(self.mnmx_wv[:, :, 1]), self.cubepar['whitelight_range']) if self.combine: + outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, np.ones(self.all_wghts.size), # all_wghts, self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, self.all_align, self.all_dar, vox_edges, diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index f58c357bda..10aec65a3b 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -604,6 +604,61 @@ def align_user_offsets(all_ra, all_dec, all_idx, ifu_ra, ifu_dec, ra_offset, dec return all_ra, all_dec +def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None): + """ + This function checks if the spatial and spectral scales of all frames are consistent. + If the user has not specified either the spatial or spectral scales, they will be set here. + + Parameters + ---------- + spatscale : `numpy.ndarray`_ + 2D array, shape is (N, 2), listing the native spatial scales of N spec2d frames. 
+ spatscale[:,0] refers to the spatial pixel scale of each frame + spatscale[:,1] refers to the slicer scale of each frame + Each element of the array must be in degrees + specscale : `numpy.ndarray`_ + 1D array listing the native spectral scales of multiple frames. The length of this array should be equal + to the number of frames you are using. Each element of the array must be in Angstrom + dspat: :obj:`float`, optional + Spatial scale to use as the voxel spatial sampling. If None, a new value will be derived based on the inputs + dwv: :obj:`float`, optional + Spectral scale to use as the voxel spectral sampling. If None, a new value will be derived based on the inputs + + Returns + ------- + _dspat : :obj:`float` + Spatial sampling + _dwv : :obj:`float` + Wavelength sampling + """ + # Make sure all frames have consistent pixel scales + ratio = (spatscale[:, 0] - spatscale[0, 0]) / spatscale[0, 0] + if np.any(np.abs(ratio) > 1E-4): + msgs.warn("The pixel scales of all input frames are not the same!") + spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,0]*3600.0]) + msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") + # Make sure all frames have consistent slicer scales + ratio = (spatscale[:, 1] - spatscale[0, 1]) / spatscale[0, 1] + if np.any(np.abs(ratio) > 1E-4): + msgs.warn("The slicer scales of all input frames are not the same!") + spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,1]*3600.0]) + msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") + # Make sure all frames have consistent wavelength sampling + ratio = (specscale - specscale[0]) / specscale[0] + if np.any(np.abs(ratio) > 1E-2): + msgs.warn("The wavelength samplings of the input frames are not the same!") + specstr = ", ".join(["{0:.6f}".format(ss) for ss in specscale]) + msgs.info("Wavelength samplings of all input frames:" + msgs.newline() + specstr + "Angstrom") + + # If the user has not 
specified the spatial scale, then set it appropriately now to the largest spatial scale + _dspat = np.max(spatscale) if dspat is None else dspat + msgs.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * _dspat)) + # If the user has not specified the spectral sampling, then set it now to the largest value + _dwv = np.max(specscale) if dwv is None else dwv + msgs.info("Adopting a wavelength sampling of {0:f} Angstrom".format(_dwv)) + return _dspat, _dwv + + def create_wcs(all_ra, all_dec, all_wave, dspat, dwave, ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None, reference=None, collapse=False, equinox=2000.0, specname="PYP_SPEC"): diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index b4a2a21156..8bc2c7a764 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -323,7 +323,7 @@ def pypeit_file_keys(self): :class:`~pypeit.metadata.PypeItMetaData` instance to print to the :ref:`pypeit_file`. """ - return super().pypeit_file_keys() + ['posang', 'ra_off', 'dec_off', 'idname', 'calpos'] + return super().pypeit_file_keys() + ['posang', 'ra_off', 'dec_off', 'idname', 'calpos'] def check_frame_type(self, ftype, fitstbl, exprng=None): """ From 6754e32d3544a665d278e7ff332eb8b1d069a391 Mon Sep 17 00:00:00 2001 From: rcooke Date: Sun, 8 Oct 2023 22:08:24 +0100 Subject: [PATCH 70/81] move to core --- pypeit/coadd3d.py | 659 ++++++------------------------- pypeit/core/datacube.py | 441 +++++++++++++++++++++ pypeit/scripts/coadd_datacube.py | 2 +- 3 files changed, 556 insertions(+), 546 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 0b3095b55c..34f9f6f7e1 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -20,12 +20,6 @@ from pypeit.core import datacube, extract, flux_calib, parse from pypeit.spectrographs.util import load_spectrograph -# Use a fast histogram for speed! 
-try: - from fast_histogram import histogramdd -except ImportError: - histogramdd = None - from IPython import embed @@ -436,13 +430,6 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs # It should be possible (and perhaps desirable) to do a spatial alignment (i.e. align=True), apply this to the # RA,Dec values of each pixel, and then use the instrument WCS to save the output (or, just adjust the crval). # At the moment, if the user wishes to spatially align the frames, a different WCS is generated. - # Check if fast-histogram exists - if histogramdd is None: - msgs.warn("Generating a datacube is faster if you install fast-histogram:"+msgs.newline()+ - "https://pypi.org/project/fast-histogram/") - if self.method != 'ngp': - msgs.warn("Forcing NGP algorithm, because fast-histogram is not installed") - self.method = 'ngp' # Determine what method is requested self.spec_subpixel, self.spat_subpixel = 1, 1 @@ -516,8 +503,7 @@ def check_outputs(self): if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) - def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwave, collapse=False, equinox=2000.0, - specname="PYP_SPEC"): + def wcs_bounds(self, all_ra, all_dec, all_wave): """ Create a WCS and the expected edges of the voxels, based on user-specified parameters or the extremities of the data. This is a convenience function @@ -534,28 +520,21 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwave, collapse=False, eq all_wave : `numpy.ndarray`_ 1D flattened array containing the wavelength values of each pixel from all spec2d files - dspat : float - Spatial size of each square voxel (in arcsec). The default is to use the - values in cubepar. 
- dwave : float - Linear wavelength step of each voxel (in Angstroms) - collapse : bool, optional - If True, the spectral dimension will be collapsed to a single channel - (primarily for white light images) - equinox : float, optional - Equinox of the WCS - specname : str, optional - Name of the spectrograph Returns ------- - cubewcs : `astropy.wcs.WCS`_ - astropy WCS to be used for the combined cube - voxedges : tuple - A three element tuple containing the bin edges in the x, y (spatial) and - z (wavelength) dimensions - reference_image : `numpy.ndarray`_ - The reference image to be used for the cross-correlation. Can be None. + ra_min : :obj:`float` + Minimum RA of the WCS + ra_max : :obj:`float` + Maximum RA of the WCS + dec_min : :obj:`float` + Minimum Dec of the WCS + dec_max : :obj:`float` + Maximum RA of the WCS + wav_min : :obj:`float` + Minimum wavelength of the WCS + wav_max : :obj:`float` + Maximum RA of the WCS """ # Setup the cube ranges reference_image = None # The default behaviour is that the reference image is not used @@ -565,13 +544,7 @@ def create_wcs(self, all_ra, all_dec, all_wave, dspat, dwave, collapse=False, eq dec_max = self.cubepar['dec_max'] if self.cubepar['dec_max'] is not None else np.max(all_dec) wav_min = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else np.min(all_wave) wav_max = self.cubepar['wave_max'] if self.cubepar['wave_max'] is not None else np.max(all_wave) - if self.cubepar['wave_delta'] is not None: - dwave = self.cubepar['wave_delta'] - - return datacube.create_wcs(all_ra, all_dec, all_wave, dspat, dwave, ra_min=ra_min, ra_max=ra_max, - dec_min=dec_min, dec_max=dec_max, wave_min=wav_min, wave_max=wav_max, - reference=self.cubepar['reference_image'], collapse=collapse, equinox=equinox, - specname=specname) + return ra_min, ra_max, dec_min, dec_max, wav_min, wav_max def set_blaze_spline(self, wave_spl, spec_spl): """ @@ -829,13 +802,13 @@ def add_grating_corr(self, flatfile, waveimg, slits, 
spat_flexure=None): # Finally, if a reference blaze spline has not been set, do that now. self.set_blaze_spline(wave_spl, spec_spl) - def coadd(self): + def run(self): """ Main entry routine to set the order of operations to coadd the data. For specific details of this procedure, see the child routines. """ msgs.bug("This routine should be overridden by child classes.") - msgs.error("Cannot proceed without coding the coadd routine.") + msgs.error("Cannot proceed without coding the run() routine.") class SlicerIFUCoAdd3D(CoAdd3D): @@ -1151,14 +1124,25 @@ def load(self): # Make the datacube if self.method in ['subpixel', 'ngp']: # Generate the datacube - generate_cube_subpixel(outfile, output_wcs, ra_sort[resrt], dec_sort[resrt], wave_sort[resrt], - flux_sort[resrt], ivar_sort[resrt], np.ones(numpix), - this_spatpos, this_specpos, this_spatid, - spec2DObj.tilts, slits, alignSplines, darcorr, bins, - all_idx=None, overwrite=self.overwrite, - blaze_wave=self.blaze_wave, blaze_spec=self.blaze_spec, - fluxcal=self.fluxcal, specname=self.specname, whitelight_range=wl_wvrng, - spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + flxcube, sigcube, bpmcube, wave = \ + datacube.generate_cube_subpixel(outfile, output_wcs, ra_sort[resrt], dec_sort[resrt], wave_sort[resrt], + flux_sort[resrt], ivar_sort[resrt], np.ones(numpix), + this_spatpos, this_specpos, this_spatid, + spec2DObj.tilts, slits, alignSplines, darcorr, bins, all_idx=None, + overwrite=self.overwrite, whitelight_range=wl_wvrng, + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + # Prepare the header + hdr = output_wcs.to_header() + if self.fluxcal: + hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2") + else: + hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") + # Write out the datacube + msgs.info("Saving datacube as: {0:s}".format(outfile)) + final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, 
self.blaze_wave, self.blaze_spec, + sensfunc=None, fluxed=self.fluxcal) + final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite) + # No need to proceed and store arrays - we are writing individual datacubes continue # Store the information if we are combining multiple frames @@ -1205,19 +1189,25 @@ def run_align(self): numiter = 2 for dd in range(numiter): msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") - # Setup the WCS to use for all white light images ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied - image_wcs, voxedge, reference_image = self.create_wcs(new_ra[ww], new_dec[ww], self.all_wave[ww], - self._dspat, wavediff, collapse=True) + # Determine the bounds of the WCS + ra_min, ra_max, dec_min, dec_max, wav_min, wav_max = \ + self.wcs_bounds(new_ra[ww], new_dec[ww], self.all_wave[ww]) + # Generate the WCS + image_wcs, voxedge, reference_image = \ + datacube.create_wcs(new_ra[ww], new_dec[ww], self.all_wave[ww], self._dspat, wavediff, + ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, + wave_min=wav_min, wave_max=wav_max, reference=self.cubepar['reference_image'], + collapse=True, equinox=2000.0, specname=self.specname) if voxedge[2].size != 2: msgs.error("Spectral range for WCS is incorrect for white light image") - wl_imgs = generate_image_subpixel(image_wcs, new_ra[ww], new_dec[ww], self.all_wave[ww], - self.all_sci[ww], self.all_ivar[ww], self.all_wghts[ww], - self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], - self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, - all_idx=self.all_idx[ww], - spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + wl_imgs = datacube.generate_image_subpixel(image_wcs, new_ra[ww], new_dec[ww], self.all_wave[ww], + self.all_sci[ww], self.all_ivar[ww], self.all_wghts[ww], + self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], + self.all_tilts, self.all_slits, 
self.all_align, self.all_dar, + voxedge, all_idx=self.all_idx[ww], + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) if reference_image is None: # ref_idx will be the index of the cube with the highest S/N ref_idx = np.argmax(self.weights) @@ -1251,27 +1241,36 @@ def compute_weights(self): # No need to calculate weights if there's just one frame all_wghts = np.ones_like(self.all_sci) else: + # TODO :: Need to decide if this can be moved to core or not... # Find the wavelength range where all frames overlap min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength self.cubepar['whitelight_range']) # The user-specified values (if any) # Get the good white light pixels ww, wavediff = datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl) - # Get a suitable WCS - image_wcs, voxedge, reference_image = self.create_wcs(self.all_ra, self.all_dec, self.all_wave, - self._dspat, wavediff, collapse=True) + + # Determine the bounds of the WCS + ra_min, ra_max, dec_min, dec_max, wav_min, wav_max = \ + self.wcs_bounds(self.all_ra, self.all_dec, self.all_wave) + # Generate the WCS + image_wcs, voxedge, reference_image = \ + datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, wavediff, + ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, + wave_min=wav_min, wave_max=wav_max, reference=self.cubepar['reference_image'], + collapse=True, equinox=2000.0, specname=self.specname) + # Generate the white light image (note: hard-coding subpixel=1 in both directions, and combining into a single image) - wl_full = generate_image_subpixel(image_wcs, self.all_ra, self.all_dec, self.all_wave, - self.all_sci, self.all_ivar, self.all_wghts, - self.all_spatpos, self.all_specpos, self.all_spatid, - self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, - all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) 
+ wl_full = datacube.generate_image_subpixel(image_wcs, self.all_ra, self.all_dec, self.all_wave, + self.all_sci, self.all_ivar, self.all_wghts, + self.all_spatpos, self.all_specpos, self.all_spatid, + self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, + all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) # Compute the weights all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, wl_full[:, :, 0], self._dspat, self._dwv, relative_weights=self.cubepar['relative_weights']) return all_wghts - def coadd(self): + def run(self): """ This is the main routine called to convert PypeIt spec2d files into PypeIt DataCube objects. It is specific to the SlicerIFU data. @@ -1319,8 +1318,15 @@ def coadd(self): # Compute the relative weights on the spectra self.all_wghts = self.compute_weights() + # Determine the bounds of the WCS + ra_min, ra_max, dec_min, dec_max, wav_min, wav_max = \ + self.wcs_bounds(self.all_ra, self.all_dec, self.all_wave) # Generate the WCS, and the voxel edges - cube_wcs, vox_edges, _ = self.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, self._dwv) + cube_wcs, vox_edges, _ = \ + datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, self._dwv, + ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, + wave_min=wav_min, wave_max=wav_max, reference=self.cubepar['reference_image'], + collapse=False, equinox=2000.0, specname=self.specname) sensfunc = None if self.flux_spline is not None: @@ -1340,482 +1346,45 @@ def coadd(self): self.cubepar['whitelight_range']) if self.combine: outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) - generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, - np.ones(self.all_wghts.size), # all_wghts, - self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, 
self.all_align, self.all_dar, vox_edges, - all_idx=self.all_idx, overwrite=self.overwrite, blaze_wave=self.blaze_wave, - blaze_spec=self.blaze_spec, - fluxcal=self.fluxcal, sensfunc=sensfunc, specname=self.specname, whitelight_range=wl_wvrng, - spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + # Generate the datacube + flxcube, sigcube, bpmcube, wave = \ + datacube.generate_cube_subpixel(outfile, cube_wcs, self.all_ra, self.all_dec, self.all_wave, + self.all_sci, self.all_ivar, np.ones(self.all_wghts.size), + self.all_spatpos, self.all_specpos, self.all_spatid, + self.all_tilts, self.all_slits, self.all_align, self.all_dar, vox_edges, + all_idx=self.all_idx, overwrite=self.overwrite, whitelight_range=wl_wvrng, + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + # Prepare the header + hdr = cube_wcs.to_header() + if self.fluxcal: + hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2") + else: + hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") + # Write out the datacube + msgs.info("Saving datacube as: {0:s}".format(outfile)) + final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, + sensfunc=sensfunc, fluxed=self.fluxcal) + final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite) else: for ff in range(self.numfiles): outfile = datacube.get_output_filename("", self.cubepar['output_filename'], False, ff) ww = np.where(self.all_idx == ff) - generate_cube_subpixel(outfile, cube_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], self.all_sci[ww], - self.all_ivar[ww], np.ones(self.all_wghts[ww].size), - self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], self.all_tilts[ff], - self.all_slits[ff], self.all_align[ff], self.all_dar[ff], vox_edges, - all_idx=self.all_idx[ww], overwrite=self.overwrite, blaze_wave=self.blaze_wave, - blaze_spec=self.blaze_spec, - fluxcal=self.fluxcal, sensfunc=sensfunc, 
specname=self.specname, - whitelight_range=wl_wvrng, - spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) - - -def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, - all_idx=None, spec_subpixel=10, spat_subpixel=10, combine=False): - """ - Generate a white light image from the input pixels - - Args: - image_wcs (`astropy.wcs.WCS`_): - World coordinate system to use for the white light images. - all_ra (`numpy.ndarray`_): - 1D flattened array containing the right ascension of each pixel - (units = degrees) - all_dec (`numpy.ndarray`_): - 1D flattened array containing the declination of each pixel (units = - degrees) - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = - Angstroms) - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_ivar (`numpy.ndarray`_): - 1D flattened array containing the inverse variance of each pixel - from all spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights of each pixel to be used - in the combination - all_spatpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spatial direction - all_specpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spectral direction - all_spatid (`numpy.ndarray`_): - 1D flattened array containing the spatid of each pixel - tilts (`numpy.ndarray`_, list): - 2D wavelength tilts frame, or a list of tilt frames (see all_idx) - slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): - Information stored about the slits, or a list of SlitTraceSet (see - all_idx) - astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): - A Class containing the transformation between detector pixel - coordinates and WCS pixel coordinates, or a list of 
Alignment - Splines (see all_idx) - all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): - A Class containing the DAR correction information, or a list of DARcorrection - classes. If a list, it must be the same length as astrom_trans. - bins (tuple): - A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial - and z wavelength coordinates - all_idx (`numpy.ndarray`_, optional): - If tilts, slits, and astrom_trans are lists, this should contain a - 1D flattened array, of the same length as all_sci, containing the - index the tilts, slits, and astrom_trans lists that corresponds to - each pixel. Note that, in this case all of these lists need to be - the same length. - spec_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spectral direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spectral - direction. - spat_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spatial direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spatial - direction. - combine (:obj:`bool`, optional): - If True, all of the input frames will be combined into a single - output. Otherwise, individual images will be generated. 
- - Returns: - `numpy.ndarray`_: The white light images for all frames - """ - # Perform some checks on the input -- note, more complete checks are performed in subpixellate() - _all_idx = np.zeros(all_sci.size) if all_idx is None else all_idx - if combine: - numfr = 1 - else: - numfr = np.unique(_all_idx).size - if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr or len(all_dar) != numfr: - msgs.error("The following arguments must be the same length as the expected number of frames to be combined:" - + msgs.newline() + "tilts, slits, astrom_trans, all_dar") - # Prepare the array of white light images to be stored - numra = bins[0].size-1 - numdec = bins[1].size-1 - all_wl_imgs = np.zeros((numra, numdec, numfr)) - - # Loop through all frames and generate white light images - for fr in range(numfr): - msgs.info(f"Creating image {fr+1}/{numfr}") - if combine: - # Subpixellate - img, _, _ = subpixellate(image_wcs, all_ra, all_dec, all_wave, - all_sci, all_ivar, all_wghts, all_spatpos, - all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, all_idx=_all_idx) - else: - ww = np.where(_all_idx == fr) - # Subpixellate - img, _, _ = subpixellate(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], - all_sci[ww], all_ivar[ww], all_wghts[ww], all_spatpos[ww], - all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], - all_dar[fr], bins, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) - all_wl_imgs[:, :, fr] = img[:, :, 0] - # Return the constructed white light images - return all_wl_imgs - - -def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, - all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, - all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, blaze_wave=None, - blaze_spec=None, fluxcal=False, sensfunc=None, whitelight_range=None, - specname="PYP_SPEC", 
debug=False): - """ - Save a datacube using the subpixel algorithm. Refer to the subpixellate() - docstring for further details about this algorithm - - Args: - outfile (str): - Filename to be used to save the datacube - output_wcs (`astropy.wcs.WCS`_): - Output world coordinate system. - all_ra (`numpy.ndarray`_): - 1D flattened array containing the right ascension of each pixel - (units = degrees) - all_dec (`numpy.ndarray`_): - 1D flattened array containing the declination of each pixel (units = - degrees) - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = - Angstroms) - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_ivar (`numpy.ndarray`_): - 1D flattened array containing the inverse variance of each pixel - from all spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights of each pixel to be used - in the combination - all_spatpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spatial direction - all_specpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spectral direction - all_spatid (`numpy.ndarray`_): - 1D flattened array containing the spatid of each pixel - tilts (`numpy.ndarray`_, list): - 2D wavelength tilts frame, or a list of tilt frames (see all_idx) - slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): - Information stored about the slits, or a list of SlitTraceSet (see - all_idx) - astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): - A Class containing the transformation between detector pixel - coordinates and WCS pixel coordinates, or a list of Alignment - Splines (see all_idx) - all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): - A Class containing the DAR correction information, or a list of DARcorrection - classes. If a list, it must be the same length as astrom_trans. 
- bins (tuple): - A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial - and z wavelength coordinates - all_idx (`numpy.ndarray`_, optional): - If tilts, slits, and astrom_trans are lists, this should contain a - 1D flattened array, of the same length as all_sci, containing the - index the tilts, slits, and astrom_trans lists that corresponds to - each pixel. Note that, in this case all of these lists need to be - the same length. - spec_subpixel (int, optional): - What is the subpixellation factor in the spectral direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spectral - direction. - spat_subpixel (int, optional): - What is the subpixellation factor in the spatial direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spatial - direction. - overwrite (bool, optional): - If True, the output cube will be overwritten. - blaze_wave (`numpy.ndarray`_, optional): - Wavelength array of the spectral blaze function - blaze_spec (`numpy.ndarray`_, optional): - Spectral blaze function - fluxcal (bool, optional): - Are the data flux calibrated? If True, the units are: :math:`{\rm - erg/s/cm}^2{\rm /Angstrom/arcsec}^2` multiplied by the - PYPEIT_FLUX_SCALE. Otherwise, the units are: :math:`{\rm - counts/s/Angstrom/arcsec}^2`. - sensfunc (`numpy.ndarray`_, None, optional): - Sensitivity function that has been applied to the datacube - whitelight_range (None, list, optional): - A two element list that specifies the minimum and maximum - wavelengths (in Angstroms) to use when constructing the white light - image (format is: [min_wave, max_wave]). If None, the cube will be - collapsed over the full wavelength range. 
If a list is provided an - either element of the list is None, then the minimum/maximum - wavelength range of that element will be set by the minimum/maximum - wavelength of all_wave. - specname (str, optional): - Name of the spectrograph - debug (bool, optional): - If True, a residuals cube will be output. If the datacube generation - is correct, the distribution of pixels in the residual cube with no - flux should have mean=0 and std=1. - """ - # Prepare the header, and add the unit of flux to the header - hdr = output_wcs.to_header() - if fluxcal: - hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2") - else: - hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") - - # Subpixellate - subpix = subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=all_idx, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug) - # Extract the variables that we need - if debug: - flxcube, varcube, bpmcube, residcube = subpix - # Save a residuals cube - outfile_resid = outfile.replace(".fits", "_resid.fits") - msgs.info("Saving residuals datacube as: {0:s}".format(outfile_resid)) - hdu = fits.PrimaryHDU(residcube.T, header=hdr) - hdu.writeto(outfile_resid, overwrite=overwrite) - else: - flxcube, varcube, bpmcube = subpix - - # Get wavelength of each pixel - nspec = flxcube.shape[2] - wcs_scale = (1.0*output_wcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms - wave = wcs_scale * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] - - # Check if the user requested a white light image - if whitelight_range is not None: - # Grab the WCS of the white light image - whitelight_wcs = output_wcs.celestial - # Determine the wavelength range of the whitelight image - if whitelight_range[0] is None: - whitelight_range[0] = np.min(all_wave) - if whitelight_range[1] 
is None: - whitelight_range[1] = np.max(all_wave) - msgs.info("White light image covers the wavelength range {0:.2f} A - {1:.2f} A".format( - whitelight_range[0], whitelight_range[1])) - # Get the output filename for the white light image - out_whitelight = datacube.get_output_whitelight_filename(outfile) - whitelight_img = datacube.make_whitelight_fromcube(flxcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) - msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) - img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) - img_hdu.writeto(out_whitelight, overwrite=overwrite) - - # Write out the datacube - msgs.info("Saving datacube as: {0:s}".format(outfile)) - final_cube = DataCube(flxcube.T, np.sqrt(varcube.T), bpmcube.T, wave, specname, blaze_wave, blaze_spec, - sensfunc=sensfunc, fluxed=fluxcal) - final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite) - - -def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, - all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None, - spec_subpixel=10, spat_subpixel=10, debug=False): - r""" - Subpixellate the input data into a datacube. This algorithm splits each - detector pixel into multiple subpixels, and then assigns each subpixel to a - voxel. For example, if ``spec_subpixel = spat_subpixel = 10``, then each - detector pixel is divided into :math:`10^2=100` subpixels. Alternatively, - when spec_subpixel = spat_subpixel = 1, this corresponds to the nearest grid - point (NGP) algorithm. - - Important Note: If spec_subpixel > 1 or spat_subpixel > 1, the errors will - be correlated, and the covariance is not being tracked, so the errors will - not be (quite) right. There is a tradeoff one has to make between sampling - and better looking cubes, versus no sampling and better behaved errors. - - Args: - output_wcs (`astropy.wcs.WCS`_): - Output world coordinate system. 
- all_ra (`numpy.ndarray`_): - 1D flattened array containing the right ascension of each pixel - (units = degrees) - all_dec (`numpy.ndarray`_): - 1D flattened array containing the declination of each pixel (units = - degrees) - all_wave (`numpy.ndarray`_): - 1D flattened array containing the wavelength of each pixel (units = - Angstroms) - all_sci (`numpy.ndarray`_): - 1D flattened array containing the counts of each pixel from all - spec2d files - all_ivar (`numpy.ndarray`_): - 1D flattened array containing the inverse variance of each pixel - from all spec2d files - all_wghts (`numpy.ndarray`_): - 1D flattened array containing the weights of each pixel to be used - in the combination - all_spatpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spatial direction - all_specpos (`numpy.ndarray`_): - 1D flattened array containing the detector pixel location in the - spectral direction - all_spatid (`numpy.ndarray`_): - 1D flattened array containing the spatid of each pixel - tilts (`numpy.ndarray`_, list): - 2D wavelength tilts frame, or a list of tilt frames (see all_idx) - slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): - Information stored about the slits, or a list of SlitTraceSet (see - all_idx) - astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): - A Class containing the transformation between detector pixel - coordinates and WCS pixel coordinates, or a list of Alignment - Splines (see all_idx) - all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): - A Class containing the DAR correction information, or a list of DARcorrection - classes. If a list, it must be the same length as astrom_trans. 
- bins (tuple): - A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial - and z wavelength coordinates - all_idx (`numpy.ndarray`_, optional): - If tilts, slits, and astrom_trans are lists, this should contain a - 1D flattened array, of the same length as all_sci, containing the - index the tilts, slits, and astrom_trans lists that corresponds to - each pixel. Note that, in this case all of these lists need to be - the same length. - spec_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spectral direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spectral - direction. - spat_subpixel (:obj:`int`, optional): - What is the subpixellation factor in the spatial direction. Higher - values give more reliable results, but note that the time required - goes as (``spec_subpixel * spat_subpixel``). The default value is 5, - which divides each detector pixel into 5 subpixels in the spatial - direction. - debug (bool): - If True, a residuals cube will be output. If the datacube generation - is correct, the distribution of pixels in the residual cube with no - flux should have mean=0 and std=1. - - Returns: - :obj:`tuple`: Three or four `numpy.ndarray`_ objects containing (1) the - datacube generated from the subpixellated inputs, (2) the corresponding - variance cube, (3) the corresponding bad pixel mask cube, and (4) the - residual cube. The latter is only returned if debug is True. - """ - # Check for combinations of lists or not - if all([isinstance(l, list) for l in [tilts, slits, astrom_trans, all_dar]]): - # Several frames are being combined. 
Check the lists have the same length - numframes = len(tilts) - if len(slits) != numframes or len(astrom_trans) != numframes or len(all_dar) != numframes: - msgs.error("The following lists must have the same length:" + msgs.newline() + - "tilts, slits, astrom_trans, all_dar") - # Check all_idx has been set - if all_idx is None: - if numframes != 1: - msgs.error("Missing required argument for combining frames: all_idx") - else: - all_idx = np.zeros(all_sci.size) - else: - tmp = np.unique(all_idx).size - if tmp != numframes: - msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.") - # Store in the following variables - _tilts, _slits, _astrom_trans, _all_dar = tilts, slits, astrom_trans, all_dar - elif all([not isinstance(l, list) for l in [tilts, slits, astrom_trans, all_dar]]): - # Just a single frame - store as lists for this code - _tilts, _slits, _astrom_trans, _all_dar = [tilts], [slits], [astrom_trans], [all_dar] - all_idx = np.zeros(all_sci.size) - numframes = 1 - else: - msgs.error("The following input arguments should all be of type 'list', or all not be type 'list':" + - msgs.newline() + "tilts, slits, astrom_trans, all_dar") - # Prepare the output arrays - outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1) - binrng = [[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]] - flxcube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape) - if debug: - residcube = np.zeros(outshape) - # Divide each pixel into subpixels - spec_offs = np.arange(0.5/spec_subpixel, 1, 1/spec_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. - spat_offs = np.arange(0.5/spat_subpixel, 1, 1/spat_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. 
- spat_x, spec_y = np.meshgrid(spat_offs, spec_offs) - num_subpixels = spec_subpixel * spat_subpixel - area = 1 / num_subpixels - all_wght_subpix = all_wghts * area - all_var = utils.inverse(all_ivar) - # Loop through all exposures - for fr in range(numframes): - # Extract tilts and slits for convenience - this_tilts = _tilts[fr] - this_slits = _slits[fr] - # Loop through all slits - for sl, spatid in enumerate(this_slits.spat_id): - if numframes == 1: - msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits}") - else: - msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits} of frame {fr+1}/{numframes}") - this_sl = np.where((all_spatid == spatid) & (all_idx == fr)) - wpix = (all_specpos[this_sl], all_spatpos[this_sl]) - # Generate a spline between spectral pixel position and wavelength - yspl = this_tilts[wpix]*(this_slits.nspec - 1) - tiltpos = np.add.outer(yspl, spec_y).flatten() - wspl = all_wave[this_sl] - asrt = np.argsort(yspl) - wave_spl = interp1d(yspl[asrt], wspl[asrt], kind='linear', bounds_error=False, fill_value='extrapolate') - # Calculate the wavelength at each subpixel - this_wave = wave_spl(tiltpos) - # Calculate the DAR correction at each sub pixel - ra_corr, dec_corr = _all_dar[fr].correction(this_wave) # This routine needs the wavelengths to be expressed in Angstroms - # Calculate spatial and spectral positions of the subpixels - spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() - spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() - # Transform this to spatial location - spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy) - spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl]) - # Interpolate the RA/Dec over the subpixel spatial positions - ssrt = np.argsort(spatpos) - tmp_ra = all_ra[this_sl] - tmp_dec = all_dec[this_sl] - ra_spl = interp1d(spatpos[ssrt], tmp_ra[ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') - dec_spl = interp1d(spatpos[ssrt], tmp_dec[ssrt], 
kind='linear', bounds_error=False, fill_value='extrapolate') - this_ra = ra_spl(spatpos_subpix) - this_dec = dec_spl(spatpos_subpix) - # Now apply the DAR correction - this_ra += ra_corr - this_dec += dec_corr - # Convert world coordinates to voxel coordinates, then histogram - vox_coord = output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0) - if histogramdd is not None: - # use the "fast histogram" algorithm, that assumes regular bin spacing - flxcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels)) - varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels)) - normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels)) - if debug: - residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels)) - else: - flxcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels))[0] - varcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels))[0] - normcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels))[0] - if debug: - residcube += np.histogramdd(vox_coord, bins=outshape, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels))[0] - # Normalise the datacube and variance cube - nc_inverse = utils.inverse(normcube) - flxcube *= nc_inverse - varcube *= nc_inverse**2 - bpmcube = (normcube == 0).astype(np.uint8) - if debug: - residcube *= nc_inverse - return flxcube, varcube, bpmcube, residcube - return flxcube, varcube, bpmcube + # Generate the datacube + flxcube, sigcube, bpmcube, wave = \ + 
datacube.generate_cube_subpixel(outfile, cube_wcs, self.all_ra[ww], self.all_dec[ww], self.all_wave[ww], + self.all_sci[ww], self.all_ivar[ww], np.ones(ww[0].size), + self.all_spatpos[ww], self.all_specpos[ww], self.all_spatid[ww], + self.all_tilts[ff], self.all_slits[ff], self.all_align[ff], self.all_dar[ff], vox_edges, + all_idx=self.all_idx[ww], overwrite=self.overwrite, whitelight_range=wl_wvrng, + spec_subpixel=self.spec_subpixel, spat_subpixel=self.spat_subpixel) + # Prepare the header + hdr = cube_wcs.to_header() + if self.fluxcal: + hdr['FLUXUNIT'] = (flux_calib.PYPEIT_FLUX_SCALE, "Flux units -- erg/s/cm^2/Angstrom/arcsec^2") + else: + hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") + # Write out the datacube + msgs.info("Saving datacube as: {0:s}".format(outfile)) + final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, + sensfunc=sensfunc, fluxed=self.fluxcal) + final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 10aec65a3b..a545beed52 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -17,6 +17,9 @@ from pypeit import utils from pypeit.core import coadd, flux_calib +# Use a fast histogram for speed! +from fast_histogram import histogramdd + from IPython import embed @@ -908,3 +911,441 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white bounds_error=False, fill_value="extrapolate")(all_wave[ww]) msgs.info("Optimal weighting complete") return all_wghts + + + +def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, + all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, + all_idx=None, spec_subpixel=10, spat_subpixel=10, combine=False): + """ + Generate a white light image from the input pixels + + Args: + image_wcs (`astropy.wcs.WCS`_): + World coordinate system to use for the white light images. 
+ all_ra (`numpy.ndarray`_): + 1D flattened array containing the right ascension of each pixel + (units = degrees) + all_dec (`numpy.ndarray`_): + 1D flattened array containing the declination of each pixel (units = + degrees) + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = + Angstroms) + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as astrom_trans. 
+ bins (tuple): + A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial + and z wavelength coordinates + all_idx (`numpy.ndarray`_, optional): + If tilts, slits, and astrom_trans are lists, this should contain a + 1D flattened array, of the same length as all_sci, containing the + index the tilts, slits, and astrom_trans lists that corresponds to + each pixel. Note that, in this case all of these lists need to be + the same length. + spec_subpixel (:obj:`int`, optional): + What is the subpixellation factor in the spectral direction. Higher + values give more reliable results, but note that the time required + goes as (``spec_subpixel * spat_subpixel``). The default value is 10, + which divides each detector pixel into 10 subpixels in the spectral + direction. + spat_subpixel (:obj:`int`, optional): + What is the subpixellation factor in the spatial direction. Higher + values give more reliable results, but note that the time required + goes as (``spec_subpixel * spat_subpixel``). The default value is 10, + which divides each detector pixel into 10 subpixels in the spatial + direction. + combine (:obj:`bool`, optional): + If True, all of the input frames will be combined into a single + output. Otherwise, individual images will be generated. 
+ + Returns: + `numpy.ndarray`_: The white light images for all frames + """ + # Perform some checks on the input -- note, more complete checks are performed in subpixellate() + _all_idx = np.zeros(all_sci.size) if all_idx is None else all_idx + if combine: + numfr = 1 + else: + numfr = np.unique(_all_idx).size + if len(tilts) != numfr or len(slits) != numfr or len(astrom_trans) != numfr or len(all_dar) != numfr: + msgs.error("The following arguments must be the same length as the expected number of frames to be combined:" + + msgs.newline() + "tilts, slits, astrom_trans, all_dar") + # Prepare the array of white light images to be stored + numra = bins[0].size-1 + numdec = bins[1].size-1 + all_wl_imgs = np.zeros((numra, numdec, numfr)) + + # Loop through all frames and generate white light images + for fr in range(numfr): + msgs.info(f"Creating image {fr+1}/{numfr}") + if combine: + # Subpixellate + img, _, _ = subpixellate(image_wcs, all_ra, all_dec, all_wave, + all_sci, all_ivar, all_wghts, all_spatpos, + all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, all_idx=_all_idx) + else: + ww = np.where(_all_idx == fr) + # Subpixellate + img, _, _ = subpixellate(image_wcs, all_ra[ww], all_dec[ww], all_wave[ww], + all_sci[ww], all_ivar[ww], all_wghts[ww], all_spatpos[ww], + all_specpos[ww], all_spatid[ww], tilts[fr], slits[fr], astrom_trans[fr], + all_dar[fr], bins, spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel) + all_wl_imgs[:, :, fr] = img[:, :, 0] + # Return the constructed white light images + return all_wl_imgs + + +def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, + all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, + all_idx=None, spec_subpixel=10, spat_subpixel=10, overwrite=False, + whitelight_range=None, debug=False): + """ + Save a datacube using the subpixel algorithm. 
Refer to the subpixellate() + docstring for further details about this algorithm + + Args: + outfile (str): + Filename to be used to save the datacube + output_wcs (`astropy.wcs.WCS`_): + Output world coordinate system. + all_ra (`numpy.ndarray`_): + 1D flattened array containing the right ascension of each pixel + (units = degrees) + all_dec (`numpy.ndarray`_): + 1D flattened array containing the declination of each pixel (units = + degrees) + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = + Angstroms) + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as astrom_trans. 
+        bins (tuple):
+            A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial
+            and z wavelength coordinates
+        all_idx (`numpy.ndarray`_, optional):
+            If tilts, slits, and astrom_trans are lists, this should contain a
+            1D flattened array, of the same length as all_sci, containing the
+            index of the tilts, slits, and astrom_trans lists that corresponds to
+            each pixel. Note that, in this case all of these lists need to be
+            the same length.
+        spec_subpixel (int, optional):
+            What is the subpixellation factor in the spectral direction. Higher
+            values give more reliable results, but note that the time required
+            goes as (``spec_subpixel * spat_subpixel``). The default value is 10,
+            which divides each detector pixel into 10 subpixels in the spectral
+            direction.
+        spat_subpixel (int, optional):
+            What is the subpixellation factor in the spatial direction. Higher
+            values give more reliable results, but note that the time required
+            goes as (``spec_subpixel * spat_subpixel``). The default value is 10,
+            which divides each detector pixel into 10 subpixels in the spatial
+            direction.
+        overwrite (bool, optional):
+            If True, the output cube will be overwritten.
+        whitelight_range (None, list, optional):
+            A two element list that specifies the minimum and maximum
+            wavelengths (in Angstroms) to use when constructing the white light
+            image (format is: [min_wave, max_wave]). If None, the cube will be
+            collapsed over the full wavelength range. If a list is provided and
+            either element of the list is None, then the minimum/maximum
+            wavelength range of that element will be set by the minimum/maximum
+            wavelength of all_wave.
+        debug (bool, optional):
+            If True, a residuals cube will be output. If the datacube generation
+            is correct, the distribution of pixels in the residual cube with no
+            flux should have mean=0 and std=1.
+ + Returns: + :obj:`tuple`: Four `numpy.ndarray`_ objects containing + (1) the datacube generated from the subpixellated inputs, + (2) the corresponding error cube (standard deviation), + (3) the corresponding bad pixel mask cube, and + (4) a 1D array containing the wavelength at each spectral coordinate of the datacube. + """ + # Prepare the header, and add the unit of flux to the header + hdr = output_wcs.to_header() + + # Subpixellate + subpix = subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, + all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=all_idx, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, debug=debug) + # Extract the variables that we need + if debug: + flxcube, varcube, bpmcube, residcube = subpix + # Save a residuals cube + outfile_resid = outfile.replace(".fits", "_resid.fits") + msgs.info("Saving residuals datacube as: {0:s}".format(outfile_resid)) + hdu = fits.PrimaryHDU(residcube.T, header=hdr) + hdu.writeto(outfile_resid, overwrite=overwrite) + else: + flxcube, varcube, bpmcube = subpix + + # Get wavelength of each pixel + nspec = flxcube.shape[2] + wcs_scale = (1.0*output_wcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms + wave = wcs_scale * output_wcs.spectral.wcs_pix2world(np.arange(nspec), 0)[0] + + # Check if the user requested a white light image + if whitelight_range is not None: + # Grab the WCS of the white light image + whitelight_wcs = output_wcs.celestial + # Determine the wavelength range of the whitelight image + if whitelight_range[0] is None: + whitelight_range[0] = np.min(all_wave) + if whitelight_range[1] is None: + whitelight_range[1] = np.max(all_wave) + msgs.info("White light image covers the wavelength range {0:.2f} A - {1:.2f} A".format( + whitelight_range[0], whitelight_range[1])) + # Get the output filename for the white light image + out_whitelight = get_output_whitelight_filename(outfile) + 
whitelight_img = make_whitelight_fromcube(flxcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) + msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) + img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) + img_hdu.writeto(out_whitelight, overwrite=overwrite) + # TODO :: Avoid transposing these large cubes + return flxcube.T, np.sqrt(varcube.T), bpmcube.T, wave + + +def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, + all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None, + spec_subpixel=10, spat_subpixel=10, debug=False): + r""" + Subpixellate the input data into a datacube. This algorithm splits each + detector pixel into multiple subpixels, and then assigns each subpixel to a + voxel. For example, if ``spec_subpixel = spat_subpixel = 10``, then each + detector pixel is divided into :math:`10^2=100` subpixels. Alternatively, + when spec_subpixel = spat_subpixel = 1, this corresponds to the nearest grid + point (NGP) algorithm. + + Important Note: If spec_subpixel > 1 or spat_subpixel > 1, the errors will + be correlated, and the covariance is not being tracked, so the errors will + not be (quite) right. There is a tradeoff one has to make between sampling + and better looking cubes, versus no sampling and better behaved errors. + + Args: + output_wcs (`astropy.wcs.WCS`_): + Output world coordinate system. 
+ all_ra (`numpy.ndarray`_): + 1D flattened array containing the right ascension of each pixel + (units = degrees) + all_dec (`numpy.ndarray`_): + 1D flattened array containing the declination of each pixel (units = + degrees) + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength of each pixel (units = + Angstroms) + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + astrom_trans (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as astrom_trans. 
+ bins (tuple): + A 3-tuple (x,y,z) containing the histogram bin edges in x,y spatial + and z wavelength coordinates + all_idx (`numpy.ndarray`_, optional): + If tilts, slits, and astrom_trans are lists, this should contain a + 1D flattened array, of the same length as all_sci, containing the + index the tilts, slits, and astrom_trans lists that corresponds to + each pixel. Note that, in this case all of these lists need to be + the same length. + spec_subpixel (:obj:`int`, optional): + What is the subpixellation factor in the spectral direction. Higher + values give more reliable results, but note that the time required + goes as (``spec_subpixel * spat_subpixel``). The default value is 5, + which divides each detector pixel into 5 subpixels in the spectral + direction. + spat_subpixel (:obj:`int`, optional): + What is the subpixellation factor in the spatial direction. Higher + values give more reliable results, but note that the time required + goes as (``spec_subpixel * spat_subpixel``). The default value is 5, + which divides each detector pixel into 5 subpixels in the spatial + direction. + debug (bool): + If True, a residuals cube will be output. If the datacube generation + is correct, the distribution of pixels in the residual cube with no + flux should have mean=0 and std=1. + + Returns: + :obj:`tuple`: Three or four `numpy.ndarray`_ objects containing (1) the + datacube generated from the subpixellated inputs, (2) the corresponding + variance cube, (3) the corresponding bad pixel mask cube, and (4) the + residual cube. The latter is only returned if debug is True. + """ + # Check for combinations of lists or not + if all([isinstance(l, list) for l in [tilts, slits, astrom_trans, all_dar]]): + # Several frames are being combined. 
Check the lists have the same length + numframes = len(tilts) + if len(slits) != numframes or len(astrom_trans) != numframes or len(all_dar) != numframes: + msgs.error("The following lists must have the same length:" + msgs.newline() + + "tilts, slits, astrom_trans, all_dar") + # Check all_idx has been set + if all_idx is None: + if numframes != 1: + msgs.error("Missing required argument for combining frames: all_idx") + else: + all_idx = np.zeros(all_sci.size) + else: + tmp = np.unique(all_idx).size + if tmp != numframes: + msgs.warn("Indices in argument 'all_idx' does not match the number of frames expected.") + # Store in the following variables + _tilts, _slits, _astrom_trans, _all_dar = tilts, slits, astrom_trans, all_dar + elif all([not isinstance(l, list) for l in [tilts, slits, astrom_trans, all_dar]]): + # Just a single frame - store as lists for this code + _tilts, _slits, _astrom_trans, _all_dar = [tilts], [slits], [astrom_trans], [all_dar] + all_idx = np.zeros(all_sci.size) + numframes = 1 + else: + msgs.error("The following input arguments should all be of type 'list', or all not be type 'list':" + + msgs.newline() + "tilts, slits, astrom_trans, all_dar") + # Prepare the output arrays + outshape = (bins[0].size-1, bins[1].size-1, bins[2].size-1) + binrng = [[bins[0][0], bins[0][-1]], [bins[1][0], bins[1][-1]], [bins[2][0], bins[2][-1]]] + flxcube, varcube, normcube = np.zeros(outshape), np.zeros(outshape), np.zeros(outshape) + if debug: + residcube = np.zeros(outshape) + # Divide each pixel into subpixels + spec_offs = np.arange(0.5/spec_subpixel, 1, 1/spec_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. + spat_offs = np.arange(0.5/spat_subpixel, 1, 1/spat_subpixel) - 0.5 # -0.5 is to offset from the centre of each pixel. 
+ spat_x, spec_y = np.meshgrid(spat_offs, spec_offs) + num_subpixels = spec_subpixel * spat_subpixel + area = 1 / num_subpixels + all_wght_subpix = all_wghts * area + all_var = utils.inverse(all_ivar) + # Loop through all exposures + for fr in range(numframes): + # Extract tilts and slits for convenience + this_tilts = _tilts[fr] + this_slits = _slits[fr] + # Loop through all slits + for sl, spatid in enumerate(this_slits.spat_id): + if numframes == 1: + msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits}") + else: + msgs.info(f"Resampling slit {sl+1}/{this_slits.nslits} of frame {fr+1}/{numframes}") + this_sl = np.where((all_spatid == spatid) & (all_idx == fr)) + wpix = (all_specpos[this_sl], all_spatpos[this_sl]) + # Generate a spline between spectral pixel position and wavelength + yspl = this_tilts[wpix]*(this_slits.nspec - 1) + tiltpos = np.add.outer(yspl, spec_y).flatten() + wspl = all_wave[this_sl] + asrt = np.argsort(yspl) + wave_spl = interp1d(yspl[asrt], wspl[asrt], kind='linear', bounds_error=False, fill_value='extrapolate') + # Calculate the wavelength at each subpixel + this_wave = wave_spl(tiltpos) + # Calculate the DAR correction at each sub pixel + ra_corr, dec_corr = _all_dar[fr].correction(this_wave) # This routine needs the wavelengths to be expressed in Angstroms + # Calculate spatial and spectral positions of the subpixels + spat_xx = np.add.outer(wpix[1], spat_x.flatten()).flatten() + spec_yy = np.add.outer(wpix[0], spec_y.flatten()).flatten() + # Transform this to spatial location + spatpos_subpix = _astrom_trans[fr].transform(sl, spat_xx, spec_yy) + spatpos = _astrom_trans[fr].transform(sl, all_spatpos[this_sl], all_specpos[this_sl]) + # Interpolate the RA/Dec over the subpixel spatial positions + ssrt = np.argsort(spatpos) + tmp_ra = all_ra[this_sl] + tmp_dec = all_dec[this_sl] + ra_spl = interp1d(spatpos[ssrt], tmp_ra[ssrt], kind='linear', bounds_error=False, fill_value='extrapolate') + dec_spl = interp1d(spatpos[ssrt], tmp_dec[ssrt], 
kind='linear', bounds_error=False, fill_value='extrapolate') + this_ra = ra_spl(spatpos_subpix) + this_dec = dec_spl(spatpos_subpix) + # Now apply the DAR correction + this_ra += ra_corr + this_dec += dec_corr + # Convert world coordinates to voxel coordinates, then histogram + vox_coord = output_wcs.wcs_world2pix(np.vstack((this_ra, this_dec, this_wave * 1.0E-10)).T, 0) + # Use the "fast histogram" algorithm, that assumes regular bin spacing + flxcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * all_wght_subpix[this_sl], num_subpixels)) + varcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_var[this_sl] * all_wght_subpix[this_sl]**2, num_subpixels)) + normcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_wght_subpix[this_sl], num_subpixels)) + if debug: + residcube += histogramdd(vox_coord, bins=outshape, range=binrng, weights=np.repeat(all_sci[this_sl] * np.sqrt(all_ivar[this_sl]), num_subpixels)) + # Normalise the datacube and variance cube + nc_inverse = utils.inverse(normcube) + flxcube *= nc_inverse + varcube *= nc_inverse**2 + bpmcube = (normcube == 0).astype(np.uint8) + if debug: + residcube *= nc_inverse + return flxcube, varcube, bpmcube, residcube + return flxcube, varcube, bpmcube diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 04eeed13bc..26e12a6bd2 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -65,5 +65,5 @@ def main(args): det=args.det, overwrite=args.overwrite) # Coadd the files - coadd.coadd() + coadd.run() msgs.info(utils.get_time_string(time.time()-tstart)) From 577822fe94dd993cf72ff2bed5252f757ff921e4 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 10:09:29 +0100 Subject: [PATCH 71/81] wcs to core --- pypeit/coadd3d.py | 83 ++++++++++------------------------------- pypeit/core/datacube.py | 65 ++++++++++++++++++++++++++++---- 2 files changed, 77 
insertions(+), 71 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 34f9f6f7e1..984d7d5fd8 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -503,49 +503,6 @@ def check_outputs(self): if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) - def wcs_bounds(self, all_ra, all_dec, all_wave): - """ - Create a WCS and the expected edges of the voxels, based on user-specified - parameters or the extremities of the data. This is a convenience function - that calls the core function in `pypeit.core.datacube`_. - - Parameters - ---------- - all_ra : `numpy.ndarray`_ - 1D flattened array containing the RA values of each pixel from all - spec2d files - all_dec : `numpy.ndarray`_ - 1D flattened array containing the DEC values of each pixel from all - spec2d files - all_wave : `numpy.ndarray`_ - 1D flattened array containing the wavelength values of each pixel from - all spec2d files - - Returns - ------- - ra_min : :obj:`float` - Minimum RA of the WCS - ra_max : :obj:`float` - Maximum RA of the WCS - dec_min : :obj:`float` - Minimum Dec of the WCS - dec_max : :obj:`float` - Maximum RA of the WCS - wav_min : :obj:`float` - Minimum wavelength of the WCS - wav_max : :obj:`float` - Maximum RA of the WCS - """ - # Setup the cube ranges - reference_image = None # The default behaviour is that the reference image is not used - ra_min = self.cubepar['ra_min'] if self.cubepar['ra_min'] is not None else np.min(all_ra) - ra_max = self.cubepar['ra_max'] if self.cubepar['ra_max'] is not None else np.max(all_ra) - dec_min = self.cubepar['dec_min'] if self.cubepar['dec_min'] is not None else np.min(all_dec) - dec_max = self.cubepar['dec_max'] if self.cubepar['dec_max'] is not None else np.max(all_dec) - wav_min = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else np.min(all_wave) - wav_max = self.cubepar['wave_max'] if 
self.cubepar['wave_max'] is not None else np.max(all_wave) - return ra_min, ra_max, dec_min, dec_max, wav_min, wav_max - def set_blaze_spline(self, wave_spl, spec_spl): """ Generate a spline that represents the blaze function. This only needs to be done once, @@ -1190,15 +1147,14 @@ def run_align(self): for dd in range(numiter): msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") ref_idx = None # Don't use an index - This is the default behaviour when a reference image is supplied - # Determine the bounds of the WCS - ra_min, ra_max, dec_min, dec_max, wav_min, wav_max = \ - self.wcs_bounds(new_ra[ww], new_dec[ww], self.all_wave[ww]) # Generate the WCS image_wcs, voxedge, reference_image = \ datacube.create_wcs(new_ra[ww], new_dec[ww], self.all_wave[ww], self._dspat, wavediff, - ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, - wave_min=wav_min, wave_max=wav_max, reference=self.cubepar['reference_image'], - collapse=True, equinox=2000.0, specname=self.specname) + ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'], + dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'], + wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'], + reference=self.cubepar['reference_image'], collapse=True, equinox=2000.0, + specname=self.specname) if voxedge[2].size != 2: msgs.error("Spectral range for WCS is incorrect for white light image") @@ -1249,25 +1205,25 @@ def compute_weights(self): # Get the good white light pixels ww, wavediff = datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl) - # Determine the bounds of the WCS - ra_min, ra_max, dec_min, dec_max, wav_min, wav_max = \ - self.wcs_bounds(self.all_ra, self.all_dec, self.all_wave) # Generate the WCS image_wcs, voxedge, reference_image = \ datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, wavediff, - ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, - wave_min=wav_min, wave_max=wav_max, 
reference=self.cubepar['reference_image'], - collapse=True, equinox=2000.0, specname=self.specname) + ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'], + dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'], + wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'], + reference=self.cubepar['reference_image'], collapse=True, equinox=2000.0, + specname=self.specname) - # Generate the white light image (note: hard-coding subpixel=1 in both directions, and combining into a single image) + # Generate the white light image (note: hard-coding subpixel=1 in both directions for speed, and combining into a single image) wl_full = datacube.generate_image_subpixel(image_wcs, self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_wghts, self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) # Compute the weights - all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, wl_full[:, :, 0], - self._dspat, self._dwv, relative_weights=self.cubepar['relative_weights']) + all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, + self.all_idx, wl_full[:, :, 0], self._dspat, self._dwv, + relative_weights=self.cubepar['relative_weights']) return all_wghts def run(self): @@ -1318,15 +1274,14 @@ def run(self): # Compute the relative weights on the spectra self.all_wghts = self.compute_weights() - # Determine the bounds of the WCS - ra_min, ra_max, dec_min, dec_max, wav_min, wav_max = \ - self.wcs_bounds(self.all_ra, self.all_dec, self.all_wave) # Generate the WCS, and the voxel edges cube_wcs, vox_edges, _ = \ datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, self._dwv, - ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, - 
wave_min=wav_min, wave_max=wav_max, reference=self.cubepar['reference_image'], - collapse=False, equinox=2000.0, specname=self.specname) + ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'], + dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'], + wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'], + reference=self.cubepar['reference_image'], collapse=False, equinox=2000.0, + specname=self.specname) sensfunc = None if self.flux_spline is not None: diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index a545beed52..0e868dd41b 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -662,6 +662,61 @@ def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None): return _dspat, _dwv +def wcs_bounds(all_ra, all_dec, all_wave, ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None): + """ + Create a WCS and the expected edges of the voxels, based on user-specified + parameters or the extremities of the data. This is a convenience function + that calls the core function in `pypeit.core.datacube`_. 
+
+    Parameters
+    ----------
+    all_ra : `numpy.ndarray`_
+        1D flattened array containing the RA values of each pixel from all
+        spec2d files
+    all_dec : `numpy.ndarray`_
+        1D flattened array containing the DEC values of each pixel from all
+        spec2d files
+    all_wave : `numpy.ndarray`_
+        1D flattened array containing the wavelength values of each pixel from
+        all spec2d files
+    ra_min : :obj:`float`, optional
+        Minimum RA of the WCS
+    ra_max : :obj:`float`, optional
+        Maximum RA of the WCS
+    dec_min : :obj:`float`, optional
+        Minimum Dec of the WCS
+    dec_max : :obj:`float`, optional
+        Maximum Dec of the WCS
+    wave_min : :obj:`float`, optional
+        Minimum wavelength of the WCS
+    wave_max : :obj:`float`, optional
+        Maximum wavelength of the WCS
+
+    Returns
+    -------
+    _ra_min : :obj:`float`
+        Minimum RA of the WCS
+    _ra_max : :obj:`float`
+        Maximum RA of the WCS
+    _dec_min : :obj:`float`
+        Minimum Dec of the WCS
+    _dec_max : :obj:`float`
+        Maximum Dec of the WCS
+    _wav_min : :obj:`float`
+        Minimum wavelength of the WCS
+    _wav_max : :obj:`float`
+        Maximum wavelength of the WCS
+    """
+    # Setup the cube ranges
+    _ra_min = ra_min if ra_min is not None else np.min(all_ra)
+    _ra_max = ra_max if ra_max is not None else np.max(all_ra)
+    _dec_min = dec_min if dec_min is not None else np.min(all_dec)
+    _dec_max = dec_max if dec_max is not None else np.max(all_dec)
+    _wav_min = wave_min if wave_min is not None else np.min(all_wave)
+    _wav_max = wave_max if wave_max is not None else np.max(all_wave)
+    return _ra_min, _ra_max, _dec_min, _dec_max, _wav_min, _wav_max
+
+
 def create_wcs(all_ra, all_dec, all_wave, dspat, dwave,
                ra_min=None, ra_max=None, dec_min=None, dec_max=None,
                wave_min=None, wave_max=None, reference=None,
                collapse=False, equinox=2000.0, specname="PYP_SPEC"):
@@ -721,13 +776,9 @@ def create_wcs(all_ra, all_dec, all_wave, dspat, dwave,
     cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)
 
     # Setup the cube ranges
-    _ra_min = ra_min if ra_min is not None else np.min(all_ra)
-    _ra_max = ra_max if
ra_max is not None else np.max(all_ra) - _dec_min = dec_min if dec_min is not None else np.min(all_dec) - _dec_max = dec_max if dec_max is not None else np.max(all_dec) - _wav_min = wave_min if wave_min is not None else np.min(all_wave) - _wav_max = wave_max if wave_max is not None else np.max(all_wave) - # dwave = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else dwv + _ra_min, _ra_max, _dec_min, _dec_max, _wav_min, _wav_max = \ + wcs_bounds(all_ra, all_dec, all_wave, ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, + wave_min=wave_min, wave_max=wave_max) # Number of voxels in each dimension numra = int((_ra_max - _ra_min) * cosdec / dspat) From 7eba69e6e10eaa7e515fec57bfc63fb60281363e Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 10:54:10 +0100 Subject: [PATCH 72/81] bpm spatflip --- pypeit/spectrographs/keck_kcwi.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 8bc2c7a764..c0d1116675 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -717,6 +717,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # KCWI --> AMPMODE = 'ALL', 'TBO', 'TUP' # KCRM --> AMPMODE = 'L2U2', 'L2U2L1U1' bc = None + embed() if ampmode == 'ALL': # TODO: There are several bad columns in this mode, but this is typically only used for arcs. # It's the same set of bad columns seen in the TBO and TUP amplifier modes. @@ -748,9 +749,9 @@ def bpm(self, filename, det, shape=None, msbias=None): [1838, 1838, 933, 2055]] elif ampmode == 'L2U2': if binning == '1,1': - bc = [[3458, 3462, 0, 613]] + bc = [[649, 651, 0, 613]] # This accounts for the spatflip - not sure if the 649-651 is too broad though... elif binning == '2,2': - bc = [[1730, 1730, 0, 307]] + bc = [[325, 325, 0, 307]] # This accounts for the spatflip elif ampmode == "L2U2L1U1": pass # Currently unchecked... 
From f1599cfa547840c0ae7c1dfe9b4c5c6a8e724c96 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 10:54:56 +0100 Subject: [PATCH 73/81] bpm spatflip --- pypeit/spectrographs/keck_kcwi.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index c0d1116675..f7163cae78 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -717,7 +717,6 @@ def bpm(self, filename, det, shape=None, msbias=None): # KCWI --> AMPMODE = 'ALL', 'TBO', 'TUP' # KCRM --> AMPMODE = 'L2U2', 'L2U2L1U1' bc = None - embed() if ampmode == 'ALL': # TODO: There are several bad columns in this mode, but this is typically only used for arcs. # It's the same set of bad columns seen in the TBO and TUP amplifier modes. From 4e50f3023e7146045b66f8dd7736282d5230390c Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 11:18:28 +0100 Subject: [PATCH 74/81] update docstring --- pypeit/coadd3d.py | 37 +++++++++++++++++++++++++++++++++---- pypeit/core/datacube.py | 3 +-- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 984d7d5fd8..550a8f7651 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -363,6 +363,7 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs Show QA for debugging. """ + # TODO :: Consider loading all calibrations into a single variable within the main CoAdd3D parent class. self.spec2d = spec2dfiles self.numfiles = len(spec2dfiles) self.par = par @@ -860,9 +861,39 @@ def load(self): This is the main function that loads in the data, and performs several frame-specific corrections. If the user does not wish to align or combine the individual datacubes, then this routine will also produce a spec3d file, which is a DataCube representation of a PypeIt spec2d frame for SlicerIFU data. + + This function should be called in the __init__ method, and initialises multiple variables. 
The variables + initialised by this function include: + + * self.ifu_ra - The RA of the IFU pointing + * self.ifu_dec - The Dec of the IFU pointing + * self.mnmx_wv - The minimum and maximum wavelengths of every slit and frame. + * self._spatscale - The native spatial scales of all spec2d frames. + * self._specscale - The native spectral scales of all spec2d frames. + * self.weights - Weights to use when combining cubes + * self.flat_splines - Spline representations of the blaze function (based on the illumflat). + * self.blaze_spline - Spline representation of the reference blaze function + * self.blaze_wave - Wavelength array used to construct the reference blaze function + * self.blaze_spec - Spectrum used to construct the reference blaze function + + As well as the primary arrays that store the pixel information for multiple spec2d frames, including: + + * self.all_ra + * self.all_dec + * self.all_wave + * self.all_sci + * self.all_ivar + * self.all_idx + * self.all_wghts + * self.all_spatpos + * self.all_specpos + * self.all_spatid + * self.all_tilts + * self.all_slits + * self.all_align + * self.all_dar + * self.all_wcs """ - # Initialise variables - wave_ref = None # Load all spec2d files and prepare the data for making a datacube for ff, fil in enumerate(self.spec2d): # Load it up @@ -879,8 +910,6 @@ def load(self): # Get the exposure time exptime = self.spec.compound_meta([hdr0], 'exptime') - # TODO :: Consider loading all calibrations into a single variable within the main CoAdd3D parent class. 
- # Initialise the slit edges msgs.info("Constructing slit image") slits = spec2DObj.slits diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 0e868dd41b..b8120a878c 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -946,7 +946,7 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white mask_stack = (flux_stack != 0.0) & (ivar_stack != 0.0) # Obtain a wavelength of each pixel wcs_res = whitelightWCS.wcs_pix2world(np.vstack((np.zeros(numwav), np.zeros(numwav), np.arange(numwav))).T, 0) - wcs_scale = (1.0 * whitelightWCS.wcs.cunit[2]).to(units.Angstrom).value # Ensures the WCS is in Angstroms + wcs_scale = (1.0 * whitelightWCS.wcs.cunit[2]).to_value(units.Angstrom) # Ensures the WCS is in Angstroms wave_spec = wcs_scale * wcs_res[:, 2] # Compute the smoothing scale to use if sn_smooth_npix is None: @@ -964,7 +964,6 @@ def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, white return all_wghts - def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, all_spatpos, all_specpos, all_spatid, tilts, slits, astrom_trans, all_dar, bins, all_idx=None, spec_subpixel=10, spat_subpixel=10, combine=False): From 270eaf153617cf6c320702ef1a2b84f10d5503c0 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 11:22:54 +0100 Subject: [PATCH 75/81] wcs fix --- pypeit/coadd3d.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 550a8f7651..7a01a1c926 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -998,13 +998,12 @@ def load(self): onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good # Grab the WCS of this frame - frame_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv) - self.all_wcs.append(copy.deepcopy(frame_wcs)) + self.all_wcs.append(self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv)) # Generate the 
alignment splines, and then retrieve images of the RA and Dec of every pixel, # and the number of spatial pixels in each slit alignSplines = self.get_alignments(spec2DObj, slits, spat_flexure=spat_flexure) - raimg, decimg, minmax = slits.get_radec_image(frame_wcs, alignSplines, spec2DObj.tilts, + raimg, decimg, minmax = slits.get_radec_image(self.all_wcs[ff], alignSplines, spec2DObj.tilts, initial=True, flexure=spat_flexure) # Get copies of arrays to be saved @@ -1069,8 +1068,8 @@ def load(self): # Convert units to Counts/s/Ang/arcsec2 # Slicer sampling * spatial pixel sampling - sl_deg = np.sqrt(frame_wcs.wcs.cd[0, 0] ** 2 + frame_wcs.wcs.cd[1, 0] ** 2) - px_deg = np.sqrt(frame_wcs.wcs.cd[1, 1] ** 2 + frame_wcs.wcs.cd[0, 1] ** 2) + sl_deg = np.sqrt(self.all_wcs[ff].wcs.cd[0, 0] ** 2 + self.all_wcs[ff].wcs.cd[1, 0] ** 2) + px_deg = np.sqrt(self.all_wcs[ff].wcs.cd[1, 1] ** 2 + self.all_wcs[ff].wcs.cd[0, 1] ** 2) scl_units = dwav_sort * (3600.0 * sl_deg) * (3600.0 * px_deg) flux_sort /= scl_units ivar_sort *= scl_units ** 2 @@ -1096,8 +1095,8 @@ def load(self): numwav = int((np.max(waveimg) - wave0) / dwv) bins = self.spec.get_datacube_bins(slitlength, minmax, numwav) # Generate the output WCS for the datacube - tmp_crval_wv = (frame_wcs.wcs.crval[2] * frame_wcs.wcs.cunit[2]).to(units.Angstrom).value - tmp_cd_wv = (frame_wcs.wcs.cd[2,2] * frame_wcs.wcs.cunit[2]).to(units.Angstrom).value + tmp_crval_wv = (self.all_wcs[ff].wcs.crval[2] * self.all_wcs[ff].wcs.cunit[2]).to(units.Angstrom).value + tmp_cd_wv = (self.all_wcs[ff].wcs.cd[2,2] * self.all_wcs[ff].wcs.cunit[2]).to(units.Angstrom).value crval_wv = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else tmp_crval_wv cd_wv = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else tmp_cd_wv output_wcs = self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv) From 75dbe3396ce5c6270979a8767c9d0630d86bf733 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 
2023 11:25:07 +0100 Subject: [PATCH 76/81] rm todo --- pypeit/coadd3d.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 7a01a1c926..fd1fa14bff 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1225,11 +1225,10 @@ def compute_weights(self): # No need to calculate weights if there's just one frame all_wghts = np.ones_like(self.all_sci) else: - # TODO :: Need to decide if this can be moved to core or not... # Find the wavelength range where all frames overlap min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength - np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength - self.cubepar['whitelight_range']) # The user-specified values (if any) + np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength + self.cubepar['whitelight_range']) # The user-specified values (if any) # Get the good white light pixels ww, wavediff = datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl) From d979e198ced33c43c0cde7ed95152aa47fb7cce8 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 11:44:07 +0100 Subject: [PATCH 77/81] weights to core --- pypeit/coadd3d.py | 37 ++++--------- pypeit/core/datacube.py | 113 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 26 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index fd1fa14bff..f60fdf0143 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1225,32 +1225,17 @@ def compute_weights(self): # No need to calculate weights if there's just one frame all_wghts = np.ones_like(self.all_sci) else: - # Find the wavelength range where all frames overlap - min_wl, max_wl = datacube.get_whitelight_range(np.max(self.mnmx_wv[:, :, 0]), # The max blue wavelength - np.min(self.mnmx_wv[:, :, 1]), # The min red wavelength - self.cubepar['whitelight_range']) # The user-specified values (if any) - # Get the good white light pixels - ww, wavediff = 
datacube.get_whitelight_pixels(self.all_wave, min_wl, max_wl) - - # Generate the WCS - image_wcs, voxedge, reference_image = \ - datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, self._dspat, wavediff, - ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'], - dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'], - wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'], - reference=self.cubepar['reference_image'], collapse=True, equinox=2000.0, - specname=self.specname) - - # Generate the white light image (note: hard-coding subpixel=1 in both directions for speed, and combining into a single image) - wl_full = datacube.generate_image_subpixel(image_wcs, self.all_ra, self.all_dec, self.all_wave, - self.all_sci, self.all_ivar, self.all_wghts, - self.all_spatpos, self.all_specpos, self.all_spatid, - self.all_tilts, self.all_slits, self.all_align, self.all_dar, voxedge, - all_idx=self.all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) - # Compute the weights - all_wghts = datacube.compute_weights(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, - self.all_idx, wl_full[:, :, 0], self._dspat, self._dwv, - relative_weights=self.cubepar['relative_weights']) + all_wghts = datacube.compute_weights_frompix(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, + self.all_idx, self._dspat, self._dwv, self.mnmx_wv, self.all_wghts, + self.all_spatpos, self.all_specpos, self.all_spatid, + self.all_tilts, self.all_slits, self.all_align, self.all_dar, + ra_min=self.cubepar['ra_min'], ra_max=self.cubepar['ra_max'], + dec_min=self.cubepar['dec_min'], dec_max=self.cubepar['dec_max'], + wave_min=self.cubepar['wave_min'], wave_max=self.cubepar['wave_max'], + relative_weights=self.cubepar['relative_weights'], + whitelight_range=self.cubepar['whitelight_range'], + reference_image=self.cubepar['reference_image'], + specname=self.specname) return all_wghts def run(self): diff --git 
a/pypeit/core/datacube.py b/pypeit/core/datacube.py index b8120a878c..3360481ee3 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -860,6 +860,119 @@ def generate_WCS(crval, cdelt, equinox=2000.0, name="PYP_SPEC"): return w +def compute_weights_frompix(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, dspat, dwv, mnmx_wv, all_wghts, + all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, all_dar, + ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None, + sn_smooth_npix=None, relative_weights=False, reference_image=None, whitelight_range=None, + specname="PYPSPEC"): + r""" + Calculate wavelength dependent optimal weights. The weighting is currently + based on a relative :math:`(S/N)^2` at each wavelength. Note, this function + first prepares a whitelight image, and then calls compute_weights() to + determine the appropriate weights of each pixel. + + Args: + all_ra (`numpy.ndarray`_): + 1D flattened array containing the RA values of each pixel from all + spec2d files + all_dec (`numpy.ndarray`_): + 1D flattened array containing the DEC values of each pixel from all + spec2d files + all_wave (`numpy.ndarray`_): + 1D flattened array containing the wavelength values of each pixel + from all spec2d files + all_sci (`numpy.ndarray`_): + 1D flattened array containing the counts of each pixel from all + spec2d files + all_ivar (`numpy.ndarray`_): + 1D flattened array containing the inverse variance of each pixel + from all spec2d files + all_idx (`numpy.ndarray`_): + 1D flattened array containing an integer identifier indicating which + spec2d file each pixel originates from. For example, a 0 would + indicate that a pixel originates from the first spec2d frame listed + in the input file. a 1 would indicate that this pixel originates + from the second spec2d file, and so forth. 
+ dspat (float): + The size of each spaxel on the sky (in degrees) + dwv (float): + The size of each wavelength pixel (in Angstroms) + mnmx_wv (`numpy.ndarray`_): + TODO + all_wghts (`numpy.ndarray`_): + 1D flattened array containing the weights of each pixel to be used + in the combination + all_spatpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spatial direction + all_specpos (`numpy.ndarray`_): + 1D flattened array containing the detector pixel location in the + spectral direction + all_spatid (`numpy.ndarray`_): + 1D flattened array containing the spatid of each pixel + all_tilts (`numpy.ndarray`_, list): + 2D wavelength tilts frame, or a list of tilt frames (see all_idx) + all_slits (:class:`~pypeit.slittrace.SlitTraceSet`, list): + Information stored about the slits, or a list of SlitTraceSet (see + all_idx) + all_align (:class:`~pypeit.alignframe.AlignmentSplines`, list): + A Class containing the transformation between detector pixel + coordinates and WCS pixel coordinates, or a list of Alignment + Splines (see all_idx) + all_dar (:class:`~pypeit.coadd3d.DARcorrection`, list): + A Class containing the DAR correction information, or a list of DARcorrection + classes. If a list, it must be the same length as all_align. + ra_min (float, optional): + Minimum RA of the WCS (degrees) + ra_max (float, optional): + Maximum RA of the WCS (degrees) + dec_min (float, optional): + Minimum Dec of the WCS (degrees) + dec_max (float, optional): + Maximum Dec of the WCS (degrees) + wave_min (float, optional): + Minimum wavelength of the WCS (Angstroms) + wave_max (float, optional): + Maximum wavelength of the WCS (Angstroms) + sn_smooth_npix (float, optional): + Number of pixels used for determining smoothly varying S/N ratio + weights. This is currently not required, since a relative weighting + scheme with a polynomial fit is used to calculate the S/N weights.
+ relative_weights (bool, optional): + Calculate weights by fitting to the ratio of spectra? + reference_image (`numpy.ndarray`_): + Reference image to use for the determination of the highest S/N spaxel in the image. + specname (str): + Name of the spectrograph + + Returns: + `numpy.ndarray`_ : a 1D array the same size as all_sci, containing + relative wavelength dependent weights of each input pixel. + """ + # Find the wavelength range where all frames overlap + min_wl, max_wl = get_whitelight_range(np.max(mnmx_wv[:, :, 0]), # The max blue wavelength + np.min(mnmx_wv[:, :, 1]), # The min red wavelength + whitelight_range) # The user-specified values (if any) + # Get the good white light pixels + ww, wavediff = get_whitelight_pixels(all_wave, min_wl, max_wl) + + # Generate the WCS + image_wcs, voxedge, reference_image = \ + create_wcs(all_ra, all_dec, all_wave, dspat, wavediff, + ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, wave_min=wave_min, wave_max=wave_max, + reference=reference_image, collapse=True, equinox=2000.0, + specname=specname) + + # Generate the white light image + # NOTE: hard-coding subpixel=1 in both directions for speed, and combining into a single image + wl_full = generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_wghts, + all_spatpos, all_specpos, all_spatid, all_tilts, all_slits, all_align, all_dar, + voxedge, all_idx=all_idx, spec_subpixel=1, spat_subpixel=1, combine=True) + # Compute the weights + return compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, wl_full[:, :, 0], dspat, dwv, + sn_smooth_npix=sn_smooth_npix, relative_weights=relative_weights) + + def compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx, whitelight_img, dspat, dwv, sn_smooth_npix=None, relative_weights=False): r""" From e330ec29de3e396f7cbb908026466296d4c932e1 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 11:46:05 +0100 Subject: [PATCH 78/81] docstring --- 
pypeit/core/datacube.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 3360481ee3..2af96106d3 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -898,7 +898,8 @@ def compute_weights_frompix(all_ra, all_dec, all_wave, all_sci, all_ivar, all_id dwv (float): The size of each wavelength pixel (in Angstroms) mnmx_wv (`numpy.ndarray`_): - TODO + The minimum and maximum wavelengths of every slit and frame. The shape is (Nframes, Nslits, 2), + The minimum and maximum wavelengths are stored in the [:,:,0] and [:,:,1] indices, respectively. all_wghts (`numpy.ndarray`_): 1D flattened array containing the weights of each pixel to be used in the combination From dfa747ff691b4507029989e84f200f9d956baed5 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 11:49:01 +0100 Subject: [PATCH 79/81] cleanup --- pypeit/coadd3d.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index f60fdf0143..659324dfcb 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1221,11 +1221,8 @@ def compute_weights(self): `numpy.ndarray`_: The individual pixel weights for each detector pixel, and every frame. 
""" # Calculate the relative spectral weights of all pixels - if self.numfiles == 1: - # No need to calculate weights if there's just one frame - all_wghts = np.ones_like(self.all_sci) - else: - all_wghts = datacube.compute_weights_frompix(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, + return np.ones_like(self.all_sci) if self.numfiles == 1 else \ + datacube.compute_weights_frompix(self.all_ra, self.all_dec, self.all_wave, self.all_sci, self.all_ivar, self.all_idx, self._dspat, self._dwv, self.mnmx_wv, self.all_wghts, self.all_spatpos, self.all_specpos, self.all_spatid, self.all_tilts, self.all_slits, self.all_align, self.all_dar, @@ -1236,7 +1233,6 @@ def compute_weights(self): whitelight_range=self.cubepar['whitelight_range'], reference_image=self.cubepar['reference_image'], specname=self.specname) - return all_wghts def run(self): """ From b1d6e1f8f0a26816ed5856ac0c40bc82c3d52235 Mon Sep 17 00:00:00 2001 From: rcooke Date: Mon, 9 Oct 2023 12:19:30 +0100 Subject: [PATCH 80/81] cleanup --- pypeit/coadd3d.py | 3 ++- pypeit/core/datacube.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 659324dfcb..882662306f 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -1207,7 +1207,8 @@ def run_align(self): # Convert pixel shift to degrees shift ra_shift *= self._dspat/cosdec dec_shift *= self._dspat - msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff+1, ra_shift*3600.0, dec_shift*3600.0)) + msgs.info("Spatial shift of cube #{0:d}:".format(ff + 1) + msgs.newline() + + "RA, DEC (arcsec) = {0:+0.3f} E, {1:+0.3f} N".format(ra_shift*3600.0, dec_shift*3600.0)) # Apply the shift new_ra[self.all_idx == ff] += ra_shift new_dec[self.all_idx == ff] += dec_shift diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 2af96106d3..499e14c5fd 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -602,8 +602,8 @@ def 
align_user_offsets(all_ra, all_dec, all_idx, ifu_ra, ifu_dec, ra_offset, dec # Apply the shift all_ra[all_idx == ff] += ref_shift_ra[ff] + ra_offset[ff] / 3600.0 all_dec[all_idx == ff] += ref_shift_dec[ff] + dec_offset[ff] / 3600.0 - msgs.info("Spatial shift of cube #{0:d}:" + msgs.newline() + - "RA, DEC (arcsec) = {1:+0.3f} E, {2:+0.3f} N".format(ff + 1, ra_offset[ff], dec_offset[ff])) + msgs.info("Spatial shift of cube #{0:d}:".format(ff + 1) + msgs.newline() + + "RA, DEC (arcsec) = {0:+0.3f} E, {1:+0.3f} N".format(ra_offset[ff], dec_offset[ff])) return all_ra, all_dec From 3ad7d3c8b00cd8d5e2c3237b2db27993d44ac657 Mon Sep 17 00:00:00 2001 From: rcooke Date: Wed, 11 Oct 2023 15:19:09 +0100 Subject: [PATCH 81/81] cleanup --- pypeit/coadd3d.py | 37 ++++++++++++++++++++++++++++++----- pypeit/core/datacube.py | 2 +- pypeit/par/pypeitpar.py | 2 +- 3 files changed, 34 insertions(+), 7 deletions(-) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 882662306f..58b74265d3 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -61,6 +61,10 @@ class DataCube(datamodel.DataContainer): Parsed meta from the header spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): Build from PYP_SPEC + _ivar (`numpy.ndarray`_): + Inverse variance of the datacube. Set internally; access via self.ivar + _wcs (`astropy.wcs.WCS`_): + WCS of the datacube, built from head0. Set internally; access via self.wcs """ version = '1.2.0' @@ -87,7 +91,9 @@ class DataCube(datamodel.DataContainer): internals = ['head0', 'filename', 'spectrograph', - 'spect_meta' + 'spect_meta', + '_ivar', # This is set internally, and should be accessed with self.ivar + '_wcs' # This is set internally, and should be accessed with self.wcs ] def __init__(self, flux, sig, bpm, wave, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, @@ -97,6 +103,9 @@ def __init__(self, flux, sig, bpm, wave, PYP_SPEC, blaze_wave, blaze_spec, sensf _d = dict([(k, values[k]) for k in args[1:]]) # Setup the DataContainer 
datamodel.DataContainer.__init__(self, d=_d) + # Initialise the internals + self._ivar = None + self._wcs = None def _bundle(self): """ @@ -177,21 +186,39 @@ def from_file(cls, ifile): # Meta self.spectrograph = load_spectrograph(self.PYP_SPEC) self.spect_meta = self.spectrograph.parse_spec_header(hdu[0].header) + self._ivar = None + self._wcs = None return self @property def ivar(self): """ Utility function to compute the inverse variance cube + + Returns + ------- + self._ivar : `numpy.ndarray`_ + The inverse variance of the datacube. Note that self._ivar should + not be accessed directly, and you should only call self.ivar """ - return utils.inverse(self.sig**2) + if self._ivar is None: + self._ivar = utils.inverse(self.sig**2) + return self._ivar @property def wcs(self): """ Utility function to provide the world coordinate system of the datacube + + Returns + ------- + self._wcs : `astropy.wcs.WCS`_ + The WCS based on the stored header information. Note that self._wcs should + not be accessed directly, and you should only call self.wcs """ - return wcs.WCS(self.head0) + if self._wcs is None: + self._wcs = wcs.WCS(self.head0) + return self._wcs class DARcorrection: @@ -637,7 +664,7 @@ def set_default_skysub(self): msgs.newline() + self.cubepar['skysub_frame']) try: spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['skysub_frame'], self.detname) - skysub_exptime = fits.open(self.cubepar['skysub_frame'])[0].header['EXPTIME'] + skysub_exptime = self.spec.get_meta_value([spec2DObj.head0], 'exptime') except: msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame']) else: @@ -700,7 +727,7 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): msgs.info("Loading skysub frame:" + msgs.newline() + opts_skysub) try: spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts_skysub, self.detname) - skysub_exptime = fits.open(opts_skysub)[0].header['EXPTIME'] + skysub_exptime = 
self.spec.get_meta_value([spec2DObj_sky.head0], 'exptime') except: msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts_skysub) skyImg = spec2DObj_sky.sciimg * exptime / skysub_exptime # Sky counts diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 499e14c5fd..a7391b1c50 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -664,7 +664,7 @@ def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None): def wcs_bounds(all_ra, all_dec, all_wave, ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None): """ - Create a WCS and the expected edges of the voxels, based on user-specified + Calculate the bounds of the WCS and the expected edges of the voxels, based on user-specified parameters or the extremities of the data. This is a convenience function that calls the core function in `pypeit.core.datacube`_. diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index 442007ee53..2fa8156edc 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -1593,7 +1593,7 @@ def validate(self): allowed_skysub_options = ["none", "image", ""] # Note, "None" is treated as None which gets assigned to the default value "image". if self.data['skysub_frame'] not in allowed_skysub_options: # Check if the supplied name exists - if not os.path.exists(self.data['method']): + if not os.path.exists(self.data['skysub_frame']): raise ValueError("The 'skysub_frame' must be one of:\n" + ", ".join(allowed_skysub_options) + "\nor, the relative path to a spec2d file.") if len(self.data['whitelight_range']) != 2: